/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

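/*
 * Editorial note (not part of the original header): with
 * CONFIG_DEBUG_PAGE_REF disabled, the guard above is constant false and
 * the stubs below are empty, so the compiler drops every tracing call
 * site and the ref-counting helpers compile down to the bare atomic
 * operations.
 */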
static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(const struct page *page)
{
	return atomic_read(&page->_refcount);
}

/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put().  Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this page in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
	return page_ref_count(&folio->page);
}

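/*
 * Illustrative sketch (not part of the original header): the value read
 * here is only a snapshot unless the caller already holds a reference
 * or has frozen the folio, so it is mainly useful for assertions and
 * heuristics, e.g.:
 *
 *	VM_BUG_ON_FOLIO(folio_ref_count(folio) <= 0, folio);
 */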
static inline int page_count(const struct page *page)
{
	return folio_ref_count(page_folio(page));
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(page_ref_set))
		__page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
	set_page_count(&folio->page, v);
}

/*
 * Set up the page count before the page is freed into the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
	page_ref_add(&folio->page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
	page_ref_sub(&folio->page, nr);
}

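/*
 * Usage note (illustrative, not part of the original header): the
 * add/sub helpers above do not test for zero, so they must never drop
 * what might be the last reference.  A caller batching references could
 * do (nusers is a made-up count here):
 *
 *	folio_ref_add(folio, nusers);
 *	...
 *	folio_ref_sub(folio, nusers - 1);
 *	folio_put(folio);
 *
 * where the final reference is dropped through folio_put() so that the
 * zero transition is detected and the folio can be freed.
 */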
static inline int page_ref_sub_return(struct page *page, int nr)
{
	int ret = atomic_sub_return(nr, &page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -nr, ret);
	return ret;
}

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
	return page_ref_sub_return(&folio->page, nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
	page_ref_inc(&folio->page);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
	page_ref_dec(&folio->page);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
	return page_ref_sub_and_test(&folio->page, nr);
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
	return page_ref_inc_return(&folio->page);
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
	return page_ref_dec_and_test(&folio->page);
}

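/*
 * Illustrative sketch (not part of the original header): the *_and_test
 * variants return true only on the transition to zero, which is what
 * makes the canonical put pattern race-free:
 *
 *	if (folio_ref_dec_and_test(folio))
 *		__folio_put(folio);
 *
 * Exactly one of any set of racing callers observes the zero transition
 * and frees the folio; this mirrors what folio_put() in <linux/mm.h>
 * does.
 */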
static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
	return page_ref_dec_return(&folio->page);
}

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
	bool ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
	return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function.  It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
	return folio_ref_add_unless(folio, 1, 0);
}

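/*
 * Illustrative sketch (not part of the original header): a speculative
 * lookup that found a folio pointer without holding a reference must
 * use the try variant, because "add unless the count is 0" is what
 * keeps it from resurrecting a folio that is already being freed:
 *
 *	if (!folio_try_get(folio))
 *		return NULL;
 */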
static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
{
#ifdef CONFIG_TINY_RCU
	/*
	 * The caller guarantees the folio will not be freed from interrupt
	 * context, so (on !SMP) we only need preemption to be disabled
	 * and TINY_RCU does that for us.
	 */
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
	folio_ref_add(folio, count);
#else
	if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
		/* Either the folio has been freed, or will be freed. */
		return false;
	}
#endif
	return true;
}

/**
 * folio_try_get_rcu - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * This is a version of folio_try_get() optimised for non-SMP kernels.
 * If you are still holding the rcu_read_lock() after looking up the
 * page and know that the page cannot have its refcount decreased to
 * zero in interrupt context, you can use this instead of folio_try_get().
 *
 * Example users include get_user_pages_fast() (as pages are not unmapped
 * from interrupt context) and the page cache lookups (as pages are not
 * truncated from interrupt context).  We also know that pages are not
 * frozen in interrupt context for the purposes of splitting or migration.
 *
 * You can also use this function if you're holding a lock that prevents
 * pages being frozen & removed; e.g. the i_pages lock for the page cache
 * or the mmap_sem or page table lock for page tables.  In this case,
 * it will always succeed, and you could have used a plain folio_get(),
 * but it's sometimes more convenient to have a common function called
 * from both locked and RCU-protected contexts.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get_rcu(struct folio *folio)
{
	return folio_ref_try_add_rcu(folio, 1);
}

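/*
 * Illustrative sketch (not part of the original header) of the
 * RCU-protected lookup pattern described above, roughly as the page
 * cache uses it; xas here stands for an XArray walk over i_pages:
 *
 *	rcu_read_lock();
 *	folio = xas_load(&xas);
 *	if (folio && !folio_try_get_rcu(folio))
 *		folio = NULL;
 *	rcu_read_unlock();
 *
 * On failure the folio was freed (and possibly reused) between the
 * lookup and the refcount bump, so the caller simply retries.
 */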
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
	return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
	page_ref_unfreeze(&folio->page, count);
}
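
/*
 * Illustrative sketch (not part of the original header): freezing
 * succeeds only when the caller holds every reference, i.e. the count
 * matches exactly.  While frozen at 0, speculative folio_try_get()
 * callers fail, so the owner can safely split or migrate the folio:
 *
 *	if (folio_ref_freeze(folio, expected)) {
 *		... modify the folio while no one else can grab it ...
 *		folio_ref_unfreeze(folio, expected);
 *	}
 *
 * "expected" is the caller's known reference count here.  The
 * atomic_set_release() in page_ref_unfreeze() orders the owner's
 * modifications before the new count becomes visible, so the folio is
 * fully consistent by the time lookups can take references again.
 */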
#endif