/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem-backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio, or any other
 * RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
    return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
    return folio_is_file_lru(page_folio(page));
}
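
/*
 * Illustrative note, not part of the original header: the MADV_FREE case
 * mentioned above relies on lazy freeing clearing the swapbacked flag,
 * so, e.g., after
 *
 *      folio_clear_swapbacked(folio);
 *
 * folio_is_file_lru() returns 1 and folio_lru_list() below sorts the
 * folio onto a file LRU list.
 */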

static __always_inline void update_lru_size(struct lruvec *lruvec,
                enum lru_list lru, enum zone_type zid,
                long nr_pages)
{
    struct pglist_data *pgdat = lruvec_pgdat(lruvec);

    __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
    __mod_zone_page_state(&pgdat->node_zones[zid],
                NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
    mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
    VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

    __folio_clear_lru(folio);

    /* this shouldn't happen, so leave the flags to bad_page() */
    if (folio_test_active(folio) && folio_test_unevictable(folio))
        return;

    __folio_clear_active(folio);
    __folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
    __folio_clear_lru_flags(page_folio(page));
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
    enum lru_list lru;

    VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

    if (folio_test_unevictable(folio))
        return LRU_UNEVICTABLE;

    lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
    if (folio_test_active(folio))
        lru += LRU_ACTIVE;

    return lru;
}
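
/*
 * Illustrative summary, not part of the original header: with the logic
 * above, and assuming the usual enum lru_list layout where LRU_ACTIVE is
 * the active/inactive offset, a folio ends up on:
 *
 *      unevictable                     -> LRU_UNEVICTABLE
 *      anon (swapbacked), inactive     -> LRU_INACTIVE_ANON
 *      anon (swapbacked), active       -> LRU_ACTIVE_ANON
 *      file (or lazyfree), inactive    -> LRU_INACTIVE_FILE
 *      file (or lazyfree), active      -> LRU_ACTIVE_FILE
 */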

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
    enum lru_list lru = folio_lru_list(folio);

    update_lru_size(lruvec, lru, folio_zonenum(folio),
            folio_nr_pages(folio));
    if (lru != LRU_UNEVICTABLE)
        list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
                struct lruvec *lruvec)
{
    lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
    enum lru_list lru = folio_lru_list(folio);

    update_lru_size(lruvec, lru, folio_zonenum(folio),
            folio_nr_pages(folio));
    /* This is not expected to be used on LRU_UNEVICTABLE */
    list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
                struct lruvec *lruvec)
{
    lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
    enum lru_list lru = folio_lru_list(folio);

    if (lru != LRU_UNEVICTABLE)
        list_del(&folio->lru);
    update_lru_size(lruvec, lru, folio_zonenum(folio),
            -folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
                struct lruvec *lruvec)
{
    lruvec_del_folio(lruvec, page_folio(page));
}
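
/*
 * Illustrative sketch, not part of the original header: the add/del helpers
 * above expect the folio's LRU flags to match the target list and are used
 * with the lruvec lock held.  A hedged sketch of moving a folio from an
 * inactive to an active list (modelled on what mm/swap.c does) might look
 * like:
 *
 *      lruvec_del_folio(lruvec, folio);
 *      folio_set_active(folio);
 *      lruvec_add_folio(lruvec, folio);
 *
 * so that folio_lru_list() picks the new list and update_lru_size() keeps
 * the per-list counters balanced.
 */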

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer, or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
    if (anon_name)
        kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
    if (anon_name)
        kref_put(&anon_name->kref, anon_vma_name_free);
}
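
/*
 * Illustrative sketch, not part of the original header: one way to follow
 * the locking rule documented above (everything other than the helpers
 * declared here is only assumed call-site context) is:
 *
 *      mmap_read_lock(mm);
 *      anon_name = anon_vma_name(vma);
 *      anon_vma_name_get(anon_name);
 *      mmap_read_unlock(mm);
 *      ...use anon_name->name safely past the unlock...
 *      anon_vma_name_put(anon_name);
 */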

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
    /* Prevent anon_name refcount saturation early on */
    if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
        anon_vma_name_get(anon_name);
        return anon_name;
    }
    return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                     struct vm_area_struct *new_vma)
{
    struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

    if (anon_name)
        new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
    /*
     * Not using anon_vma_name because it generates a warning if mmap_lock
     * is not held, which might be the case here.
     */
    if (!vma->vm_file)
        anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                    struct anon_vma_name *anon_name2)
{
    if (anon_name1 == anon_name2)
        return true;

    return anon_name1 && anon_name2 &&
        !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
    return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
    return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                    struct anon_vma_name *anon_name2)
{
    return true;
}

#endif  /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
    atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
    atomic_inc(&mm->tlb_flush_pending);
    /*
     * The only time this value is relevant is when there are indeed pages
     * to flush. And we'll only flush pages after changing them, which
     * requires the PTL.
     *
     * So the ordering here is:
     *
     *  atomic_inc(&mm->tlb_flush_pending);
     *  spin_lock(&ptl);
     *  ...
     *  set_pte_at();
     *  spin_unlock(&ptl);
     *
     *              spin_lock(&ptl)
     *              mm_tlb_flush_pending();
     *              ....
     *              spin_unlock(&ptl);
     *
     *  flush_tlb_range();
     *  atomic_dec(&mm->tlb_flush_pending);
     *
     * Where the increment is constrained by the PTL unlock, it thus
     * ensures that the increment is visible if the PTE modification is
     * visible. After all, if there is no PTE modification, nobody cares
     * about TLB flushes either.
     *
     * This very much relies on users (mm_tlb_flush_pending() and
     * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
     * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
     * locks (PPC) the unlock of one doesn't order against the lock of
     * another PTL.
     *
     * The decrement is ordered by the flush_tlb_range(), such that
     * mm_tlb_flush_pending() will not return false unless all flushes have
     * completed.
     */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
    /*
     * See inc_tlb_flush_pending().
     *
     * This cannot be smp_mb__before_atomic() because smp_mb() simply does
     * not order against TLB invalidate completion, which is what we need.
     *
     * Therefore we must rely on tlb_flush_*() to guarantee order.
     */
    atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
    /*
     * Must be called after having acquired the PTL; orders against that
     * PTL's release and therefore ensures that if we observe the modified
     * PTE we must also observe the increment from inc_tlb_flush_pending().
     *
     * That is, it only guarantees to return true if there is a flush
     * pending for _this_ PTL.
     */
    return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
    /*
     * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
     * for which there is a TLB flush pending in order to guarantee
     * we've seen both that PTE modification and the increment.
     *
     * (no requirement on actually still holding the PTL, that is irrelevant)
     */
    return atomic_read(&mm->tlb_flush_pending) > 1;
}
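
/*
 * Illustrative sketch, not part of the original header: a hedged example of
 * the reader side described in inc_tlb_flush_pending(), where a concurrent
 * unmap or protection change may still have a TLB flush outstanding (vma,
 * start and end below are only placeholders for the caller's context):
 *
 *      spin_lock(ptl);
 *      ...inspect or modify the PTE...
 *      if (mm_tlb_flush_pending(mm))
 *              flush_tlb_range(vma, start, end);
 *      spin_unlock(ptl);
 */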

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte.  NOTE!  This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable.  Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed.  E.g., when the pte was cleared, the caller should have taken care
 * of the tlb flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
                  pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
    bool arm_uffd_pte = false;

    /* The current status of the pte should be "cleared" before calling */
    WARN_ON_ONCE(!pte_none(*pte));

    if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
        return;

    /* A uffd-wp wr-protected normal pte */
    if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
        arm_uffd_pte = true;

    /*
     * A uffd-wp wr-protected swap pte.  Note: this should even cover an
     * existing pte marker with uffd-wp bit set.
     */
    if (unlikely(pte_swp_uffd_wp_any(pteval)))
        arm_uffd_pte = true;

    if (unlikely(arm_uffd_pte))
        set_pte_at(vma->vm_mm, addr, pte,
               make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
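
/*
 * Illustrative sketch, not part of the original header: a hedged example of
 * a caller that clears a pte under the pgtable lock and then re-arms the
 * uffd-wp marker, following the rules documented above (the surrounding
 * teardown logic is only assumed context):
 *
 *      spin_lock(ptl);
 *      pteval = ptep_get_and_clear(mm, addr, pte);
 *      ...flush or batch the TLB for the cleared pte as appropriate...
 *      pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 *      spin_unlock(ptl);
 */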

#endif