#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

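/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * Returns 1 for a regular file-backed page cache folio or for a lazily
 * freed anonymous folio (e.g. via MADV_FREE), and 0 for a normal
 * anonymous, tmpfs or otherwise swap-backed folio.  Used by the LRU
 * handling code to sort a folio onto the correct LRU list.
 */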
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

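/*
 * __folio_clear_lru_flags - Clear the LRU flags before releasing a folio.
 * @folio: The folio that was on an LRU and now has a zero reference.
 */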
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* This combination should not happen; leave the flags for bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
	__folio_clear_lru_flags(page_folio(page));
}

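/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */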
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}

#ifdef CONFIG_ANON_VMA_NAME
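/*
 * mmap_lock should be read-locked when calling anon_vma_name().  The caller
 * must either keep holding the lock while using the returned pointer or
 * raise the anon_vma_name refcount before releasing the lock.
 */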
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not going through anon_vma_name() here, as it warns when mmap_lock
	 * is not held, which may be the case on this path.
	 */
	if (!vma->vm_file)
		anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
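	/*
	 * The increment is only relevant when there really are pages to
	 * flush, and pages are only flushed after their PTEs have been
	 * modified under the page table lock (PTL).  Releasing the PTL
	 * after set_pte_at() therefore guarantees that a later reader who
	 * takes the same PTL and sees the PTE modification also sees this
	 * increment:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *	spin_lock(&ptl);
	 *	mm_tlb_flush_pending();
	 *	...
	 *	spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * This only orders against the specific PTL (and PTEs) involved;
	 * users who need ordering against other PTEs must provide a
	 * stronger barrier themselves (e.g. smp_mb__after_atomic()).
	 */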
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
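	/*
	 * See inc_tlb_flush_pending().  The decrement marks this flusher as
	 * done; it is issued only after the corresponding TLB invalidation
	 * has been performed.
	 */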
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
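	/*
	 * Must be called after having acquired the PTL for the PTE of
	 * interest; the PTL acquire orders against the modifier's unlock,
	 * so if we can observe the modified PTE we also observe the
	 * increment from inc_tlb_flush_pending().  It therefore only
	 * guarantees to report a flush pending for this particular PTL.
	 */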
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
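	/*
	 * As with mm_tlb_flush_pending(), the caller must have acquired the
	 * PTL covering the PTEs with a pending flush so that both the PTE
	 * modification and the increment are visible.  A value above one
	 * means another flusher is concurrently active.
	 */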
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

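/*
 * If this pte is wr-protected by userfaultfd-wp in any form, arm the special
 * pte marker so the wr-protect information survives the pte being zapped.
 *
 * Must only be called when the pte is already cleared (pte_none()), so
 * nothing valuable can be overwritten, and with the page table lock held.
 */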
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current pte should have been cleared before calling */
	WARN_ON_ONCE(!pte_none(*pte));

	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte.  Note: this also covers an
	 * existing pte marker with the uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

#endif /* LINUX_MM_INLINE_H */