0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_MM_H
0003 #define _LINUX_MM_H
0004
0005 #include <linux/errno.h>
0006 #include <linux/mmdebug.h>
0007 #include <linux/gfp.h>
0008 #include <linux/bug.h>
0009 #include <linux/list.h>
0010 #include <linux/mmzone.h>
0011 #include <linux/rbtree.h>
0012 #include <linux/atomic.h>
0013 #include <linux/debug_locks.h>
0014 #include <linux/mm_types.h>
0015 #include <linux/mmap_lock.h>
0016 #include <linux/range.h>
0017 #include <linux/pfn.h>
0018 #include <linux/percpu-refcount.h>
0019 #include <linux/bit_spinlock.h>
0020 #include <linux/shrinker.h>
0021 #include <linux/resource.h>
0022 #include <linux/page_ext.h>
0023 #include <linux/err.h>
0024 #include <linux/page-flags.h>
0025 #include <linux/page_ref.h>
0026 #include <linux/overflow.h>
0027 #include <linux/sizes.h>
0028 #include <linux/sched.h>
0029 #include <linux/pgtable.h>
0030 #include <linux/kasan.h>
0031 #include <linux/memremap.h>
0032
0033 struct mempolicy;
0034 struct anon_vma;
0035 struct anon_vma_chain;
0036 struct user_struct;
0037 struct pt_regs;
0038
0039 extern int sysctl_page_lock_unfairness;
0040
0041 void init_mm_internals(void);
0042
0043 #ifndef CONFIG_NUMA
0044 extern unsigned long max_mapnr;
0045
0046 static inline void set_max_mapnr(unsigned long limit)
0047 {
0048 max_mapnr = limit;
0049 }
0050 #else
0051 static inline void set_max_mapnr(unsigned long limit) { }
0052 #endif
0053
0054 extern atomic_long_t _totalram_pages;
0055 static inline unsigned long totalram_pages(void)
0056 {
0057 return (unsigned long)atomic_long_read(&_totalram_pages);
0058 }
0059
0060 static inline void totalram_pages_inc(void)
0061 {
0062 atomic_long_inc(&_totalram_pages);
0063 }
0064
0065 static inline void totalram_pages_dec(void)
0066 {
0067 atomic_long_dec(&_totalram_pages);
0068 }
0069
0070 static inline void totalram_pages_add(long count)
0071 {
0072 atomic_long_add(count, &_totalram_pages);
0073 }
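/*
 * Illustrative sketch, not part of the original header: code that scales a
 * data structure with system memory goes through the accessor rather than
 * touching _totalram_pages directly, e.g.
 *
 *	unsigned long budget = totalram_pages() / 64;
 *
 * where "budget" is a hypothetical sizing variable, not a kernel symbol.
 */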
0074
0075 extern void * high_memory;
0076 extern int page_cluster;
0077
0078 #ifdef CONFIG_SYSCTL
0079 extern int sysctl_legacy_va_layout;
0080 #else
0081 #define sysctl_legacy_va_layout 0
0082 #endif
0083
0084 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
0085 extern const int mmap_rnd_bits_min;
0086 extern const int mmap_rnd_bits_max;
0087 extern int mmap_rnd_bits __read_mostly;
0088 #endif
0089 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
0090 extern const int mmap_rnd_compat_bits_min;
0091 extern const int mmap_rnd_compat_bits_max;
0092 extern int mmap_rnd_compat_bits __read_mostly;
0093 #endif
0094
0095 #include <asm/page.h>
0096 #include <asm/processor.h>
0097
0098
0099 /*
0100  * Architectures that support memory tagging redefine untagged_addr() to
0101  * strip the tag bits from a user pointer before it is used as an address.
0102  * On everything else it is a no-op, so generic code can call it
0103  * unconditionally.
0104  */
0105 #ifndef untagged_addr
0106 #define untagged_addr(addr) (addr)
0107 #endif
0108
0109 #ifndef __pa_symbol
0110 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
0111 #endif
0112
0113 #ifndef page_to_virt
0114 #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
0115 #endif
0116
0117 #ifndef lm_alias
0118 #define lm_alias(x) __va(__pa_symbol(x))
0119 #endif
0120
0121
0122
0123
0124
0125
0126
0127
0128 #ifndef mm_forbids_zeropage
0129 #define mm_forbids_zeropage(X) (0)
0130 #endif
0131
0132 /*
0133  * On some architectures it is expensive to call memset() for small sizes.
0134  * An architecture that can zero a struct page more cheaply may provide its
0135  * own mm_zero_struct_page() from its asm headers; the definitions below are
0136  * only the generic fallbacks.
0137  */
0138 #if BITS_PER_LONG == 64
0139 /*
0140  * struct page must be a multiple of 8 bytes and between 56 and 80 bytes on
0141  * 64-bit builds (enforced by the BUILD_BUG_ONs below).  The switch lets the
0142  * compiler turn the zeroing into a short run of stores instead of a
0143  * memset() call, falling through from the largest size to the smallest.
0144  */
0145 #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
0146 static inline void __mm_zero_struct_page(struct page *page)
0147 {
0148 unsigned long *_pp = (void *)page;
0149
0150
0151 BUILD_BUG_ON(sizeof(struct page) & 7);
0152 BUILD_BUG_ON(sizeof(struct page) < 56);
0153 BUILD_BUG_ON(sizeof(struct page) > 80);
0154
0155 switch (sizeof(struct page)) {
0156 case 80:
0157 _pp[9] = 0;
0158 fallthrough;
0159 case 72:
0160 _pp[8] = 0;
0161 fallthrough;
0162 case 64:
0163 _pp[7] = 0;
0164 fallthrough;
0165 case 56:
0166 _pp[6] = 0;
0167 _pp[5] = 0;
0168 _pp[4] = 0;
0169 _pp[3] = 0;
0170 _pp[2] = 0;
0171 _pp[1] = 0;
0172 _pp[0] = 0;
0173 }
0174 }
0175 #else
0176 #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
0177 #endif
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195 #define MAPCOUNT_ELF_CORE_MARGIN (5)
0196 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
0197
0198 extern int sysctl_max_map_count;
0199
0200 extern unsigned long sysctl_user_reserve_kbytes;
0201 extern unsigned long sysctl_admin_reserve_kbytes;
0202
0203 extern int sysctl_overcommit_memory;
0204 extern int sysctl_overcommit_ratio;
0205 extern unsigned long sysctl_overcommit_kbytes;
0206
0207 int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
0208 loff_t *);
0209 int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
0210 loff_t *);
0211 int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
0212 loff_t *);
0213
0214 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
0215 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
0216 #define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio))
0217 #else
0218 #define nth_page(page,n) ((page) + (n))
0219 #define folio_page_idx(folio, p) ((p) - &(folio)->page)
0220 #endif
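/*
 * Illustrative sketch, not part of the original header: with SPARSEMEM and no
 * vmemmap, the struct pages of one compound allocation need not be virtually
 * contiguous, so code must step through them with nth_page() rather than
 * plain pointer arithmetic, e.g.
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < compound_nr(head); i++)
 *		clear_highpage(nth_page(head, i));
 *
 * "head" is assumed to be the head page of a compound page; compound_nr()
 * (defined later in this header) and clear_highpage() are existing helpers.
 */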
0221
0222
0223 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
0224
0225
0226 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
0227
0228
0229 #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
0230
0231 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
0232 static inline struct folio *lru_to_folio(struct list_head *head)
0233 {
0234 return list_entry((head)->prev, struct folio, lru);
0235 }
0236
0237 void setup_initial_init_mm(void *start_code, void *end_code,
0238 void *end_data, void *brk);
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
0250 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
0251 void vm_area_free(struct vm_area_struct *);
0252
0253 #ifndef CONFIG_MMU
0254 extern struct rb_root nommu_region_tree;
0255 extern struct rw_semaphore nommu_region_sem;
0256
0257 extern unsigned int kobjsize(const void *objp);
0258 #endif
0259
0260
0261
0262
0263
0264 #define VM_NONE 0x00000000
0265
0266 #define VM_READ 0x00000001
0267 #define VM_WRITE 0x00000002
0268 #define VM_EXEC 0x00000004
0269 #define VM_SHARED 0x00000008
0270
0271
0272 #define VM_MAYREAD 0x00000010
0273 #define VM_MAYWRITE 0x00000020
0274 #define VM_MAYEXEC 0x00000040
0275 #define VM_MAYSHARE 0x00000080
0276
0277 #define VM_GROWSDOWN 0x00000100
0278 #define VM_UFFD_MISSING 0x00000200
0279 #define VM_PFNMAP 0x00000400
0280 #define VM_UFFD_WP 0x00001000
0281
0282 #define VM_LOCKED 0x00002000
0283 #define VM_IO 0x00004000
0284
0285
0286 #define VM_SEQ_READ 0x00008000
0287 #define VM_RAND_READ 0x00010000
0288
0289 #define VM_DONTCOPY 0x00020000
0290 #define VM_DONTEXPAND 0x00040000
0291 #define VM_LOCKONFAULT 0x00080000
0292 #define VM_ACCOUNT 0x00100000
0293 #define VM_NORESERVE 0x00200000
0294 #define VM_HUGETLB 0x00400000
0295 #define VM_SYNC 0x00800000
0296 #define VM_ARCH_1 0x01000000
0297 #define VM_WIPEONFORK 0x02000000
0298 #define VM_DONTDUMP 0x04000000
0299
0300 #ifdef CONFIG_MEM_SOFT_DIRTY
0301 # define VM_SOFTDIRTY 0x08000000
0302 #else
0303 # define VM_SOFTDIRTY 0
0304 #endif
0305
0306 #define VM_MIXEDMAP 0x10000000
0307 #define VM_HUGEPAGE 0x20000000
0308 #define VM_NOHUGEPAGE 0x40000000
0309 #define VM_MERGEABLE 0x80000000
0310
0311 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
0312 #define VM_HIGH_ARCH_BIT_0 32
0313 #define VM_HIGH_ARCH_BIT_1 33
0314 #define VM_HIGH_ARCH_BIT_2 34
0315 #define VM_HIGH_ARCH_BIT_3 35
0316 #define VM_HIGH_ARCH_BIT_4 36
0317 #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
0318 #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
0319 #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
0320 #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
0321 #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
0322 #endif
0323
0324 #ifdef CONFIG_ARCH_HAS_PKEYS
0325 # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
0326 # define VM_PKEY_BIT0 VM_HIGH_ARCH_0
0327 # define VM_PKEY_BIT1 VM_HIGH_ARCH_1
0328 # define VM_PKEY_BIT2 VM_HIGH_ARCH_2
0329 # define VM_PKEY_BIT3 VM_HIGH_ARCH_3
0330 #ifdef CONFIG_PPC
0331 # define VM_PKEY_BIT4 VM_HIGH_ARCH_4
0332 #else
0333 # define VM_PKEY_BIT4 0
0334 #endif
0335 #endif
0336
0337 #if defined(CONFIG_X86)
0338 # define VM_PAT VM_ARCH_1
0339 #elif defined(CONFIG_PPC)
0340 # define VM_SAO VM_ARCH_1
0341 #elif defined(CONFIG_PARISC)
0342 # define VM_GROWSUP VM_ARCH_1
0343 #elif defined(CONFIG_IA64)
0344 # define VM_GROWSUP VM_ARCH_1
0345 #elif defined(CONFIG_SPARC64)
0346 # define VM_SPARC_ADI VM_ARCH_1
0347 # define VM_ARCH_CLEAR VM_SPARC_ADI
0348 #elif defined(CONFIG_ARM64)
0349 # define VM_ARM64_BTI VM_ARCH_1
0350 # define VM_ARCH_CLEAR VM_ARM64_BTI
0351 #elif !defined(CONFIG_MMU)
0352 # define VM_MAPPED_COPY VM_ARCH_1
0353 #endif
0354
0355 #if defined(CONFIG_ARM64_MTE)
0356 # define VM_MTE VM_HIGH_ARCH_0
0357 # define VM_MTE_ALLOWED VM_HIGH_ARCH_1
0358 #else
0359 # define VM_MTE VM_NONE
0360 # define VM_MTE_ALLOWED VM_NONE
0361 #endif
0362
0363 #ifndef VM_GROWSUP
0364 # define VM_GROWSUP VM_NONE
0365 #endif
0366
0367 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
0368 # define VM_UFFD_MINOR_BIT 37
0369 # define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT)
0370 #else
0371 # define VM_UFFD_MINOR VM_NONE
0372 #endif
0373
0374
0375 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
0376
0377 #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
0378
0379
0380 #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
0381 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
0382 #define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
0383 VM_MAYWRITE | VM_MAYEXEC)
0384 #define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
0385 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
0386
0387 #ifndef VM_DATA_DEFAULT_FLAGS
0388 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
0389 #endif
0390
0391 #ifndef VM_STACK_DEFAULT_FLAGS
0392 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
0393 #endif
0394
0395 #ifdef CONFIG_STACK_GROWSUP
0396 #define VM_STACK VM_GROWSUP
0397 #else
0398 #define VM_STACK VM_GROWSDOWN
0399 #endif
0400
0401 #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
0402
0403
0404 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
0405
0406
0407
0408
0409
0410 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
0411
0412
0413 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
0414
0415
0416 #define VM_INIT_DEF_MASK VM_NOHUGEPAGE
0417
0418
0419 #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
0420
0421
0422 #ifndef VM_ARCH_CLEAR
0423 # define VM_ARCH_CLEAR VM_NONE
0424 #endif
0425 #define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
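/*
 * Illustrative sketch, not part of the original header: a typical driver
 * ->mmap() handler checks and adjusts the VM_* bits above before installing
 * a raw PFN mapping, e.g.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (!(vma->vm_flags & VM_SHARED))
 *			return -EINVAL;
 *		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * mydrv_mmap() and "pfn" are made up for the example; remap_pfn_range()
 * already sets VM_IO, VM_PFNMAP, VM_DONTEXPAND and VM_DONTDUMP itself, the
 * explicit assignment just makes the intent visible.
 */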
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436 #define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \
0437 FAULT_FLAG_KILLABLE | \
0438 FAULT_FLAG_INTERRUPTIBLE)
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453 static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
0454 {
0455 return (flags & FAULT_FLAG_ALLOW_RETRY) &&
0456 (!(flags & FAULT_FLAG_TRIED));
0457 }
0458
0459 #define FAULT_FLAG_TRACE \
0460 { FAULT_FLAG_WRITE, "WRITE" }, \
0461 { FAULT_FLAG_MKWRITE, "MKWRITE" }, \
0462 { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
0463 { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
0464 { FAULT_FLAG_KILLABLE, "KILLABLE" }, \
0465 { FAULT_FLAG_TRIED, "TRIED" }, \
0466 { FAULT_FLAG_USER, "USER" }, \
0467 { FAULT_FLAG_REMOTE, "REMOTE" }, \
0468 { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
0469 { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481 struct vm_fault {
0482 const struct {
0483 struct vm_area_struct *vma;
0484 gfp_t gfp_mask;
0485 pgoff_t pgoff;
0486 unsigned long address;
0487 unsigned long real_address;
0488 };
0489 enum fault_flag flags;
0490
0491 pmd_t *pmd;
0492
0493 pud_t *pud;
0494
0495
0496 union {
0497 pte_t orig_pte;
0498 pmd_t orig_pmd;
0499
0500
0501 };
0502
0503 struct page *cow_page;
0504 struct page *page;
0505
0506
0507
0508
0509
0510 pte_t *pte;
0511
0512
0513
0514 spinlock_t *ptl;
0515
0516
0517
0518 pgtable_t prealloc_pte;
0519
0520
0521
0522
0523
0524
0525 };
0526
0527
0528 enum page_entry_size {
0529 PE_SIZE_PTE = 0,
0530 PE_SIZE_PMD,
0531 PE_SIZE_PUD,
0532 };
0533
0534
0535
0536
0537
0538
0539 struct vm_operations_struct {
0540 void (*open)(struct vm_area_struct * area);
0541
0542
0543
0544
0545 void (*close)(struct vm_area_struct * area);
0546
0547 int (*may_split)(struct vm_area_struct *area, unsigned long addr);
0548 int (*mremap)(struct vm_area_struct *area);
0549
0550
0551
0552
0553
0554 int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
0555 unsigned long end, unsigned long newflags);
0556 vm_fault_t (*fault)(struct vm_fault *vmf);
0557 vm_fault_t (*huge_fault)(struct vm_fault *vmf,
0558 enum page_entry_size pe_size);
0559 vm_fault_t (*map_pages)(struct vm_fault *vmf,
0560 pgoff_t start_pgoff, pgoff_t end_pgoff);
0561 unsigned long (*pagesize)(struct vm_area_struct * area);
0562
0563
0564
0565 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
0566
0567
0568 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
0569
0570
0571
0572
0573
0574 int (*access)(struct vm_area_struct *vma, unsigned long addr,
0575 void *buf, int len, int write);
0576
0577
0578
0579
0580 const char *(*name)(struct vm_area_struct *vma);
0581
0582 #ifdef CONFIG_NUMA
0583
0584
0585
0586
0587
0588
0589
0590 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
0603 unsigned long addr);
0604 #endif
0605
0606
0607
0608
0609
0610 struct page *(*find_special_page)(struct vm_area_struct *vma,
0611 unsigned long addr);
0612 };
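/*
 * Illustrative sketch, not part of the original header: the smallest useful
 * vm_operations_struct provides just ->fault, which resolves a page for the
 * faulting offset carried in struct vm_fault, e.g.
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		struct mydrv *drv = vmf->vma->vm_private_data;
 *
 *		if (vmf->pgoff >= drv->nr_pages)
 *			return VM_FAULT_SIGBUS;
 *		vmf->page = drv->pages[vmf->pgoff];
 *		get_page(vmf->page);
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct mydrv_vm_ops = {
 *		.fault = mydrv_fault,
 *	};
 *
 * "struct mydrv", its fields and mydrv_fault() are hypothetical; returning 0
 * with vmf->page set and a reference held tells the core fault handler to
 * map that page at the faulting address.
 */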
0613
0614 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
0615 {
0616 static const struct vm_operations_struct dummy_vm_ops = {};
0617
0618 memset(vma, 0, sizeof(*vma));
0619 vma->vm_mm = mm;
0620 vma->vm_ops = &dummy_vm_ops;
0621 INIT_LIST_HEAD(&vma->anon_vma_chain);
0622 }
0623
0624 static inline void vma_set_anonymous(struct vm_area_struct *vma)
0625 {
0626 vma->vm_ops = NULL;
0627 }
0628
0629 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
0630 {
0631 return !vma->vm_ops;
0632 }
0633
0634 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
0635 {
0636 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
0637
0638 if (!maybe_stack)
0639 return false;
0640
0641 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
0642 VM_STACK_INCOMPLETE_SETUP)
0643 return true;
0644
0645 return false;
0646 }
0647
0648 static inline bool vma_is_foreign(struct vm_area_struct *vma)
0649 {
0650 if (!current->mm)
0651 return true;
0652
0653 if (current->mm != vma->vm_mm)
0654 return true;
0655
0656 return false;
0657 }
0658
0659 static inline bool vma_is_accessible(struct vm_area_struct *vma)
0660 {
0661 return vma->vm_flags & VM_ACCESS_FLAGS;
0662 }
0663
0664 #ifdef CONFIG_SHMEM
0665
0666
0667
0668
0669 bool vma_is_shmem(struct vm_area_struct *vma);
0670 #else
0671 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
0672 #endif
0673
0674 int vma_is_stack_for_current(struct vm_area_struct *vma);
0675
0676
0677 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
0678
0679 struct mmu_gather;
0680 struct inode;
0681
0682 static inline unsigned int compound_order(struct page *page)
0683 {
0684 if (!PageHead(page))
0685 return 0;
0686 return page[1].compound_order;
0687 }
0688
0689 /**
0690  * folio_order - The allocation order of a folio.
0691  * @folio: The folio.
0692  *
0693  * A folio is composed of 2^order pages.  See get_order() for the definition
0694  * of order.
0695  *
0696  * Return: The order of the folio.
0697  */
0698 static inline unsigned int folio_order(struct folio *folio)
0699 {
0700 return compound_order(&folio->page);
0701 }
0702
0703 #include <linux/huge_mm.h>
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721 static inline int put_page_testzero(struct page *page)
0722 {
0723 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
0724 return page_ref_dec_and_test(page);
0725 }
0726
0727 static inline int folio_put_testzero(struct folio *folio)
0728 {
0729 return put_page_testzero(&folio->page);
0730 }
0731
0732 /*
0733  * Try to grab a reference unless the page already has a refcount of zero;
0734  * returns false in that case.  This is safe to use on pages found via
0735  * speculative (e.g. RCU-protected) lookups, where the page may be freed
0736  * and reused at any time.
0737  */
0738 static inline bool get_page_unless_zero(struct page *page)
0739 {
0740 return page_ref_add_unless(page, 1, 0);
0741 }
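/*
 * Illustrative sketch, not part of the original header: the usual caller is a
 * lockless lookup that may race with the page being freed, so the reference
 * is taken conditionally and the result re-validated, roughly:
 *
 *	rcu_read_lock();
 *	page = lookup_page_lockless(...);
 *	if (page && !get_page_unless_zero(page))
 *		page = NULL;
 *	rcu_read_unlock();
 *
 * "lookup_page_lockless()" stands in for whatever RCU-safe lookup the caller
 * uses; the real page cache works on folios and uses folio_try_get() instead.
 */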
0742
0743 extern int page_is_ram(unsigned long pfn);
0744
0745 enum {
0746 REGION_INTERSECTS,
0747 REGION_DISJOINT,
0748 REGION_MIXED,
0749 };
0750
0751 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
0752 unsigned long desc);
0753
0754
0755 struct page *vmalloc_to_page(const void *addr);
0756 unsigned long vmalloc_to_pfn(const void *addr);
0757
0758
0759
0760
0761
0762
0763
0764
0765 #ifndef is_ioremap_addr
0766 #define is_ioremap_addr(x) is_vmalloc_addr(x)
0767 #endif
0768
0769 #ifdef CONFIG_MMU
0770 extern bool is_vmalloc_addr(const void *x);
0771 extern int is_vmalloc_or_module_addr(const void *x);
0772 #else
0773 static inline bool is_vmalloc_addr(const void *x)
0774 {
0775 return false;
0776 }
0777 static inline int is_vmalloc_or_module_addr(const void *x)
0778 {
0779 return 0;
0780 }
0781 #endif
0782
0783
0784
0785
0786
0787
0788
0789 static inline int folio_entire_mapcount(struct folio *folio)
0790 {
0791 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
0792 return atomic_read(folio_mapcount_ptr(folio)) + 1;
0793 }
0794
0795
0796
0797
0798
0799
0800 static inline int compound_mapcount(struct page *page)
0801 {
0802 return folio_entire_mapcount(page_folio(page));
0803 }
0804
0805
0806
0807
0808
0809
0810 static inline void page_mapcount_reset(struct page *page)
0811 {
0812 atomic_set(&(page)->_mapcount, -1);
0813 }
0814
0815 int __page_mapcount(struct page *page);
0816
0817 /*
0818  * page_mapcount() returns the number of times this precise page is mapped
0819  * into userspace.  For a page belonging to a compound page this includes
0820  * mappings of the compound page as a whole, which is why the slow path in
0821  * __page_mapcount() is taken for compound pages.  The result is undefined
0822  * for pages which cannot be mapped into userspace (e.g. slab pages); they
0823  * use the _mapcount field in struct page for other purposes.
0824  */
0825 static inline int page_mapcount(struct page *page)
0826 {
0827 if (unlikely(PageCompound(page)))
0828 return __page_mapcount(page);
0829 return atomic_read(&page->_mapcount) + 1;
0830 }
0831
0832 int folio_mapcount(struct folio *folio);
0833
0834 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0835 static inline int total_mapcount(struct page *page)
0836 {
0837 return folio_mapcount(page_folio(page));
0838 }
0839
0840 #else
0841 static inline int total_mapcount(struct page *page)
0842 {
0843 return page_mapcount(page);
0844 }
0845 #endif
0846
0847 static inline struct page *virt_to_head_page(const void *x)
0848 {
0849 struct page *page = virt_to_page(x);
0850
0851 return compound_head(page);
0852 }
0853
0854 static inline struct folio *virt_to_folio(const void *x)
0855 {
0856 struct page *page = virt_to_page(x);
0857
0858 return page_folio(page);
0859 }
0860
0861 void __folio_put(struct folio *folio);
0862
0863 void put_pages_list(struct list_head *pages);
0864
0865 void split_page(struct page *page, unsigned int order);
0866 void folio_copy(struct folio *dst, struct folio *src);
0867
0868 unsigned long nr_free_buffer_pages(void);
0869
0870
0871
0872
0873
0874
0875 typedef void compound_page_dtor(struct page *);
0876
0877
0878 enum compound_dtor_id {
0879 NULL_COMPOUND_DTOR,
0880 COMPOUND_PAGE_DTOR,
0881 #ifdef CONFIG_HUGETLB_PAGE
0882 HUGETLB_PAGE_DTOR,
0883 #endif
0884 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0885 TRANSHUGE_PAGE_DTOR,
0886 #endif
0887 NR_COMPOUND_DTORS,
0888 };
0889 extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
0890
0891 static inline void set_compound_page_dtor(struct page *page,
0892 enum compound_dtor_id compound_dtor)
0893 {
0894 VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
0895 page[1].compound_dtor = compound_dtor;
0896 }
0897
0898 void destroy_large_folio(struct folio *folio);
0899
0900 static inline int head_compound_pincount(struct page *head)
0901 {
0902 return atomic_read(compound_pincount_ptr(head));
0903 }
0904
0905 static inline void set_compound_order(struct page *page, unsigned int order)
0906 {
0907 page[1].compound_order = order;
0908 #ifdef CONFIG_64BIT
0909 page[1].compound_nr = 1U << order;
0910 #endif
0911 }
0912
0913
0914 static inline unsigned long compound_nr(struct page *page)
0915 {
0916 if (!PageHead(page))
0917 return 1;
0918 #ifdef CONFIG_64BIT
0919 return page[1].compound_nr;
0920 #else
0921 return 1UL << compound_order(page);
0922 #endif
0923 }
0924
0925
0926 static inline unsigned long page_size(struct page *page)
0927 {
0928 return PAGE_SIZE << compound_order(page);
0929 }
0930
0931
0932 static inline unsigned int page_shift(struct page *page)
0933 {
0934 return PAGE_SHIFT + compound_order(page);
0935 }
0936
0937
0938
0939
0940
0941 static inline unsigned int thp_order(struct page *page)
0942 {
0943 VM_BUG_ON_PGFLAGS(PageTail(page), page);
0944 return compound_order(page);
0945 }
0946
0947
0948
0949
0950
0951 static inline int thp_nr_pages(struct page *page)
0952 {
0953 VM_BUG_ON_PGFLAGS(PageTail(page), page);
0954 return compound_nr(page);
0955 }
0956
0957
0958
0959
0960
0961
0962
0963 static inline unsigned long thp_size(struct page *page)
0964 {
0965 return PAGE_SIZE << thp_order(page);
0966 }
0967
0968 void free_compound_page(struct page *page);
0969
0970 #ifdef CONFIG_MMU
0971
0972
0973
0974
0975
0976
0977 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
0978 {
0979 if (likely(vma->vm_flags & VM_WRITE))
0980 pte = pte_mkwrite(pte);
0981 return pte;
0982 }
0983
0984 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
0985 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
0986
0987 vm_fault_t finish_fault(struct vm_fault *vmf);
0988 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
0989 #endif
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
1052 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
1053
1054 bool __put_devmap_managed_page_refs(struct page *page, int refs);
1055 static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
1056 {
1057 if (!static_branch_unlikely(&devmap_managed_key))
1058 return false;
1059 if (!is_zone_device_page(page))
1060 return false;
1061 return __put_devmap_managed_page_refs(page, refs);
1062 }
1063 #else
1064 static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
1065 {
1066 return false;
1067 }
1068 #endif
1069
1070 static inline bool put_devmap_managed_page(struct page *page)
1071 {
1072 return put_devmap_managed_page_refs(page, 1);
1073 }
1074
1075
1076 #define folio_ref_zero_or_close_to_overflow(folio) \
1077 ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
1078
1079 /**
1080  * folio_get - Increment the reference count on a folio.
1081  * @folio: The folio.
1082  *
1083  * Context: May be called in any context, as long as you know that you
1084  * already hold a refcount on the folio.  If you do not already have one,
1085  * folio_try_get() may be the right interface to use.
1086  */
1087 static inline void folio_get(struct folio *folio)
1088 {
1089 VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
1090 folio_ref_inc(folio);
1091 }
1092
1093 static inline void get_page(struct page *page)
1094 {
1095 folio_get(page_folio(page));
1096 }
1097
1098 bool __must_check try_grab_page(struct page *page, unsigned int flags);
1099
1100 static inline __must_check bool try_get_page(struct page *page)
1101 {
1102 page = compound_head(page);
1103 if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1104 return false;
1105 page_ref_inc(page);
1106 return true;
1107 }
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122 static inline void folio_put(struct folio *folio)
1123 {
1124 if (folio_put_testzero(folio))
1125 __folio_put(folio);
1126 }
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142 static inline void folio_put_refs(struct folio *folio, int refs)
1143 {
1144 if (folio_ref_sub_and_test(folio, refs))
1145 __folio_put(folio);
1146 }
1147
1148 void release_pages(struct page **pages, int nr);
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162 static inline void folios_put(struct folio **folios, unsigned int nr)
1163 {
1164 release_pages((struct page **)folios, nr);
1165 }
1166
1167 static inline void put_page(struct page *page)
1168 {
1169 struct folio *folio = page_folio(page);
1170
1171
1172
1173
1174
1175 if (put_devmap_managed_page(&folio->page))
1176 return;
1177 folio_put(folio);
1178 }
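/*
 * Illustrative sketch, not part of the original header: every successful
 * get_page()/folio_get() must be balanced by put_page()/folio_put() once the
 * caller is done touching the memory, e.g.
 *
 *	get_page(page);
 *	spin_unlock(&some_lock);
 *	copy_highpage(dst, page);
 *	put_page(page);
 *
 * "some_lock" and "dst" are placeholders; the point is that the elevated
 * refcount keeps the page alive after the lock that found it is dropped.
 */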
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201 /*
1202  * GUP_PIN_COUNTING_BIAS is used to account FOLL_PIN pins taken by the
1203  * pin_user_pages*() family: small pages add the bias to their refcount for
1204  * each pin, while compound pages count pins exactly in the head page's
1205  * compound_pincount.  folio_maybe_dma_pinned()/page_maybe_dma_pinned()
1206  * below use this to report whether a page is probably pinned for DMA;
1207  * for small pages the answer can be a false positive when the refcount
1208  * is very high for other reasons.
1209  */
1210 #define GUP_PIN_COUNTING_BIAS (1U << 10)
1211
1212 void unpin_user_page(struct page *page);
1213 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1214 bool make_dirty);
1215 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1216 bool make_dirty);
1217 void unpin_user_pages(struct page **pages, unsigned long npages);
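/*
 * Illustrative sketch, not part of the original header: FOLL_PIN users pair
 * pin_user_pages*() with unpin_user_page*(), never with put_page(), e.g.
 *
 *	struct page *page;
 *	int ret;
 *
 *	ret = pin_user_pages_fast(addr, 1, FOLL_WRITE, &page);
 *	if (ret == 1) {
 *		... DMA into the page ...
 *		unpin_user_pages_dirty_lock(&page, 1, true);
 *	}
 *
 * "addr" is an assumed user address; passing make_dirty == true marks the
 * page dirty because the device wrote to it.
 */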
1218
1219 static inline bool is_cow_mapping(vm_flags_t flags)
1220 {
1221 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1222 }
1223
1224 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1225 #define SECTION_IN_PAGE_FLAGS
1226 #endif
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236 static inline int page_zone_id(struct page *page)
1237 {
1238 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1239 }
1240
1241 #ifdef NODE_NOT_IN_PAGE_FLAGS
1242 extern int page_to_nid(const struct page *page);
1243 #else
1244 static inline int page_to_nid(const struct page *page)
1245 {
1246 struct page *p = (struct page *)page;
1247
1248 return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
1249 }
1250 #endif
1251
1252 static inline int folio_nid(const struct folio *folio)
1253 {
1254 return page_to_nid(&folio->page);
1255 }
1256
1257 #ifdef CONFIG_NUMA_BALANCING
1258 static inline int cpu_pid_to_cpupid(int cpu, int pid)
1259 {
1260 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1261 }
1262
1263 static inline int cpupid_to_pid(int cpupid)
1264 {
1265 return cpupid & LAST__PID_MASK;
1266 }
1267
1268 static inline int cpupid_to_cpu(int cpupid)
1269 {
1270 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1271 }
1272
1273 static inline int cpupid_to_nid(int cpupid)
1274 {
1275 return cpu_to_node(cpupid_to_cpu(cpupid));
1276 }
1277
1278 static inline bool cpupid_pid_unset(int cpupid)
1279 {
1280 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1281 }
1282
1283 static inline bool cpupid_cpu_unset(int cpupid)
1284 {
1285 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1286 }
1287
1288 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1289 {
1290 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1291 }
1292
1293 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1294 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1295 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1296 {
1297 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1298 }
1299
1300 static inline int page_cpupid_last(struct page *page)
1301 {
1302 return page->_last_cpupid;
1303 }
1304 static inline void page_cpupid_reset_last(struct page *page)
1305 {
1306 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1307 }
1308 #else
1309 static inline int page_cpupid_last(struct page *page)
1310 {
1311 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1312 }
1313
1314 extern int page_cpupid_xchg_last(struct page *page, int cpupid);
1315
1316 static inline void page_cpupid_reset_last(struct page *page)
1317 {
1318 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1319 }
1320 #endif
1321 #else
1322 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1323 {
1324 return page_to_nid(page);
1325 }
1326
1327 static inline int page_cpupid_last(struct page *page)
1328 {
1329 return page_to_nid(page);
1330 }
1331
1332 static inline int cpupid_to_nid(int cpupid)
1333 {
1334 return -1;
1335 }
1336
1337 static inline int cpupid_to_pid(int cpupid)
1338 {
1339 return -1;
1340 }
1341
1342 static inline int cpupid_to_cpu(int cpupid)
1343 {
1344 return -1;
1345 }
1346
1347 static inline int cpu_pid_to_cpupid(int nid, int pid)
1348 {
1349 return -1;
1350 }
1351
1352 static inline bool cpupid_pid_unset(int cpupid)
1353 {
1354 return true;
1355 }
1356
1357 static inline void page_cpupid_reset_last(struct page *page)
1358 {
1359 }
1360
1361 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1362 {
1363 return false;
1364 }
1365 #endif
1366
1367 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1368
1369
1370
1371
1372
1373
1374
1375 static inline u8 page_kasan_tag(const struct page *page)
1376 {
1377 u8 tag = 0xff;
1378
1379 if (kasan_enabled()) {
1380 tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1381 tag ^= 0xff;
1382 }
1383
1384 return tag;
1385 }
1386
1387 static inline void page_kasan_tag_set(struct page *page, u8 tag)
1388 {
1389 unsigned long old_flags, flags;
1390
1391 if (!kasan_enabled())
1392 return;
1393
1394 tag ^= 0xff;
1395 old_flags = READ_ONCE(page->flags);
1396 do {
1397 flags = old_flags;
1398 flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1399 flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1400 } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
1401 }
1402
1403 static inline void page_kasan_tag_reset(struct page *page)
1404 {
1405 if (kasan_enabled())
1406 page_kasan_tag_set(page, 0xff);
1407 }
1408
1409 #else
1410
1411 static inline u8 page_kasan_tag(const struct page *page)
1412 {
1413 return 0xff;
1414 }
1415
1416 static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1417 static inline void page_kasan_tag_reset(struct page *page) { }
1418
1419 #endif
1420
1421 static inline struct zone *page_zone(const struct page *page)
1422 {
1423 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1424 }
1425
1426 static inline pg_data_t *page_pgdat(const struct page *page)
1427 {
1428 return NODE_DATA(page_to_nid(page));
1429 }
1430
1431 static inline struct zone *folio_zone(const struct folio *folio)
1432 {
1433 return page_zone(&folio->page);
1434 }
1435
1436 static inline pg_data_t *folio_pgdat(const struct folio *folio)
1437 {
1438 return page_pgdat(&folio->page);
1439 }
1440
1441 #ifdef SECTION_IN_PAGE_FLAGS
1442 static inline void set_page_section(struct page *page, unsigned long section)
1443 {
1444 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1445 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1446 }
1447
1448 static inline unsigned long page_to_section(const struct page *page)
1449 {
1450 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1451 }
1452 #endif
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463 static inline unsigned long folio_pfn(struct folio *folio)
1464 {
1465 return page_to_pfn(&folio->page);
1466 }
1467
1468 static inline atomic_t *folio_pincount_ptr(struct folio *folio)
1469 {
1470 return &folio_page(folio, 1)->compound_pincount;
1471 }
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498 static inline bool folio_maybe_dma_pinned(struct folio *folio)
1499 {
1500 if (folio_test_large(folio))
1501 return atomic_read(folio_pincount_ptr(folio)) > 0;
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511 return ((unsigned int)folio_ref_count(folio)) >=
1512 GUP_PIN_COUNTING_BIAS;
1513 }
1514
1515 static inline bool page_maybe_dma_pinned(struct page *page)
1516 {
1517 return folio_maybe_dma_pinned(page_folio(page));
1518 }
1519
1520
1521
1522
1523
1524
1525
1526 static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
1527 struct page *page)
1528 {
1529 VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
1530
1531 if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
1532 return false;
1533
1534 return page_maybe_dma_pinned(page);
1535 }
1536
1537
1538 #ifdef CONFIG_MIGRATION
1539 static inline bool is_longterm_pinnable_page(struct page *page)
1540 {
1541 #ifdef CONFIG_CMA
1542 int mt = get_pageblock_migratetype(page);
1543
1544 if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
1545 return false;
1546 #endif
1547
1548 if (is_zero_pfn(page_to_pfn(page)))
1549 return true;
1550
1551
1552 if (is_device_coherent_page(page))
1553 return false;
1554
1555
1556 return !is_zone_movable_page(page);
1557 }
1558 #else
1559 static inline bool is_longterm_pinnable_page(struct page *page)
1560 {
1561 return true;
1562 }
1563 #endif
1564
1565 static inline bool folio_is_longterm_pinnable(struct folio *folio)
1566 {
1567 return is_longterm_pinnable_page(&folio->page);
1568 }
1569
1570 static inline void set_page_zone(struct page *page, enum zone_type zone)
1571 {
1572 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1573 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1574 }
1575
1576 static inline void set_page_node(struct page *page, unsigned long node)
1577 {
1578 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1579 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1580 }
1581
1582 static inline void set_page_links(struct page *page, enum zone_type zone,
1583 unsigned long node, unsigned long pfn)
1584 {
1585 set_page_zone(page, zone);
1586 set_page_node(page, node);
1587 #ifdef SECTION_IN_PAGE_FLAGS
1588 set_page_section(page, pfn_to_section_nr(pfn));
1589 #endif
1590 }
1591
1592
1593
1594
1595
1596
1597
1598 static inline long folio_nr_pages(struct folio *folio)
1599 {
1600 return compound_nr(&folio->page);
1601 }
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617 static inline struct folio *folio_next(struct folio *folio)
1618 {
1619 return (struct folio *)folio_page(folio, folio_nr_pages(folio));
1620 }
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634 static inline unsigned int folio_shift(struct folio *folio)
1635 {
1636 return PAGE_SHIFT + folio_order(folio);
1637 }
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647 static inline size_t folio_size(struct folio *folio)
1648 {
1649 return PAGE_SIZE << folio_order(folio);
1650 }
1651
1652 #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
1653 static inline int arch_make_page_accessible(struct page *page)
1654 {
1655 return 0;
1656 }
1657 #endif
1658
1659 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
1660 static inline int arch_make_folio_accessible(struct folio *folio)
1661 {
1662 int ret;
1663 long i, nr = folio_nr_pages(folio);
1664
1665 for (i = 0; i < nr; i++) {
1666 ret = arch_make_page_accessible(folio_page(folio, i));
1667 if (ret)
1668 break;
1669 }
1670
1671 return ret;
1672 }
1673 #endif
1674
1675
1676
1677
1678 #include <linux/vmstat.h>
1679
1680 static __always_inline void *lowmem_page_address(const struct page *page)
1681 {
1682 return page_to_virt(page);
1683 }
1684
1685 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1686 #define HASHED_PAGE_VIRTUAL
1687 #endif
1688
1689 #if defined(WANT_PAGE_VIRTUAL)
1690 static inline void *page_address(const struct page *page)
1691 {
1692 return page->virtual;
1693 }
1694 static inline void set_page_address(struct page *page, void *address)
1695 {
1696 page->virtual = address;
1697 }
1698 #define page_address_init() do { } while(0)
1699 #endif
1700
1701 #if defined(HASHED_PAGE_VIRTUAL)
1702 void *page_address(const struct page *page);
1703 void set_page_address(struct page *page, void *virtual);
1704 void page_address_init(void);
1705 #endif
1706
1707 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1708 #define page_address(page) lowmem_page_address(page)
1709 #define set_page_address(page, address) do { } while(0)
1710 #define page_address_init() do { } while(0)
1711 #endif
1712
1713 static inline void *folio_address(const struct folio *folio)
1714 {
1715 return page_address(&folio->page);
1716 }
1717
1718 extern void *page_rmapping(struct page *page);
1719 extern pgoff_t __page_file_index(struct page *page);
1720
1721
1722
1723
1724
1725 static inline pgoff_t page_index(struct page *page)
1726 {
1727 if (unlikely(PageSwapCache(page)))
1728 return __page_file_index(page);
1729 return page->index;
1730 }
1731
1732 bool page_mapped(struct page *page);
1733 bool folio_mapped(struct folio *folio);
1734
1735
1736
1737
1738
1739
1740 static inline bool page_is_pfmemalloc(const struct page *page)
1741 {
1742
1743
1744
1745
1746
1747 return (uintptr_t)page->lru.next & BIT(1);
1748 }
1749
1750
1751
1752
1753
1754 static inline void set_page_pfmemalloc(struct page *page)
1755 {
1756 page->lru.next = (void *)BIT(1);
1757 }
1758
1759 static inline void clear_page_pfmemalloc(struct page *page)
1760 {
1761 page->lru.next = NULL;
1762 }
1763
1764
1765
1766
1767 extern void pagefault_out_of_memory(void);
1768
1769 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
1770 #define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
1771 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
1772
1773
1774
1775
1776
1777 #define SHOW_MEM_FILTER_NODES (0x0001u)
1778
1779 extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
1780
1781 #ifdef CONFIG_MMU
1782 extern bool can_do_mlock(void);
1783 #else
1784 static inline bool can_do_mlock(void) { return false; }
1785 #endif
1786 extern int user_shm_lock(size_t, struct ucounts *);
1787 extern void user_shm_unlock(size_t, struct ucounts *);
1788
1789 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1790 pte_t pte);
1791 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1792 pmd_t pmd);
1793
1794 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1795 unsigned long size);
1796 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1797 unsigned long size);
1798 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1799 unsigned long start, unsigned long end);
1800
1801 struct mmu_notifier_range;
1802
1803 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1804 unsigned long end, unsigned long floor, unsigned long ceiling);
1805 int
1806 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
1807 int follow_pte(struct mm_struct *mm, unsigned long address,
1808 pte_t **ptepp, spinlock_t **ptlp);
1809 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1810 unsigned long *pfn);
1811 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1812 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1813 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1814 void *buf, int len, int write);
1815
1816 extern void truncate_pagecache(struct inode *inode, loff_t new);
1817 extern void truncate_setsize(struct inode *inode, loff_t newsize);
1818 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1819 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1820 int generic_error_remove_page(struct address_space *mapping, struct page *page);
1821
1822 #ifdef CONFIG_MMU
1823 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1824 unsigned long address, unsigned int flags,
1825 struct pt_regs *regs);
1826 extern int fixup_user_fault(struct mm_struct *mm,
1827 unsigned long address, unsigned int fault_flags,
1828 bool *unlocked);
1829 void unmap_mapping_pages(struct address_space *mapping,
1830 pgoff_t start, pgoff_t nr, bool even_cows);
1831 void unmap_mapping_range(struct address_space *mapping,
1832 loff_t const holebegin, loff_t const holelen, int even_cows);
1833 #else
1834 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1835 unsigned long address, unsigned int flags,
1836 struct pt_regs *regs)
1837 {
1838
1839 BUG();
1840 return VM_FAULT_SIGBUS;
1841 }
1842 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
1843 unsigned int fault_flags, bool *unlocked)
1844 {
1845
1846 BUG();
1847 return -EFAULT;
1848 }
1849 static inline void unmap_mapping_pages(struct address_space *mapping,
1850 pgoff_t start, pgoff_t nr, bool even_cows) { }
1851 static inline void unmap_mapping_range(struct address_space *mapping,
1852 loff_t const holebegin, loff_t const holelen, int even_cows) { }
1853 #endif
1854
1855 static inline void unmap_shared_mapping_range(struct address_space *mapping,
1856 loff_t const holebegin, loff_t const holelen)
1857 {
1858 unmap_mapping_range(mapping, holebegin, holelen, 0);
1859 }
1860
1861 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
1862 void *buf, int len, unsigned int gup_flags);
1863 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1864 void *buf, int len, unsigned int gup_flags);
1865 extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1866 void *buf, int len, unsigned int gup_flags);
1867
1868 long get_user_pages_remote(struct mm_struct *mm,
1869 unsigned long start, unsigned long nr_pages,
1870 unsigned int gup_flags, struct page **pages,
1871 struct vm_area_struct **vmas, int *locked);
1872 long pin_user_pages_remote(struct mm_struct *mm,
1873 unsigned long start, unsigned long nr_pages,
1874 unsigned int gup_flags, struct page **pages,
1875 struct vm_area_struct **vmas, int *locked);
1876 long get_user_pages(unsigned long start, unsigned long nr_pages,
1877 unsigned int gup_flags, struct page **pages,
1878 struct vm_area_struct **vmas);
1879 long pin_user_pages(unsigned long start, unsigned long nr_pages,
1880 unsigned int gup_flags, struct page **pages,
1881 struct vm_area_struct **vmas);
1882 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1883 struct page **pages, unsigned int gup_flags);
1884 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1885 struct page **pages, unsigned int gup_flags);
1886
1887 int get_user_pages_fast(unsigned long start, int nr_pages,
1888 unsigned int gup_flags, struct page **pages);
1889 int pin_user_pages_fast(unsigned long start, int nr_pages,
1890 unsigned int gup_flags, struct page **pages);
1891
1892 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
1893 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
1894 struct task_struct *task, bool bypass_rlim);
1895
1896 struct kvec;
1897 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1898 struct page **pages);
1899 struct page *get_dump_page(unsigned long addr);
1900
1901 bool folio_mark_dirty(struct folio *folio);
1902 bool set_page_dirty(struct page *page);
1903 int set_page_dirty_lock(struct page *page);
1904
1905 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1906
1907 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1908 unsigned long old_addr, struct vm_area_struct *new_vma,
1909 unsigned long new_addr, unsigned long len,
1910 bool need_rmap_locks);
1911
1912
1913
1914
1915
1916
1917 /*
1918  * Flags for change_protection() and mprotect_fixup(), kept as a bitmap so
1919  * several behaviours can be combined: MM_CP_TRY_CHANGE_WRITABLE upgrades
1920  * PTEs to writable when that is safe, MM_CP_PROT_NUMA marks pages for NUMA
1921  * hinting faults, and the MM_CP_UFFD_* flags install or clear userfaultfd
1922  * write protection.
 */
1923 #define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
1924
1925 #define MM_CP_PROT_NUMA (1UL << 1)
1926
1927 #define MM_CP_UFFD_WP (1UL << 2)
1928 #define MM_CP_UFFD_WP_RESOLVE (1UL << 3)
1929 #define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
1930 MM_CP_UFFD_WP_RESOLVE)
1931
1932 extern unsigned long change_protection(struct mmu_gather *tlb,
1933 struct vm_area_struct *vma, unsigned long start,
1934 unsigned long end, pgprot_t newprot,
1935 unsigned long cp_flags);
1936 extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
1937 struct vm_area_struct **pprev, unsigned long start,
1938 unsigned long end, unsigned long newflags);
1939
1940
1941
1942
1943 int get_user_pages_fast_only(unsigned long start, int nr_pages,
1944 unsigned int gup_flags, struct page **pages);
1945 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
1946 unsigned int gup_flags, struct page **pages);
1947
1948 static inline bool get_user_page_fast_only(unsigned long addr,
1949 unsigned int gup_flags, struct page **pagep)
1950 {
1951 return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
1952 }
1953
1954
1955
1956 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1957 {
1958 long val = atomic_long_read(&mm->rss_stat.count[member]);
1959
1960 #ifdef SPLIT_RSS_COUNTING
1961
1962
1963
1964
1965 if (val < 0)
1966 val = 0;
1967 #endif
1968 return (unsigned long)val;
1969 }
1970
1971 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
1972
1973 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1974 {
1975 long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
1976
1977 mm_trace_rss_stat(mm, member, count);
1978 }
1979
1980 static inline void inc_mm_counter(struct mm_struct *mm, int member)
1981 {
1982 long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
1983
1984 mm_trace_rss_stat(mm, member, count);
1985 }
1986
1987 static inline void dec_mm_counter(struct mm_struct *mm, int member)
1988 {
1989 long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
1990
1991 mm_trace_rss_stat(mm, member, count);
1992 }
1993
1994
1995 static inline int mm_counter_file(struct page *page)
1996 {
1997 if (PageSwapBacked(page))
1998 return MM_SHMEMPAGES;
1999 return MM_FILEPAGES;
2000 }
2001
2002 static inline int mm_counter(struct page *page)
2003 {
2004 if (PageAnon(page))
2005 return MM_ANONPAGES;
2006 return mm_counter_file(page);
2007 }
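/*
 * Illustrative sketch, not part of the original header: when a page is mapped
 * into or unmapped from an mm, the matching RSS bucket is chosen with
 * mm_counter()/mm_counter_file(), e.g.
 *
 *	inc_mm_counter(mm, mm_counter(page));
 *
 * on map, with the matching dec_mm_counter() on unmap, so anonymous, file
 * and shmem pages are accounted separately in MM_ANONPAGES, MM_FILEPAGES
 * and MM_SHMEMPAGES.
 */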
2008
2009 static inline unsigned long get_mm_rss(struct mm_struct *mm)
2010 {
2011 return get_mm_counter(mm, MM_FILEPAGES) +
2012 get_mm_counter(mm, MM_ANONPAGES) +
2013 get_mm_counter(mm, MM_SHMEMPAGES);
2014 }
2015
2016 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2017 {
2018 return max(mm->hiwater_rss, get_mm_rss(mm));
2019 }
2020
2021 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2022 {
2023 return max(mm->hiwater_vm, mm->total_vm);
2024 }
2025
2026 static inline void update_hiwater_rss(struct mm_struct *mm)
2027 {
2028 unsigned long _rss = get_mm_rss(mm);
2029
2030 if ((mm)->hiwater_rss < _rss)
2031 (mm)->hiwater_rss = _rss;
2032 }
2033
2034 static inline void update_hiwater_vm(struct mm_struct *mm)
2035 {
2036 if (mm->hiwater_vm < mm->total_vm)
2037 mm->hiwater_vm = mm->total_vm;
2038 }
2039
2040 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2041 {
2042 mm->hiwater_rss = get_mm_rss(mm);
2043 }
2044
2045 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2046 struct mm_struct *mm)
2047 {
2048 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2049
2050 if (*maxrss < hiwater_rss)
2051 *maxrss = hiwater_rss;
2052 }
2053
2054 #if defined(SPLIT_RSS_COUNTING)
2055 void sync_mm_rss(struct mm_struct *mm);
2056 #else
2057 static inline void sync_mm_rss(struct mm_struct *mm)
2058 {
2059 }
2060 #endif
2061
2062 #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2063 static inline int pte_special(pte_t pte)
2064 {
2065 return 0;
2066 }
2067
2068 static inline pte_t pte_mkspecial(pte_t pte)
2069 {
2070 return pte;
2071 }
2072 #endif
2073
2074 #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
2075 static inline int pte_devmap(pte_t pte)
2076 {
2077 return 0;
2078 }
2079 #endif
2080
2081 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
2082
2083 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2084 spinlock_t **ptl);
2085 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2086 spinlock_t **ptl)
2087 {
2088 pte_t *ptep;
2089 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2090 return ptep;
2091 }
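/*
 * Illustrative sketch, not part of the original header: get_locked_pte()
 * returns the PTE mapped and with its page-table lock held, so the caller
 * must drop both with pte_unmap_unlock() (defined further down), e.g.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *
 *	if (!pte)
 *		return -ENOMEM;
 *	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 *	pte_unmap_unlock(pte, ptl);
 *
 * "page" and "prot" are assumed inputs; real callers also check that the old
 * PTE was none before overwriting it.
 */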
2092
2093 #ifdef __PAGETABLE_P4D_FOLDED
2094 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2095 unsigned long address)
2096 {
2097 return 0;
2098 }
2099 #else
2100 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2101 #endif
2102
2103 #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
2104 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2105 unsigned long address)
2106 {
2107 return 0;
2108 }
2109 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2110 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2111
2112 #else
2113 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2114
2115 static inline void mm_inc_nr_puds(struct mm_struct *mm)
2116 {
2117 if (mm_pud_folded(mm))
2118 return;
2119 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2120 }
2121
2122 static inline void mm_dec_nr_puds(struct mm_struct *mm)
2123 {
2124 if (mm_pud_folded(mm))
2125 return;
2126 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2127 }
2128 #endif
2129
2130 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
2131 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2132 unsigned long address)
2133 {
2134 return 0;
2135 }
2136
2137 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2138 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2139
2140 #else
2141 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2142
2143 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2144 {
2145 if (mm_pmd_folded(mm))
2146 return;
2147 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2148 }
2149
2150 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2151 {
2152 if (mm_pmd_folded(mm))
2153 return;
2154 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2155 }
2156 #endif
2157
2158 #ifdef CONFIG_MMU
2159 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2160 {
2161 atomic_long_set(&mm->pgtables_bytes, 0);
2162 }
2163
2164 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2165 {
2166 return atomic_long_read(&mm->pgtables_bytes);
2167 }
2168
2169 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2170 {
2171 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2172 }
2173
2174 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2175 {
2176 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2177 }
2178 #else
2179
2180 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2181 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2182 {
2183 return 0;
2184 }
2185
2186 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
2187 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2188 #endif
2189
2190 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2191 int __pte_alloc_kernel(pmd_t *pmd);
2192
2193 #if defined(CONFIG_MMU)
2194
2195 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2196 unsigned long address)
2197 {
2198 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2199 NULL : p4d_offset(pgd, address);
2200 }
2201
2202 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2203 unsigned long address)
2204 {
2205 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2206 NULL : pud_offset(p4d, address);
2207 }
2208
2209 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2210 {
2211 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
2212 NULL: pmd_offset(pud, address);
2213 }
2214 #endif
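/*
 * Illustrative sketch, not part of the original header: the allocating
 * helpers above are normally chained to materialise the whole path down to
 * a PTE for a given address, e.g.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *
 *	if (!pmd)
 *		return -ENOMEM;
 *
 * followed by pte_alloc_map_lock() (see below) to get the final PTE with
 * its lock held.
 */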
2215
2216 #if USE_SPLIT_PTE_PTLOCKS
2217 #if ALLOC_SPLIT_PTLOCKS
2218 void __init ptlock_cache_init(void);
2219 extern bool ptlock_alloc(struct page *page);
2220 extern void ptlock_free(struct page *page);
2221
2222 static inline spinlock_t *ptlock_ptr(struct page *page)
2223 {
2224 return page->ptl;
2225 }
2226 #else
2227 static inline void ptlock_cache_init(void)
2228 {
2229 }
2230
2231 static inline bool ptlock_alloc(struct page *page)
2232 {
2233 return true;
2234 }
2235
2236 static inline void ptlock_free(struct page *page)
2237 {
2238 }
2239
2240 static inline spinlock_t *ptlock_ptr(struct page *page)
2241 {
2242 return &page->ptl;
2243 }
2244 #endif
2245
2246 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2247 {
2248 return ptlock_ptr(pmd_page(*pmd));
2249 }
2250
2251 static inline bool ptlock_init(struct page *page)
2252 {
2253
2254
2255
2256
2257
2258
2259
2260 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
2261 if (!ptlock_alloc(page))
2262 return false;
2263 spin_lock_init(ptlock_ptr(page));
2264 return true;
2265 }
2266
2267 #else
2268
2269
2270
2271 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2272 {
2273 return &mm->page_table_lock;
2274 }
2275 static inline void ptlock_cache_init(void) {}
2276 static inline bool ptlock_init(struct page *page) { return true; }
2277 static inline void ptlock_free(struct page *page) {}
2278 #endif
2279
2280 static inline void pgtable_init(void)
2281 {
2282 ptlock_cache_init();
2283 pgtable_cache_init();
2284 }
2285
2286 static inline bool pgtable_pte_page_ctor(struct page *page)
2287 {
2288 if (!ptlock_init(page))
2289 return false;
2290 __SetPageTable(page);
2291 inc_lruvec_page_state(page, NR_PAGETABLE);
2292 return true;
2293 }
2294
2295 static inline void pgtable_pte_page_dtor(struct page *page)
2296 {
2297 ptlock_free(page);
2298 __ClearPageTable(page);
2299 dec_lruvec_page_state(page, NR_PAGETABLE);
2300 }
2301
2302 #define pte_offset_map_lock(mm, pmd, address, ptlp) \
2303 ({ \
2304 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
2305 pte_t *__pte = pte_offset_map(pmd, address); \
2306 *(ptlp) = __ptl; \
2307 spin_lock(__ptl); \
2308 __pte; \
2309 })
2310
2311 #define pte_unmap_unlock(pte, ptl) do { \
2312 spin_unlock(ptl); \
2313 pte_unmap(pte); \
2314 } while (0)
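/*
 * Illustrative sketch, not part of the original header: the usual pattern for
 * walking the PTEs of one PMD takes the lock once and unmaps/unlocks at the
 * end, as mm/memory.c does:
 *
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	do {
 *		... inspect or modify *pte ...
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	pte_unmap_unlock(pte - 1, ptl);
 *
 * "addr"/"end" delimit the range inside the PMD; unlocking uses pte - 1
 * because the loop post-increments past the last entry.
 */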
2315
2316 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2317
2318 #define pte_alloc_map(mm, pmd, address) \
2319 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2320
2321 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \
2322 (pte_alloc(mm, pmd) ? \
2323 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2324
2325 #define pte_alloc_kernel(pmd, address) \
2326 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
2327 NULL: pte_offset_kernel(pmd, address))
2328
2329 #if USE_SPLIT_PMD_PTLOCKS
2330
2331 static struct page *pmd_to_page(pmd_t *pmd)
2332 {
2333 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
2334 return virt_to_page((void *)((unsigned long) pmd & mask));
2335 }
2336
2337 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2338 {
2339 return ptlock_ptr(pmd_to_page(pmd));
2340 }
2341
2342 static inline bool pmd_ptlock_init(struct page *page)
2343 {
2344 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2345 page->pmd_huge_pte = NULL;
2346 #endif
2347 return ptlock_init(page);
2348 }
2349
2350 static inline void pmd_ptlock_free(struct page *page)
2351 {
2352 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2353 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
2354 #endif
2355 ptlock_free(page);
2356 }
2357
2358 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
2359
2360 #else
2361
2362 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2363 {
2364 return &mm->page_table_lock;
2365 }
2366
2367 static inline bool pmd_ptlock_init(struct page *page) { return true; }
2368 static inline void pmd_ptlock_free(struct page *page) {}
2369
2370 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
2371
2372 #endif
2373
2374 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
2375 {
2376 spinlock_t *ptl = pmd_lockptr(mm, pmd);
2377 spin_lock(ptl);
2378 return ptl;
2379 }
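/*
 * Illustrative sketch, not part of the original header: pmd_lock() is the
 * entry point huge-page code uses to serialise against one PMD, e.g.
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *
 *	if (pmd_trans_huge(*pmd))
 *		... operate on the huge PMD ...
 *	spin_unlock(ptl);
 *
 * With split PMD locks this only contends on that one page-table page, not
 * on mm->page_table_lock.
 */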
2380
2381 static inline bool pgtable_pmd_page_ctor(struct page *page)
2382 {
2383 if (!pmd_ptlock_init(page))
2384 return false;
2385 __SetPageTable(page);
2386 inc_lruvec_page_state(page, NR_PAGETABLE);
2387 return true;
2388 }
2389
2390 static inline void pgtable_pmd_page_dtor(struct page *page)
2391 {
2392 pmd_ptlock_free(page);
2393 __ClearPageTable(page);
2394 dec_lruvec_page_state(page, NR_PAGETABLE);
2395 }
2396
2397
2398 /*
2399  * PUD page table locks are not split yet: pud_lockptr() simply falls back
2400  * to the per-mm page_table_lock, but keeping the same interface as the
2401  * PMD/PTE variants makes it easy to introduce split PUD locks later.
2402  */
2403 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2404 {
2405 return &mm->page_table_lock;
2406 }
2407
2408 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2409 {
2410 spinlock_t *ptl = pud_lockptr(mm, pud);
2411
2412 spin_lock(ptl);
2413 return ptl;
2414 }
2415
2416 extern void __init pagecache_init(void);
2417 extern void free_initmem(void);
2418
2419
2420
2421
2422
2423
2424
2425 extern unsigned long free_reserved_area(void *start, void *end,
2426 int poison, const char *s);
2427
2428 extern void adjust_managed_page_count(struct page *page, long count);
2429 extern void mem_init_print_info(void);
2430
2431 extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2432
2433
2434 static inline void free_reserved_page(struct page *page)
2435 {
2436 ClearPageReserved(page);
2437 init_page_count(page);
2438 __free_page(page);
2439 adjust_managed_page_count(page, 1);
2440 }
2441 #define free_highmem_page(page) free_reserved_page(page)
2442
2443 static inline void mark_page_reserved(struct page *page)
2444 {
2445 SetPageReserved(page);
2446 adjust_managed_page_count(page, -1);
2447 }
2448
/*
 * Default way for an architecture to release its __init sections back to the
 * buddy allocator once boot has finished; returns the number of pages freed.
 */
2455 static inline unsigned long free_initmem_default(int poison)
2456 {
2457 extern char __init_begin[], __init_end[];
2458
2459 return free_reserved_area(&__init_begin, &__init_end,
2460 poison, "unused kernel image (initmem)");
2461 }
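
/*
 * Editor's sketch: an architecture's free_initmem() often reduces to this
 * single call; a negative poison value means the freed pages are not
 * overwritten before being released.
 */
static inline void example_arch_free_initmem(void)
{
	free_initmem_default(-1);
}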
2462
2463 static inline unsigned long get_num_physpages(void)
2464 {
2465 int nid;
2466 unsigned long phys_pages = 0;
2467
2468 for_each_online_node(nid)
2469 phys_pages += node_present_pages(nid);
2470
2471 return phys_pages;
2472 }
2473
/*
 * Node/zone setup built on the memblock memory map: free_area_init() takes
 * the architecture's array of maximum zone PFNs and initialises every node's
 * pg_data_t and zone structures, while the helpers below query the physical
 * ranges that were registered with memblock.
 */
2490 void free_area_init(unsigned long *max_zone_pfn);
2491 unsigned long node_map_pfn_alignment(void);
2492 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2493 unsigned long end_pfn);
2494 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2495 unsigned long end_pfn);
2496 extern void get_pfn_range_for_nid(unsigned int nid,
2497 unsigned long *start_pfn, unsigned long *end_pfn);
2498 extern unsigned long find_min_pfn_with_active_regions(void);
2499
2500 #ifndef CONFIG_NUMA
2501 static inline int early_pfn_to_nid(unsigned long pfn)
2502 {
2503 return 0;
2504 }
2505 #else
2506
2507 extern int __meminit early_pfn_to_nid(unsigned long pfn);
2508 #endif
2509
2510 extern void set_dma_reserve(unsigned long new_dma_reserve);
2511 extern void memmap_init_range(unsigned long, int, unsigned long,
2512 unsigned long, unsigned long, enum meminit_context,
2513 struct vmem_altmap *, int migratetype);
2514 extern void setup_per_zone_wmarks(void);
2515 extern void calculate_min_free_kbytes(void);
2516 extern int __meminit init_per_zone_wmark_min(void);
2517 extern void mem_init(void);
2518 extern void __init mmap_init(void);
2519 extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2520 extern long si_mem_available(void);
2521 extern void si_meminfo(struct sysinfo * val);
2522 extern void si_meminfo_node(struct sysinfo *val, int nid);
2523 #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2524 extern unsigned long arch_reserved_kernel_pages(void);
2525 #endif
2526
2527 extern __printf(3, 4)
2528 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2529
2530 extern void setup_per_cpu_pageset(void);
2531
2532
2533 extern int min_free_kbytes;
2534 extern int watermark_boost_factor;
2535 extern int watermark_scale_factor;
2536 extern bool arch_has_descending_max_zone_pfns(void);
2537
2538
2539 extern atomic_long_t mmap_pages_allocated;
2540 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2541
2542
2543 void vma_interval_tree_insert(struct vm_area_struct *node,
2544 struct rb_root_cached *root);
2545 void vma_interval_tree_insert_after(struct vm_area_struct *node,
2546 struct vm_area_struct *prev,
2547 struct rb_root_cached *root);
2548 void vma_interval_tree_remove(struct vm_area_struct *node,
2549 struct rb_root_cached *root);
2550 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2551 unsigned long start, unsigned long last);
2552 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2553 unsigned long start, unsigned long last);
2554
2555 #define vma_interval_tree_foreach(vma, root, start, last) \
2556 for (vma = vma_interval_tree_iter_first(root, start, last); \
2557 vma; vma = vma_interval_tree_iter_next(vma, start, last))
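
/*
 * Editor's sketch: counting the VMAs that map a given page-offset range.
 * @root would typically be &mapping->i_mmap, walked under the i_mmap lock;
 * the helper name is hypothetical.
 */
static inline int example_count_mappings(struct rb_root_cached *root,
					 unsigned long first, unsigned long last)
{
	struct vm_area_struct *vma;
	int count = 0;

	vma_interval_tree_foreach(vma, root, first, last)
		count++;
	return count;
}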
2558
2559 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2560 struct rb_root_cached *root);
2561 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2562 struct rb_root_cached *root);
2563 struct anon_vma_chain *
2564 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2565 unsigned long start, unsigned long last);
2566 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2567 struct anon_vma_chain *node, unsigned long start, unsigned long last);
2568 #ifdef CONFIG_DEBUG_VM_RB
2569 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2570 #endif
2571
2572 #define anon_vma_interval_tree_foreach(avc, root, start, last) \
2573 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2574 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2575
2576
2577 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2578 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2579 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2580 struct vm_area_struct *expand);
2581 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2582 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2583 {
2584 return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2585 }
2586 extern struct vm_area_struct *vma_merge(struct mm_struct *,
2587 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2588 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2589 struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
2590 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2591 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2592 unsigned long addr, int new_below);
2593 extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2594 unsigned long addr, int new_below);
2595 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2596 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2597 struct rb_node **, struct rb_node *);
2598 extern void unlink_file_vma(struct vm_area_struct *);
2599 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2600 unsigned long addr, unsigned long len, pgoff_t pgoff,
2601 bool *need_rmap_locks);
2602 extern void exit_mmap(struct mm_struct *);
2603
2604 static inline int check_data_rlimit(unsigned long rlim,
2605 unsigned long new,
2606 unsigned long start,
2607 unsigned long end_data,
2608 unsigned long start_data)
2609 {
2610 if (rlim < RLIM_INFINITY) {
2611 if (((new - start) + (end_data - start_data)) > rlim)
2612 return -ENOSPC;
2613 }
2614
2615 return 0;
2616 }
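
/*
 * Editor's sketch of a brk()-style limit check: @rlim would normally come
 * from rlimit(RLIMIT_DATA), and the mm fields are the ones sys_brk() uses.
 * The wrapper itself is hypothetical.
 */
static inline int example_may_expand_brk(struct mm_struct *mm,
					 unsigned long rlim, unsigned long newbrk)
{
	return check_data_rlimit(rlim, newbrk, mm->start_brk,
				 mm->end_data, mm->start_data);
}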
2617
2618 extern int mm_take_all_locks(struct mm_struct *mm);
2619 extern void mm_drop_all_locks(struct mm_struct *mm);
2620
2621 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2622 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2623 extern struct file *get_mm_exe_file(struct mm_struct *mm);
2624 extern struct file *get_task_exe_file(struct task_struct *task);
2625
2626 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2627 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2628
2629 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2630 const struct vm_special_mapping *sm);
2631 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2632 unsigned long addr, unsigned long len,
2633 unsigned long flags,
2634 const struct vm_special_mapping *spec);
2635
2636 extern int install_special_mapping(struct mm_struct *mm,
2637 unsigned long addr, unsigned long len,
2638 unsigned long flags, struct page **pages);
2639
2640 unsigned long randomize_stack_top(unsigned long stack_top);
2641 unsigned long randomize_page(unsigned long start, unsigned long range);
2642
2643 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2644
2645 extern unsigned long mmap_region(struct file *file, unsigned long addr,
2646 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2647 struct list_head *uf);
2648 extern unsigned long do_mmap(struct file *file, unsigned long addr,
2649 unsigned long len, unsigned long prot, unsigned long flags,
2650 unsigned long pgoff, unsigned long *populate, struct list_head *uf);
2651 extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
2652 struct list_head *uf, bool downgrade);
2653 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2654 struct list_head *uf);
2655 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
2656
2657 #ifdef CONFIG_MMU
2658 extern int __mm_populate(unsigned long addr, unsigned long len,
2659 int ignore_errors);
2660 static inline void mm_populate(unsigned long addr, unsigned long len)
2661 {
	/* Ignore errors */
2663 (void) __mm_populate(addr, len, 1);
2664 }
2665 #else
2666 static inline void mm_populate(unsigned long addr, unsigned long len) {}
2667 #endif
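
/*
 * Editor's sketch: prefaulting a freshly mapped range the way MAP_POPULATE
 * does; @addr/@len would come from a successful vm_mmap()-style call.
 */
static inline void example_prefault(unsigned long addr, unsigned long len)
{
	if (!IS_ERR_VALUE(addr))
		mm_populate(addr, len);
}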
2668
2669
2670 extern int __must_check vm_brk(unsigned long, unsigned long);
2671 extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2672 extern int vm_munmap(unsigned long, size_t);
2673 extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2674 unsigned long, unsigned long,
2675 unsigned long, unsigned long);
2676
2677 struct vm_unmapped_area_info {
2678 #define VM_UNMAPPED_AREA_TOPDOWN 1
2679 unsigned long flags;
2680 unsigned long length;
2681 unsigned long low_limit;
2682 unsigned long high_limit;
2683 unsigned long align_mask;
2684 unsigned long align_offset;
2685 };
2686
2687 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
2688
2689
2690 extern void truncate_inode_pages(struct address_space *, loff_t);
2691 extern void truncate_inode_pages_range(struct address_space *,
2692 loff_t lstart, loff_t lend);
2693 extern void truncate_inode_pages_final(struct address_space *);
2694
2695
2696 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
2697 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
2698 pgoff_t start_pgoff, pgoff_t end_pgoff);
2699 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
2700
2701 extern unsigned long stack_guard_gap;
2702
2703 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2704
2705
2706 extern int expand_downwards(struct vm_area_struct *vma,
2707 unsigned long address);
2708 #if VM_GROWSUP
2709 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2710 #else
2711 #define expand_upwards(vma, address) (0)
2712 #endif
2713
/* Look up the first VMA which satisfies addr < vm_end; NULL if none. */
2715 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2716 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2717 struct vm_area_struct **pprev);
2718
/*
 * find_vma_intersection() - Look up the first VMA that intersects the
 * half-open interval [start_addr, end_addr).  The caller must hold the
 * mmap_lock; returns NULL when no VMA overlaps the range.
 */
2728 static inline
2729 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
2730 unsigned long start_addr,
2731 unsigned long end_addr)
2732 {
2733 struct vm_area_struct *vma = find_vma(mm, start_addr);
2734
2735 if (vma && end_addr <= vma->vm_start)
2736 vma = NULL;
2737 return vma;
2738 }
2739
/*
 * vma_lookup() - Find the VMA that actually contains @addr, i.e. with
 * vm_start <= addr < vm_end.  Unlike find_vma(), a VMA that merely begins
 * above @addr is not returned; NULL means the address is unmapped.
 */
2747 static inline
2748 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
2749 {
2750 struct vm_area_struct *vma = find_vma(mm, addr);
2751
2752 if (vma && addr < vma->vm_start)
2753 vma = NULL;
2754
2755 return vma;
2756 }
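
/*
 * Editor's sketch (hypothetical helper): resolving the VMA that covers a
 * user address; the mmap_lock must be held across the lookup and across any
 * use of the returned VMA.
 */
static inline bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	mapped = vma != NULL;
	mmap_read_unlock(mm);
	return mapped;
}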
2757
2758 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2759 {
2760 unsigned long vm_start = vma->vm_start;
2761
2762 if (vma->vm_flags & VM_GROWSDOWN) {
2763 vm_start -= stack_guard_gap;
2764 if (vm_start > vma->vm_start)
2765 vm_start = 0;
2766 }
2767 return vm_start;
2768 }
2769
2770 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2771 {
2772 unsigned long vm_end = vma->vm_end;
2773
2774 if (vma->vm_flags & VM_GROWSUP) {
2775 vm_end += stack_guard_gap;
2776 if (vm_end < vma->vm_end)
2777 vm_end = -PAGE_SIZE;
2778 }
2779 return vm_end;
2780 }
2781
2782 static inline unsigned long vma_pages(struct vm_area_struct *vma)
2783 {
2784 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2785 }
2786
2787
2788 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2789 unsigned long vm_start, unsigned long vm_end)
2790 {
2791 struct vm_area_struct *vma = find_vma(mm, vm_start);
2792
2793 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2794 vma = NULL;
2795
2796 return vma;
2797 }
2798
2799 static inline bool range_in_vma(struct vm_area_struct *vma,
2800 unsigned long start, unsigned long end)
2801 {
2802 return (vma && vma->vm_start <= start && end <= vma->vm_end);
2803 }
2804
2805 #ifdef CONFIG_MMU
2806 pgprot_t vm_get_page_prot(unsigned long vm_flags);
2807 void vma_set_page_prot(struct vm_area_struct *vma);
2808 #else
2809 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2810 {
2811 return __pgprot(0);
2812 }
2813 static inline void vma_set_page_prot(struct vm_area_struct *vma)
2814 {
2815 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2816 }
2817 #endif
2818
2819 void vma_set_file(struct vm_area_struct *vma, struct file *file);
2820
2821 #ifdef CONFIG_NUMA_BALANCING
2822 unsigned long change_prot_numa(struct vm_area_struct *vma,
2823 unsigned long start, unsigned long end);
2824 #endif
2825
2826 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2827 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2828 unsigned long pfn, unsigned long size, pgprot_t);
2829 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2830 unsigned long pfn, unsigned long size, pgprot_t prot);
2831 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2832 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2833 struct page **pages, unsigned long *num);
2834 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2835 unsigned long num);
2836 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2837 unsigned long num);
2838 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2839 unsigned long pfn);
2840 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2841 unsigned long pfn, pgprot_t pgprot);
2842 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2843 pfn_t pfn);
2844 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2845 pfn_t pfn, pgprot_t pgprot);
2846 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2847 unsigned long addr, pfn_t pfn);
2848 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
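
/*
 * Editor's sketch: the classic driver ->mmap() pattern backing a VMA with
 * device memory via remap_pfn_range(); @phys is a hypothetical device base
 * address supplied by the caller.
 */
static inline int example_io_mmap(struct vm_area_struct *vma, phys_addr_t phys)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}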
2849
2850 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
2851 unsigned long addr, struct page *page)
2852 {
2853 int err = vm_insert_page(vma, addr, page);
2854
2855 if (err == -ENOMEM)
2856 return VM_FAULT_OOM;
2857 if (err < 0 && err != -EBUSY)
2858 return VM_FAULT_SIGBUS;
2859
2860 return VM_FAULT_NOPAGE;
2861 }
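
/*
 * Editor's sketch: a ->fault handler backing the whole VMA with a single
 * preallocated page kept in vm_private_data (the driver bookkeeping is made
 * up for illustration).
 */
static inline vm_fault_t example_single_page_fault(struct vm_fault *vmf)
{
	struct page *page = vmf->vma->vm_private_data;

	return vmf_insert_page(vmf->vma, vmf->address, page);
}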
2862
2863 #ifndef io_remap_pfn_range
2864 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
2865 unsigned long addr, unsigned long pfn,
2866 unsigned long size, pgprot_t prot)
2867 {
2868 return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
2869 }
2870 #endif
2871
2872 static inline vm_fault_t vmf_error(int err)
2873 {
2874 if (err == -ENOMEM)
2875 return VM_FAULT_OOM;
2876 return VM_FAULT_SIGBUS;
2877 }
2878
2879 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
2880 unsigned int foll_flags);
2881
2882 #define FOLL_WRITE 0x01
2883 #define FOLL_TOUCH 0x02
2884 #define FOLL_GET 0x04
2885 #define FOLL_DUMP 0x08
2886 #define FOLL_FORCE 0x10
2887 #define FOLL_NOWAIT 0x20
2888
2889 #define FOLL_NOFAULT 0x80
2890 #define FOLL_HWPOISON 0x100
2891 #define FOLL_NUMA 0x200
2892 #define FOLL_MIGRATION 0x400
2893 #define FOLL_TRIED 0x800
2894 #define FOLL_REMOTE 0x2000
2895 #define FOLL_ANON 0x8000
2896 #define FOLL_LONGTERM 0x10000
2897 #define FOLL_SPLIT_PMD 0x20000
2898 #define FOLL_PIN 0x40000
2899 #define FOLL_FAST_ONLY 0x80000
2900
/*
 * FOLL_GET and FOLL_PIN are mutually exclusive and are set internally by the
 * GUP implementation: get_user_pages*() uses FOLL_GET and its references are
 * dropped with put_page(), while pin_user_pages*() uses FOLL_PIN for pages
 * whose contents will actually be accessed (DMA, direct I/O, etc.) and such
 * pins must be released with unpin_user_page*().  FOLL_LONGTERM marks pins
 * that may be held indefinitely, so those pages are first migrated off CMA
 * and ZONE_MOVABLE memory.
 */
2957 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
2958 {
2959 if (vm_fault & VM_FAULT_OOM)
2960 return -ENOMEM;
2961 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2962 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2963 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2964 return -EFAULT;
2965 return 0;
2966 }
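
/*
 * Editor's sketch, mirroring the pattern used by GUP-style callers: convert
 * a failed fault result into an errno, passing the caller's FOLL_* flags so
 * hwpoison is reported as -EHWPOISON only when requested.
 */
static inline int example_fault_result_to_errno(vm_fault_t ret,
						unsigned int gup_flags)
{
	if (ret & VM_FAULT_ERROR)
		return vm_fault_to_errno(ret, gup_flags);
	return 0;
}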
2967
/*
 * gup_must_unshare() - must GUP trigger unsharing (FAULT_FLAG_UNSHARE)
 * before taking a read-only FOLL_PIN on @page?  A pin has to stay coherent
 * with the pinning process's address space: if the anonymous page is still
 * COW-shared (not PageAnonExclusive()), a later write fault would replace
 * it and leave the pin behind, so GUP unshares it up front.
 */
2985 static inline bool gup_must_unshare(unsigned int flags, struct page *page)
2986 {
2987
2988
2989
2990
2991
2992 if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
2993 return false;
2994
2995
2996
2997
2998 if (!PageAnon(page))
2999 return false;
3000
3001
3002
3003
3004 return !PageAnonExclusive(page);
3005 }
3006
3007 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
3008 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3009 unsigned long size, pte_fn_t fn, void *data);
3010 extern int apply_to_existing_page_range(struct mm_struct *mm,
3011 unsigned long address, unsigned long size,
3012 pte_fn_t fn, void *data);
3013
3014 extern void init_mem_debugging_and_hardening(void);
3015 #ifdef CONFIG_PAGE_POISONING
3016 extern void __kernel_poison_pages(struct page *page, int numpages);
3017 extern void __kernel_unpoison_pages(struct page *page, int numpages);
3018 extern bool _page_poisoning_enabled_early;
3019 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
3020 static inline bool page_poisoning_enabled(void)
3021 {
3022 return _page_poisoning_enabled_early;
3023 }

/*
 * Static-key variant for hot paths: only valid once
 * init_mem_debugging_and_hardening() has run, or where a false negative this
 * early in boot is harmless.
 */
3028 static inline bool page_poisoning_enabled_static(void)
3029 {
3030 return static_branch_unlikely(&_page_poisoning_enabled);
3031 }
3032 static inline void kernel_poison_pages(struct page *page, int numpages)
3033 {
3034 if (page_poisoning_enabled_static())
3035 __kernel_poison_pages(page, numpages);
3036 }
3037 static inline void kernel_unpoison_pages(struct page *page, int numpages)
3038 {
3039 if (page_poisoning_enabled_static())
3040 __kernel_unpoison_pages(page, numpages);
3041 }
3042 #else
3043 static inline bool page_poisoning_enabled(void) { return false; }
3044 static inline bool page_poisoning_enabled_static(void) { return false; }
3045 static inline void __kernel_poison_pages(struct page *page, int numpages) { }
3046 static inline void kernel_poison_pages(struct page *page, int numpages) { }
3047 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
3048 #endif
3049
3050 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
3051 static inline bool want_init_on_alloc(gfp_t flags)
3052 {
3053 if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
3054 &init_on_alloc))
3055 return true;
3056 return flags & __GFP_ZERO;
3057 }
3058
3059 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
3060 static inline bool want_init_on_free(void)
3061 {
3062 return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
3063 &init_on_free);
3064 }
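
/*
 * Editor's sketch (simplified from what the page allocator does, and assuming
 * a lowmem page): where the two init-on-alloc/init-on-free policies are
 * consulted on the allocation and free paths.
 */
static inline void example_apply_init_policy(struct page *page, gfp_t gfp,
					     bool freeing)
{
	if (!freeing && want_init_on_alloc(gfp))
		clear_page(page_address(page));
	if (freeing && want_init_on_free())
		clear_page(page_address(page));
}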
3065
3066 extern bool _debug_pagealloc_enabled_early;
3067 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
3068
3069 static inline bool debug_pagealloc_enabled(void)
3070 {
3071 return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
3072 _debug_pagealloc_enabled_early;
3073 }
3074
/*
 * Static-key variant for hot paths; like the early check above, it only
 * gives a reliable answer after init_mem_debugging_and_hardening() has run.
 */
3079 static inline bool debug_pagealloc_enabled_static(void)
3080 {
3081 if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3082 return false;
3083
3084 return static_branch_unlikely(&_debug_pagealloc_enabled);
3085 }
3086
3087 #ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * To support DEBUG_PAGEALLOC the architecture must be able to map and unmap
 * pages in its linear mapping at runtime.
 */
3092 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3093
3094 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3095 {
3096 if (debug_pagealloc_enabled_static())
3097 __kernel_map_pages(page, numpages, 1);
3098 }
3099
3100 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3101 {
3102 if (debug_pagealloc_enabled_static())
3103 __kernel_map_pages(page, numpages, 0);
3104 }
3105 #else
3106 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3107 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3108 #endif
3109
3110 #ifdef __HAVE_ARCH_GATE_AREA
3111 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3112 extern int in_gate_area_no_mm(unsigned long addr);
3113 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3114 #else
3115 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3116 {
3117 return NULL;
3118 }
3119 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3120 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3121 {
3122 return 0;
3123 }
3124 #endif
3125
3126 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3127
3128 #ifdef CONFIG_SYSCTL
3129 extern int sysctl_drop_caches;
3130 int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
3131 loff_t *);
3132 #endif
3133
3134 void drop_slab(void);
3135
3136 #ifndef CONFIG_MMU
3137 #define randomize_va_space 0
3138 #else
3139 extern int randomize_va_space;
3140 #endif
3141
3142 const char * arch_vma_name(struct vm_area_struct *vma);
3143 #ifdef CONFIG_MMU
3144 void print_vma_addr(char *prefix, unsigned long rip);
3145 #else
3146 static inline void print_vma_addr(char *prefix, unsigned long rip)
3147 {
3148 }
3149 #endif
3150
3151 void *sparse_buffer_alloc(unsigned long size);
3152 struct page * __populate_section_memmap(unsigned long pfn,
3153 unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3154 struct dev_pagemap *pgmap);
3155 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3156 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3157 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3158 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3159 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3160 struct vmem_altmap *altmap, struct page *reuse);
3161 void *vmemmap_alloc_block(unsigned long size, int node);
3162 struct vmem_altmap;
3163 void *vmemmap_alloc_block_buf(unsigned long size, int node,
3164 struct vmem_altmap *altmap);
3165 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3166 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3167 int node, struct vmem_altmap *altmap);
3168 int vmemmap_populate(unsigned long start, unsigned long end, int node,
3169 struct vmem_altmap *altmap);
3170 void vmemmap_populate_print_last(void);
3171 #ifdef CONFIG_MEMORY_HOTPLUG
3172 void vmemmap_free(unsigned long start, unsigned long end,
3173 struct vmem_altmap *altmap);
3174 #endif
3175 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
3176 unsigned long nr_pages);
3177
3178 enum mf_flags {
3179 MF_COUNT_INCREASED = 1 << 0,
3180 MF_ACTION_REQUIRED = 1 << 1,
3181 MF_MUST_KILL = 1 << 2,
3182 MF_SOFT_OFFLINE = 1 << 3,
3183 MF_UNPOISON = 1 << 4,
3184 MF_SW_SIMULATED = 1 << 5,
3185 MF_NO_RETRY = 1 << 6,
3186 };
3187 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
3188 unsigned long count, int mf_flags);
3189 extern int memory_failure(unsigned long pfn, int flags);
3190 extern void memory_failure_queue(unsigned long pfn, int flags);
3191 extern void memory_failure_queue_kick(int cpu);
3192 extern int unpoison_memory(unsigned long pfn);
3193 extern int sysctl_memory_failure_early_kill;
3194 extern int sysctl_memory_failure_recovery;
3195 extern void shake_page(struct page *p);
3196 extern atomic_long_t num_poisoned_pages __read_mostly;
3197 extern int soft_offline_page(unsigned long pfn, int flags);
3198 #ifdef CONFIG_MEMORY_FAILURE
3199 extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
3200 #else
3201 static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
3202 {
3203 return 0;
3204 }
3205 #endif
3206
3207 #ifndef arch_memory_failure
3208 static inline int arch_memory_failure(unsigned long pfn, int flags)
3209 {
3210 return -ENXIO;
3211 }
3212 #endif
3213
3214 #ifndef arch_is_platform_page
3215 static inline bool arch_is_platform_page(u64 paddr)
3216 {
3217 return false;
3218 }
3219 #endif
3220
/* Possible outcomes of the per-page-state handlers in memory_failure(). */
3224 enum mf_result {
3225 MF_IGNORED,
3226 MF_FAILED,
3227 MF_DELAYED,
3228 MF_RECOVERED,
3229 };
3230
3231 enum mf_action_page_type {
3232 MF_MSG_KERNEL,
3233 MF_MSG_KERNEL_HIGH_ORDER,
3234 MF_MSG_SLAB,
3235 MF_MSG_DIFFERENT_COMPOUND,
3236 MF_MSG_HUGE,
3237 MF_MSG_FREE_HUGE,
3238 MF_MSG_UNMAP_FAILED,
3239 MF_MSG_DIRTY_SWAPCACHE,
3240 MF_MSG_CLEAN_SWAPCACHE,
3241 MF_MSG_DIRTY_MLOCKED_LRU,
3242 MF_MSG_CLEAN_MLOCKED_LRU,
3243 MF_MSG_DIRTY_UNEVICTABLE_LRU,
3244 MF_MSG_CLEAN_UNEVICTABLE_LRU,
3245 MF_MSG_DIRTY_LRU,
3246 MF_MSG_CLEAN_LRU,
3247 MF_MSG_TRUNCATED_LRU,
3248 MF_MSG_BUDDY,
3249 MF_MSG_DAX,
3250 MF_MSG_UNSPLIT_THP,
3251 MF_MSG_UNKNOWN,
3252 };
3253
3254 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
3255 extern void clear_huge_page(struct page *page,
3256 unsigned long addr_hint,
3257 unsigned int pages_per_huge_page);
3258 extern void copy_user_huge_page(struct page *dst, struct page *src,
3259 unsigned long addr_hint,
3260 struct vm_area_struct *vma,
3261 unsigned int pages_per_huge_page);
3262 extern long copy_huge_page_from_user(struct page *dst_page,
3263 const void __user *usr_src,
3264 unsigned int pages_per_huge_page,
3265 bool allow_pagefault);

/*
 * vma_is_special_huge - does this VMA use "special" huge page-table entries,
 * i.e. DAX or a PFNMAP/MIXEDMAP file mapping where huge PMD/PUD entries have
 * no corresponding struct page managed by the core MM?
 */
3277 static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
3278 {
3279 return vma_is_dax(vma) || (vma->vm_file &&
3280 (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
3281 }
3282
3283 #endif
3284
3285 #ifdef CONFIG_DEBUG_PAGEALLOC
3286 extern unsigned int _debug_guardpage_minorder;
3287 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3288
3289 static inline unsigned int debug_guardpage_minorder(void)
3290 {
3291 return _debug_guardpage_minorder;
3292 }
3293
3294 static inline bool debug_guardpage_enabled(void)
3295 {
3296 return static_branch_unlikely(&_debug_guardpage_enabled);
3297 }
3298
3299 static inline bool page_is_guard(struct page *page)
3300 {
3301 if (!debug_guardpage_enabled())
3302 return false;
3303
3304 return PageGuard(page);
3305 }
3306 #else
3307 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3308 static inline bool debug_guardpage_enabled(void) { return false; }
3309 static inline bool page_is_guard(struct page *page) { return false; }
3310 #endif
3311
3312 #if MAX_NUMNODES > 1
3313 void __init setup_nr_node_ids(void);
3314 #else
3315 static inline void setup_nr_node_ids(void) {}
3316 #endif
3317
3318 extern int memcmp_pages(struct page *page1, struct page *page2);
3319
3320 static inline int pages_identical(struct page *page1, struct page *page2)
3321 {
3322 return !memcmp_pages(page1, page2);
3323 }
3324
3325 #ifdef CONFIG_MAPPING_DIRTY_HELPERS
3326 unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
3327 pgoff_t first_index, pgoff_t nr,
3328 pgoff_t bitmap_pgoff,
3329 unsigned long *bitmap,
3330 pgoff_t *start,
3331 pgoff_t *end);
3332
3333 unsigned long wp_shared_mapping_range(struct address_space *mapping,
3334 pgoff_t first_index, pgoff_t nr);
3335 #endif
3336
3337 extern int sysctl_nr_trim_pages;
3338
3339 #ifdef CONFIG_PRINTK
3340 void mem_dump_obj(void *object);
3341 #else
3342 static inline void mem_dump_obj(void *object) {}
3343 #endif
3344
/*
 * seal_check_future_write - check for F_SEAL_FUTURE_WRITE at mmap() time.
 * @seals: the seals of the file being mapped, @vma: the VMA being set up.
 * Returns 0, or -EPERM if a writable shared mapping of a future-write-sealed
 * file is being requested.
 */
3353 static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
3354 {
3355 if (seals & F_SEAL_FUTURE_WRITE) {
		/*
		 * New PROT_WRITE | MAP_SHARED mappings are not allowed once
		 * the "future write" seal is active.
		 */
3360 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
3361 return -EPERM;
3362
		/*
		 * A sealed file may still be mapped MAP_SHARED read-only, so
		 * drop VM_MAYWRITE to keep mprotect() from upgrading it to
		 * writable later.  Only shared mappings need this; private
		 * mappings are never written back to the file.
		 */
3370 if (vma->vm_flags & VM_SHARED)
3371 vma->vm_flags &= ~(VM_MAYWRITE);
3372 }
3373
3374 return 0;
3375 }
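
/*
 * Editor's sketch: how a memfd/tmpfs-style ->mmap() might use the helper;
 * how @seals is obtained from the backing inode is left out and the wrapper
 * name is hypothetical.
 */
static inline int example_mmap_checks_seals(struct vm_area_struct *vma,
					    int seals)
{
	int ret = seal_check_future_write(seals, vma);

	if (ret)
		return ret;
	/* ... continue with the normal mmap setup ... */
	return 0;
}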
3376
3377 #ifdef CONFIG_ANON_VMA_NAME
3378 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
3379 unsigned long len_in,
3380 struct anon_vma_name *anon_name);
3381 #else
3382 static inline int
3383 madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
3384 unsigned long len_in, struct anon_vma_name *anon_name) {
3385 return 0;
3386 }
3387 #endif
3388
/*
 * Zap flags: ZAP_FLAG_DROP_MARKER asks the unmap path to also drop special
 * pte markers (e.g. the userfaultfd write-protect marker on file-backed
 * memory) rather than preserving them.
 */
3395 #define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
3396
3397 #endif