/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC|__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
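
/*
 * Illustrative use only (hypothetical caller, not part of this header):
 * the warning fires at most once per boot and is suppressed entirely for
 * __GFP_NOWARN requests, e.g.
 *
 *	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
 *		return NULL;
 */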

void page_writeback_init(void);

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/early_ioremap.c
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					unsigned long size, pgprot_t prot);

/*
 * in mm/vmscan.c:
 */
int isolate_lru_page(struct page *page);
int folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zoneref and spread_dirty_pages are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * If @buddy_pfn is not NULL, it is set to the buddy's pfn whether or not a
 * valid buddy is found.
 *
 * Return: the buddy page validated by page_is_buddy(), or NULL if the pages
 * cannot merge at this order.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
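
/*
 * Illustrative only (hypothetical caller, not part of this header): a
 * __free_one_page()-style merge loop would use find_buddy_page_pfn() like
 *
 *	while (order < MAX_ORDER - 1) {
 *		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *		if (!buddy)
 *			break;
 *		(detach buddy from its free list, then combine:)
 *		combined_pfn = buddy_pfn & pfn;
 *		page = page + (combined_pfn - pfn);
 *		pfn = combined_pfn;
 *		order++;
 *	}
 */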

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */

	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one of a certain size is made available by the free
 * scanner.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);

/*
 * mlock_vma_page() and munlock_vma_page():
 * should be called with vma's mmap lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(),
 * munlock at the end of page_remove_rmap(); but new anon
 * pages are managed by lru_cache_add_inactive_or_unevictable()
 * calling mlock_new_page().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma, bool compound)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	mlock_vma_folio(page_folio(page), vma, compound);
}

void munlock_page(struct page *page);
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !PageTransCompound(page)))
		munlock_page(page);
}
void mlock_new_page(struct page *page);
bool need_mlock_page_drain(int cpu);
void mlock_page_drain_local(void);
void mlock_page_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * Return the start of user virtual address at the specific offset within
 * a vma.
 */
static inline unsigned long
vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
		  struct vm_area_struct *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
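
/*
 * Worked example with hypothetical values: if vma->vm_start = 0x7f0000000000,
 * vma->vm_pgoff = 0x10 and PAGE_SHIFT = 12, then a single page at pgoff 0x12
 * maps to 0x7f0000000000 + ((0x12 - 0x10) << 12) = 0x7f0000002000, provided
 * that address still lies below vma->vm_end; otherwise -EFAULT is returned.
 */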

/*
 * Return the start of user virtual address of a page within a vma.
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
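
/*
 * Sketch of the expected maybe_unlock_mmap_for_io() calling pattern
 * (hypothetical caller, cf. the filemap fault paths): accumulate fpin
 * across every point that may drop the mmap lock, and if it comes back
 * non-NULL, release it and ask for a retry:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */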
#else
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_page(struct page *page) { }
static inline bool need_mlock_page_drain(int cpu) { return false; }
static inline void mlock_page_drain_local(void) { }
static inline void mlock_page_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
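
/*
 * Illustrative only (hypothetical caller, cf. the gigantic-page clear/copy
 * helpers in mm/memory.c): walking every subpage of a gigantic page
 *
 *	p = mem_map_offset(page, 0);
 *	for (i = 0; i < pages_per_huge_page;
 *	     i++, p = mem_map_next(p, page, i)) {
 *		... operate on subpage p ...
 *	}
 */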

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

#ifdef CONFIG_MEMORY_FAILURE
void clear_hwpoisoned_pages(struct page *memmap, int nr_pages);
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
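
/*
 * For illustration (hypothetical caller): the ALLOC_WMARK_* values are an
 * index into the zone watermarks, so the watermark to test against is
 * selected with
 *
 *	mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *
 * wmark_pages() comes from include/linux/mmzone.h; the snippet only makes
 * the index encoding explicit.
 */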

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);

DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

extern bool mirrored_kernelcore;

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when without soft-dirty being compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, then !(vm_flags & VM_SOFTDIRTY)
	 * will be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * vma flag is not set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

#endif	/* __MM_INTERNAL_H */