// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ian Molton's ARM version)
 */
0018 #include <linux/stddef.h>
0019 #include <linux/mm.h>
0020 #include <linux/highmem.h>
0021 #include <linux/swap.h>
0022 #include <linux/swapops.h>
0023 #include <linux/interrupt.h>
0024 #include <linux/pagemap.h>
0025 #include <linux/jiffies.h>
0026 #include <linux/memblock.h>
0027 #include <linux/compiler.h>
0028 #include <linux/kernel.h>
0029 #include <linux/kasan.h>
0030 #include <linux/module.h>
0031 #include <linux/suspend.h>
0032 #include <linux/pagevec.h>
0033 #include <linux/blkdev.h>
0034 #include <linux/slab.h>
0035 #include <linux/ratelimit.h>
0036 #include <linux/oom.h>
0037 #include <linux/topology.h>
0038 #include <linux/sysctl.h>
0039 #include <linux/cpu.h>
0040 #include <linux/cpuset.h>
0041 #include <linux/memory_hotplug.h>
0042 #include <linux/nodemask.h>
0043 #include <linux/vmalloc.h>
0044 #include <linux/vmstat.h>
0045 #include <linux/mempolicy.h>
0046 #include <linux/memremap.h>
0047 #include <linux/stop_machine.h>
0048 #include <linux/random.h>
0049 #include <linux/sort.h>
0050 #include <linux/pfn.h>
0051 #include <linux/backing-dev.h>
0052 #include <linux/fault-inject.h>
0053 #include <linux/page-isolation.h>
0054 #include <linux/debugobjects.h>
0055 #include <linux/kmemleak.h>
0056 #include <linux/compaction.h>
0057 #include <trace/events/kmem.h>
0058 #include <trace/events/oom.h>
0059 #include <linux/prefetch.h>
0060 #include <linux/mm_inline.h>
0061 #include <linux/mmu_notifier.h>
0062 #include <linux/migrate.h>
0063 #include <linux/hugetlb.h>
0064 #include <linux/sched/rt.h>
0065 #include <linux/sched/mm.h>
0066 #include <linux/page_owner.h>
0067 #include <linux/page_table_check.h>
0068 #include <linux/kthread.h>
0069 #include <linux/memcontrol.h>
0070 #include <linux/ftrace.h>
0071 #include <linux/lockdep.h>
0072 #include <linux/nmi.h>
0073 #include <linux/psi.h>
0074 #include <linux/padata.h>
0075 #include <linux/khugepaged.h>
0076 #include <linux/buffer_head.h>
0077 #include <linux/delayacct.h>
0078 #include <asm/sections.h>
0079 #include <asm/tlbflush.h>
0080 #include <asm/div64.h>
0081 #include "internal.h"
0082 #include "shuffle.h"
0083 #include "page_reporting.h"
0084 #include "swap.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE ((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page, reporting
 * it and marking it "reported" - it only skips notifying the free page
 * reporting infrastructure about a newly freed page, e.g. when temporarily
 * pulling a page from a freelist and putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page at the tail of the freelist and skip
 * shuffling, so the page is less likely to be allocated again soon. Purely an
 * optimisation hint, used e.g. when onlining fresh memory or when handing
 * back untouched pages from page isolation / free page reporting.
 */
#define FPI_TO_TAIL ((__force fpi_t)BIT(1))

/*
 * Don't poison the freed memory with KASAN (only honoured by the tag-based
 * KASAN modes). Used for pages that were never exposed to the rest of the
 * kernel, e.g. pages freed straight from memblock during boot, where
 * poisoning would only add overhead.
 */
#define FPI_SKIP_KASAN_POISON ((__force fpi_t)BIT(2))

/* Prevent more than one updater of the zone percpu pageset ->high and ->batch fields. */
0126 static DEFINE_MUTEX(pcp_batch_high_lock);
0127 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
0128
0129 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
0134 #define pcp_trylock_prepare(flags) do { } while (0)
0135 #define pcp_trylock_finish(flag) do { } while (0)
0136 #else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
0139 #define pcp_trylock_prepare(flags) local_irq_save(flags)
0140 #define pcp_trylock_finish(flags) local_irq_restore(flags)
0141 #endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
0151 #ifndef CONFIG_PREEMPT_RT
0152 #define pcpu_task_pin() preempt_disable()
0153 #define pcpu_task_unpin() preempt_enable()
0154 #else
0155 #define pcpu_task_pin() migrate_disable()
0156 #define pcpu_task_unpin() migrate_enable()
0157 #endif

/*
 * Generic helpers to look up a per-CPU structure and take its embedded
 * spinlock while the task is pinned to the current CPU. Each lock helper
 * must be paired with the matching unlock helper below.
 */
0163 #define pcpu_spin_lock(type, member, ptr) \
0164 ({ \
0165 type *_ret; \
0166 pcpu_task_pin(); \
0167 _ret = this_cpu_ptr(ptr); \
0168 spin_lock(&_ret->member); \
0169 _ret; \
0170 })
0171
0172 #define pcpu_spin_lock_irqsave(type, member, ptr, flags) \
0173 ({ \
0174 type *_ret; \
0175 pcpu_task_pin(); \
0176 _ret = this_cpu_ptr(ptr); \
0177 spin_lock_irqsave(&_ret->member, flags); \
0178 _ret; \
0179 })
0180
0181 #define pcpu_spin_trylock_irqsave(type, member, ptr, flags) \
0182 ({ \
0183 type *_ret; \
0184 pcpu_task_pin(); \
0185 _ret = this_cpu_ptr(ptr); \
0186 if (!spin_trylock_irqsave(&_ret->member, flags)) { \
0187 pcpu_task_unpin(); \
0188 _ret = NULL; \
0189 } \
0190 _ret; \
0191 })
0192
0193 #define pcpu_spin_unlock(member, ptr) \
0194 ({ \
0195 spin_unlock(&ptr->member); \
0196 pcpu_task_unpin(); \
0197 })
0198
0199 #define pcpu_spin_unlock_irqrestore(member, ptr, flags) \
0200 ({ \
0201 spin_unlock_irqrestore(&ptr->member, flags); \
0202 pcpu_task_unpin(); \
0203 })

/* struct per_cpu_pages specific helpers. */
0206 #define pcp_spin_lock(ptr) \
0207 pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
0208
0209 #define pcp_spin_lock_irqsave(ptr, flags) \
0210 pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)
0211
0212 #define pcp_spin_trylock_irqsave(ptr, flags) \
0213 pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)
0214
0215 #define pcp_spin_unlock(ptr) \
0216 pcpu_spin_unlock(lock, ptr)
0217
0218 #define pcp_spin_unlock_irqrestore(ptr, flags) \
0219 pcpu_spin_unlock_irqrestore(lock, ptr, flags)
0220 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
0221 DEFINE_PER_CPU(int, numa_node);
0222 EXPORT_PER_CPU_SYMBOL(numa_node);
0223 #endif
0224
0225 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
0226
0227 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
0234 DEFINE_PER_CPU(int, _numa_mem_);
0235 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
0236 #endif
0237
0238 static DEFINE_MUTEX(pcpu_drain_mutex);
0239
0240 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
0241 volatile unsigned long latent_entropy __latent_entropy;
0242 EXPORT_SYMBOL(latent_entropy);
0243 #endif

/*
 * Array of node states.
 */
0248 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
0249 [N_POSSIBLE] = NODE_MASK_ALL,
0250 [N_ONLINE] = { { [0] = 1UL } },
0251 #ifndef CONFIG_NUMA
0252 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
0253 #ifdef CONFIG_HIGHMEM
0254 [N_HIGH_MEMORY] = { { [0] = 1UL } },
0255 #endif
0256 [N_MEMORY] = { { [0] = 1UL } },
0257 [N_CPU] = { { [0] = 1UL } },
0258 #endif
0259 };
0260 EXPORT_SYMBOL(node_states);
0261
0262 atomic_long_t _totalram_pages __read_mostly;
0263 EXPORT_SYMBOL(_totalram_pages);
0264 unsigned long totalreserve_pages __read_mostly;
0265 unsigned long totalcma_pages __read_mostly;
0266
0267 int percpu_pagelist_high_fraction;
0268 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
0269 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
0270 EXPORT_SYMBOL(init_on_alloc);
0271
0272 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
0273 EXPORT_SYMBOL(init_on_free);
0274
0275 static bool _init_on_alloc_enabled_early __read_mostly
0276 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
0277 static int __init early_init_on_alloc(char *buf)
0278 {
0279
0280 return kstrtobool(buf, &_init_on_alloc_enabled_early);
0281 }
0282 early_param("init_on_alloc", early_init_on_alloc);
0283
0284 static bool _init_on_free_enabled_early __read_mostly
0285 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
0286 static int __init early_init_on_free(char *buf)
0287 {
0288 return kstrtobool(buf, &_init_on_free_enabled_early);
0289 }
0290 early_param("init_on_free", early_init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index the page was put on (e.g. MIGRATE_HIGHATOMIC pages land on the
 * MIGRATE_MOVABLE list), so get_pcppage_migratetype() must only be used for
 * look-ups that do not require the exact migratetype.
 */
0300 static inline int get_pcppage_migratetype(struct page *page)
0301 {
0302 return page->index;
0303 }
0304
0305 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
0306 {
0307 page->index = migratetype;
0308 }
0309
0310 #ifdef CONFIG_PM_SLEEP

/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended.  To avoid races with the
 * suspend/hibernate code, they should always be called with
 * system_transition_mutex held (gfp_allowed_mask should also only be modified
 * with system_transition_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */
0321 static gfp_t saved_gfp_mask;
0322
0323 void pm_restore_gfp_mask(void)
0324 {
0325 WARN_ON(!mutex_is_locked(&system_transition_mutex));
0326 if (saved_gfp_mask) {
0327 gfp_allowed_mask = saved_gfp_mask;
0328 saved_gfp_mask = 0;
0329 }
0330 }
0331
0332 void pm_restrict_gfp_mask(void)
0333 {
0334 WARN_ON(!mutex_is_locked(&system_transition_mutex));
0335 WARN_ON(saved_gfp_mask);
0336 saved_gfp_mask = gfp_allowed_mask;
0337 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
0338 }
0339
0340 bool pm_suspended_storage(void)
0341 {
0342 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
0343 return false;
0344 return true;
0345 }
0346 #endif
0347
0348 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
0349 unsigned int pageblock_order __read_mostly;
0350 #endif
0351
0352 static void __free_pages_ok(struct page *page, unsigned int order,
0353 fpi_t fpi_flags);

/*
 * sysctl_lowmem_reserve_ratio controls how strongly lower zones are protected
 * from allocations that could have been satisfied from higher zones: a ratio
 * of N reserves roughly 1/N of the higher zones' managed pages in the lower
 * zone. A ratio of 0 disables the reservation for that zone.
 */
0366 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
0367 #ifdef CONFIG_ZONE_DMA
0368 [ZONE_DMA] = 256,
0369 #endif
0370 #ifdef CONFIG_ZONE_DMA32
0371 [ZONE_DMA32] = 256,
0372 #endif
0373 [ZONE_NORMAL] = 32,
0374 #ifdef CONFIG_HIGHMEM
0375 [ZONE_HIGHMEM] = 0,
0376 #endif
0377 [ZONE_MOVABLE] = 0,
0378 };
0379
0380 static char * const zone_names[MAX_NR_ZONES] = {
0381 #ifdef CONFIG_ZONE_DMA
0382 "DMA",
0383 #endif
0384 #ifdef CONFIG_ZONE_DMA32
0385 "DMA32",
0386 #endif
0387 "Normal",
0388 #ifdef CONFIG_HIGHMEM
0389 "HighMem",
0390 #endif
0391 "Movable",
0392 #ifdef CONFIG_ZONE_DEVICE
0393 "Device",
0394 #endif
0395 };
0396
0397 const char * const migratetype_names[MIGRATE_TYPES] = {
0398 "Unmovable",
0399 "Movable",
0400 "Reclaimable",
0401 "HighAtomic",
0402 #ifdef CONFIG_CMA
0403 "CMA",
0404 #endif
0405 #ifdef CONFIG_MEMORY_ISOLATION
0406 "Isolate",
0407 #endif
0408 };
0409
0410 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
0411 [NULL_COMPOUND_DTOR] = NULL,
0412 [COMPOUND_PAGE_DTOR] = free_compound_page,
0413 #ifdef CONFIG_HUGETLB_PAGE
0414 [HUGETLB_PAGE_DTOR] = free_huge_page,
0415 #endif
0416 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0417 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
0418 #endif
0419 };
0420
0421 int min_free_kbytes = 1024;
0422 int user_min_free_kbytes = -1;
0423 int watermark_boost_factor __read_mostly = 15000;
0424 int watermark_scale_factor = 10;
0425
0426 static unsigned long nr_kernel_pages __initdata;
0427 static unsigned long nr_all_pages __initdata;
0428 static unsigned long dma_reserve __initdata;
0429
0430 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
0431 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
0432 static unsigned long required_kernelcore __initdata;
0433 static unsigned long required_kernelcore_percent __initdata;
0434 static unsigned long required_movablecore __initdata;
0435 static unsigned long required_movablecore_percent __initdata;
0436 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
0437 bool mirrored_kernelcore __initdata_memblock;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
0440 int movable_zone;
0441 EXPORT_SYMBOL(movable_zone);
0442
0443 #if MAX_NUMNODES > 1
0444 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
0445 unsigned int nr_online_nodes __read_mostly = 1;
0446 EXPORT_SYMBOL(nr_node_ids);
0447 EXPORT_SYMBOL(nr_online_nodes);
0448 #endif
0449
0450 int page_group_by_mobility_disabled __read_mostly;
0451
0452 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has run, all deferred pages are initialised and
 * this static key can be permanently disabled.
 */
0458 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
0459
0460 static inline bool deferred_pages_enabled(void)
0461 {
0462 return static_branch_unlikely(&deferred_pages);
0463 }

/* Returns true if the struct page for the pfn is still uninitialised. */
0466 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
0467 {
0468 int nid = early_pfn_to_nid(pfn);
0469
0470 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
0471 return true;
0472
0473 return false;
0474 }

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle, when it can be parallelised.
 */
0480 static bool __meminit
0481 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
0482 {
0483 static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * The static prev_end_pfn tracks the end of the previously initialised
	 * zone. No locking is needed: this runs very early in boot, before
	 * smp_init().
	 */
0489 if (prev_end_pfn != end_pfn) {
0490 prev_end_pfn = end_pfn;
0491 nr_initialised = 0;
0492 }

	/* Always populate low zones for address-constrained allocations. */
0495 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
0496 return false;
0497
0498 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
0499 return true;
0500
0501
0502
0503
0504 nr_initialised++;
0505 if ((nr_initialised > PAGES_PER_SECTION) &&
0506 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
0507 NODE_DATA(nid)->first_deferred_pfn = pfn;
0508 return true;
0509 }
0510 return false;
0511 }
0512 #else
0513 static inline bool deferred_pages_enabled(void)
0514 {
0515 return false;
0516 }
0517
0518 static inline bool early_page_uninitialised(unsigned long pfn)
0519 {
0520 return false;
0521 }
0522
0523 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
0524 {
0525 return false;
0526 }
0527 #endif
0528
0529
0530 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
0531 unsigned long pfn)
0532 {
0533 #ifdef CONFIG_SPARSEMEM
0534 return section_to_usemap(__pfn_to_section(pfn));
0535 #else
0536 return page_zone(page)->pageblock_flags;
0537 #endif
0538 }
0539
0540 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
0541 {
0542 #ifdef CONFIG_SPARSEMEM
0543 pfn &= (PAGES_PER_SECTION-1);
0544 #else
0545 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
0546 #endif
0547 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
0548 }
0549
0550 static __always_inline
0551 unsigned long __get_pfnblock_flags_mask(const struct page *page,
0552 unsigned long pfn,
0553 unsigned long mask)
0554 {
0555 unsigned long *bitmap;
0556 unsigned long bitidx, word_bitidx;
0557 unsigned long word;
0558
0559 bitmap = get_pageblock_bitmap(page, pfn);
0560 bitidx = pfn_to_bitidx(page, pfn);
0561 word_bitidx = bitidx / BITS_PER_LONG;
0562 bitidx &= (BITS_PER_LONG-1);

	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
0568 word = READ_ONCE(bitmap[word_bitidx]);
0569 return (word >> bitidx) & mask;
0570 }

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
0580 unsigned long get_pfnblock_flags_mask(const struct page *page,
0581 unsigned long pfn, unsigned long mask)
0582 {
0583 return __get_pfnblock_flags_mask(page, pfn, mask);
0584 }
0585
0586 static __always_inline int get_pfnblock_migratetype(const struct page *page,
0587 unsigned long pfn)
0588 {
0589 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
0590 }

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
0599 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
0600 unsigned long pfn,
0601 unsigned long mask)
0602 {
0603 unsigned long *bitmap;
0604 unsigned long bitidx, word_bitidx;
0605 unsigned long word;
0606
0607 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
0608 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
0609
0610 bitmap = get_pageblock_bitmap(page, pfn);
0611 bitidx = pfn_to_bitidx(page, pfn);
0612 word_bitidx = bitidx / BITS_PER_LONG;
0613 bitidx &= (BITS_PER_LONG-1);
0614
0615 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
0616
0617 mask <<= bitidx;
0618 flags <<= bitidx;
0619
0620 word = READ_ONCE(bitmap[word_bitidx]);
0621 do {
0622 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
0623 }
0624
0625 void set_pageblock_migratetype(struct page *page, int migratetype)
0626 {
0627 if (unlikely(page_group_by_mobility_disabled &&
0628 migratetype < MIGRATE_PCPTYPES))
0629 migratetype = MIGRATE_UNMOVABLE;
0630
0631 set_pfnblock_flags_mask(page, (unsigned long)migratetype,
0632 page_to_pfn(page), MIGRATETYPE_MASK);
0633 }
0634
0635 #ifdef CONFIG_DEBUG_VM
0636 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
0637 {
0638 int ret = 0;
0639 unsigned seq;
0640 unsigned long pfn = page_to_pfn(page);
0641 unsigned long sp, start_pfn;
0642
0643 do {
0644 seq = zone_span_seqbegin(zone);
0645 start_pfn = zone->zone_start_pfn;
0646 sp = zone->spanned_pages;
0647 if (!zone_spans_pfn(zone, pfn))
0648 ret = 1;
0649 } while (zone_span_seqretry(zone, seq));
0650
0651 if (ret)
0652 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
0653 pfn, zone_to_nid(zone), zone->name,
0654 start_pfn, start_pfn + sp);
0655
0656 return ret;
0657 }
0658
0659 static int page_is_consistent(struct zone *zone, struct page *page)
0660 {
0661 if (zone != page_zone(page))
0662 return 0;
0663
0664 return 1;
0665 }

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
0669 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
0670 {
0671 if (page_outside_zone_boundaries(zone, page))
0672 return 1;
0673 if (!page_is_consistent(zone, page))
0674 return 1;
0675
0676 return 0;
0677 }
0678 #else
0679 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
0680 {
0681 return 0;
0682 }
0683 #endif
0684
0685 static void bad_page(struct page *page, const char *reason)
0686 {
0687 static unsigned long resume;
0688 static unsigned long nr_shown;
0689 static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
0695 if (nr_shown == 60) {
0696 if (time_before(jiffies, resume)) {
0697 nr_unshown++;
0698 goto out;
0699 }
0700 if (nr_unshown) {
0701 pr_alert(
0702 "BUG: Bad page state: %lu messages suppressed\n",
0703 nr_unshown);
0704 nr_unshown = 0;
0705 }
0706 nr_shown = 0;
0707 }
0708 if (nr_shown++ == 0)
0709 resume = jiffies + 60 * HZ;
0710
0711 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
0712 current->comm, page_to_pfn(page));
0713 dump_page(page, reason);
0714
0715 print_modules();
0716 dump_stack();
0717 out:
0718
0719 page_mapcount_reset(page);
0720 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
0721 }
0722
0723 static inline unsigned int order_to_pindex(int migratetype, int order)
0724 {
0725 int base = order;
0726
0727 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0728 if (order > PAGE_ALLOC_COSTLY_ORDER) {
0729 VM_BUG_ON(order != pageblock_order);
0730 return NR_LOWORDER_PCP_LISTS;
0731 }
0732 #else
0733 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
0734 #endif
0735
0736 return (MIGRATE_PCPTYPES * base) + migratetype;
0737 }
0738
0739 static inline int pindex_to_order(unsigned int pindex)
0740 {
0741 int order = pindex / MIGRATE_PCPTYPES;
0742
0743 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0744 if (pindex == NR_LOWORDER_PCP_LISTS)
0745 order = pageblock_order;
0746 #else
0747 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
0748 #endif
0749
0750 return order;
0751 }
0752
0753 static inline bool pcp_allowed_order(unsigned int order)
0754 {
0755 if (order <= PAGE_ALLOC_COSTLY_ORDER)
0756 return true;
0757 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0758 if (order == pageblock_order)
0759 return true;
0760 #endif
0761 return false;
0762 }
0763
0764 static inline void free_the_page(struct page *page, unsigned int order)
0765 {
0766 if (pcp_allowed_order(order))
0767 free_unref_page(page, order);
0768 else
0769 __free_pages_ok(page, order, FPI_NONE);
0770 }

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit zero of page->compound_head, which otherwise points back at the head
 * page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors; see compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
0787 void free_compound_page(struct page *page)
0788 {
0789 mem_cgroup_uncharge(page_folio(page));
0790 free_the_page(page, compound_order(page));
0791 }
0792
0793 static void prep_compound_head(struct page *page, unsigned int order)
0794 {
0795 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
0796 set_compound_order(page, order);
0797 atomic_set(compound_mapcount_ptr(page), -1);
0798 atomic_set(compound_pincount_ptr(page), 0);
0799 }
0800
0801 static void prep_compound_tail(struct page *head, int tail_idx)
0802 {
0803 struct page *p = head + tail_idx;
0804
0805 p->mapping = TAIL_MAPPING;
0806 set_compound_head(p, head);
0807 }
0808
0809 void prep_compound_page(struct page *page, unsigned int order)
0810 {
0811 int i;
0812 int nr_pages = 1 << order;
0813
0814 __SetPageHead(page);
0815 for (i = 1; i < nr_pages; i++)
0816 prep_compound_tail(page, i);
0817
0818 prep_compound_head(page, order);
0819 }
0820
0821 void destroy_large_folio(struct folio *folio)
0822 {
0823 enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;
0824
0825 VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
0826 compound_page_dtors[dtor](&folio->page);
0827 }
0828
0829 #ifdef CONFIG_DEBUG_PAGEALLOC
0830 unsigned int _debug_guardpage_minorder;
0831
0832 bool _debug_pagealloc_enabled_early __read_mostly
0833 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
0834 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
0835 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
0836 EXPORT_SYMBOL(_debug_pagealloc_enabled);
0837
0838 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
0839
0840 static int __init early_debug_pagealloc(char *buf)
0841 {
0842 return kstrtobool(buf, &_debug_pagealloc_enabled_early);
0843 }
0844 early_param("debug_pagealloc", early_debug_pagealloc);
0845
0846 static int __init debug_guardpage_minorder_setup(char *buf)
0847 {
0848 unsigned long res;
0849
0850 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
0851 pr_err("Bad debug_guardpage_minorder value\n");
0852 return 0;
0853 }
0854 _debug_guardpage_minorder = res;
0855 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
0856 return 0;
0857 }
0858 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
0859
0860 static inline bool set_page_guard(struct zone *zone, struct page *page,
0861 unsigned int order, int migratetype)
0862 {
0863 if (!debug_guardpage_enabled())
0864 return false;
0865
0866 if (order >= debug_guardpage_minorder())
0867 return false;
0868
0869 __SetPageGuard(page);
0870 INIT_LIST_HEAD(&page->buddy_list);
0871 set_page_private(page, order);
0872
0873 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
0874
0875 return true;
0876 }
0877
0878 static inline void clear_page_guard(struct zone *zone, struct page *page,
0879 unsigned int order, int migratetype)
0880 {
0881 if (!debug_guardpage_enabled())
0882 return;
0883
0884 __ClearPageGuard(page);
0885
0886 set_page_private(page, 0);
0887 if (!is_migrate_isolate(migratetype))
0888 __mod_zone_freepage_state(zone, (1 << order), migratetype);
0889 }
0890 #else
0891 static inline bool set_page_guard(struct zone *zone, struct page *page,
0892 unsigned int order, int migratetype) { return false; }
0893 static inline void clear_page_guard(struct zone *zone, struct page *page,
0894 unsigned int order, int migratetype) {}
0895 #endif

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
0903 void init_mem_debugging_and_hardening(void)
0904 {
0905 bool page_poisoning_requested = false;
0906
0907 #ifdef CONFIG_PAGE_POISONING
0908
0909
0910
0911
0912 if (page_poisoning_enabled() ||
0913 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
0914 debug_pagealloc_enabled())) {
0915 static_branch_enable(&_page_poisoning_enabled);
0916 page_poisoning_requested = true;
0917 }
0918 #endif
0919
0920 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
0921 page_poisoning_requested) {
0922 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
0923 "will take precedence over init_on_alloc and init_on_free\n");
0924 _init_on_alloc_enabled_early = false;
0925 _init_on_free_enabled_early = false;
0926 }
0927
0928 if (_init_on_alloc_enabled_early)
0929 static_branch_enable(&init_on_alloc);
0930 else
0931 static_branch_disable(&init_on_alloc);
0932
0933 if (_init_on_free_enabled_early)
0934 static_branch_enable(&init_on_free);
0935 else
0936 static_branch_disable(&init_on_free);
0937
0938 #ifdef CONFIG_DEBUG_PAGEALLOC
0939 if (!debug_pagealloc_enabled())
0940 return;
0941
0942 static_branch_enable(&_debug_pagealloc_enabled);
0943
0944 if (!debug_guardpage_minorder())
0945 return;
0946
0947 static_branch_enable(&_debug_guardpage_enabled);
0948 #endif
0949 }
0950
0951 static inline void set_buddy_order(struct page *page, unsigned int order)
0952 {
0953 set_page_private(page, order);
0954 __SetPageBuddy(page);
0955 }
0956
0957 #ifdef CONFIG_COMPACTION
0958 static inline struct capture_control *task_capc(struct zone *zone)
0959 {
0960 struct capture_control *capc = current->capture_control;
0961
0962 return unlikely(capc) &&
0963 !(current->flags & PF_KTHREAD) &&
0964 !capc->page &&
0965 capc->cc->zone == zone ? capc : NULL;
0966 }
0967
0968 static inline bool
0969 compaction_capture(struct capture_control *capc, struct page *page,
0970 int order, int migratetype)
0971 {
0972 if (!capc || order != capc->cc->order)
0973 return false;
0974
0975
0976 if (is_migrate_cma(migratetype) ||
0977 is_migrate_isolate(migratetype))
0978 return false;

	/*
	 * Do not let lower-order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa, but no more than normal fallback logic, which can
	 * have trouble finding a high-order free page.
	 */
0986 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
0987 return false;
0988
0989 capc->page = page;
0990 return true;
0991 }
0992
0993 #else
0994 static inline struct capture_control *task_capc(struct zone *zone)
0995 {
0996 return NULL;
0997 }
0998
0999 static inline bool
1000 compaction_capture(struct capture_control *capc, struct page *page,
1001 int order, int migratetype)
1002 {
1003 return false;
1004 }
1005 #endif
1006
1007
1008 static inline void add_to_free_list(struct page *page, struct zone *zone,
1009 unsigned int order, int migratetype)
1010 {
1011 struct free_area *area = &zone->free_area[order];
1012
1013 list_add(&page->buddy_list, &area->free_list[migratetype]);
1014 area->nr_free++;
1015 }
1016
1017
1018 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
1019 unsigned int order, int migratetype)
1020 {
1021 struct free_area *area = &zone->free_area[order];
1022
1023 list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
1024 area->nr_free++;
1025 }

/*
 * Used for pages which are already on another list. Move the pages to the
 * tail of the target list, so the moved pages won't immediately be considered
 * for allocation again (e.g. an optimisation for memory onlining).
 */
1032 static inline void move_to_free_list(struct page *page, struct zone *zone,
1033 unsigned int order, int migratetype)
1034 {
1035 struct free_area *area = &zone->free_area[order];
1036
1037 list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
1038 }
1039
1040 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
1041 unsigned int order)
1042 {
1043
1044 if (page_reported(page))
1045 __ClearPageReported(page);
1046
1047 list_del(&page->buddy_list);
1048 __ClearPageBuddy(page);
1049 set_page_private(page, 0);
1050 zone->free_area[order].nr_free--;
1051 }

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case,
 * that is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page.
 */
1061 static inline bool
1062 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1063 struct page *page, unsigned int order)
1064 {
1065 unsigned long higher_page_pfn;
1066 struct page *higher_page;
1067
1068 if (order >= MAX_ORDER - 2)
1069 return false;
1070
1071 higher_page_pfn = buddy_pfn & pfn;
1072 higher_page = page + (higher_page_pfn - pfn);
1073
1074 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
1075 NULL) != NULL;
1076 }

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
1102 static inline void __free_one_page(struct page *page,
1103 unsigned long pfn,
1104 struct zone *zone, unsigned int order,
1105 int migratetype, fpi_t fpi_flags)
1106 {
1107 struct capture_control *capc = task_capc(zone);
1108 unsigned long buddy_pfn;
1109 unsigned long combined_pfn;
1110 struct page *buddy;
1111 bool to_tail;
1112
1113 VM_BUG_ON(!zone_is_initialized(zone));
1114 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1115
1116 VM_BUG_ON(migratetype == -1);
1117 if (likely(!is_migrate_isolate(migratetype)))
1118 __mod_zone_freepage_state(zone, 1 << order, migratetype);
1119
1120 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1121 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1122
1123 while (order < MAX_ORDER - 1) {
1124 if (compaction_capture(capc, page, order, migratetype)) {
1125 __mod_zone_freepage_state(zone, -(1 << order),
1126 migratetype);
1127 return;
1128 }
1129
1130 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
1131 if (!buddy)
1132 goto done_merging;
1133
1134 if (unlikely(order >= pageblock_order)) {
			/*
			 * Prevent merging between freepages in pageblocks with
			 * incompatible migratetypes (e.g. isolated or CMA) and
			 * normal pageblocks, as that would corrupt the
			 * per-pageblock freepage accounting. Only relevant for
			 * the infrequent order >= pageblock_order merges.
			 */
1141 int buddy_mt = get_pageblock_migratetype(buddy);
1142
1143 if (migratetype != buddy_mt
1144 && (!migratetype_is_mergeable(migratetype) ||
1145 !migratetype_is_mergeable(buddy_mt)))
1146 goto done_merging;
1147 }

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
1153 if (page_is_guard(buddy))
1154 clear_page_guard(zone, buddy, order, migratetype);
1155 else
1156 del_page_from_free_list(buddy, zone, order);
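		/*
		 * Clearing the order bit (buddy_pfn & pfn) yields the first
		 * pfn of the merged, order+1 sized block.
		 */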
1157 combined_pfn = buddy_pfn & pfn;
1158 page = page + (combined_pfn - pfn);
1159 pfn = combined_pfn;
1160 order++;
1161 }
1162
1163 done_merging:
1164 set_buddy_order(page, order);
1165
1166 if (fpi_flags & FPI_TO_TAIL)
1167 to_tail = true;
1168 else if (is_shuffle_order(order))
1169 to_tail = shuffle_pick_tail();
1170 else
1171 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1172
1173 if (to_tail)
1174 add_to_free_list_tail(page, zone, order, migratetype);
1175 else
1176 add_to_free_list(page, zone, order, migratetype);
1177
1178
1179 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1180 page_reporting_notify_free(order);
1181 }
1182

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page changed underneath us, otherwise 0.
 *
 * Used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free pages will be put onto
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
 */
1196 int split_free_page(struct page *free_page,
1197 unsigned int order, unsigned long split_pfn_offset)
1198 {
1199 struct zone *zone = page_zone(free_page);
1200 unsigned long free_page_pfn = page_to_pfn(free_page);
1201 unsigned long pfn;
1202 unsigned long flags;
1203 int free_page_order;
1204 int mt;
1205 int ret = 0;
1206
1207 if (split_pfn_offset == 0)
1208 return ret;
1209
1210 spin_lock_irqsave(&zone->lock, flags);
1211
1212 if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
1213 ret = -ENOENT;
1214 goto out;
1215 }
1216
1217 mt = get_pageblock_migratetype(free_page);
1218 if (likely(!is_migrate_isolate(mt)))
1219 __mod_zone_freepage_state(zone, -(1UL << order), mt);
1220
1221 del_page_from_free_list(free_page, zone, order);
1222 for (pfn = free_page_pfn;
1223 pfn < free_page_pfn + (1UL << order);) {
1224 int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);
1225
1226 free_page_order = min_t(unsigned int,
1227 pfn ? __ffs(pfn) : order,
1228 __fls(split_pfn_offset));
1229 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
1230 mt, FPI_NONE);
1231 pfn += 1UL << free_page_order;
1232 split_pfn_offset -= (1UL << free_page_order);
1233
1234 if (split_pfn_offset == 0)
1235 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
1236 }
1237 out:
1238 spin_unlock_irqrestore(&zone->lock, flags);
1239 return ret;
1240 }

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
1246 static inline bool page_expected_state(struct page *page,
1247 unsigned long check_flags)
1248 {
1249 if (unlikely(atomic_read(&page->_mapcount) != -1))
1250 return false;
1251
1252 if (unlikely((unsigned long)page->mapping |
1253 page_ref_count(page) |
1254 #ifdef CONFIG_MEMCG
1255 page->memcg_data |
1256 #endif
1257 (page->flags & check_flags)))
1258 return false;
1259
1260 return true;
1261 }
1262
1263 static const char *page_bad_reason(struct page *page, unsigned long flags)
1264 {
1265 const char *bad_reason = NULL;
1266
1267 if (unlikely(atomic_read(&page->_mapcount) != -1))
1268 bad_reason = "nonzero mapcount";
1269 if (unlikely(page->mapping != NULL))
1270 bad_reason = "non-NULL mapping";
1271 if (unlikely(page_ref_count(page) != 0))
1272 bad_reason = "nonzero _refcount";
1273 if (unlikely(page->flags & flags)) {
1274 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1275 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1276 else
1277 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1278 }
1279 #ifdef CONFIG_MEMCG
1280 if (unlikely(page->memcg_data))
1281 bad_reason = "page still charged to cgroup";
1282 #endif
1283 return bad_reason;
1284 }
1285
1286 static void check_free_page_bad(struct page *page)
1287 {
1288 bad_page(page,
1289 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1290 }
1291
1292 static inline int check_free_page(struct page *page)
1293 {
1294 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1295 return 0;
1296
1297
1298 check_free_page_bad(page);
1299 return 1;
1300 }
1301
1302 static int free_tail_pages_check(struct page *head_page, struct page *page)
1303 {
1304 int ret = 1;
1305
1306
1307
1308
1309
1310 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1311
1312 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1313 ret = 0;
1314 goto out;
1315 }
1316 switch (page - head_page) {
1317 case 1:
1318
1319 if (unlikely(compound_mapcount(page))) {
1320 bad_page(page, "nonzero compound_mapcount");
1321 goto out;
1322 }
1323 break;
1324 case 2:
1325
1326
1327
1328
1329 break;
1330 default:
1331 if (page->mapping != TAIL_MAPPING) {
1332 bad_page(page, "corrupted mapping in tail page");
1333 goto out;
1334 }
1335 break;
1336 }
1337 if (unlikely(!PageTail(page))) {
1338 bad_page(page, "PageTail not set");
1339 goto out;
1340 }
1341 if (unlikely(compound_head(page) != head_page)) {
1342 bad_page(page, "compound_head not consistent");
1343 goto out;
1344 }
1345 ret = 0;
1346 out:
1347 page->mapping = NULL;
1348 clear_compound_head(page);
1349 return ret;
1350 }

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. Deferred memory initialization has not yet completed. Poisoning pages
 *    during deferred memory init would greatly lengthen the process and cause
 *    problems on large machines, as it runs with interrupts disabled.
 *    Assuming no pages are referenced before they are ever allocated, this
 *    has no effect on KASAN tracking: the poison is inserted at allocation
 *    time instead.
 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON, see the
 *    comment next to that flag.
 * 3. Skipping poisoning is requested for this specific page via the
 *    PageSkipKASanPoison() page flag.
 */
1373 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
1374 {
1375 return deferred_pages_enabled() ||
1376 (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
1377 (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
1378 PageSkipKASanPoison(page);
1379 }
1380
1381 static void kernel_init_pages(struct page *page, int numpages)
1382 {
1383 int i;
1384
1385
1386 kasan_disable_current();
1387 for (i = 0; i < numpages; i++)
1388 clear_highpage_kasan_tagged(page + i);
1389 kasan_enable_current();
1390 }
1391
1392 static __always_inline bool free_pages_prepare(struct page *page,
1393 unsigned int order, bool check_free, fpi_t fpi_flags)
1394 {
1395 int bad = 0;
1396 bool init = want_init_on_free();
1397
1398 VM_BUG_ON_PAGE(PageTail(page), page);
1399
1400 trace_mm_page_free(page, order);
1401
1402 if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy.
		 * Untie memcg state and reset page's owner.
		 */
1407 if (memcg_kmem_enabled() && PageMemcgKmem(page))
1408 __memcg_kmem_uncharge_page(page, order);
1409 reset_page_owner(page, order);
1410 page_table_check_free(page, order);
1411 return false;
1412 }

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
1418 if (unlikely(order)) {
1419 bool compound = PageCompound(page);
1420 int i;
1421
1422 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1423
1424 if (compound) {
1425 ClearPageDoubleMap(page);
1426 ClearPageHasHWPoisoned(page);
1427 }
1428 for (i = 1; i < (1 << order); i++) {
1429 if (compound)
1430 bad += free_tail_pages_check(page, page + i);
1431 if (unlikely(check_free_page(page + i))) {
1432 bad++;
1433 continue;
1434 }
1435 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1436 }
1437 }
1438 if (PageMappingFlags(page))
1439 page->mapping = NULL;
1440 if (memcg_kmem_enabled() && PageMemcgKmem(page))
1441 __memcg_kmem_uncharge_page(page, order);
1442 if (check_free)
1443 bad += check_free_page(page);
1444 if (bad)
1445 return false;
1446
1447 page_cpupid_reset_last(page);
1448 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1449 reset_page_owner(page, order);
1450 page_table_check_free(page, order);
1451
1452 if (!PageHighMem(page)) {
1453 debug_check_no_locks_freed(page_address(page),
1454 PAGE_SIZE << order);
1455 debug_check_no_obj_freed(page_address(page),
1456 PAGE_SIZE << order);
1457 }
1458
1459 kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
1469 if (!should_skip_kasan_poison(page, fpi_flags)) {
1470 kasan_poison_pages(page, order, init);
1471
1472
1473 if (kasan_has_integrated_init())
1474 init = false;
1475 }
1476 if (init)
1477 kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible.  s390
	 * does this.  So nothing which can access the page's contents should
	 * happen after this.
	 */
1484 arch_free_page(page, order);
1485
1486 debug_pagealloc_unmap_pages(page, 1 << order);
1487
1488 return true;
1489 }
1490
1491 #ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being
 * freed to the pcp lists. With debug_pagealloc also enabled, they are also
 * rechecked when moved from the pcp lists to the free lists.
 */
1497 static bool free_pcp_prepare(struct page *page, unsigned int order)
1498 {
1499 return free_pages_prepare(page, order, true, FPI_NONE);
1500 }
1501
1502 static bool bulkfree_pcp_prepare(struct page *page)
1503 {
1504 if (debug_pagealloc_enabled_static())
1505 return check_free_page(page);
1506 else
1507 return false;
1508 }
1509 #else

/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from the pcp lists to the free lists, to reduce overhead. With
 * debug_pagealloc enabled, they are also checked immediately when being freed
 * to the pcp lists.
 */
1516 static bool free_pcp_prepare(struct page *page, unsigned int order)
1517 {
1518 if (debug_pagealloc_enabled_static())
1519 return free_pages_prepare(page, order, true, FPI_NONE);
1520 else
1521 return free_pages_prepare(page, order, false, FPI_NONE);
1522 }
1523
1524 static bool bulkfree_pcp_prepare(struct page *page)
1525 {
1526 return check_free_page(page);
1527 }
1528 #endif

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone.
 * count is the number of pages to free.
 */
1535 static void free_pcppages_bulk(struct zone *zone, int count,
1536 struct per_cpu_pages *pcp,
1537 int pindex)
1538 {
1539 int min_pindex = 0;
1540 int max_pindex = NR_PCP_LISTS - 1;
1541 unsigned int order;
1542 bool isolated_pageblocks;
1543 struct page *page;
1544
1545
1546
1547
1548
1549 count = min(pcp->count, count);
1550
1551
1552 pindex = pindex - 1;
1553
1554
1555 spin_lock(&zone->lock);
1556 isolated_pageblocks = has_isolate_pageblock(zone);
1557
1558 while (count > 0) {
1559 struct list_head *list;
1560 int nr_pages;
1561
1562
1563 do {
1564 if (++pindex > max_pindex)
1565 pindex = min_pindex;
1566 list = &pcp->lists[pindex];
1567 if (!list_empty(list))
1568 break;
1569
1570 if (pindex == max_pindex)
1571 max_pindex--;
1572 if (pindex == min_pindex)
1573 min_pindex++;
1574 } while (1);
1575
1576 order = pindex_to_order(pindex);
1577 nr_pages = 1 << order;
1578 BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
1579 do {
1580 int mt;
1581
1582 page = list_last_entry(list, struct page, pcp_list);
1583 mt = get_pcppage_migratetype(page);
1584
1585
1586 list_del(&page->pcp_list);
1587 count -= nr_pages;
1588 pcp->count -= nr_pages;
1589
1590 if (bulkfree_pcp_prepare(page))
1591 continue;
1592
1593
1594 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1595
1596 if (unlikely(isolated_pageblocks))
1597 mt = get_pageblock_migratetype(page);
1598
1599 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1600 trace_mm_page_pcpu_drain(page, order, mt);
1601 } while (count > 0 && !list_empty(list));
1602 }
1603
1604 spin_unlock(&zone->lock);
1605 }
1606
1607 static void free_one_page(struct zone *zone,
1608 struct page *page, unsigned long pfn,
1609 unsigned int order,
1610 int migratetype, fpi_t fpi_flags)
1611 {
1612 unsigned long flags;
1613
1614 spin_lock_irqsave(&zone->lock, flags);
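	/*
	 * The pageblock's migratetype may have changed (e.g. by page
	 * isolation) since the caller looked it up, so re-read it under
	 * the zone lock when isolation might be in progress.
	 */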
1615 if (unlikely(has_isolate_pageblock(zone) ||
1616 is_migrate_isolate(migratetype))) {
1617 migratetype = get_pfnblock_migratetype(page, pfn);
1618 }
1619 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1620 spin_unlock_irqrestore(&zone->lock, flags);
1621 }
1622
1623 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1624 unsigned long zone, int nid)
1625 {
1626 mm_zero_struct_page(page);
1627 set_page_links(page, zone, nid, pfn);
1628 init_page_count(page);
1629 page_mapcount_reset(page);
1630 page_cpupid_reset_last(page);
1631 page_kasan_tag_reset(page);
1632
1633 INIT_LIST_HEAD(&page->lru);
1634 #ifdef WANT_PAGE_VIRTUAL
1635
1636 if (!is_highmem_idx(zone))
1637 set_page_address(page, __va(pfn << PAGE_SHIFT));
1638 #endif
1639 }
1640
1641 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1642 static void __meminit init_reserved_page(unsigned long pfn)
1643 {
1644 pg_data_t *pgdat;
1645 int nid, zid;
1646
1647 if (!early_page_uninitialised(pfn))
1648 return;
1649
1650 nid = early_pfn_to_nid(pfn);
1651 pgdat = NODE_DATA(nid);
1652
1653 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1654 struct zone *zone = &pgdat->node_zones[zid];
1655
1656 if (zone_spans_pfn(zone, pfn))
1657 break;
1658 }
1659 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1660 }
1661 #else
1662 static inline void init_reserved_page(unsigned long pfn)
1663 {
1664 }
1665 #endif

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
1673 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1674 {
1675 unsigned long start_pfn = PFN_DOWN(start);
1676 unsigned long end_pfn = PFN_UP(end);
1677
1678 for (; start_pfn < end_pfn; start_pfn++) {
1679 if (pfn_valid(start_pfn)) {
1680 struct page *page = pfn_to_page(start_pfn);
1681
1682 init_reserved_page(start_pfn);
1683
1684
1685 INIT_LIST_HEAD(&page->lru);
1686
1687
1688
1689
1690
1691
1692 __SetPageReserved(page);
1693 }
1694 }
1695 }
1696
1697 static void __free_pages_ok(struct page *page, unsigned int order,
1698 fpi_t fpi_flags)
1699 {
1700 unsigned long flags;
1701 int migratetype;
1702 unsigned long pfn = page_to_pfn(page);
1703 struct zone *zone = page_zone(page);
1704
1705 if (!free_pages_prepare(page, order, true, fpi_flags))
1706 return;
1707
1708 migratetype = get_pfnblock_migratetype(page, pfn);
1709
1710 spin_lock_irqsave(&zone->lock, flags);
1711 if (unlikely(has_isolate_pageblock(zone) ||
1712 is_migrate_isolate(migratetype))) {
1713 migratetype = get_pfnblock_migratetype(page, pfn);
1714 }
1715 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1716 spin_unlock_irqrestore(&zone->lock, flags);
1717
1718 __count_vm_events(PGFREE, 1 << order);
1719 }
1720
1721 void __free_pages_core(struct page *page, unsigned int order)
1722 {
1723 unsigned int nr_pages = 1 << order;
1724 struct page *p = page;
1725 unsigned int loop;
1726

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
1732 prefetchw(p);
1733 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1734 prefetchw(p + 1);
1735 __ClearPageReserved(p);
1736 set_page_count(p, 0);
1737 }
1738 __ClearPageReserved(p);
1739 set_page_count(p, 0);
1740
1741 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1742
1743
1744
1745
1746
1747 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1748 }
1749
1750 #ifdef CONFIG_NUMA

/*
 * During memory init, memblock maps pfns to nids. The search is expensive,
 * so this caches recent lookups. The implementation of __early_pfn_to_nid()
 * treats start/end as pfns.
 */
1757 struct mminit_pfnnid_cache {
1758 unsigned long last_start;
1759 unsigned long last_end;
1760 int last_nid;
1761 };
1762
1763 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1764
1765
1766
1767
1768 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1769 struct mminit_pfnnid_cache *state)
1770 {
1771 unsigned long start_pfn, end_pfn;
1772 int nid;
1773
1774 if (state->last_start <= pfn && pfn < state->last_end)
1775 return state->last_nid;
1776
1777 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1778 if (nid != NUMA_NO_NODE) {
1779 state->last_start = start_pfn;
1780 state->last_end = end_pfn;
1781 state->last_nid = nid;
1782 }
1783
1784 return nid;
1785 }
1786
1787 int __meminit early_pfn_to_nid(unsigned long pfn)
1788 {
1789 static DEFINE_SPINLOCK(early_pfn_lock);
1790 int nid;
1791
1792 spin_lock(&early_pfn_lock);
1793 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1794 if (nid < 0)
1795 nid = first_online_node;
1796 spin_unlock(&early_pfn_lock);
1797
1798 return nid;
1799 }
1800 #endif
1801
1802 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1803 unsigned int order)
1804 {
1805 if (early_page_uninitialised(pfn))
1806 return;
1807 __free_pages_core(page, order);
1808 }

/*
 * Check that the whole (or a subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free scanner of compaction.
 *
 * Return the struct page pointer of start_pfn, or NULL if checks were not
 * passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that not all pages within a zone's pfn range belong to
 * a single zone. We assume that a border between node0 and node1 can occur
 * within a single pageblock, but not a node0 node1 node0 interleaving within
 * a single pageblock. It is therefore sufficient to check the first and last
 * page of a pageblock and avoid checking each individual page in a pageblock.
 */
1825 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1826 unsigned long end_pfn, struct zone *zone)
1827 {
1828 struct page *start_page;
1829 struct page *end_page;
1830
1831
1832 end_pfn--;
1833
1834 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1835 return NULL;
1836
1837 start_page = pfn_to_online_page(start_pfn);
1838 if (!start_page)
1839 return NULL;
1840
1841 if (page_zone(start_page) != zone)
1842 return NULL;
1843
1844 end_page = pfn_to_page(end_pfn);
1845
1846
1847 if (page_zone_id(start_page) != page_zone_id(end_page))
1848 return NULL;
1849
1850 return start_page;
1851 }
1852
1853 void set_zone_contiguous(struct zone *zone)
1854 {
1855 unsigned long block_start_pfn = zone->zone_start_pfn;
1856 unsigned long block_end_pfn;
1857
1858 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1859 for (; block_start_pfn < zone_end_pfn(zone);
1860 block_start_pfn = block_end_pfn,
1861 block_end_pfn += pageblock_nr_pages) {
1862
1863 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1864
1865 if (!__pageblock_pfn_to_page(block_start_pfn,
1866 block_end_pfn, zone))
1867 return;
1868 cond_resched();
1869 }
1870
1871
1872 zone->contiguous = true;
1873 }
1874
1875 void clear_zone_contiguous(struct zone *zone)
1876 {
1877 zone->contiguous = false;
1878 }
1879
1880 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1881 static void __init deferred_free_range(unsigned long pfn,
1882 unsigned long nr_pages)
1883 {
1884 struct page *page;
1885 unsigned long i;
1886
1887 if (!nr_pages)
1888 return;
1889
1890 page = pfn_to_page(pfn);
1891
1892
1893 if (nr_pages == pageblock_nr_pages &&
1894 (pfn & (pageblock_nr_pages - 1)) == 0) {
1895 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1896 __free_pages_core(page, pageblock_order);
1897 return;
1898 }
1899
1900 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1901 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1902 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1903 __free_pages_core(page, 0);
1904 }
1905 }
1906
1907
1908 static atomic_t pgdat_init_n_undone __initdata;
1909 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1910
1911 static inline void __init pgdat_init_report_one_done(void)
1912 {
1913 if (atomic_dec_and_test(&pgdat_init_n_undone))
1914 complete(&pgdat_init_all_done_comp);
1915 }

/*
 * Returns true if the pfn needs to be initialised or freed to the buddy
 * allocator.
 *
 * pfn validity is only checked at pageblock boundaries: on configurations
 * where holes within a pageblock are impossible this check optimises away,
 * and on the rest it is sufficient to validate only the head pfn of each
 * pageblock.
 */
1927 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1928 {
1929 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1930 return false;
1931 return true;
1932 }

/*
 * Free pages to the buddy allocator. Try to free aligned pages in
 * pageblock_nr_pages sized chunks.
 */
1938 static void __init deferred_free_pages(unsigned long pfn,
1939 unsigned long end_pfn)
1940 {
1941 unsigned long nr_pgmask = pageblock_nr_pages - 1;
1942 unsigned long nr_free = 0;
1943
1944 for (; pfn < end_pfn; pfn++) {
1945 if (!deferred_pfn_valid(pfn)) {
1946 deferred_free_range(pfn - nr_free, nr_free);
1947 nr_free = 0;
1948 } else if (!(pfn & nr_pgmask)) {
1949 deferred_free_range(pfn - nr_free, nr_free);
1950 nr_free = 1;
1951 } else {
1952 nr_free++;
1953 }
1954 }
1955
1956 deferred_free_range(pfn - nr_free, nr_free);
1957 }

/*
 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
 * by performing them only once per pageblock_nr_pages.
 * Return the number of pages initialized.
 */
1964 static unsigned long __init deferred_init_pages(struct zone *zone,
1965 unsigned long pfn,
1966 unsigned long end_pfn)
1967 {
1968 unsigned long nr_pgmask = pageblock_nr_pages - 1;
1969 int nid = zone_to_nid(zone);
1970 unsigned long nr_pages = 0;
1971 int zid = zone_idx(zone);
1972 struct page *page = NULL;
1973
1974 for (; pfn < end_pfn; pfn++) {
1975 if (!deferred_pfn_valid(pfn)) {
1976 page = NULL;
1977 continue;
1978 } else if (!page || !(pfn & nr_pgmask)) {
1979 page = pfn_to_page(pfn);
1980 } else {
1981 page++;
1982 }
1983 __init_single_page(page, pfn, zid, nid);
1984 nr_pages++;
1985 }
1986 return (nr_pages);
1987 }

/*
 * This function is meant to pre-load the iterator for the zone init.
 * Specifically it walks through the ranges until we are caught up to the
 * first_init_pfn value and exits there. If we never encounter the value we
 * return false indicating there are no valid ranges left.
 */
1995 static bool __init
1996 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1997 unsigned long *spfn, unsigned long *epfn,
1998 unsigned long first_init_pfn)
1999 {
2000 u64 j;
2001
2002
2003
2004
2005
2006
2007 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2008 if (*epfn <= first_init_pfn)
2009 continue;
2010 if (*spfn < first_init_pfn)
2011 *spfn = first_init_pfn;
2012 *i = j;
2013 return true;
2014 }
2015
2016 return false;
2017 }

/*
 * Initialize and free pages. We do it in two loops: first we initialize the
 * struct pages, then we free them to the buddy allocator, because while
 * freeing pages we can access pages that are ahead (computing the buddy
 * page in __free_one_page()).
 *
 * To try and keep some memory in the cache, the loop is broken along
 * MAX_ORDER boundaries, so the buddy page computation never touches pages
 * that have not been initialised yet.
 */
2029 static unsigned long __init
2030 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2031 unsigned long *end_pfn)
2032 {
2033 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2034 unsigned long spfn = *start_pfn, epfn = *end_pfn;
2035 unsigned long nr_pages = 0;
2036 u64 j = *i;
2037
2038
2039 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2040 unsigned long t;
2041
2042 if (mo_pfn <= *start_pfn)
2043 break;
2044
2045 t = min(mo_pfn, *end_pfn);
2046 nr_pages += deferred_init_pages(zone, *start_pfn, t);
2047
2048 if (mo_pfn < *end_pfn) {
2049 *start_pfn = mo_pfn;
2050 break;
2051 }
2052 }
2053
2054
2055 swap(j, *i);
2056
2057 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2058 unsigned long t;
2059
2060 if (mo_pfn <= spfn)
2061 break;
2062
2063 t = min(mo_pfn, epfn);
2064 deferred_free_pages(spfn, t);
2065
2066 if (mo_pfn <= epfn)
2067 break;
2068 }
2069
2070 return nr_pages;
2071 }
2072
2073 static void __init
2074 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2075 void *arg)
2076 {
2077 unsigned long spfn, epfn;
2078 struct zone *zone = arg;
2079 u64 i;
2080
2081 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2082
2083
2084
2085
2086
2087 while (spfn < end_pfn) {
2088 deferred_init_maxorder(&i, zone, &spfn, &epfn);
2089 cond_resched();
2090 }
2091 }
2092
2093
2094 __weak int __init
2095 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2096 {
2097 return 1;
2098 }
2099
2100
2101 static int __init deferred_init_memmap(void *data)
2102 {
2103 pg_data_t *pgdat = data;
2104 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2105 unsigned long spfn = 0, epfn = 0;
2106 unsigned long first_init_pfn, flags;
2107 unsigned long start = jiffies;
2108 struct zone *zone;
2109 int zid, max_threads;
2110 u64 i;
2111
2112
2113 if (!cpumask_empty(cpumask))
2114 set_cpus_allowed_ptr(current, cpumask);
2115
2116 pgdat_resize_lock(pgdat, &flags);
2117 first_init_pfn = pgdat->first_deferred_pfn;
2118 if (first_init_pfn == ULONG_MAX) {
2119 pgdat_resize_unlock(pgdat, &flags);
2120 pgdat_init_report_one_done();
2121 return 0;
2122 }
2123
2124
2125 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2126 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2127 pgdat->first_deferred_pfn = ULONG_MAX;
2128
2129
2130
2131
2132
2133
2134 pgdat_resize_unlock(pgdat, &flags);
2135
2136
2137 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2138 zone = pgdat->node_zones + zid;
2139 if (first_init_pfn < zone_end_pfn(zone))
2140 break;
2141 }
2142
2143
2144 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2145 first_init_pfn))
2146 goto zone_empty;
2147
2148 max_threads = deferred_page_init_max_threads(cpumask);
2149
2150 while (spfn < epfn) {
2151 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2152 struct padata_mt_job job = {
2153 .thread_fn = deferred_init_memmap_chunk,
2154 .fn_arg = zone,
2155 .start = spfn,
2156 .size = epfn_align - spfn,
2157 .align = PAGES_PER_SECTION,
2158 .min_chunk = PAGES_PER_SECTION,
2159 .max_threads = max_threads,
2160 };
2161
2162 padata_do_multithreaded(&job);
2163 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2164 epfn_align);
2165 }
2166 zone_empty:
2167
2168 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2169
2170 pr_info("node %d deferred pages initialised in %ums\n",
2171 pgdat->node_id, jiffies_to_msecs(jiffies - start));
2172
2173 pgdat_init_report_one_done();
2174 return 0;
2175 }

/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in
 * increments of PAGES_PER_SECTION sections and we count the number of free
 * pages added until it is large enough to satisfy the allocation.  It returns
 * true if new pages were initialized.
 *
 * Note: the caller holds no locks; the pgdat resize lock taken below is what
 * serialises this against the deferred init kthread.
 */
2192 static noinline bool __init
2193 deferred_grow_zone(struct zone *zone, unsigned int order)
2194 {
2195 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2196 pg_data_t *pgdat = zone->zone_pgdat;
2197 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2198 unsigned long spfn, epfn, flags;
2199 unsigned long nr_pages = 0;
2200 u64 i;
2201
2202
2203 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2204 return false;
2205
2206 pgdat_resize_lock(pgdat, &flags);
2207
2208
2209
2210
2211
2212 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2213 pgdat_resize_unlock(pgdat, &flags);
2214 return true;
2215 }
2216
2217
2218 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2219 first_deferred_pfn)) {
2220 pgdat->first_deferred_pfn = ULONG_MAX;
2221 pgdat_resize_unlock(pgdat, &flags);
2222
2223 return first_deferred_pfn != ULONG_MAX;
2224 }
2225
2226
2227
2228
2229
2230
2231 while (spfn < epfn) {
2232
2233 first_deferred_pfn = spfn;
2234
2235 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2236 touch_nmi_watchdog();
2237
2238
2239 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2240 continue;
2241
2242
2243 if (nr_pages >= nr_pages_needed)
2244 break;
2245 }
2246
2247 pgdat->first_deferred_pfn = spfn;
2248 pgdat_resize_unlock(pgdat, &flags);
2249
2250 return nr_pages > 0;
2251 }

/*
 * deferred_grow_zone() is __init, but it is called from the page allocator
 * during early boot, until the deferred_pages static key is disabled in
 * page_alloc_init_late(). The __ref wrapper below suppresses the resulting
 * section mismatch warning.
 */
2259 static bool __ref
2260 _deferred_grow_zone(struct zone *zone, unsigned int order)
2261 {
2262 return deferred_grow_zone(zone, order);
2263 }
2264
2265 #endif
2266
2267 void __init page_alloc_init_late(void)
2268 {
2269 struct zone *zone;
2270 int nid;
2271
2272 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2273
2274
2275 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2276 for_each_node_state(nid, N_MEMORY) {
2277 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2278 }
2279
2280
2281 wait_for_completion(&pgdat_init_all_done_comp);
2282
2283
2284
2285
2286
2287 static_branch_disable(&deferred_pages);
2288
2289
2290 files_maxfiles_init();
2291 #endif
2292
2293 buffer_init();
2294
2295
2296 memblock_discard();
2297
2298 for_each_node_state(nid, N_MEMORY)
2299 shuffle_free_memory(NODE_DATA(nid));
2300
2301 for_each_populated_zone(zone)
2302 set_zone_contiguous(zone);
2303 }
2304
2305 #ifdef CONFIG_CMA
2306
2307 void __init init_cma_reserved_pageblock(struct page *page)
2308 {
2309 unsigned i = pageblock_nr_pages;
2310 struct page *p = page;
2311
2312 do {
2313 __ClearPageReserved(p);
2314 set_page_count(p, 0);
2315 } while (++p, --i);
2316
2317 set_pageblock_migratetype(page, MIGRATE_CMA);
2318 set_page_refcounted(page);
2319 __free_pages(page, pageblock_order);
2320
2321 adjust_managed_page_count(page, pageblock_nr_pages);
2322 page_zone(page)->cma_pages += pageblock_nr_pages;
2323 }
2324 #endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
2340 static inline void expand(struct zone *zone, struct page *page,
2341 int low, int high, int migratetype)
2342 {
2343 unsigned long size = 1 << high;
2344
2345 while (high > low) {
2346 high--;
2347 size >>= 1;
2348 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2349
2350
2351
2352
2353
2354
2355
2356 if (set_page_guard(zone, &page[size], high, migratetype))
2357 continue;
2358
2359 add_to_free_list(&page[size], zone, high, migratetype);
2360 set_buddy_order(&page[size], high);
2361 }
2362 }
2363
2364 static void check_new_page_bad(struct page *page)
2365 {
2366 if (unlikely(page->flags & __PG_HWPOISON)) {
2367
2368 page_mapcount_reset(page);
2369 return;
2370 }
2371
2372 bad_page(page,
2373 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2374 }
2375
2376
2377
2378
2379 static inline int check_new_page(struct page *page)
2380 {
2381 if (likely(page_expected_state(page,
2382 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2383 return 0;
2384
2385 check_new_page_bad(page);
2386 return 1;
2387 }
2388
2389 static bool check_new_pages(struct page *page, unsigned int order)
2390 {
2391 int i;
2392 for (i = 0; i < (1 << order); i++) {
2393 struct page *p = page + i;
2394
2395 if (unlikely(check_new_page(p)))
2396 return true;
2397 }
2398
2399 return false;
2400 }
2401
2402 #ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
 * being allocated from the pcp lists. With debug_pagealloc also enabled, they
 * are also checked when the pcp lists are refilled from the free lists.
 */
2408 static inline bool check_pcp_refill(struct page *page, unsigned int order)
2409 {
2410 if (debug_pagealloc_enabled_static())
2411 return check_new_pages(page, order);
2412 else
2413 return false;
2414 }
2415
2416 static inline bool check_new_pcp(struct page *page, unsigned int order)
2417 {
2418 return check_new_pages(page, order);
2419 }
2420 #else

/*
 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
 * when the pcp lists are being refilled from the free lists. With
 * debug_pagealloc enabled, they are also checked when being allocated from
 * the pcp lists.
 */
2426 static inline bool check_pcp_refill(struct page *page, unsigned int order)
2427 {
2428 return check_new_pages(page, order);
2429 }
2430 static inline bool check_new_pcp(struct page *page, unsigned int order)
2431 {
2432 if (debug_pagealloc_enabled_static())
2433 return check_new_pages(page, order);
2434 else
2435 return false;
2436 }
2437 #endif
2438
2439 static inline bool should_skip_kasan_unpoison(gfp_t flags)
2440 {
2441
2442 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
2443 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
2444 return false;
2445
2446
2447 if (!kasan_hw_tags_enabled())
2448 return true;
2449
2450
2451
2452
2453
2454 return flags & __GFP_SKIP_KASAN_UNPOISON;
2455 }
2456
2457 static inline bool should_skip_init(gfp_t flags)
2458 {
2459
2460 if (!kasan_hw_tags_enabled())
2461 return false;
2462
2463
2464 return (flags & __GFP_SKIP_ZERO);
2465 }
2466
2467 inline void post_alloc_hook(struct page *page, unsigned int order,
2468 gfp_t gfp_flags)
2469 {
2470 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
2471 !should_skip_init(gfp_flags);
2472 bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
2473 int i;
2474
2475 set_page_private(page, 0);
2476 set_page_refcounted(page);
2477
2478 arch_alloc_page(page, order);
2479 debug_pagealloc_map_pages(page, 1 << order);
2480
2481
2482
2483
2484
2485
2486 kernel_unpoison_pages(page, 1 << order);
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498 if (init_tags) {
2499
2500 for (i = 0; i != 1 << order; ++i)
2501 tag_clear_highpage(page + i);
2502
2503
2504 init = false;
2505 }
2506 if (!should_skip_kasan_unpoison(gfp_flags)) {
2507
2508 kasan_unpoison_pages(page, order, init);
2509
2510
2511 if (kasan_has_integrated_init())
2512 init = false;
2513 } else {
2514
2515 for (i = 0; i != 1 << order; ++i)
2516 page_kasan_tag_reset(page + i);
2517 }
2518
2519 if (init)
2520 kernel_init_pages(page, 1 << order);
2521
2522 if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
2523 SetPageSkipKASanPoison(page);
2524
2525 set_page_owner(page, order, gfp_flags);
2526 page_table_check_alloc(page, order);
2527 }
2528
2529 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2530 unsigned int alloc_flags)
2531 {
2532 post_alloc_hook(page, order, gfp_flags);
2533
2534 if (order && (gfp_flags & __GFP_COMP))
2535 prep_compound_page(page, order);
2536
2537
2538
2539
2540
2541
2542
2543 if (alloc_flags & ALLOC_NO_WATERMARKS)
2544 set_page_pfmemalloc(page);
2545 else
2546 clear_page_pfmemalloc(page);
2547 }
2548
2549
2550
2551
2552
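/*
 * Go through the free lists of the requested migratetype and grab the
 * smallest available page of at least @order. Any unused remainder of a
 * larger buddy is split back onto the free lists via expand().
 */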
2553 static __always_inline
2554 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2555 int migratetype)
2556 {
2557 unsigned int current_order;
2558 struct free_area *area;
2559 struct page *page;
2560
2561
2562 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2563 area = &(zone->free_area[current_order]);
2564 page = get_page_from_free_area(area, migratetype);
2565 if (!page)
2566 continue;
2567 del_page_from_free_list(page, zone, current_order);
2568 expand(zone, page, order, current_order, migratetype);
2569 set_pcppage_migratetype(page, migratetype);
2570 trace_mm_page_alloc_zone_locked(page, order, migratetype,
2571 pcp_allowed_order(order) &&
2572 migratetype < MIGRATE_PCPTYPES);
2573 return page;
2574 }
2575
2576 return NULL;
2577 }
2578
2579
2580
2581
2582
2583
2584
2585
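/*
 * Fallback order used when the free lists of the requested migratetype are
 * exhausted. Each row is tried left to right and is terminated by
 * MIGRATE_TYPES; CMA and HIGHATOMIC pageblocks are handled separately.
 */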
2586 static int fallbacks[MIGRATE_TYPES][3] = {
2587 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
2588 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2589 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
2590 };
2591
2592 #ifdef CONFIG_CMA
2593 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2594 unsigned int order)
2595 {
2596 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2597 }
2598 #else
2599 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2600 unsigned int order) { return NULL; }
2601 #endif
2602
2603
2604
2605
2606
2607
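/*
 * Move the free pages in the range [start_pfn, end_pfn] to the free list of
 * the requested migratetype. When @num_movable is non-NULL, also count the
 * non-buddy pages in the range that look movable (LRU or __PageMovable).
 * Callers are expected to hold zone->lock. Returns the number of base pages
 * moved.
 */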
2608 static int move_freepages(struct zone *zone,
2609 unsigned long start_pfn, unsigned long end_pfn,
2610 int migratetype, int *num_movable)
2611 {
2612 struct page *page;
2613 unsigned long pfn;
2614 unsigned int order;
2615 int pages_moved = 0;
2616
2617 for (pfn = start_pfn; pfn <= end_pfn;) {
2618 page = pfn_to_page(pfn);
2619 if (!PageBuddy(page)) {
2620
2621
2622
2623
2624
2625 if (num_movable &&
2626 (PageLRU(page) || __PageMovable(page)))
2627 (*num_movable)++;
2628 pfn++;
2629 continue;
2630 }
2631
2632
2633 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2634 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2635
2636 order = buddy_order(page);
2637 move_to_free_list(page, zone, order, migratetype);
2638 pfn += 1 << order;
2639 pages_moved += 1 << order;
2640 }
2641
2642 return pages_moved;
2643 }
2644
2645 int move_freepages_block(struct zone *zone, struct page *page,
2646 int migratetype, int *num_movable)
2647 {
2648 unsigned long start_pfn, end_pfn, pfn;
2649
2650 if (num_movable)
2651 *num_movable = 0;
2652
2653 pfn = page_to_pfn(page);
2654 start_pfn = pfn & ~(pageblock_nr_pages - 1);
2655 end_pfn = start_pfn + pageblock_nr_pages - 1;
2656
2657
2658 if (!zone_spans_pfn(zone, start_pfn))
2659 start_pfn = pfn;
2660 if (!zone_spans_pfn(zone, end_pfn))
2661 return 0;
2662
2663 return move_freepages(zone, start_pfn, end_pfn, migratetype,
2664 num_movable);
2665 }
2666
2667 static void change_pageblock_range(struct page *pageblock_page,
2668 int start_order, int migratetype)
2669 {
2670 int nr_pageblocks = 1 << (start_order - pageblock_order);
2671
2672 while (nr_pageblocks--) {
2673 set_pageblock_migratetype(pageblock_page, migratetype);
2674 pageblock_page += pageblock_nr_pages;
2675 }
2676 }
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
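/*
 * When falling back to a different migratetype, decide whether to steal the
 * whole pageblock rather than a single page. Stealing is allowed for
 * pageblock-sized (or larger) requests, for requests of at least half a
 * pageblock, for RECLAIMABLE and UNMOVABLE allocations, and whenever
 * grouping by mobility is disabled.
 */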
2690 static bool can_steal_fallback(unsigned int order, int start_mt)
2691 {
2692
2693
2694
2695
2696
2697
2698
2699 if (order >= pageblock_order)
2700 return true;
2701
2702 if (order >= pageblock_order / 2 ||
2703 start_mt == MIGRATE_RECLAIMABLE ||
2704 start_mt == MIGRATE_UNMOVABLE ||
2705 page_group_by_mobility_disabled)
2706 return true;
2707
2708 return false;
2709 }
2710
2711 static inline bool boost_watermark(struct zone *zone)
2712 {
2713 unsigned long max_boost;
2714
2715 if (!watermark_boost_factor)
2716 return false;
2717
2718
2719
2720
2721
2722
2723 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2724 return false;
2725
2726 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2727 watermark_boost_factor, 10000);
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737 if (!max_boost)
2738 return false;
2739
2740 max_boost = max(pageblock_nr_pages, max_boost);
2741
2742 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2743 max_boost);
2744
2745 return true;
2746 }
2747
2748
2749
2750
2751
2752
2753
2754
2755
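/*
 * This function implements the actual steal behaviour. If @whole_block is
 * set, move as many free pages of the block as possible over to @start_type
 * and, when enough pages in the block are (or look) compatible, claim the
 * whole pageblock for @start_type. Otherwise, or when a whole-block steal is
 * not worthwhile, just move the single buddy page that triggered the
 * fallback.
 */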
2756 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2757 unsigned int alloc_flags, int start_type, bool whole_block)
2758 {
2759 unsigned int current_order = buddy_order(page);
2760 int free_pages, movable_pages, alike_pages;
2761 int old_block_type;
2762
2763 old_block_type = get_pageblock_migratetype(page);
2764
2765
2766
2767
2768
2769 if (is_migrate_highatomic(old_block_type))
2770 goto single_page;
2771
2772
2773 if (current_order >= pageblock_order) {
2774 change_pageblock_range(page, current_order, start_type);
2775 goto single_page;
2776 }
2777
2778
2779
2780
2781
2782
2783 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2784 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2785
2786
2787 if (!whole_block)
2788 goto single_page;
2789
2790 free_pages = move_freepages_block(zone, page, start_type,
2791 &movable_pages);
2792
2793
2794
2795
2796
2797 if (start_type == MIGRATE_MOVABLE) {
2798 alike_pages = movable_pages;
2799 } else {
2800
2801
2802
2803
2804
2805
2806
2807 if (old_block_type == MIGRATE_MOVABLE)
2808 alike_pages = pageblock_nr_pages
2809 - (free_pages + movable_pages);
2810 else
2811 alike_pages = 0;
2812 }
2813
2814
2815 if (!free_pages)
2816 goto single_page;
2817
2818
2819
2820
2821
2822 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2823 page_group_by_mobility_disabled)
2824 set_pageblock_migratetype(page, start_type);
2825
2826 return;
2827
2828 single_page:
2829 move_to_free_list(page, zone, current_order, start_type);
2830 }
2831
2832
2833
2834
2835
2836
2837
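/*
 * Check whether the given free_area has a suitable fallback migratetype for
 * an allocation of @migratetype at @order. Returns the fallback migratetype
 * (setting *can_steal when a whole-block steal is permitted, and honouring
 * @only_stealable) or -1 if none exists.
 */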
2838 int find_suitable_fallback(struct free_area *area, unsigned int order,
2839 int migratetype, bool only_stealable, bool *can_steal)
2840 {
2841 int i;
2842 int fallback_mt;
2843
2844 if (area->nr_free == 0)
2845 return -1;
2846
2847 *can_steal = false;
2848 for (i = 0;; i++) {
2849 fallback_mt = fallbacks[migratetype][i];
2850 if (fallback_mt == MIGRATE_TYPES)
2851 break;
2852
2853 if (free_area_empty(area, fallback_mt))
2854 continue;
2855
2856 if (can_steal_fallback(order, migratetype))
2857 *can_steal = true;
2858
2859 if (!only_stealable)
2860 return fallback_mt;
2861
2862 if (*can_steal)
2863 return fallback_mt;
2864 }
2865
2866 return -1;
2867 }
2868
2869
2870
2871
2872
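/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are not already too many reserved. The reserve is capped at roughly
 * 1% of the zone's managed pages plus one pageblock, and only pageblocks
 * whose current migratetype is mergeable are converted.
 */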
2873 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2874 unsigned int alloc_order)
2875 {
2876 int mt;
2877 unsigned long max_managed, flags;
2878
2879
2880
2881
2882
2883 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2884 if (zone->nr_reserved_highatomic >= max_managed)
2885 return;
2886
2887 spin_lock_irqsave(&zone->lock, flags);
2888
2889
2890 if (zone->nr_reserved_highatomic >= max_managed)
2891 goto out_unlock;
2892
2893
2894 mt = get_pageblock_migratetype(page);
2895
2896 if (migratetype_is_mergeable(mt)) {
2897 zone->nr_reserved_highatomic += pageblock_nr_pages;
2898 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2899 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2900 }
2901
2902 out_unlock:
2903 spin_unlock_irqrestore(&zone->lock, flags);
2904 }
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
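/*
 * Used when an allocation is about to fail under memory pressure: convert
 * pageblocks reserved for high-order atomic allocations back to the
 * requested migratetype so the allocation or reclaim can make progress.
 * Without @force, each zone keeps its last reserved pageblock. Returns true
 * as soon as some pages were successfully moved.
 */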
2915 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2916 bool force)
2917 {
2918 struct zonelist *zonelist = ac->zonelist;
2919 unsigned long flags;
2920 struct zoneref *z;
2921 struct zone *zone;
2922 struct page *page;
2923 int order;
2924 bool ret;
2925
2926 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2927 ac->nodemask) {
2928
2929
2930
2931
2932 if (!force && zone->nr_reserved_highatomic <=
2933 pageblock_nr_pages)
2934 continue;
2935
2936 spin_lock_irqsave(&zone->lock, flags);
2937 for (order = 0; order < MAX_ORDER; order++) {
2938 struct free_area *area = &(zone->free_area[order]);
2939
2940 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2941 if (!page)
2942 continue;
2943
2944
2945
2946
2947
2948
2949
2950
2951 if (is_migrate_highatomic_page(page)) {
2952
2953
2954
2955
2956
2957
2958
2959 zone->nr_reserved_highatomic -= min(
2960 pageblock_nr_pages,
2961 zone->nr_reserved_highatomic);
2962 }
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973 set_pageblock_migratetype(page, ac->migratetype);
2974 ret = move_freepages_block(zone, page, ac->migratetype,
2975 NULL);
2976 if (ret) {
2977 spin_unlock_irqrestore(&zone->lock, flags);
2978 return ret;
2979 }
2980 }
2981 spin_unlock_irqrestore(&zone->lock, flags);
2982 }
2983
2984 return false;
2985 }
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
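/*
 * Try to allocate from a fallback migratetype. The free lists are searched
 * from the largest order downwards so that, when stealing, we grab the
 * biggest possible block and cause the least future fragmentation; only a
 * movable allocation that cannot steal the whole block falls back to the
 * smallest sufficient order instead. ALLOC_NOFRAGMENT restricts the search
 * to whole pageblocks. Returns true if something was stolen for
 * @start_migratetype, so the caller should retry __rmqueue_smallest().
 */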
2997 static __always_inline bool
2998 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2999 unsigned int alloc_flags)
3000 {
3001 struct free_area *area;
3002 int current_order;
3003 int min_order = order;
3004 struct page *page;
3005 int fallback_mt;
3006 bool can_steal;
3007
3008
3009
3010
3011
3012
3013 if (alloc_flags & ALLOC_NOFRAGMENT)
3014 min_order = pageblock_order;
3015
3016
3017
3018
3019
3020
3021 for (current_order = MAX_ORDER - 1; current_order >= min_order;
3022 --current_order) {
3023 area = &(zone->free_area[current_order]);
3024 fallback_mt = find_suitable_fallback(area, current_order,
3025 start_migratetype, false, &can_steal);
3026 if (fallback_mt == -1)
3027 continue;
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
3038 && current_order > order)
3039 goto find_smallest;
3040
3041 goto do_steal;
3042 }
3043
3044 return false;
3045
3046 find_smallest:
3047 for (current_order = order; current_order < MAX_ORDER;
3048 current_order++) {
3049 area = &(zone->free_area[current_order]);
3050 fallback_mt = find_suitable_fallback(area, current_order,
3051 start_migratetype, false, &can_steal);
3052 if (fallback_mt != -1)
3053 break;
3054 }
3055
3056
3057
3058
3059
3060 VM_BUG_ON(current_order == MAX_ORDER);
3061
3062 do_steal:
3063 page = get_page_from_free_area(area, fallback_mt);
3064
3065 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
3066 can_steal);
3067
3068 trace_mm_page_alloc_extfrag(page, order, current_order,
3069 start_migratetype, fallback_mt);
3070
3071 return true;
3073 }
3073 }
3074
3075
3076
3077
3078
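/*
 * Do the hard work of removing an element from the buddy allocator: try the
 * requested migratetype first, optionally biasing movable allocations
 * towards CMA when free CMA pages make up more than half of the zone's free
 * pages, then fall back to other migratetypes. Call me with the zone->lock
 * already held.
 */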
3079 static __always_inline struct page *
3080 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
3081 unsigned int alloc_flags)
3082 {
3083 struct page *page;
3084
3085 if (IS_ENABLED(CONFIG_CMA)) {
3086
3087
3088
3089
3090
3091 if (alloc_flags & ALLOC_CMA &&
3092 zone_page_state(zone, NR_FREE_CMA_PAGES) >
3093 zone_page_state(zone, NR_FREE_PAGES) / 2) {
3094 page = __rmqueue_cma_fallback(zone, order);
3095 if (page)
3096 return page;
3097 }
3098 }
3099 retry:
3100 page = __rmqueue_smallest(zone, order, migratetype);
3101 if (unlikely(!page)) {
3102 if (alloc_flags & ALLOC_CMA)
3103 page = __rmqueue_cma_fallback(zone, order);
3104
3105 if (!page && __rmqueue_fallback(zone, order, migratetype,
3106 alloc_flags))
3107 goto retry;
3108 }
3109 return page;
3110 }
3111
3112
3113
3114
3115
3116
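/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the zone->lock, for efficiency. Pages are added to
 * @list; the caller is responsible for updating pcp->count. Returns the
 * number of new pages that were placed on the list.
 */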
3117 static int rmqueue_bulk(struct zone *zone, unsigned int order,
3118 unsigned long count, struct list_head *list,
3119 int migratetype, unsigned int alloc_flags)
3120 {
3121 int i, allocated = 0;
3122
3123
3124 spin_lock(&zone->lock);
3125 for (i = 0; i < count; ++i) {
3126 struct page *page = __rmqueue(zone, order, migratetype,
3127 alloc_flags);
3128 if (unlikely(page == NULL))
3129 break;
3130
3131 if (unlikely(check_pcp_refill(page, order)))
3132 continue;
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144 list_add_tail(&page->pcp_list, list);
3145 allocated++;
3146 if (is_migrate_cma(get_pcppage_migratetype(page)))
3147 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3148 -(1 << order));
3149 }
3150
3151
3152
3153
3154
3155
3156
3157 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3158 spin_unlock(&zone->lock);
3159 return allocated;
3160 }
3161
3162 #ifdef CONFIG_NUMA
3163
3164
3165
3166
3167
3168 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3169 {
3170 int to_drain, batch;
3171
3172 batch = READ_ONCE(pcp->batch);
3173 to_drain = min(pcp->count, batch);
3174 if (to_drain > 0) {
3175 unsigned long flags;
3176
3177
3178
3179
3180
3181
3182 spin_lock_irqsave(&pcp->lock, flags);
3183 free_pcppages_bulk(zone, to_drain, pcp, 0);
3184 spin_unlock_irqrestore(&pcp->lock, flags);
3185 }
3186 }
3187 #endif
3188
3189
3190
3191
3192 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3193 {
3194 struct per_cpu_pages *pcp;
3195
3196 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3197 if (pcp->count) {
3198 unsigned long flags;
3199
3200
3201 spin_lock_irqsave(&pcp->lock, flags);
3202 free_pcppages_bulk(zone, pcp->count, pcp, 0);
3203 spin_unlock_irqrestore(&pcp->lock, flags);
3204 }
3205 }
3206
3207
3208
3209
3210 static void drain_pages(unsigned int cpu)
3211 {
3212 struct zone *zone;
3213
3214 for_each_populated_zone(zone) {
3215 drain_pages_zone(cpu, zone);
3216 }
3217 }
3218
3219
3220
3221
3222 void drain_local_pages(struct zone *zone)
3223 {
3224 int cpu = smp_processor_id();
3225
3226 if (zone)
3227 drain_pages_zone(cpu, zone);
3228 else
3229 drain_pages(cpu);
3230 }
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
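/*
 * Drain per-cpu pages back into the buddy allocator. When @zone is non-NULL
 * only that zone's pcp lists are drained. The pcp->count checks are racy,
 * so callers that must not miss any CPU can pass @force_all_cpus to drain
 * every online CPU regardless of its counters. Serialised by
 * pcpu_drain_mutex: if the mutex is already held, a drain of all zones is
 * simply skipped (the current holder will do the work), while a drain of a
 * specific zone waits for the mutex.
 */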
3242 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3243 {
3244 int cpu;
3245
3246
3247
3248
3249
3250 static cpumask_t cpus_with_pcps;
3251
3252
3253
3254
3255
3256
3257 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3258 if (!zone)
3259 return;
3260 mutex_lock(&pcpu_drain_mutex);
3261 }
3262
3263
3264
3265
3266
3267
3268
3269 for_each_online_cpu(cpu) {
3270 struct per_cpu_pages *pcp;
3271 struct zone *z;
3272 bool has_pcps = false;
3273
3274 if (force_all_cpus) {
3275
3276
3277
3278
3279 has_pcps = true;
3280 } else if (zone) {
3281 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
3282 if (pcp->count)
3283 has_pcps = true;
3284 } else {
3285 for_each_populated_zone(z) {
3286 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
3287 if (pcp->count) {
3288 has_pcps = true;
3289 break;
3290 }
3291 }
3292 }
3293
3294 if (has_pcps)
3295 cpumask_set_cpu(cpu, &cpus_with_pcps);
3296 else
3297 cpumask_clear_cpu(cpu, &cpus_with_pcps);
3298 }
3299
3300 for_each_cpu(cpu, &cpus_with_pcps) {
3301 if (zone)
3302 drain_pages_zone(cpu, zone);
3303 else
3304 drain_pages(cpu);
3305 }
3306
3307 mutex_unlock(&pcpu_drain_mutex);
3308 }
3309
3310
3311
3312
3313
3314
3315 void drain_all_pages(struct zone *zone)
3316 {
3317 __drain_all_pages(zone, false);
3318 }
3319
3320 #ifdef CONFIG_HIBERNATION
3321
3322
3323
3324
3325 #define WD_PAGE_COUNT (128*1024)
3326
3327 void mark_free_pages(struct zone *zone)
3328 {
3329 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3330 unsigned long flags;
3331 unsigned int order, t;
3332 struct page *page;
3333
3334 if (zone_is_empty(zone))
3335 return;
3336
3337 spin_lock_irqsave(&zone->lock, flags);
3338
3339 max_zone_pfn = zone_end_pfn(zone);
3340 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3341 if (pfn_valid(pfn)) {
3342 page = pfn_to_page(pfn);
3343
3344 if (!--page_count) {
3345 touch_nmi_watchdog();
3346 page_count = WD_PAGE_COUNT;
3347 }
3348
3349 if (page_zone(page) != zone)
3350 continue;
3351
3352 if (!swsusp_page_is_forbidden(page))
3353 swsusp_unset_page_free(page);
3354 }
3355
3356 for_each_migratetype_order(order, t) {
3357 list_for_each_entry(page,
3358 &zone->free_area[order].free_list[t], buddy_list) {
3359 unsigned long i;
3360
3361 pfn = page_to_pfn(page);
3362 for (i = 0; i < (1UL << order); i++) {
3363 if (!--page_count) {
3364 touch_nmi_watchdog();
3365 page_count = WD_PAGE_COUNT;
3366 }
3367 swsusp_set_page_free(pfn_to_page(pfn + i));
3368 }
3369 }
3370 }
3371 spin_unlock_irqrestore(&zone->lock, flags);
3372 }
3373 #endif
3374
3375 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3376 unsigned int order)
3377 {
3378 int migratetype;
3379
3380 if (!free_pcp_prepare(page, order))
3381 return false;
3382
3383 migratetype = get_pfnblock_migratetype(page, pfn);
3384 set_pcppage_migratetype(page, migratetype);
3385 return true;
3386 }
3387
3388 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
3389 bool free_high)
3390 {
3391 int min_nr_free, max_nr_free;
3392
3393
3394 if (unlikely(free_high))
3395 return pcp->count;
3396
3397
3398 if (unlikely(high < batch))
3399 return 1;
3400
3401
3402 min_nr_free = batch;
3403 max_nr_free = high - batch;
3404
3405
3406
3407
3408
3409 batch <<= pcp->free_factor;
3410 if (batch < max_nr_free)
3411 pcp->free_factor++;
3412 batch = clamp(batch, min_nr_free, max_nr_free);
3413
3414 return batch;
3415 }
3416
3417 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
3418 bool free_high)
3419 {
3420 int high = READ_ONCE(pcp->high);
3421
3422 if (unlikely(!high || free_high))
3423 return 0;
3424
3425 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3426 return high;
3427
3428
3429
3430
3431
3432 return min(READ_ONCE(pcp->batch) << 2, high);
3433 }
3434
3435 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
3436 struct page *page, int migratetype,
3437 unsigned int order)
3438 {
3439 int high;
3440 int pindex;
3441 bool free_high;
3442
3443 __count_vm_event(PGFREE);
3444 pindex = order_to_pindex(migratetype, order);
3445 list_add(&page->pcp_list, &pcp->lists[pindex]);
3446 pcp->count += 1 << order;
3447
3448
3449
3450
3451
3452
3453
3454 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
3455
3456 high = nr_pcp_high(pcp, zone, free_high);
3457 if (pcp->count >= high) {
3458 int batch = READ_ONCE(pcp->batch);
3459
3460 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
3461 }
3462 }
3463
3464
3465
3466
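/*
 * Free a page to the per-cpu lists. Pages in isolated pageblocks, and frees
 * that cannot take the pcp lock, go straight back to the buddy allocator;
 * other non-pcp migratetypes are cached on the movable pcp list.
 */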
3467 void free_unref_page(struct page *page, unsigned int order)
3468 {
3469 unsigned long flags;
3470 unsigned long __maybe_unused UP_flags;
3471 struct per_cpu_pages *pcp;
3472 struct zone *zone;
3473 unsigned long pfn = page_to_pfn(page);
3474 int migratetype;
3475
3476 if (!free_unref_page_prepare(page, pfn, order))
3477 return;
3478
3479
3480
3481
3482
3483
3484
3485
3486 migratetype = get_pcppage_migratetype(page);
3487 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3488 if (unlikely(is_migrate_isolate(migratetype))) {
3489 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3490 return;
3491 }
3492 migratetype = MIGRATE_MOVABLE;
3493 }
3494
3495 zone = page_zone(page);
3496 pcp_trylock_prepare(UP_flags);
3497 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
3498 if (pcp) {
3499 free_unref_page_commit(zone, pcp, page, migratetype, order);
3500 pcp_spin_unlock_irqrestore(pcp, flags);
3501 } else {
3502 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
3503 }
3504 pcp_trylock_finish(UP_flags);
3505 }
3506
3507
3508
3509
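/*
 * Free a batch of order-0 pages to the per-cpu lists. Isolated pages are
 * freed directly in a first pass; the second pass batches pcp operations,
 * re-taking the lock when the zone changes and every SWAP_CLUSTER_MAX pages
 * so that IRQs are not held off for too long.
 */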
3510 void free_unref_page_list(struct list_head *list)
3511 {
3512 struct page *page, *next;
3513 struct per_cpu_pages *pcp = NULL;
3514 struct zone *locked_zone = NULL;
3515 unsigned long flags;
3516 int batch_count = 0;
3517 int migratetype;
3518
3519
3520 list_for_each_entry_safe(page, next, list, lru) {
3521 unsigned long pfn = page_to_pfn(page);
3522 if (!free_unref_page_prepare(page, pfn, 0)) {
3523 list_del(&page->lru);
3524 continue;
3525 }
3526
3527
3528
3529
3530
3531 migratetype = get_pcppage_migratetype(page);
3532 if (unlikely(is_migrate_isolate(migratetype))) {
3533 list_del(&page->lru);
3534 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3535 continue;
3536 }
3537 }
3538
3539 list_for_each_entry_safe(page, next, list, lru) {
3540 struct zone *zone = page_zone(page);
3541
3542
3543 if (zone != locked_zone) {
3544 if (pcp)
3545 pcp_spin_unlock_irqrestore(pcp, flags);
3546
3547 locked_zone = zone;
3548 pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
3549 }
3550
3551
3552
3553
3554
3555 migratetype = get_pcppage_migratetype(page);
3556 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3557 migratetype = MIGRATE_MOVABLE;
3558
3559 trace_mm_page_free_batched(page);
3560 free_unref_page_commit(zone, pcp, page, migratetype, 0);
3561
3562
3563
3564
3565
3566 if (++batch_count == SWAP_CLUSTER_MAX) {
3567 pcp_spin_unlock_irqrestore(pcp, flags);
3568 batch_count = 0;
3569 pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
3570 }
3571 }
3572
3573 if (pcp)
3574 pcp_spin_unlock_irqrestore(pcp, flags);
3575 }
3576
3577
3578
3579
3580
3581
3582
3583
3584
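/*
 * split_page() takes a non-compound, higher-order page and splits it into
 * 1 << @order individual order-0 pages, giving each sub-page its own
 * reference count; each sub-page must then be freed individually.
 *
 * Illustrative sketch of a caller (not taken from this file): allocate an
 * order-2 block with alloc_pages(GFP_KERNEL, 2), call split_page(page, 2),
 * then later __free_page() each of the four resulting pages separately.
 */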
3585 void split_page(struct page *page, unsigned int order)
3586 {
3587 int i;
3588
3589 VM_BUG_ON_PAGE(PageCompound(page), page);
3590 VM_BUG_ON_PAGE(!page_count(page), page);
3591
3592 for (i = 1; i < (1 << order); i++)
3593 set_page_refcounted(page + i);
3594 split_page_owner(page, 1 << order);
3595 split_page_memcg(page, 1 << order);
3596 }
3597 EXPORT_SYMBOL_GPL(split_page);
3598
3599 int __isolate_free_page(struct page *page, unsigned int order)
3600 {
3601 unsigned long watermark;
3602 struct zone *zone;
3603 int mt;
3604
3605 BUG_ON(!PageBuddy(page));
3606
3607 zone = page_zone(page);
3608 mt = get_pageblock_migratetype(page);
3609
3610 if (!is_migrate_isolate(mt)) {
3611
3612
3613
3614
3615
3616
3617 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3618 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3619 return 0;
3620
3621 __mod_zone_freepage_state(zone, -(1UL << order), mt);
3622 }
3623
3624
3625
3626 del_page_from_free_list(page, zone, order);
3627
3628
3629
3630
3631
3632 if (order >= pageblock_order - 1) {
3633 struct page *endpage = page + (1 << order) - 1;
3634 for (; page < endpage; page += pageblock_nr_pages) {
3635 int mt = get_pageblock_migratetype(page);
3636
3637
3638
3639
3640 if (migratetype_is_mergeable(mt))
3641 set_pageblock_migratetype(page,
3642 MIGRATE_MOVABLE);
3643 }
3644 }
3645
3646
3647 return 1UL << order;
3648 }
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3660 {
3661 struct zone *zone = page_zone(page);
3662
3663
3664 lockdep_assert_held(&zone->lock);
3665
3666
3667 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3668 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3669 }
3670
3671
3672
3673
3674
3675
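/*
 * Update the NUMA hit/miss/foreign and local/other statistics for an
 * allocation satisfied from zone @z when @preferred_zone was asked for.
 * Compiles away without CONFIG_NUMA and is skipped when NUMA stats are
 * disabled via the static branch.
 */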
3676 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3677 long nr_account)
3678 {
3679 #ifdef CONFIG_NUMA
3680 enum numa_stat_item local_stat = NUMA_LOCAL;
3681
3682
3683 if (!static_branch_likely(&vm_numa_stat_key))
3684 return;
3685
3686 if (zone_to_nid(z) != numa_node_id())
3687 local_stat = NUMA_OTHER;
3688
3689 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3690 __count_numa_events(z, NUMA_HIT, nr_account);
3691 else {
3692 __count_numa_events(z, NUMA_MISS, nr_account);
3693 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3694 }
3695 __count_numa_events(z, local_stat, nr_account);
3696 #endif
3697 }
3698
3699 static __always_inline
3700 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3701 unsigned int order, unsigned int alloc_flags,
3702 int migratetype)
3703 {
3704 struct page *page;
3705 unsigned long flags;
3706
3707 do {
3708 page = NULL;
3709 spin_lock_irqsave(&zone->lock, flags);
3710
3711
3712
3713
3714
3715
3716 if (order > 0 && alloc_flags & ALLOC_HARDER)
3717 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3718 if (!page) {
3719 page = __rmqueue(zone, order, migratetype, alloc_flags);
3720 if (!page) {
3721 spin_unlock_irqrestore(&zone->lock, flags);
3722 return NULL;
3723 }
3724 }
3725 __mod_zone_freepage_state(zone, -(1 << order),
3726 get_pcppage_migratetype(page));
3727 spin_unlock_irqrestore(&zone->lock, flags);
3728 } while (check_new_pages(page, order));
3729
3730 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3731 zone_statistics(preferred_zone, zone, 1);
3732
3733 return page;
3734 }
3735
3736
3737 static inline
3738 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3739 int migratetype,
3740 unsigned int alloc_flags,
3741 struct per_cpu_pages *pcp,
3742 struct list_head *list)
3743 {
3744 struct page *page;
3745
3746 do {
3747 if (list_empty(list)) {
3748 int batch = READ_ONCE(pcp->batch);
3749 int alloced;
3750
3751
3752
3753
3754
3755
3756
3757
3758 if (batch > 1)
3759 batch = max(batch >> order, 2);
3760 alloced = rmqueue_bulk(zone, order,
3761 batch, list,
3762 migratetype, alloc_flags);
3763
3764 pcp->count += alloced << order;
3765 if (unlikely(list_empty(list)))
3766 return NULL;
3767 }
3768
3769 page = list_first_entry(list, struct page, pcp_list);
3770 list_del(&page->pcp_list);
3771 pcp->count -= 1 << order;
3772 } while (check_new_pcp(page, order));
3773
3774 return page;
3775 }
3776
3777
3778 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3779 struct zone *zone, unsigned int order,
3780 gfp_t gfp_flags, int migratetype,
3781 unsigned int alloc_flags)
3782 {
3783 struct per_cpu_pages *pcp;
3784 struct list_head *list;
3785 struct page *page;
3786 unsigned long flags;
3787 unsigned long __maybe_unused UP_flags;
3788
3789
3790
3791
3792
3793 pcp_trylock_prepare(UP_flags);
3794 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
3795 if (!pcp) {
3796 pcp_trylock_finish(UP_flags);
3797 return NULL;
3798 }
3799
3800
3801
3802
3803
3804
3805 pcp->free_factor >>= 1;
3806 list = &pcp->lists[order_to_pindex(migratetype, order)];
3807 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3808 pcp_spin_unlock_irqrestore(pcp, flags);
3809 pcp_trylock_finish(UP_flags);
3810 if (page) {
3811 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3812 zone_statistics(preferred_zone, zone, 1);
3813 }
3814 return page;
3815 }
3816
3817
3818
3819
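/*
 * Allocate a page from the given zone. Orders cached by the per-cpu lists
 * are served from there, except that movable allocations which are not
 * allowed to use CMA must bypass the pcp lists (the movable pcp list may
 * contain CMA pages); everything else goes through the buddy free lists.
 * If the zone's watermarks were boosted to fight fragmentation, kswapd is
 * woken once the boost flag is observed.
 */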
3820 static inline
3821 struct page *rmqueue(struct zone *preferred_zone,
3822 struct zone *zone, unsigned int order,
3823 gfp_t gfp_flags, unsigned int alloc_flags,
3824 int migratetype)
3825 {
3826 struct page *page;
3827
3828
3829
3830
3831
3832 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3833
3834 if (likely(pcp_allowed_order(order))) {
3835
3836
3837
3838
3839 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3840 migratetype != MIGRATE_MOVABLE) {
3841 page = rmqueue_pcplist(preferred_zone, zone, order,
3842 gfp_flags, migratetype, alloc_flags);
3843 if (likely(page))
3844 goto out;
3845 }
3846 }
3847
3848 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3849 migratetype);
3850
3851 out:
3852
3853 if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3854 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3855 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3856 }
3857
3858 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3859 return page;
3860 }
3861
3862 #ifdef CONFIG_FAIL_PAGE_ALLOC
3863
3864 static struct {
3865 struct fault_attr attr;
3866
3867 bool ignore_gfp_highmem;
3868 bool ignore_gfp_reclaim;
3869 u32 min_order;
3870 } fail_page_alloc = {
3871 .attr = FAULT_ATTR_INITIALIZER,
3872 .ignore_gfp_reclaim = true,
3873 .ignore_gfp_highmem = true,
3874 .min_order = 1,
3875 };
3876
3877 static int __init setup_fail_page_alloc(char *str)
3878 {
3879 return setup_fault_attr(&fail_page_alloc.attr, str);
3880 }
3881 __setup("fail_page_alloc=", setup_fail_page_alloc);
3882
3883 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3884 {
3885 if (order < fail_page_alloc.min_order)
3886 return false;
3887 if (gfp_mask & __GFP_NOFAIL)
3888 return false;
3889 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3890 return false;
3891 if (fail_page_alloc.ignore_gfp_reclaim &&
3892 (gfp_mask & __GFP_DIRECT_RECLAIM))
3893 return false;
3894
3895 if (gfp_mask & __GFP_NOWARN)
3896 fail_page_alloc.attr.no_warn = true;
3897
3898 return should_fail(&fail_page_alloc.attr, 1 << order);
3899 }
3900
3901 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3902
3903 static int __init fail_page_alloc_debugfs(void)
3904 {
3905 umode_t mode = S_IFREG | 0600;
3906 struct dentry *dir;
3907
3908 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3909 &fail_page_alloc.attr);
3910
3911 debugfs_create_bool("ignore-gfp-wait", mode, dir,
3912 &fail_page_alloc.ignore_gfp_reclaim);
3913 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3914 &fail_page_alloc.ignore_gfp_highmem);
3915 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3916
3917 return 0;
3918 }
3919
3920 late_initcall(fail_page_alloc_debugfs);
3921
3922 #endif
3923
3924 #else
3925
3926 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3927 {
3928 return false;
3929 }
3930
3931 #endif
3932
3933 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3934 {
3935 return __should_fail_alloc_page(gfp_mask, order);
3936 }
3937 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3938
3939 static inline long __zone_watermark_unusable_free(struct zone *z,
3940 unsigned int order, unsigned int alloc_flags)
3941 {
3942 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3943 long unusable_free = (1 << order) - 1;
3944
3945
3946
3947
3948
3949
3950 if (likely(!alloc_harder))
3951 unusable_free += z->nr_reserved_highatomic;
3952
3953 #ifdef CONFIG_CMA
3954
3955 if (!(alloc_flags & ALLOC_CMA))
3956 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3957 #endif
3958
3959 return unusable_free;
3960 }
3961
3962
3963
3964
3965
3966
3967
3968 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3969 int highest_zoneidx, unsigned int alloc_flags,
3970 long free_pages)
3971 {
3972 long min = mark;
3973 int o;
3974 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3975
3976
3977 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3978
3979 if (alloc_flags & ALLOC_HIGH)
3980 min -= min / 2;
3981
3982 if (unlikely(alloc_harder)) {
3983
3984
3985
3986
3987
3988
3989 if (alloc_flags & ALLOC_OOM)
3990 min -= min / 2;
3991 else
3992 min -= min / 4;
3993 }
3994
3995
3996
3997
3998
3999
4000 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
4001 return false;
4002
4003
4004 if (!order)
4005 return true;
4006
4007
4008 for (o = order; o < MAX_ORDER; o++) {
4009 struct free_area *area = &z->free_area[o];
4010 int mt;
4011
4012 if (!area->nr_free)
4013 continue;
4014
4015 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
4016 if (!free_area_empty(area, mt))
4017 return true;
4018 }
4019
4020 #ifdef CONFIG_CMA
4021 if ((alloc_flags & ALLOC_CMA) &&
4022 !free_area_empty(area, MIGRATE_CMA)) {
4023 return true;
4024 }
4025 #endif
4026 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
4027 return true;
4028 }
4029 return false;
4030 }
4031
4032 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
4033 int highest_zoneidx, unsigned int alloc_flags)
4034 {
4035 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4036 zone_page_state(z, NR_FREE_PAGES));
4037 }
4038
4039 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
4040 unsigned long mark, int highest_zoneidx,
4041 unsigned int alloc_flags, gfp_t gfp_mask)
4042 {
4043 long free_pages;
4044
4045 free_pages = zone_page_state(z, NR_FREE_PAGES);
4046
4047
4048
4049
4050
4051 if (!order) {
4052 long usable_free;
4053 long reserved;
4054
4055 usable_free = free_pages;
4056 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
4057
4058
4059 usable_free -= min(usable_free, reserved);
4060 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
4061 return true;
4062 }
4063
4064 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4065 free_pages))
4066 return true;
4067
4068
4069
4070
4071
4072
4073 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
4074 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
4075 mark = z->_watermark[WMARK_MIN];
4076 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
4077 alloc_flags, free_pages);
4078 }
4079
4080 return false;
4081 }
4082
4083 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
4084 unsigned long mark, int highest_zoneidx)
4085 {
4086 long free_pages = zone_page_state(z, NR_FREE_PAGES);
4087
4088 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
4089 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
4090
4091 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
4092 free_pages);
4093 }
4094
4095 #ifdef CONFIG_NUMA
4096 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
4097
4098 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4099 {
4100 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
4101 node_reclaim_distance;
4102 }
4103 #else
4104 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4105 {
4106 return true;
4107 }
4108 #endif
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118 static inline unsigned int
4119 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
4120 {
4121 unsigned int alloc_flags;
4122
4123
4124
4125
4126
4127 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
4128
4129 #ifdef CONFIG_ZONE_DMA32
4130 if (!zone)
4131 return alloc_flags;
4132
4133 if (zone_idx(zone) != ZONE_NORMAL)
4134 return alloc_flags;
4135
4136
4137
4138
4139
4140
4141 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4142 if (nr_online_nodes > 1 && !populated_zone(--zone))
4143 return alloc_flags;
4144
4145 alloc_flags |= ALLOC_NOFRAGMENT;
4146 #endif
4147 return alloc_flags;
4148 }
4149
4150
4151 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4152 unsigned int alloc_flags)
4153 {
4154 #ifdef CONFIG_CMA
4155 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4156 alloc_flags |= ALLOC_CMA;
4157 #endif
4158 return alloc_flags;
4159 }
4160
4161
4162
4163
4164
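/*
 * get_page_from_freelist() goes through the zonelist trying to allocate a
 * page. It honours cpuset constraints, dirty-page spreading, watermarks
 * (possibly after node reclaim) and the ALLOC_NOFRAGMENT preference for
 * staying on the local node, falling back to remote zones only when
 * necessary.
 */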
4165 static struct page *
4166 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4167 const struct alloc_context *ac)
4168 {
4169 struct zoneref *z;
4170 struct zone *zone;
4171 struct pglist_data *last_pgdat = NULL;
4172 bool last_pgdat_dirty_ok = false;
4173 bool no_fallback;
4174
4175 retry:
4176
4177
4178
4179
4180 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4181 z = ac->preferred_zoneref;
4182 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4183 ac->nodemask) {
4184 struct page *page;
4185 unsigned long mark;
4186
4187 if (cpusets_enabled() &&
4188 (alloc_flags & ALLOC_CPUSET) &&
4189 !__cpuset_zone_allowed(zone, gfp_mask))
4190 continue;
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210 if (ac->spread_dirty_pages) {
4211 if (last_pgdat != zone->zone_pgdat) {
4212 last_pgdat = zone->zone_pgdat;
4213 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
4214 }
4215
4216 if (!last_pgdat_dirty_ok)
4217 continue;
4218 }
4219
4220 if (no_fallback && nr_online_nodes > 1 &&
4221 zone != ac->preferred_zoneref->zone) {
4222 int local_nid;
4223
4224
4225
4226
4227
4228
4229 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4230 if (zone_to_nid(zone) != local_nid) {
4231 alloc_flags &= ~ALLOC_NOFRAGMENT;
4232 goto retry;
4233 }
4234 }
4235
4236 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4237 if (!zone_watermark_fast(zone, order, mark,
4238 ac->highest_zoneidx, alloc_flags,
4239 gfp_mask)) {
4240 int ret;
4241
4242 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4243
4244
4245
4246
4247 if (static_branch_unlikely(&deferred_pages)) {
4248 if (_deferred_grow_zone(zone, order))
4249 goto try_this_zone;
4250 }
4251 #endif
4252
4253 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4254 if (alloc_flags & ALLOC_NO_WATERMARKS)
4255 goto try_this_zone;
4256
4257 if (!node_reclaim_enabled() ||
4258 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4259 continue;
4260
4261 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4262 switch (ret) {
4263 case NODE_RECLAIM_NOSCAN:
4264
4265 continue;
4266 case NODE_RECLAIM_FULL:
4267
4268 continue;
4269 default:
4270
4271 if (zone_watermark_ok(zone, order, mark,
4272 ac->highest_zoneidx, alloc_flags))
4273 goto try_this_zone;
4274
4275 continue;
4276 }
4277 }
4278
4279 try_this_zone:
4280 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4281 gfp_mask, alloc_flags, ac->migratetype);
4282 if (page) {
4283 prep_new_page(page, order, gfp_mask, alloc_flags);
4284
4285
4286
4287
4288
4289 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4290 reserve_highatomic_pageblock(page, zone, order);
4291
4292 return page;
4293 } else {
4294 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4295
4296 if (static_branch_unlikely(&deferred_pages)) {
4297 if (_deferred_grow_zone(zone, order))
4298 goto try_this_zone;
4299 }
4300 #endif
4301 }
4302 }
4303
4304
4305
4306
4307
4308 if (no_fallback) {
4309 alloc_flags &= ~ALLOC_NOFRAGMENT;
4310 goto retry;
4311 }
4312
4313 return NULL;
4314 }
4315
4316 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4317 {
4318 unsigned int filter = SHOW_MEM_FILTER_NODES;
4319
4320
4321
4322
4323
4324
4325 if (!(gfp_mask & __GFP_NOMEMALLOC))
4326 if (tsk_is_oom_victim(current) ||
4327 (current->flags & (PF_MEMALLOC | PF_EXITING)))
4328 filter &= ~SHOW_MEM_FILTER_NODES;
4329 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4330 filter &= ~SHOW_MEM_FILTER_NODES;
4331
4332 show_mem(filter, nodemask);
4333 }
4334
4335 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4336 {
4337 struct va_format vaf;
4338 va_list args;
4339 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4340
4341 if ((gfp_mask & __GFP_NOWARN) ||
4342 !__ratelimit(&nopage_rs) ||
4343 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4344 return;
4345
4346 va_start(args, fmt);
4347 vaf.fmt = fmt;
4348 vaf.va = &args;
4349 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4350 current->comm, &vaf, gfp_mask, &gfp_mask,
4351 nodemask_pr_args(nodemask));
4352 va_end(args);
4353
4354 cpuset_print_current_mems_allowed();
4355 pr_cont("\n");
4356 dump_stack();
4357 warn_alloc_show_mem(gfp_mask, nodemask);
4358 }
4359
4360 static inline struct page *
4361 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4362 unsigned int alloc_flags,
4363 const struct alloc_context *ac)
4364 {
4365 struct page *page;
4366
4367 page = get_page_from_freelist(gfp_mask, order,
4368 alloc_flags|ALLOC_CPUSET, ac);
4369
4370
4371
4372
4373 if (!page)
4374 page = get_page_from_freelist(gfp_mask, order,
4375 alloc_flags, ac);
4376
4377 return page;
4378 }
4379
4380 static inline struct page *
4381 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4382 const struct alloc_context *ac, unsigned long *did_some_progress)
4383 {
4384 struct oom_control oc = {
4385 .zonelist = ac->zonelist,
4386 .nodemask = ac->nodemask,
4387 .memcg = NULL,
4388 .gfp_mask = gfp_mask,
4389 .order = order,
4390 };
4391 struct page *page;
4392
4393 *did_some_progress = 0;
4394
4395
4396
4397
4398
4399 if (!mutex_trylock(&oom_lock)) {
4400 *did_some_progress = 1;
4401 schedule_timeout_uninterruptible(1);
4402 return NULL;
4403 }
4404
4405
4406
4407
4408
4409
4410
4411
4412 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4413 ~__GFP_DIRECT_RECLAIM, order,
4414 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4415 if (page)
4416 goto out;
4417
4418
4419 if (current->flags & PF_DUMPCORE)
4420 goto out;
4421
4422 if (order > PAGE_ALLOC_COSTLY_ORDER)
4423 goto out;
4424
4425
4426
4427
4428
4429
4430
4431
4432 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4433 goto out;
4434
4435 if (ac->highest_zoneidx < ZONE_NORMAL)
4436 goto out;
4437 if (pm_suspended_storage())
4438 goto out;
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450 if (out_of_memory(&oc) ||
4451 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4452 *did_some_progress = 1;
4453
4454
4455
4456
4457
4458 if (gfp_mask & __GFP_NOFAIL)
4459 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4460 ALLOC_NO_WATERMARKS, ac);
4461 }
4462 out:
4463 mutex_unlock(&oom_lock);
4464 return page;
4465 }
4466
4467
4468
4469
4470
4471 #define MAX_COMPACT_RETRIES 16
4472
4473 #ifdef CONFIG_COMPACTION
4474
4475 static struct page *
4476 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4477 unsigned int alloc_flags, const struct alloc_context *ac,
4478 enum compact_priority prio, enum compact_result *compact_result)
4479 {
4480 struct page *page = NULL;
4481 unsigned long pflags;
4482 unsigned int noreclaim_flag;
4483
4484 if (!order)
4485 return NULL;
4486
4487 psi_memstall_enter(&pflags);
4488 delayacct_compact_start();
4489 noreclaim_flag = memalloc_noreclaim_save();
4490
4491 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4492 prio, &page);
4493
4494 memalloc_noreclaim_restore(noreclaim_flag);
4495 psi_memstall_leave(&pflags);
4496 delayacct_compact_end();
4497
4498 if (*compact_result == COMPACT_SKIPPED)
4499 return NULL;
4500
4501
4502
4503
4504 count_vm_event(COMPACTSTALL);
4505
4506
4507 if (page)
4508 prep_new_page(page, order, gfp_mask, alloc_flags);
4509
4510
4511 if (!page)
4512 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4513
4514 if (page) {
4515 struct zone *zone = page_zone(page);
4516
4517 zone->compact_blockskip_flush = false;
4518 compaction_defer_reset(zone, order, true);
4519 count_vm_event(COMPACTSUCCESS);
4520 return page;
4521 }
4522
4523
4524
4525
4526
4527 count_vm_event(COMPACTFAIL);
4528
4529 cond_resched();
4530
4531 return NULL;
4532 }
4533
4534 static inline bool
4535 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4536 enum compact_result compact_result,
4537 enum compact_priority *compact_priority,
4538 int *compaction_retries)
4539 {
4540 int max_retries = MAX_COMPACT_RETRIES;
4541 int min_priority;
4542 bool ret = false;
4543 int retries = *compaction_retries;
4544 enum compact_priority priority = *compact_priority;
4545
4546 if (!order)
4547 return false;
4548
4549 if (fatal_signal_pending(current))
4550 return false;
4551
4552 if (compaction_made_progress(compact_result))
4553 (*compaction_retries)++;
4554
4555
4556
4557
4558
4559
4560 if (compaction_failed(compact_result))
4561 goto check_priority;
4562
4563
4564
4565
4566
4567 if (compaction_needs_reclaim(compact_result)) {
4568 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4569 goto out;
4570 }
4571
4572
4573
4574
4575
4576
4577
4578 if (compaction_withdrawn(compact_result)) {
4579 goto check_priority;
4580 }
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590 if (order > PAGE_ALLOC_COSTLY_ORDER)
4591 max_retries /= 4;
4592 if (*compaction_retries <= max_retries) {
4593 ret = true;
4594 goto out;
4595 }
4596
4597
4598
4599
4600
4601 check_priority:
4602 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4603 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4604
4605 if (*compact_priority > min_priority) {
4606 (*compact_priority)--;
4607 *compaction_retries = 0;
4608 ret = true;
4609 }
4610 out:
4611 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4612 return ret;
4613 }
4614 #else
4615 static inline struct page *
4616 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4617 unsigned int alloc_flags, const struct alloc_context *ac,
4618 enum compact_priority prio, enum compact_result *compact_result)
4619 {
4620 *compact_result = COMPACT_SKIPPED;
4621 return NULL;
4622 }
4623
4624 static inline bool
4625 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4626 enum compact_result compact_result,
4627 enum compact_priority *compact_priority,
4628 int *compaction_retries)
4629 {
4630 struct zone *zone;
4631 struct zoneref *z;
4632
4633 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4634 return false;
4635
4636
4637
4638
4639
4640
4641
4642 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4643 ac->highest_zoneidx, ac->nodemask) {
4644 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4645 ac->highest_zoneidx, alloc_flags))
4646 return true;
4647 }
4648 return false;
4649 }
4650 #endif
4651
4652 #ifdef CONFIG_LOCKDEP
4653 static struct lockdep_map __fs_reclaim_map =
4654 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4655
4656 static bool __need_reclaim(gfp_t gfp_mask)
4657 {
4658
4659 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4660 return false;
4661
4662
4663 if (current->flags & PF_MEMALLOC)
4664 return false;
4665
4666 if (gfp_mask & __GFP_NOLOCKDEP)
4667 return false;
4668
4669 return true;
4670 }
4671
4672 void __fs_reclaim_acquire(unsigned long ip)
4673 {
4674 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4675 }
4676
4677 void __fs_reclaim_release(unsigned long ip)
4678 {
4679 lock_release(&__fs_reclaim_map, ip);
4680 }
4681
4682 void fs_reclaim_acquire(gfp_t gfp_mask)
4683 {
4684 gfp_mask = current_gfp_context(gfp_mask);
4685
4686 if (__need_reclaim(gfp_mask)) {
4687 if (gfp_mask & __GFP_FS)
4688 __fs_reclaim_acquire(_RET_IP_);
4689
4690 #ifdef CONFIG_MMU_NOTIFIER
4691 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4692 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4693 #endif
4694
4695 }
4696 }
4697 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4698
4699 void fs_reclaim_release(gfp_t gfp_mask)
4700 {
4701 gfp_mask = current_gfp_context(gfp_mask);
4702
4703 if (__need_reclaim(gfp_mask)) {
4704 if (gfp_mask & __GFP_FS)
4705 __fs_reclaim_release(_RET_IP_);
4706 }
4707 }
4708 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4709 #endif
4710
4711
4712
4713
4714
4715
4716
4717 static DEFINE_SEQLOCK(zonelist_update_seq);
4718
4719 static unsigned int zonelist_iter_begin(void)
4720 {
4721 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4722 return read_seqbegin(&zonelist_update_seq);
4723
4724 return 0;
4725 }
4726
4727 static unsigned int check_retry_zonelist(unsigned int seq)
4728 {
4729 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4730 return read_seqretry(&zonelist_update_seq, seq);
4731
4732 return seq;
4733 }
4734
4735
4736 static unsigned long
4737 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4738 const struct alloc_context *ac)
4739 {
4740 unsigned int noreclaim_flag;
4741 unsigned long progress;
4742
4743 cond_resched();
4744
4745
4746 cpuset_memory_pressure_bump();
4747 fs_reclaim_acquire(gfp_mask);
4748 noreclaim_flag = memalloc_noreclaim_save();
4749
4750 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4751 ac->nodemask);
4752
4753 memalloc_noreclaim_restore(noreclaim_flag);
4754 fs_reclaim_release(gfp_mask);
4755
4756 cond_resched();
4757
4758 return progress;
4759 }
4760
4761
4762 static inline struct page *
4763 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4764 unsigned int alloc_flags, const struct alloc_context *ac,
4765 unsigned long *did_some_progress)
4766 {
4767 struct page *page = NULL;
4768 unsigned long pflags;
4769 bool drained = false;
4770
4771 psi_memstall_enter(&pflags);
4772 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4773 if (unlikely(!(*did_some_progress)))
4774 goto out;
4775
4776 retry:
4777 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4778
4779
4780
4781
4782
4783
4784 if (!page && !drained) {
4785 unreserve_highatomic_pageblock(ac, false);
4786 drain_all_pages(NULL);
4787 drained = true;
4788 goto retry;
4789 }
4790 out:
4791 psi_memstall_leave(&pflags);
4792
4793 return page;
4794 }
4795
4796 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4797 const struct alloc_context *ac)
4798 {
4799 struct zoneref *z;
4800 struct zone *zone;
4801 pg_data_t *last_pgdat = NULL;
4802 enum zone_type highest_zoneidx = ac->highest_zoneidx;
4803
4804 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4805 ac->nodemask) {
4806 if (!managed_zone(zone))
4807 continue;
4808 if (last_pgdat != zone->zone_pgdat) {
4809 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4810 last_pgdat = zone->zone_pgdat;
4811 }
4812 }
4813 }
4814
4815 static inline unsigned int
4816 gfp_to_alloc_flags(gfp_t gfp_mask)
4817 {
4818 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4819
4820
4821
4822
4823
4824
4825 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4826 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4827
4828
4829
4830
4831
4832
4833
4834 alloc_flags |= (__force int)
4835 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4836
4837 if (gfp_mask & __GFP_ATOMIC) {
4838
4839
4840
4841
4842 if (!(gfp_mask & __GFP_NOMEMALLOC))
4843 alloc_flags |= ALLOC_HARDER;
4844
4845
4846
4847
4848 alloc_flags &= ~ALLOC_CPUSET;
4849 } else if (unlikely(rt_task(current)) && in_task())
4850 alloc_flags |= ALLOC_HARDER;
4851
4852 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4853
4854 return alloc_flags;
4855 }
4856
4857 static bool oom_reserves_allowed(struct task_struct *tsk)
4858 {
4859 if (!tsk_is_oom_victim(tsk))
4860 return false;
4861
4862
4863
4864
4865
4866 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4867 return false;
4868
4869 return true;
4870 }
4871
4872
4873
4874
4875
4876 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4877 {
4878 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4879 return 0;
4880 if (gfp_mask & __GFP_MEMALLOC)
4881 return ALLOC_NO_WATERMARKS;
4882 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4883 return ALLOC_NO_WATERMARKS;
4884 if (!in_interrupt()) {
4885 if (current->flags & PF_MEMALLOC)
4886 return ALLOC_NO_WATERMARKS;
4887 else if (oom_reserves_allowed(current))
4888 return ALLOC_OOM;
4889 }
4890
4891 return 0;
4892 }
4893
4894 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4895 {
4896 return !!__gfp_pfmemalloc_flags(gfp_mask);
4897 }
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
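/*
 * Decide whether the allocation should retry direct reclaim. We keep
 * retrying while some zone could satisfy the request if all of its
 * reclaimable pages were freed; costly (> PAGE_ALLOC_COSTLY_ORDER) requests
 * count every pass towards the retry limit, smaller ones only count passes
 * that made no progress. Once MAX_RECLAIM_RETRIES is exceeded, the
 * highatomic reserves are unreserved as a last resort instead of retrying.
 */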
4909 static inline bool
4910 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4911 struct alloc_context *ac, int alloc_flags,
4912 bool did_some_progress, int *no_progress_loops)
4913 {
4914 struct zone *zone;
4915 struct zoneref *z;
4916 bool ret = false;
4917
4918
4919
4920
4921
4922
4923 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4924 *no_progress_loops = 0;
4925 else
4926 (*no_progress_loops)++;
4927
4928
4929
4930
4931
4932 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4933
4934 return unreserve_highatomic_pageblock(ac, true);
4935 }
4936
4937
4938
4939
4940
4941
4942
4943 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4944 ac->highest_zoneidx, ac->nodemask) {
4945 unsigned long available;
4946 unsigned long reclaimable;
4947 unsigned long min_wmark = min_wmark_pages(zone);
4948 bool wmark;
4949
4950 available = reclaimable = zone_reclaimable_pages(zone);
4951 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4952
4953
4954
4955
4956
4957 wmark = __zone_watermark_ok(zone, order, min_wmark,
4958 ac->highest_zoneidx, alloc_flags, available);
4959 trace_reclaim_retry_zone(z, order, reclaimable,
4960 available, min_wmark, *no_progress_loops, wmark);
4961 if (wmark) {
4962 ret = true;
4963 break;
4964 }
4965 }
4966
4967
4968
4969
4970
4971
4972
4973
4974 if (current->flags & PF_WQ_WORKER)
4975 schedule_timeout_uninterruptible(1);
4976 else
4977 cond_resched();
4978 return ret;
4979 }
4980
4981 static inline bool
4982 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4983 {
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995 if (cpusets_enabled() && ac->nodemask &&
4996 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4997 ac->nodemask = NULL;
4998 return true;
4999 }
5000
5001
5002
5003
5004
5005
5006
5007
5008 if (read_mems_allowed_retry(cpuset_mems_cookie))
5009 return true;
5010
5011 return false;
5012 }
5013
5014 static inline struct page *
5015 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
5016 struct alloc_context *ac)
5017 {
5018 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
5019 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
5020 struct page *page = NULL;
5021 unsigned int alloc_flags;
5022 unsigned long did_some_progress;
5023 enum compact_priority compact_priority;
5024 enum compact_result compact_result;
5025 int compaction_retries;
5026 int no_progress_loops;
5027 unsigned int cpuset_mems_cookie;
5028 unsigned int zonelist_iter_cookie;
5029 int reserve_flags;
5030
5031
5032
5033
5034
5035 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
5036 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
5037 gfp_mask &= ~__GFP_ATOMIC;
5038
5039 restart:
5040 compaction_retries = 0;
5041 no_progress_loops = 0;
5042 compact_priority = DEF_COMPACT_PRIORITY;
5043 cpuset_mems_cookie = read_mems_allowed_begin();
5044 zonelist_iter_cookie = zonelist_iter_begin();
5045
5046
5047
5048
5049
5050
5051 alloc_flags = gfp_to_alloc_flags(gfp_mask);
5052
5053
5054
5055
5056
5057
5058
5059 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5060 ac->highest_zoneidx, ac->nodemask);
5061 if (!ac->preferred_zoneref->zone)
5062 goto nopage;
5063
5064
5065
5066
5067
5068
5069 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
5070 struct zoneref *z = first_zones_zonelist(ac->zonelist,
5071 ac->highest_zoneidx,
5072 &cpuset_current_mems_allowed);
5073 if (!z->zone)
5074 goto nopage;
5075 }
5076
5077 if (alloc_flags & ALLOC_KSWAPD)
5078 wake_all_kswapds(order, gfp_mask, ac);
5079
5080
5081
5082
5083
5084 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5085 if (page)
5086 goto got_pg;
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097 if (can_direct_reclaim &&
5098 (costly_order ||
5099 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
5100 && !gfp_pfmemalloc_allowed(gfp_mask)) {
5101 page = __alloc_pages_direct_compact(gfp_mask, order,
5102 alloc_flags, ac,
5103 INIT_COMPACT_PRIORITY,
5104 &compact_result);
5105 if (page)
5106 goto got_pg;
5107
5108
5109
5110
5111
5112 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130 if (compact_result == COMPACT_SKIPPED ||
5131 compact_result == COMPACT_DEFERRED)
5132 goto nopage;
5133
5134
5135
5136
5137
5138
5139 compact_priority = INIT_COMPACT_PRIORITY;
5140 }
5141 }
5142
5143 retry:
5144
5145 if (alloc_flags & ALLOC_KSWAPD)
5146 wake_all_kswapds(order, gfp_mask, ac);
5147
5148 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
5149 if (reserve_flags)
5150 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
5151
5152
5153
5154
5155
5156
5157 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
5158 ac->nodemask = NULL;
5159 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5160 ac->highest_zoneidx, ac->nodemask);
5161 }
5162
5163
5164 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5165 if (page)
5166 goto got_pg;
5167
5168
5169 if (!can_direct_reclaim)
5170 goto nopage;
5171
5172
5173 if (current->flags & PF_MEMALLOC)
5174 goto nopage;
5175
5176
5177 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5178 &did_some_progress);
5179 if (page)
5180 goto got_pg;
5181
5182
5183 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
5184 compact_priority, &compact_result);
5185 if (page)
5186 goto got_pg;
5187
5188
5189 if (gfp_mask & __GFP_NORETRY)
5190 goto nopage;
5191
5192
5193
5194
5195
5196 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5197 goto nopage;
5198
5199 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5200 did_some_progress > 0, &no_progress_loops))
5201 goto retry;
5202
5203
5204
5205
5206
5207
5208
5209 if (did_some_progress > 0 &&
5210 should_compact_retry(ac, order, alloc_flags,
5211 compact_result, &compact_priority,
5212 &compaction_retries))
5213 goto retry;
5214
/*
 * Deal with possible cpuset update races or zonelist updates to avoid
 * an unnecessary OOM kill.
 */
5220 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5221 check_retry_zonelist(zonelist_iter_cookie))
5222 goto restart;
5223
/* Reclaim has failed us, start killing things */
5225 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5226 if (page)
5227 goto got_pg;
5228
/* Avoid allocations with no watermarks from looping endlessly */
5230 if (tsk_is_oom_victim(current) &&
5231 (alloc_flags & ALLOC_OOM ||
5232 (gfp_mask & __GFP_NOMEMALLOC)))
5233 goto nopage;
5234
/* Retry as long as the OOM killer is making progress */
5236 if (did_some_progress) {
5237 no_progress_loops = 0;
5238 goto retry;
5239 }
5240
5241 nopage:
5242
/*
 * Deal with possible cpuset update races or zonelist updates before
 * finally failing the allocation.
 */
5246 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5247 check_retry_zonelist(zonelist_iter_cookie))
5248 goto restart;
5249
/*
 * Make sure that __GFP_NOFAIL requests don't leak out and that we
 * always retry on their behalf.
 */
5254 if (gfp_mask & __GFP_NOFAIL) {
/*
 * All existing users of __GFP_NOFAIL are blockable, so warn
 * of any new users that actually require GFP_NOWAIT.
 */
5259 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
5260 goto fail;
5261
5262
/*
 * A PF_MEMALLOC request from this context is rather bizarre
 * because we cannot reclaim anything and can only loop waiting
 * for somebody else to do the work for us.
 */
5267 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
5268
/*
 * Non-failing costly orders are a hard requirement which we are not
 * prepared for much, so warn about these users so that we can
 * identify them and convert them to something more appropriate.
 */
5275 WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
5276
/*
 * Help non-failing allocations by giving them access to memory
 * reserves, but do not use ALLOC_NO_WATERMARKS because this could
 * deplete the whole memory reserve, which would just make the
 * situation worse.
 */
5283 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5284 if (page)
5285 goto got_pg;
5286
5287 cond_resched();
5288 goto retry;
5289 }
5290 fail:
5291 warn_alloc(gfp_mask, ac->nodemask,
5292 "page allocation failure: order:%u", order);
5293 got_pg:
5294 return page;
5295 }
5296
5297 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5298 int preferred_nid, nodemask_t *nodemask,
5299 struct alloc_context *ac, gfp_t *alloc_gfp,
5300 unsigned int *alloc_flags)
5301 {
5302 ac->highest_zoneidx = gfp_zone(gfp_mask);
5303 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5304 ac->nodemask = nodemask;
5305 ac->migratetype = gfp_migratetype(gfp_mask);
5306
5307 if (cpusets_enabled()) {
5308 *alloc_gfp |= __GFP_HARDWALL;
/*
 * When we are in the interrupt context, it is irrelevant to the
 * current task context: any node is ok.
 */
5313 if (in_task() && !ac->nodemask)
5314 ac->nodemask = &cpuset_current_mems_allowed;
5315 else
5316 *alloc_flags |= ALLOC_CPUSET;
5317 }
5318
5319 might_alloc(gfp_mask);
5320
5321 if (should_fail_alloc_page(gfp_mask, order))
5322 return false;
5323
5324 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5325
/* Dirty zone balancing only done in the fast path */
5327 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5328
/*
 * The preferred zone is used for statistics but crucially it is
 * also used as the starting point for the zonelist iterator. It
 * may get reset for allocations that ignore memory policies.
 */
5334 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5335 ac->highest_zoneidx, ac->nodemask);
5336
5337 return true;
5338 }
5339
/**
 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
 * @gfp: GFP flags for the allocation
 * @preferred_nid: The preferred NUMA node ID to allocate from
 * @nodemask: Set of nodes to allocate from, may be NULL
 * @nr_pages: The number of pages desired on the list or array
 * @page_list: Optional list to store the allocated pages
 * @page_array: Optional array to store the allocated pages
 *
 * This is a batched version of the page allocator that attempts to
 * allocate nr_pages quickly. Pages are added to page_list if page_list
 * is not NULL, otherwise it is assumed that the page_array is valid.
 *
 * For lists, nr_pages is the number of pages that should be allocated.
 *
 * For arrays, only NULL elements are populated with pages and nr_pages
 * is the maximum number of pages that will be stored in the array.
 *
 * Returns the number of pages on the list or array.
 */
5360 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5361 nodemask_t *nodemask, int nr_pages,
5362 struct list_head *page_list,
5363 struct page **page_array)
5364 {
5365 struct page *page;
5366 unsigned long flags;
5367 unsigned long __maybe_unused UP_flags;
5368 struct zone *zone;
5369 struct zoneref *z;
5370 struct per_cpu_pages *pcp;
5371 struct list_head *pcp_list;
5372 struct alloc_context ac;
5373 gfp_t alloc_gfp;
5374 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5375 int nr_populated = 0, nr_account = 0;
5376
/*
 * Skip populated array elements to determine if any pages need
 * to be allocated before disabling IRQs.
 */
5381 while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5382 nr_populated++;
5383
5384
5385 if (unlikely(nr_pages <= 0))
5386 goto out;
5387
5388
5389 if (unlikely(page_array && nr_pages - nr_populated == 0))
5390 goto out;
5391
/* The bulk allocator does not support memcg accounting. */
5393 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
5394 goto failed;
5395
/* Use the single page allocator for one page. */
5397 if (nr_pages - nr_populated == 1)
5398 goto failed;
5399
5400 #ifdef CONFIG_PAGE_OWNER
/*
 * PAGE_OWNER may recurse into the allocator to allocate space for
 * saving the stack while the pcp lock is held. Rather than
 * complicating the locking here, fall back to the single-page
 * allocator when page_owner tracking is enabled.
 */
5408 if (static_branch_unlikely(&page_owner_inited))
5409 goto failed;
5410 #endif
5411
5412
5413 gfp &= gfp_allowed_mask;
5414 alloc_gfp = gfp;
5415 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5416 goto out;
5417 gfp = alloc_gfp;
5418
/* Find an allowed local zone that meets the low watermark. */
5420 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5421 unsigned long mark;
5422
5423 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5424 !__cpuset_zone_allowed(zone, gfp)) {
5425 continue;
5426 }
5427
5428 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5429 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5430 goto failed;
5431 }
5432
5433 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5434 if (zone_watermark_fast(zone, 0, mark,
5435 zonelist_zone_idx(ac.preferred_zoneref),
5436 alloc_flags, gfp)) {
5437 break;
5438 }
5439 }
5440
/*
 * If there are no allowed local zones that meet the watermarks then
 * try to allocate a single page and reclaim if necessary.
 */
5445 if (unlikely(!zone))
5446 goto failed;
5447
5448
5449 pcp_trylock_prepare(UP_flags);
5450 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
5451 if (!pcp)
5452 goto failed_irq;
5453
5454
5455 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5456 while (nr_populated < nr_pages) {
5457
5458
5459 if (page_array && page_array[nr_populated]) {
5460 nr_populated++;
5461 continue;
5462 }
5463
5464 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5465 pcp, pcp_list);
5466 if (unlikely(!page)) {
5467
5468 if (!nr_account) {
5469 pcp_spin_unlock_irqrestore(pcp, flags);
5470 goto failed_irq;
5471 }
5472 break;
5473 }
5474 nr_account++;
5475
5476 prep_new_page(page, 0, gfp, 0);
5477 if (page_list)
5478 list_add(&page->lru, page_list);
5479 else
5480 page_array[nr_populated] = page;
5481 nr_populated++;
5482 }
5483
5484 pcp_spin_unlock_irqrestore(pcp, flags);
5485 pcp_trylock_finish(UP_flags);
5486
5487 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5488 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5489
5490 out:
5491 return nr_populated;
5492
5493 failed_irq:
5494 pcp_trylock_finish(UP_flags);
5495
5496 failed:
5497 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5498 if (page) {
5499 if (page_list)
5500 list_add(&page->lru, page_list);
5501 else
5502 page_array[nr_populated] = page;
5503 nr_populated++;
5504 }
5505
5506 goto out;
5507 }
5508 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
5509
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
5513 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5514 nodemask_t *nodemask)
5515 {
5516 struct page *page;
5517 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5518 gfp_t alloc_gfp;
5519 struct alloc_context ac = { };
5520
/*
 * There are several places where we assume that the order value is sane
 * so bail out early if the request is out of bound.
 */
5525 if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
5526 return NULL;
5527
5528 gfp &= gfp_allowed_mask;
5529
/*
 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
 * resp. GFP_NOIO which has to be inherited for all allocation requests
 * from a particular context which has been marked by
 * memalloc_no{fs,io}_{save,restore}.
 */
5536 gfp = current_gfp_context(gfp);
5537 alloc_gfp = gfp;
5538 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5539 &alloc_gfp, &alloc_flags))
5540 return NULL;
5541
/*
 * Forbid the first pass from falling back to types that fragment
 * memory until all local zones are considered.
 */
5546 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5547
/* First allocation attempt */
5549 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5550 if (likely(page))
5551 goto out;
5552
5553 alloc_gfp = gfp;
5554 ac.spread_dirty_pages = false;
5555
/*
 * Restore the original nodemask if it was potentially replaced with
 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
 */
5560 ac.nodemask = nodemask;
5561
5562 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5563
5564 out:
5565 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5566 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5567 __free_pages(page, order);
5568 page = NULL;
5569 }
5570
5571 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5572
5573 return page;
5574 }
5575 EXPORT_SYMBOL(__alloc_pages);
5576
5577 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
5578 nodemask_t *nodemask)
5579 {
5580 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
5581 preferred_nid, nodemask);
5582
5583 if (page && order > 1)
5584 prep_transhuge_page(page);
5585 return (struct folio *)page;
5586 }
5587 EXPORT_SYMBOL(__folio_alloc);
5588
/*
 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
 * address cannot represent highmem pages. Use alloc_pages and then kmap if
 * you need to access high mem.
 */
5594 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5595 {
5596 struct page *page;
5597
5598 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5599 if (!page)
5600 return 0;
5601 return (unsigned long) page_address(page);
5602 }
5603 EXPORT_SYMBOL(__get_free_pages);
5604
5605 unsigned long get_zeroed_page(gfp_t gfp_mask)
5606 {
5607 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5608 }
5609 EXPORT_SYMBOL(get_zeroed_page);
5610
/**
 * __free_pages - Free pages allocated with alloc_pages().
 * @page: The page pointer returned from alloc_pages().
 * @order: The order of the allocation.
 *
 * This function can free multi-page allocations that are not compound
 * pages.  It does not check that the @order passed in matches that of
 * the allocation, so it is easy to leak memory.  Freeing memory twice
 * will corrupt the free list.
 *
 * If the last reference to this page is speculative, it will be released
 * by put_page(), which only frees the first page of a non-compound
 * allocation.  To prevent the remaining pages from being leaked, the
 * subsequent pages are freed here when the caller's reference was not
 * the last one.  If you want the page's reference count to decide when
 * the memory is freed, allocate a compound page and use put_page().
 *
 * Context: May be called in interrupt context or while holding a normal
 * spinlock, but not in NMI context or while holding a raw spinlock.
 */
5631 void __free_pages(struct page *page, unsigned int order)
5632 {
5633 if (put_page_testzero(page))
5634 free_the_page(page, order);
5635 else if (!PageHead(page))
5636 while (order-- > 0)
5637 free_the_page(page + (1 << order), order);
5638 }
5639 EXPORT_SYMBOL(__free_pages);
5640
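/*
 * Free pages by virtual address, as returned by __get_free_pages().
 * An @addr of zero is accepted and is a no-op; any other value must be a
 * valid, page-aligned kernel virtual address for the original allocation.
 */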
5641 void free_pages(unsigned long addr, unsigned int order)
5642 {
5643 if (addr != 0) {
5644 VM_BUG_ON(!virt_addr_valid((void *)addr));
5645 __free_pages(virt_to_page((void *)addr), order);
5646 }
5647 }
5648
5649 EXPORT_SYMBOL(free_pages);
5650
/*
 * An arbitrary-length arbitrary-offset area of memory which resides
 * within a 0 or higher order page.  Multiple fragments within that page
 * are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simpler allocation framework for
 * page fragments.  This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
5662 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5663 gfp_t gfp_mask)
5664 {
5665 struct page *page = NULL;
5666 gfp_t gfp = gfp_mask;
5667
5668 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5669 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5670 __GFP_NOMEMALLOC;
5671 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5672 PAGE_FRAG_CACHE_MAX_ORDER);
5673 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5674 #endif
5675 if (unlikely(!page))
5676 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5677
5678 nc->va = page ? page_address(page) : NULL;
5679
5680 return page;
5681 }
5682
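/*
 * Drop @count references from the page backing a page_frag cache and
 * free it once the last reference is gone.
 */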
5683 void __page_frag_cache_drain(struct page *page, unsigned int count)
5684 {
5685 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5686
5687 if (page_ref_sub_and_test(page, count))
5688 free_the_page(page, compound_order(page));
5689 }
5690 EXPORT_SYMBOL(__page_frag_cache_drain);
5691
5692 void *page_frag_alloc_align(struct page_frag_cache *nc,
5693 unsigned int fragsz, gfp_t gfp_mask,
5694 unsigned int align_mask)
5695 {
5696 unsigned int size = PAGE_SIZE;
5697 struct page *page;
5698 int offset;
5699
5700 if (unlikely(!nc->va)) {
5701 refill:
5702 page = __page_frag_cache_refill(nc, gfp_mask);
5703 if (!page)
5704 return NULL;
5705
5706 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5707
5708 size = nc->size;
5709 #endif
/* Even if we own the page, we do not use atomic_set().
 * This would break get_page_unless_zero() users.
 */
5713 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5714
/* reset page count bias and offset to start of new frag */
5716 nc->pfmemalloc = page_is_pfmemalloc(page);
5717 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5718 nc->offset = size;
5719 }
5720
5721 offset = nc->offset - fragsz;
5722 if (unlikely(offset < 0)) {
5723 page = virt_to_page(nc->va);
5724
5725 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5726 goto refill;
5727
5728 if (unlikely(nc->pfmemalloc)) {
5729 free_the_page(page, compound_order(page));
5730 goto refill;
5731 }
5732
5733 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5734
5735 size = nc->size;
5736 #endif
5737
5738 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5739
5740
5741 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5742 offset = size - fragsz;
5743 if (unlikely(offset < 0)) {
/*
 * The caller is trying to allocate a fragment with fragsz > PAGE_SIZE
 * but the cache isn't big enough to satisfy the request; this may
 * happen in low memory conditions. We don't release the cache page
 * because it could make memory pressure worse, so we simply return
 * NULL here.
 */
5753 return NULL;
5754 }
5755 }
5756
5757 nc->pagecnt_bias--;
5758 offset &= align_mask;
5759 nc->offset = offset;
5760
5761 return nc->va + offset;
5762 }
5763 EXPORT_SYMBOL(page_frag_alloc_align);
5764
/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
5768 void page_frag_free(void *addr)
5769 {
5770 struct page *page = virt_to_head_page(addr);
5771
5772 if (unlikely(put_page_testzero(page)))
5773 free_the_page(page, compound_order(page));
5774 }
5775 EXPORT_SYMBOL(page_frag_free);
5776
5777 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5778 size_t size)
5779 {
5780 if (addr) {
5781 unsigned long alloc_end = addr + (PAGE_SIZE << order);
5782 unsigned long used = addr + PAGE_ALIGN(size);
5783
5784 split_page(virt_to_page((void *)addr), order);
5785 while (used < alloc_end) {
5786 free_page(used);
5787 used += PAGE_SIZE;
5788 }
5789 }
5790 return (void *)addr;
5791 }
5792
/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
5808 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5809 {
5810 unsigned int order = get_order(size);
5811 unsigned long addr;
5812
5813 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5814 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5815
5816 addr = __get_free_pages(gfp_mask, order);
5817 return make_alloc_exact(addr, order, size);
5818 }
5819 EXPORT_SYMBOL(alloc_pages_exact);
5820
/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * Like alloc_pages_exact(), but try to allocate on node @nid first before
 * falling back.
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
5833 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5834 {
5835 unsigned int order = get_order(size);
5836 struct page *p;
5837
5838 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5839 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5840
5841 p = alloc_pages_node(nid, gfp_mask, order);
5842 if (!p)
5843 return NULL;
5844 return make_alloc_exact((unsigned long)page_address(p), order, size);
5845 }
5846
/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
5854 void free_pages_exact(void *virt, size_t size)
5855 {
5856 unsigned long addr = (unsigned long)virt;
5857 unsigned long end = addr + PAGE_ALIGN(size);
5858
5859 while (addr < end) {
5860 free_page(addr);
5861 addr += PAGE_SIZE;
5862 }
5863 }
5864 EXPORT_SYMBOL(free_pages_exact);
5865
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the number of pages is calculated as:
 *
 *     nr_free_zone_pages = managed_pages - high_pages
 *
 * Return: number of pages beyond high watermark.
 */
5878 static unsigned long nr_free_zone_pages(int offset)
5879 {
5880 struct zoneref *z;
5881 struct zone *zone;
5882
5883
5884 unsigned long sum = 0;
5885
5886 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5887
5888 for_each_zone_zonelist(zone, z, zonelist, offset) {
5889 unsigned long size = zone_managed_pages(zone);
5890 unsigned long high = high_wmark_pages(zone);
5891 if (size > high)
5892 sum += size - high;
5893 }
5894
5895 return sum;
5896 }
5897
/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the
 * high watermark within the zones usable for GFP_USER allocations.
 *
 * Return: number of pages beyond high watermark within ZONE_DMA and
 * ZONE_NORMAL.
 */
5907 unsigned long nr_free_buffer_pages(void)
5908 {
5909 return nr_free_zone_pages(gfp_zone(GFP_USER));
5910 }
5911 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5912
5913 static inline void show_node(struct zone *zone)
5914 {
5915 if (IS_ENABLED(CONFIG_NUMA))
5916 printk("Node %d ", zone_to_nid(zone));
5917 }
5918
5919 long si_mem_available(void)
5920 {
5921 long available;
5922 unsigned long pagecache;
5923 unsigned long wmark_low = 0;
5924 unsigned long pages[NR_LRU_LISTS];
5925 unsigned long reclaimable;
5926 struct zone *zone;
5927 int lru;
5928
5929 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5930 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5931
5932 for_each_zone(zone)
5933 wmark_low += low_wmark_pages(zone);
5934
/*
 * Estimate the amount of memory available for userspace allocations,
 * without causing swapping or OOM.
 */
5939 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5940
/*
 * Not all the page cache can be freed, otherwise the system will
 * start swapping or thrashing. Assume at least half of the page
 * cache, or the low watermark worth of cache, needs to stay.
 */
5946 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5947 pagecache -= min(pagecache / 2, wmark_low);
5948 available += pagecache;
5949
/*
 * Part of the reclaimable slab and other kernel memory consists of
 * items that are in use, and cannot be freed. Cap this estimate at the
 * low watermark.
 */
5955 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5956 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5957 available += reclaimable - min(reclaimable / 2, wmark_low);
5958
5959 if (available < 0)
5960 available = 0;
5961 return available;
5962 }
5963 EXPORT_SYMBOL_GPL(si_mem_available);
5964
5965 void si_meminfo(struct sysinfo *val)
5966 {
5967 val->totalram = totalram_pages();
5968 val->sharedram = global_node_page_state(NR_SHMEM);
5969 val->freeram = global_zone_page_state(NR_FREE_PAGES);
5970 val->bufferram = nr_blockdev_pages();
5971 val->totalhigh = totalhigh_pages();
5972 val->freehigh = nr_free_highpages();
5973 val->mem_unit = PAGE_SIZE;
5974 }
5975
5976 EXPORT_SYMBOL(si_meminfo);
5977
5978 #ifdef CONFIG_NUMA
5979 void si_meminfo_node(struct sysinfo *val, int nid)
5980 {
5981 int zone_type;
5982 unsigned long managed_pages = 0;
5983 unsigned long managed_highpages = 0;
5984 unsigned long free_highpages = 0;
5985 pg_data_t *pgdat = NODE_DATA(nid);
5986
5987 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5988 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5989 val->totalram = managed_pages;
5990 val->sharedram = node_page_state(pgdat, NR_SHMEM);
5991 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5992 #ifdef CONFIG_HIGHMEM
5993 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5994 struct zone *zone = &pgdat->node_zones[zone_type];
5995
5996 if (is_highmem(zone)) {
5997 managed_highpages += zone_managed_pages(zone);
5998 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5999 }
6000 }
6001 val->totalhigh = managed_highpages;
6002 val->freehigh = free_highpages;
6003 #else
6004 val->totalhigh = managed_highpages;
6005 val->freehigh = free_highpages;
6006 #endif
6007 val->mem_unit = PAGE_SIZE;
6008 }
6009 #endif
6010
6011
6012
6013
6014
6015 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
6016 {
6017 if (!(flags & SHOW_MEM_FILTER_NODES))
6018 return false;
6019
6020
6021
6022
6023
6024
6025 if (!nodemask)
6026 nodemask = &cpuset_current_mems_allowed;
6027
6028 return !node_isset(nid, *nodemask);
6029 }
6030
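/* Convert a page count into a size in KiB for the reports below. */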
6031 #define K(x) ((x) << (PAGE_SHIFT-10))
6032
6033 static void show_migration_types(unsigned char type)
6034 {
6035 static const char types[MIGRATE_TYPES] = {
6036 [MIGRATE_UNMOVABLE] = 'U',
6037 [MIGRATE_MOVABLE] = 'M',
6038 [MIGRATE_RECLAIMABLE] = 'E',
6039 [MIGRATE_HIGHATOMIC] = 'H',
6040 #ifdef CONFIG_CMA
6041 [MIGRATE_CMA] = 'C',
6042 #endif
6043 #ifdef CONFIG_MEMORY_ISOLATION
6044 [MIGRATE_ISOLATE] = 'I',
6045 #endif
6046 };
6047 char tmp[MIGRATE_TYPES + 1];
6048 char *p = tmp;
6049 int i;
6050
6051 for (i = 0; i < MIGRATE_TYPES; i++) {
6052 if (type & (1 << i))
6053 *p++ = types[i];
6054 }
6055
6056 *p = '\0';
6057 printk(KERN_CONT "(%s) ", tmp);
6058 }
6059
6060
6061
6062
6063
6064
6065
6066
6067
6068
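/*
 * Show the free area lists and a summary of the system's memory state.
 * Nodes and zones rejected by show_mem_node_skip() are not reported.
 */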
6069 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
6070 {
6071 unsigned long free_pcp = 0;
6072 int cpu, nid;
6073 struct zone *zone;
6074 pg_data_t *pgdat;
6075
6076 for_each_populated_zone(zone) {
6077 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6078 continue;
6079
6080 for_each_online_cpu(cpu)
6081 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
6082 }
6083
6084 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
6085 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
6086 " unevictable:%lu dirty:%lu writeback:%lu\n"
6087 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
6088 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
6089 " kernel_misc_reclaimable:%lu\n"
6090 " free:%lu free_pcp:%lu free_cma:%lu\n",
6091 global_node_page_state(NR_ACTIVE_ANON),
6092 global_node_page_state(NR_INACTIVE_ANON),
6093 global_node_page_state(NR_ISOLATED_ANON),
6094 global_node_page_state(NR_ACTIVE_FILE),
6095 global_node_page_state(NR_INACTIVE_FILE),
6096 global_node_page_state(NR_ISOLATED_FILE),
6097 global_node_page_state(NR_UNEVICTABLE),
6098 global_node_page_state(NR_FILE_DIRTY),
6099 global_node_page_state(NR_WRITEBACK),
6100 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
6101 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
6102 global_node_page_state(NR_FILE_MAPPED),
6103 global_node_page_state(NR_SHMEM),
6104 global_node_page_state(NR_PAGETABLE),
6105 global_zone_page_state(NR_BOUNCE),
6106 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
6107 global_zone_page_state(NR_FREE_PAGES),
6108 free_pcp,
6109 global_zone_page_state(NR_FREE_CMA_PAGES));
6110
6111 for_each_online_pgdat(pgdat) {
6112 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
6113 continue;
6114
6115 printk("Node %d"
6116 " active_anon:%lukB"
6117 " inactive_anon:%lukB"
6118 " active_file:%lukB"
6119 " inactive_file:%lukB"
6120 " unevictable:%lukB"
6121 " isolated(anon):%lukB"
6122 " isolated(file):%lukB"
6123 " mapped:%lukB"
6124 " dirty:%lukB"
6125 " writeback:%lukB"
6126 " shmem:%lukB"
6127 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6128 " shmem_thp: %lukB"
6129 " shmem_pmdmapped: %lukB"
6130 " anon_thp: %lukB"
6131 #endif
6132 " writeback_tmp:%lukB"
6133 " kernel_stack:%lukB"
6134 #ifdef CONFIG_SHADOW_CALL_STACK
6135 " shadow_call_stack:%lukB"
6136 #endif
6137 " pagetables:%lukB"
6138 " all_unreclaimable? %s"
6139 "\n",
6140 pgdat->node_id,
6141 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
6142 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
6143 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
6144 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
6145 K(node_page_state(pgdat, NR_UNEVICTABLE)),
6146 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
6147 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
6148 K(node_page_state(pgdat, NR_FILE_MAPPED)),
6149 K(node_page_state(pgdat, NR_FILE_DIRTY)),
6150 K(node_page_state(pgdat, NR_WRITEBACK)),
6151 K(node_page_state(pgdat, NR_SHMEM)),
6152 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6153 K(node_page_state(pgdat, NR_SHMEM_THPS)),
6154 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
6155 K(node_page_state(pgdat, NR_ANON_THPS)),
6156 #endif
6157 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
6158 node_page_state(pgdat, NR_KERNEL_STACK_KB),
6159 #ifdef CONFIG_SHADOW_CALL_STACK
6160 node_page_state(pgdat, NR_KERNEL_SCS_KB),
6161 #endif
6162 K(node_page_state(pgdat, NR_PAGETABLE)),
6163 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
6164 "yes" : "no");
6165 }
6166
6167 for_each_populated_zone(zone) {
6168 int i;
6169
6170 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6171 continue;
6172
6173 free_pcp = 0;
6174 for_each_online_cpu(cpu)
6175 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
6176
6177 show_node(zone);
6178 printk(KERN_CONT
6179 "%s"
6180 " free:%lukB"
6181 " boost:%lukB"
6182 " min:%lukB"
6183 " low:%lukB"
6184 " high:%lukB"
6185 " reserved_highatomic:%luKB"
6186 " active_anon:%lukB"
6187 " inactive_anon:%lukB"
6188 " active_file:%lukB"
6189 " inactive_file:%lukB"
6190 " unevictable:%lukB"
6191 " writepending:%lukB"
6192 " present:%lukB"
6193 " managed:%lukB"
6194 " mlocked:%lukB"
6195 " bounce:%lukB"
6196 " free_pcp:%lukB"
6197 " local_pcp:%ukB"
6198 " free_cma:%lukB"
6199 "\n",
6200 zone->name,
6201 K(zone_page_state(zone, NR_FREE_PAGES)),
6202 K(zone->watermark_boost),
6203 K(min_wmark_pages(zone)),
6204 K(low_wmark_pages(zone)),
6205 K(high_wmark_pages(zone)),
6206 K(zone->nr_reserved_highatomic),
6207 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6208 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6209 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6210 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6211 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6212 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6213 K(zone->present_pages),
6214 K(zone_managed_pages(zone)),
6215 K(zone_page_state(zone, NR_MLOCK)),
6216 K(zone_page_state(zone, NR_BOUNCE)),
6217 K(free_pcp),
6218 K(this_cpu_read(zone->per_cpu_pageset->count)),
6219 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6220 printk("lowmem_reserve[]:");
6221 for (i = 0; i < MAX_NR_ZONES; i++)
6222 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6223 printk(KERN_CONT "\n");
6224 }
6225
6226 for_each_populated_zone(zone) {
6227 unsigned int order;
6228 unsigned long nr[MAX_ORDER], flags, total = 0;
6229 unsigned char types[MAX_ORDER];
6230
6231 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6232 continue;
6233 show_node(zone);
6234 printk(KERN_CONT "%s: ", zone->name);
6235
6236 spin_lock_irqsave(&zone->lock, flags);
6237 for (order = 0; order < MAX_ORDER; order++) {
6238 struct free_area *area = &zone->free_area[order];
6239 int type;
6240
6241 nr[order] = area->nr_free;
6242 total += nr[order] << order;
6243
6244 types[order] = 0;
6245 for (type = 0; type < MIGRATE_TYPES; type++) {
6246 if (!free_area_empty(area, type))
6247 types[order] |= 1 << type;
6248 }
6249 }
6250 spin_unlock_irqrestore(&zone->lock, flags);
6251 for (order = 0; order < MAX_ORDER; order++) {
6252 printk(KERN_CONT "%lu*%lukB ",
6253 nr[order], K(1UL) << order);
6254 if (nr[order])
6255 show_migration_types(types[order]);
6256 }
6257 printk(KERN_CONT "= %lukB\n", K(total));
6258 }
6259
6260 for_each_online_node(nid) {
6261 if (show_mem_node_skip(filter, nid, nodemask))
6262 continue;
6263 hugetlb_show_meminfo_node(nid);
6264 }
6265
6266 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6267
6268 show_swap_cache_info();
6269 }
6270
6271 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6272 {
6273 zoneref->zone = zone;
6274 zoneref->zone_idx = zone_idx(zone);
6275 }
6276
6277
6278
6279
6280
6281
6282 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6283 {
6284 struct zone *zone;
6285 enum zone_type zone_type = MAX_NR_ZONES;
6286 int nr_zones = 0;
6287
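/* Walk the node's zones from highest to lowest, recording populated ones. */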
6288 do {
6289 zone_type--;
6290 zone = pgdat->node_zones + zone_type;
6291 if (populated_zone(zone)) {
6292 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6293 check_highest_zone(zone_type);
6294 }
6295 } while (zone_type);
6296
6297 return nr_zones;
6298 }
6299
6300 #ifdef CONFIG_NUMA
6301
6302 static int __parse_numa_zonelist_order(char *s)
6303 {
6304
6305
6306
6307
6308
6309
6310 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6311 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
6312 return -EINVAL;
6313 }
6314 return 0;
6315 }
6316
6317 char numa_zonelist_order[] = "Node";
6318
6319
6320
6321
6322 int numa_zonelist_order_handler(struct ctl_table *table, int write,
6323 void *buffer, size_t *length, loff_t *ppos)
6324 {
6325 if (write)
6326 return __parse_numa_zonelist_order(buffer);
6327 return proc_dostring(table, write, buffer, length, ppos);
6328 }
6329
6330
6331 static int node_load[MAX_NUMNODES];
6332
6333
6334
6335
6336
6337
6338
6339
6340
6341
6342
6343
6344
6345
6346
6347
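/*
 * find_next_best_node - find the next node that should appear in a given
 * node's fallback list.
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that
 * should appear on a given node's fallback list: the node distance,
 * whether the node has CPUs, and how loaded it already is. Each node is
 * used at most once.
 *
 * Return: node id of the found node or NUMA_NO_NODE if no node is found.
 */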
6348 int find_next_best_node(int node, nodemask_t *used_node_mask)
6349 {
6350 int n, val;
6351 int min_val = INT_MAX;
6352 int best_node = NUMA_NO_NODE;
6353
6354
6355 if (!node_isset(node, *used_node_mask)) {
6356 node_set(node, *used_node_mask);
6357 return node;
6358 }
6359
6360 for_each_node_state(n, N_MEMORY) {
6361
6362
6363 if (node_isset(n, *used_node_mask))
6364 continue;
6365
6366
6367 val = node_distance(node, n);
6368
6369
6370 val += (n < node);
6371
6372
6373 if (!cpumask_empty(cpumask_of_node(n)))
6374 val += PENALTY_FOR_NODE_WITH_CPUS;
6375
6376
6377 val *= MAX_NUMNODES;
6378 val += node_load[n];
6379
6380 if (val < min_val) {
6381 min_val = val;
6382 best_node = n;
6383 }
6384 }
6385
6386 if (best_node >= 0)
6387 node_set(best_node, *used_node_mask);
6388
6389 return best_node;
6390 }
6391
6392
6393
6394
6395
6396
6397
6398 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6399 unsigned nr_nodes)
6400 {
6401 struct zoneref *zonerefs;
6402 int i;
6403
6404 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6405
6406 for (i = 0; i < nr_nodes; i++) {
6407 int nr_zones;
6408
6409 pg_data_t *node = NODE_DATA(node_order[i]);
6410
6411 nr_zones = build_zonerefs_node(node, zonerefs);
6412 zonerefs += nr_zones;
6413 }
6414 zonerefs->zone = NULL;
6415 zonerefs->zone_idx = 0;
6416 }
6417
6418
6419
6420
6421 static void build_thisnode_zonelists(pg_data_t *pgdat)
6422 {
6423 struct zoneref *zonerefs;
6424 int nr_zones;
6425
6426 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6427 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6428 zonerefs += nr_zones;
6429 zonerefs->zone = NULL;
6430 zonerefs->zone_idx = 0;
6431 }
6432
6433
6434
6435
6436
6437
6438
6439
6440 static void build_zonelists(pg_data_t *pgdat)
6441 {
6442 static int node_order[MAX_NUMNODES];
6443 int node, nr_nodes = 0;
6444 nodemask_t used_mask = NODE_MASK_NONE;
6445 int local_node, prev_node;
6446
6447
6448 local_node = pgdat->node_id;
6449 prev_node = local_node;
6450
6451 memset(node_order, 0, sizeof(node_order));
6452 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
/*
 * We don't want to pressure a particular node. So adding a penalty
 * to the first node in the same distance group makes it round-robin.
 */
6458 if (node_distance(local_node, node) !=
6459 node_distance(local_node, prev_node))
6460 node_load[node] += 1;
6461
6462 node_order[nr_nodes++] = node;
6463 prev_node = node;
6464 }
6465
6466 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6467 build_thisnode_zonelists(pgdat);
6468 pr_info("Fallback order for Node %d: ", local_node);
6469 for (node = 0; node < nr_nodes; node++)
6470 pr_cont("%d ", node_order[node]);
6471 pr_cont("\n");
6472 }
6473
6474 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
6481 int local_memory_node(int node)
6482 {
6483 struct zoneref *z;
6484
6485 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6486 gfp_zone(GFP_KERNEL),
6487 NULL);
6488 return zone_to_nid(z->zone);
6489 }
6490 #endif
6491
6492 static void setup_min_unmapped_ratio(void);
6493 static void setup_min_slab_ratio(void);
6494 #else
6495
6496 static void build_zonelists(pg_data_t *pgdat)
6497 {
6498 int node, local_node;
6499 struct zoneref *zonerefs;
6500 int nr_zones;
6501
6502 local_node = pgdat->node_id;
6503
6504 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6505 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6506 zonerefs += nr_zones;
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6517 if (!node_online(node))
6518 continue;
6519 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6520 zonerefs += nr_zones;
6521 }
6522 for (node = 0; node < local_node; node++) {
6523 if (!node_online(node))
6524 continue;
6525 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6526 zonerefs += nr_zones;
6527 }
6528
6529 zonerefs->zone = NULL;
6530 zonerefs->zone_idx = 0;
6531 }
6532
6533 #endif
6534
6535
6536
6537
6538
6539
6540
6541
6542
6543
6544
6545
6546
6547
6548
6549
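/*
 * boot_pageset and boot_zonestats back every zone's per-cpu state until
 * setup_per_cpu_pageset() allocates the real per-cpu structures. They use
 * a high limit of 0 and a batch of 1 so that boot-time frees go straight
 * to the buddy lists.
 */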
6550 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6551
6552 #define BOOT_PAGESET_HIGH 0
6553 #define BOOT_PAGESET_BATCH 1
6554 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
6555 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6556 DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6557
6558 static void __build_all_zonelists(void *data)
6559 {
6560 int nid;
6561 int __maybe_unused cpu;
6562 pg_data_t *self = data;
6563
6564 write_seqlock(&zonelist_update_seq);
6565
6566 #ifdef CONFIG_NUMA
6567 memset(node_load, 0, sizeof(node_load));
6568 #endif
6569
/*
 * This node is hotadded and no memory is yet present. So just
 * building zonelists is fine - no need to touch other nodes.
 */
6574 if (self && !node_online(self->node_id)) {
6575 build_zonelists(self);
6576 } else {
6577
6578
6579
6580
6581 for_each_node(nid) {
6582 pg_data_t *pgdat = NODE_DATA(nid);
6583
6584 build_zonelists(pgdat);
6585 }
6586
6587 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6588
6589
6590
6591
6592
6593
6594
6595
6596 for_each_online_cpu(cpu)
6597 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6598 #endif
6599 }
6600
6601 write_sequnlock(&zonelist_update_seq);
6602 }
6603
6604 static noinline void __init
6605 build_all_zonelists_init(void)
6606 {
6607 int cpu;
6608
6609 __build_all_zonelists(NULL);
6610
6611
6612
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624 for_each_possible_cpu(cpu)
6625 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
6626
6627 mminit_verify_zonelist();
6628 cpuset_init_current_mems_allowed();
6629 }
6630
6631
6632
6633
6634
6635
6636
6637 void __ref build_all_zonelists(pg_data_t *pgdat)
6638 {
6639 unsigned long vm_total_pages;
6640
6641 if (system_state == SYSTEM_BOOTING) {
6642 build_all_zonelists_init();
6643 } else {
6644 __build_all_zonelists(pgdat);
6645
6646 }
6647
6648 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
/*
 * Disable grouping by mobility if the number of pages in the
 * system is too low to allow the mechanism to work. It would be
 * more accurate, but expensive to check per-zone. This check is
 * made on memory-hotadd so a system can start with mobility
 * disabled and enable it later.
 */
6656 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6657 page_group_by_mobility_disabled = 1;
6658 else
6659 page_group_by_mobility_disabled = 0;
6660
6661 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
6662 nr_online_nodes,
6663 page_group_by_mobility_disabled ? "off" : "on",
6664 vm_total_pages);
6665 #ifdef CONFIG_NUMA
6666 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6667 #endif
6668 }
6669
6670
6671 static bool __meminit
6672 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6673 {
6674 static struct memblock_region *r;
6675
6676 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6677 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6678 for_each_mem_region(r) {
6679 if (*pfn < memblock_region_memory_end_pfn(r))
6680 break;
6681 }
6682 }
6683 if (*pfn >= memblock_region_memory_base_pfn(r) &&
6684 memblock_is_mirror(r)) {
6685 *pfn = memblock_region_memory_end_pfn(r);
6686 return true;
6687 }
6688 }
6689 return false;
6690 }
6691
/*
 * Initially all pages are reserved - free ones are freed up by
 * memblock_free_all() once the early boot process is done.
 * Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * functions such as prep_new_page() are called.
 */
6701 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6702 unsigned long start_pfn, unsigned long zone_end_pfn,
6703 enum meminit_context context,
6704 struct vmem_altmap *altmap, int migratetype)
6705 {
6706 unsigned long pfn, end_pfn = start_pfn + size;
6707 struct page *page;
6708
6709 if (highest_memmap_pfn < end_pfn - 1)
6710 highest_memmap_pfn = end_pfn - 1;
6711
6712 #ifdef CONFIG_ZONE_DEVICE
/*
 * Honor the reservation requested by the driver for this ZONE_DEVICE
 * memory. We limit the total number of pages to initialize to just
 * those that might contain the memory mapping. The rest of the
 * ZONE_DEVICE page initialization is deferred to
 * memmap_init_zone_device().
 */
6720 if (zone == ZONE_DEVICE) {
6721 if (!altmap)
6722 return;
6723
6724 if (start_pfn == altmap->base_pfn)
6725 start_pfn += altmap->reserve;
6726 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6727 }
6728 #endif
6729
6730 for (pfn = start_pfn; pfn < end_pfn; ) {
/*
 * There can be holes in boot-time mem_map[]s handed to this
 * function. They do not exist on hotplugged memory.
 */
6735 if (context == MEMINIT_EARLY) {
6736 if (overlap_memmap_init(zone, &pfn))
6737 continue;
6738 if (defer_init(nid, pfn, zone_end_pfn))
6739 break;
6740 }
6741
6742 page = pfn_to_page(pfn);
6743 __init_single_page(page, pfn, zone, nid);
6744 if (context == MEMINIT_HOTPLUG)
6745 __SetPageReserved(page);
6746
/*
 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
 * such that unmovable allocations won't be scattered all
 * over the place during system boot.
 */
6752 if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6753 set_pageblock_migratetype(page, migratetype);
6754 cond_resched();
6755 }
6756 pfn++;
6757 }
6758 }
6759
6760 #ifdef CONFIG_ZONE_DEVICE
6761 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
6762 unsigned long zone_idx, int nid,
6763 struct dev_pagemap *pgmap)
6764 {
6765
6766 __init_single_page(page, pfn, zone_idx, nid);
6767
6768
6769
6770
6771
6772
6773
6774
6775 __SetPageReserved(page);
6776
/*
 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
 * ever freed or placed on a driver-private list.
 */
6782 page->pgmap = pgmap;
6783 page->zone_device_data = NULL;
6784
6785
6786
6787
6788
6789
6790
6791
6792
6793
6794
6795 if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6796 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6797 cond_resched();
6798 }
6799 }
6800
6801
6802
6803
6804
6805
6806
6807
6808
6809 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
6810 unsigned long nr_pages)
6811 {
6812 return is_power_of_2(sizeof(struct page)) &&
6813 !altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages;
6814 }
6815
6816 static void __ref memmap_init_compound(struct page *head,
6817 unsigned long head_pfn,
6818 unsigned long zone_idx, int nid,
6819 struct dev_pagemap *pgmap,
6820 unsigned long nr_pages)
6821 {
6822 unsigned long pfn, end_pfn = head_pfn + nr_pages;
6823 unsigned int order = pgmap->vmemmap_shift;
6824
6825 __SetPageHead(head);
6826 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
6827 struct page *page = pfn_to_page(pfn);
6828
6829 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6830 prep_compound_tail(head, pfn - head_pfn);
6831 set_page_count(page, 0);
6832
/*
 * The first couple of tail pages store compound page metadata
 * (mapcount, order, pincount). Only call prep_compound_head()
 * once those tail pages have been initialized, so that the data
 * it writes is not overwritten by __init_zone_device_page().
 */
6840 if (pfn == head_pfn + 2)
6841 prep_compound_head(head, order);
6842 }
6843 }
6844
6845 void __ref memmap_init_zone_device(struct zone *zone,
6846 unsigned long start_pfn,
6847 unsigned long nr_pages,
6848 struct dev_pagemap *pgmap)
6849 {
6850 unsigned long pfn, end_pfn = start_pfn + nr_pages;
6851 struct pglist_data *pgdat = zone->zone_pgdat;
6852 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6853 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
6854 unsigned long zone_idx = zone_idx(zone);
6855 unsigned long start = jiffies;
6856 int nid = pgdat->node_id;
6857
6858 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6859 return;
6860
/*
 * The call to memmap_init should have already taken care of the
 * pages reserved for the memmap, so we can just jump to the end
 * of that region and start processing the device pages.
 */
6866 if (altmap) {
6867 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6868 nr_pages = end_pfn - start_pfn;
6869 }
6870
6871 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
6872 struct page *page = pfn_to_page(pfn);
6873
6874 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
6875
6876 if (pfns_per_compound == 1)
6877 continue;
6878
6879 memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
6880 compound_nr_pages(altmap, pfns_per_compound));
6881 }
6882
6883 pr_info("%s initialised %lu pages in %ums\n", __func__,
6884 nr_pages, jiffies_to_msecs(jiffies - start));
6885 }
6886
6887 #endif
6888 static void __meminit zone_init_free_lists(struct zone *zone)
6889 {
6890 unsigned int order, t;
6891 for_each_migratetype_order(order, t) {
6892 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6893 zone->free_area[order].nr_free = 0;
6894 }
6895 }
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905
6906
6907
6908
6909
6910
6911
6912
6913
6914
6915
6916
6917
6918
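/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are initialized by memmap_init_zone_range(). Struct pages that
 * correspond to holes in memblock.memory are nevertheless part of the
 * memory map, so initialize them here and mark them reserved so that
 * nothing ever uses them.
 */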
6919 static void __init init_unavailable_range(unsigned long spfn,
6920 unsigned long epfn,
6921 int zone, int node)
6922 {
6923 unsigned long pfn;
6924 u64 pgcnt = 0;
6925
6926 for (pfn = spfn; pfn < epfn; pfn++) {
6927 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6928 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6929 + pageblock_nr_pages - 1;
6930 continue;
6931 }
6932 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
6933 __SetPageReserved(pfn_to_page(pfn));
6934 pgcnt++;
6935 }
6936
6937 if (pgcnt)
6938 pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6939 node, zone_names[zone], pgcnt);
6940 }
6941
6942 static void __init memmap_init_zone_range(struct zone *zone,
6943 unsigned long start_pfn,
6944 unsigned long end_pfn,
6945 unsigned long *hole_pfn)
6946 {
6947 unsigned long zone_start_pfn = zone->zone_start_pfn;
6948 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6949 int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6950
6951 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6952 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6953
6954 if (start_pfn >= end_pfn)
6955 return;
6956
6957 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6958 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6959
6960 if (*hole_pfn < start_pfn)
6961 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6962
6963 *hole_pfn = end_pfn;
6964 }
6965
6966 static void __init memmap_init(void)
6967 {
6968 unsigned long start_pfn, end_pfn;
6969 unsigned long hole_pfn = 0;
6970 int i, j, zone_id = 0, nid;
6971
6972 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6973 struct pglist_data *node = NODE_DATA(nid);
6974
6975 for (j = 0; j < MAX_NR_ZONES; j++) {
6976 struct zone *zone = node->node_zones + j;
6977
6978 if (!populated_zone(zone))
6979 continue;
6980
6981 memmap_init_zone_range(zone, start_pfn, end_pfn,
6982 &hole_pfn);
6983 zone_id = j;
6984 }
6985 }
6986
6987 #ifdef CONFIG_SPARSEMEM
/*
 * Initialize the memory map for the hole between the end of memory and
 * the end of the last section: append those pages to the highest zone
 * of the last node. The round_up() only applies to SPARSEMEM; the
 * init_unavailable_range() call below runs either way.
 */
6997 end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
6998 if (hole_pfn < end_pfn)
6999 #endif
7000 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
7001 }
7002
7003 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
7004 phys_addr_t min_addr, int nid, bool exact_nid)
7005 {
7006 void *ptr;
7007
7008 if (exact_nid)
7009 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
7010 MEMBLOCK_ALLOC_ACCESSIBLE,
7011 nid);
7012 else
7013 ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
7014 MEMBLOCK_ALLOC_ACCESSIBLE,
7015 nid);
7016
7017 if (ptr && size > 0)
7018 page_init_poison(ptr, size);
7019
7020 return ptr;
7021 }
7022
7023 static int zone_batchsize(struct zone *zone)
7024 {
7025 #ifdef CONFIG_MMU
7026 int batch;
7027
7028
7029
7030
7031
7032
7033
7034 batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
7035 batch /= 4;
7036 if (batch < 1)
7037 batch = 1;
7038
/*
 * Clamp the batch to a 2^n - 1 value. Having a power
 * of 2 value was found to be more likely to have
 * suboptimal cache aliasing properties in some cases.
 *
 * For example if 2 tasks are alternately allocating
 * batches of pages, one task can end up with a lot
 * of pages of one half of the possible page colors
 * and the other with pages of the other colors.
 */
7049 batch = rounddown_pow_of_two(batch + batch/2) - 1;
7050
7051 return batch;
7052
7053 #else
7054
7055
7056
7057
7058
7059
7060
7061
7062
7063
7064
7065
7066
7067 return 0;
7068 #endif
7069 }
7070
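/*
 * Compute the per-cpu "high" limit for a zone's pcp lists: a share of the
 * zone's low watermark (or of a sysctl-configured fraction of the zone)
 * split across the CPUs local to the zone, but never below batch * 4.
 */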
7071 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
7072 {
7073 #ifdef CONFIG_MMU
7074 int high;
7075 int nr_split_cpus;
7076 unsigned long total_pages;
7077
7078 if (!percpu_pagelist_high_fraction) {
/*
 * By default, the high value of the pcp is based on the zone
 * low watermark so that if they are full then background
 * reclaim will not be started prematurely.
 */
7084 total_pages = low_wmark_pages(zone);
7085 } else {
/*
 * If percpu_pagelist_high_fraction is configured, the high
 * value is based on a fraction of the managed pages in the
 * zone.
 */
7091 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
7092 }
7093
7094
7095
7096
7097
7098
7099
7100
7101
7102 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
7103 if (!nr_split_cpus)
7104 nr_split_cpus = num_online_cpus();
7105 high = total_pages / nr_split_cpus;
7106
/*
 * Ensure high is at least batch * 4. The multiple is based on the
 * historical relationship between high and batch.
 */
7111 high = max(high, batch << 2);
7112
7113 return high;
7114 #else
7115 return 0;
7116 #endif
7117 }
7118
7119
7120
7121
7122
7123
7124
7125
7126
7127
7128
7129
7130
7131
7132
7133
7134
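/*
 * pcp->high and pcp->batch are related: batch is normally lower than high,
 * and the pcp lists are flushed once pcp->count reaches high. The
 * WRITE_ONCE() updates below only prevent store tearing; readers must
 * tolerate the two fields changing asynchronously relative to each other.
 */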
7135 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
7136 unsigned long batch)
7137 {
7138 WRITE_ONCE(pcp->batch, batch);
7139 WRITE_ONCE(pcp->high, high);
7140 }
7141
7142 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
7143 {
7144 int pindex;
7145
7146 memset(pcp, 0, sizeof(*pcp));
7147 memset(pzstats, 0, sizeof(*pzstats));
7148
7149 spin_lock_init(&pcp->lock);
7150 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
7151 INIT_LIST_HEAD(&pcp->lists[pindex]);
7152
7153
7154
7155
7156
7157
7158
7159 pcp->high = BOOT_PAGESET_HIGH;
7160 pcp->batch = BOOT_PAGESET_BATCH;
7161 pcp->free_factor = 0;
7162 }
7163
7164 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
7165 unsigned long batch)
7166 {
7167 struct per_cpu_pages *pcp;
7168 int cpu;
7169
7170 for_each_possible_cpu(cpu) {
7171 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7172 pageset_update(pcp, high, batch);
7173 }
7174 }
7175
7176
7177
7178
7179
7180 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
7181 {
7182 int new_high, new_batch;
7183
7184 new_batch = max(1, zone_batchsize(zone));
7185 new_high = zone_highsize(zone, new_batch, cpu_online);
7186
7187 if (zone->pageset_high == new_high &&
7188 zone->pageset_batch == new_batch)
7189 return;
7190
7191 zone->pageset_high = new_high;
7192 zone->pageset_batch = new_batch;
7193
7194 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
7195 }
7196
7197 void __meminit setup_zone_pageset(struct zone *zone)
7198 {
7199 int cpu;
7200
7201
7202 if (sizeof(struct per_cpu_zonestat) > 0)
7203 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
7204
7205 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
7206 for_each_possible_cpu(cpu) {
7207 struct per_cpu_pages *pcp;
7208 struct per_cpu_zonestat *pzstats;
7209
7210 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
7211 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7212 per_cpu_pages_init(pcp, pzstats);
7213 }
7214
7215 zone_set_pageset_high_and_batch(zone, 0);
7216 }
7217
7218
7219
7220
7221
7222 void __init setup_per_cpu_pageset(void)
7223 {
7224 struct pglist_data *pgdat;
7225 struct zone *zone;
7226 int __maybe_unused cpu;
7227
7228 for_each_populated_zone(zone)
7229 setup_zone_pageset(zone);
7230
7231 #ifdef CONFIG_NUMA
7232
7233
7234
7235
7236
7237
7238 for_each_possible_cpu(cpu) {
7239 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
7240 memset(pzstats->vm_numa_event, 0,
7241 sizeof(pzstats->vm_numa_event));
7242 }
7243 #endif
7244
7245 for_each_online_pgdat(pgdat)
7246 pgdat->per_cpu_nodestats =
7247 alloc_percpu(struct per_cpu_nodestat);
7248 }
7249
7250 static __meminit void zone_pcp_init(struct zone *zone)
7251 {
/*
 * The per-cpu subsystem is not up at this point. The following code
 * relies on the ability of the linker to provide the
 * offset of a (static) per-cpu variable into the per-cpu area.
 */
7257 zone->per_cpu_pageset = &boot_pageset;
7258 zone->per_cpu_zonestats = &boot_zonestats;
7259 zone->pageset_high = BOOT_PAGESET_HIGH;
7260 zone->pageset_batch = BOOT_PAGESET_BATCH;
7261
7262 if (populated_zone(zone))
7263 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
7264 zone->present_pages, zone_batchsize(zone));
7265 }
7266
7267 void __meminit init_currently_empty_zone(struct zone *zone,
7268 unsigned long zone_start_pfn,
7269 unsigned long size)
7270 {
7271 struct pglist_data *pgdat = zone->zone_pgdat;
7272 int zone_idx = zone_idx(zone) + 1;
7273
7274 if (zone_idx > pgdat->nr_zones)
7275 pgdat->nr_zones = zone_idx;
7276
7277 zone->zone_start_pfn = zone_start_pfn;
7278
7279 mminit_dprintk(MMINIT_TRACE, "memmap_init",
7280 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
7281 pgdat->node_id,
7282 (unsigned long)zone_idx(zone),
7283 zone_start_pfn, (zone_start_pfn + size));
7284
7285 zone_init_free_lists(zone);
7286 zone->initialized = 1;
7287 }
7288
7289
7290
7291
7292
7293
7294
7295
7296
7297
7298
7299
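/*
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max
 *       PFN over all memblock ranges are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * The range is derived from the information registered with
 * memblock_set_node(). A node with no available memory gets a 0..0 range.
 */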
7300 void __init get_pfn_range_for_nid(unsigned int nid,
7301 unsigned long *start_pfn, unsigned long *end_pfn)
7302 {
7303 unsigned long this_start_pfn, this_end_pfn;
7304 int i;
7305
7306 *start_pfn = -1UL;
7307 *end_pfn = 0;
7308
7309 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7310 *start_pfn = min(*start_pfn, this_start_pfn);
7311 *end_pfn = max(*end_pfn, this_end_pfn);
7312 }
7313
7314 if (*start_pfn == -1UL)
7315 *start_pfn = 0;
7316 }
7317
7318
7319
7320
7321
7322
7323 static void __init find_usable_zone_for_movable(void)
7324 {
7325 int zone_index;
7326 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7327 if (zone_index == ZONE_MOVABLE)
7328 continue;
7329
7330 if (arch_zone_highest_possible_pfn[zone_index] >
7331 arch_zone_lowest_possible_pfn[zone_index])
7332 break;
7333 }
7334
7335 VM_BUG_ON(zone_index == -1);
7336 movable_zone = zone_index;
7337 }
7338
7339
7340
7341
7342
7343
7344
7345
7346
7347
7348
7349 static void __init adjust_zone_range_for_zone_movable(int nid,
7350 unsigned long zone_type,
7351 unsigned long node_start_pfn,
7352 unsigned long node_end_pfn,
7353 unsigned long *zone_start_pfn,
7354 unsigned long *zone_end_pfn)
7355 {
7356
7357 if (zone_movable_pfn[nid]) {
7358
7359 if (zone_type == ZONE_MOVABLE) {
7360 *zone_start_pfn = zone_movable_pfn[nid];
7361 *zone_end_pfn = min(node_end_pfn,
7362 arch_zone_highest_possible_pfn[movable_zone]);
7363
7364
7365 } else if (!mirrored_kernelcore &&
7366 *zone_start_pfn < zone_movable_pfn[nid] &&
7367 *zone_end_pfn > zone_movable_pfn[nid]) {
7368 *zone_end_pfn = zone_movable_pfn[nid];
7369
7370
7371 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
7372 *zone_start_pfn = *zone_end_pfn;
7373 }
7374 }
7375
/*
 * Return the number of pages a zone spans in a node, including holes:
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
7380 static unsigned long __init zone_spanned_pages_in_node(int nid,
7381 unsigned long zone_type,
7382 unsigned long node_start_pfn,
7383 unsigned long node_end_pfn,
7384 unsigned long *zone_start_pfn,
7385 unsigned long *zone_end_pfn)
7386 {
7387 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7388 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7389
7390 if (!node_start_pfn && !node_end_pfn)
7391 return 0;
7392
7393
7394 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7395 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7396 adjust_zone_range_for_zone_movable(nid, zone_type,
7397 node_start_pfn, node_end_pfn,
7398 zone_start_pfn, zone_end_pfn);
7399
7400
7401 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7402 return 0;
7403
7404
7405 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7406 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7407
7408
7409 return *zone_end_pfn - *zone_start_pfn;
7410 }
7411
7412
7413
7414
7415
7416 unsigned long __init __absent_pages_in_range(int nid,
7417 unsigned long range_start_pfn,
7418 unsigned long range_end_pfn)
7419 {
7420 unsigned long nr_absent = range_end_pfn - range_start_pfn;
7421 unsigned long start_pfn, end_pfn;
7422 int i;
7423
7424 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7425 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7426 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7427 nr_absent -= end_pfn - start_pfn;
7428 }
7429 return nr_absent;
7430 }
7431
7432
7433
7434
7435
7436
7437
7438
7439 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7440 unsigned long end_pfn)
7441 {
7442 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7443 }
7444
7445
7446 static unsigned long __init zone_absent_pages_in_node(int nid,
7447 unsigned long zone_type,
7448 unsigned long node_start_pfn,
7449 unsigned long node_end_pfn)
7450 {
7451 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7452 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7453 unsigned long zone_start_pfn, zone_end_pfn;
7454 unsigned long nr_absent;
7455
7456
7457 if (!node_start_pfn && !node_end_pfn)
7458 return 0;
7459
7460 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7461 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7462
7463 adjust_zone_range_for_zone_movable(nid, zone_type,
7464 node_start_pfn, node_end_pfn,
7465 &zone_start_pfn, &zone_end_pfn);
7466 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7467
/*
 * ZONE_MOVABLE handling.
 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
 * and vice versa.
 */
7473 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7474 unsigned long start_pfn, end_pfn;
7475 struct memblock_region *r;
7476
7477 for_each_mem_region(r) {
7478 start_pfn = clamp(memblock_region_memory_base_pfn(r),
7479 zone_start_pfn, zone_end_pfn);
7480 end_pfn = clamp(memblock_region_memory_end_pfn(r),
7481 zone_start_pfn, zone_end_pfn);
7482
7483 if (zone_type == ZONE_MOVABLE &&
7484 memblock_is_mirror(r))
7485 nr_absent += end_pfn - start_pfn;
7486
7487 if (zone_type == ZONE_NORMAL &&
7488 !memblock_is_mirror(r))
7489 nr_absent += end_pfn - start_pfn;
7490 }
7491 }
7492
7493 return nr_absent;
7494 }
7495
7496 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7497 unsigned long node_start_pfn,
7498 unsigned long node_end_pfn)
7499 {
7500 unsigned long realtotalpages = 0, totalpages = 0;
7501 enum zone_type i;
7502
7503 for (i = 0; i < MAX_NR_ZONES; i++) {
7504 struct zone *zone = pgdat->node_zones + i;
7505 unsigned long zone_start_pfn, zone_end_pfn;
7506 unsigned long spanned, absent;
7507 unsigned long size, real_size;
7508
7509 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7510 node_start_pfn,
7511 node_end_pfn,
7512 &zone_start_pfn,
7513 &zone_end_pfn);
7514 absent = zone_absent_pages_in_node(pgdat->node_id, i,
7515 node_start_pfn,
7516 node_end_pfn);
7517
7518 size = spanned;
7519 real_size = size - absent;
7520
7521 if (size)
7522 zone->zone_start_pfn = zone_start_pfn;
7523 else
7524 zone->zone_start_pfn = 0;
7525 zone->spanned_pages = size;
7526 zone->present_pages = real_size;
7527 #if defined(CONFIG_MEMORY_HOTPLUG)
7528 zone->present_early_pages = real_size;
7529 #endif
7530
7531 totalpages += size;
7532 realtotalpages += real_size;
7533 }
7534
7535 pgdat->node_spanned_pages = totalpages;
7536 pgdat->node_present_pages = realtotalpages;
7537 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
7538 }
7539
7540 #ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_order by
 * rounding up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock,
 * finally round what is now in bits to the nearest long in bits, then
 * return it in bytes.
 */
7548 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7549 {
7550 unsigned long usemapsize;
7551
7552 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7553 usemapsize = roundup(zonesize, pageblock_nr_pages);
7554 usemapsize = usemapsize >> pageblock_order;
7555 usemapsize *= NR_PAGEBLOCK_BITS;
7556 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7557
7558 return usemapsize / 8;
7559 }
7560
7561 static void __ref setup_usemap(struct zone *zone)
7562 {
7563 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7564 zone->spanned_pages);
7565 zone->pageblock_flags = NULL;
7566 if (usemapsize) {
7567 zone->pageblock_flags =
7568 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7569 zone_to_nid(zone));
7570 if (!zone->pageblock_flags)
7571 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7572 usemapsize, zone->name, zone_to_nid(zone));
7573 }
7574 }
7575 #else
7576 static inline void setup_usemap(struct zone *zone) {}
7577 #endif
7578
7579 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7580
7581
7582 void __init set_pageblock_order(void)
7583 {
7584 unsigned int order = MAX_ORDER - 1;
7585
7586
7587 if (pageblock_order)
7588 return;
7589
7590
7591 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
7592 order = HUGETLB_PAGE_ORDER;
7593
7594
7595
7596
7597
7598
7599 pageblock_order = order;
7600 }
7601 #else
7602
7603
7604
7605
7606
7607
7608
7609 void __init set_pageblock_order(void)
7610 {
7611 }
7612
7613 #endif
7614
7615 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7616 unsigned long present_pages)
7617 {
7618 unsigned long pages = spanned_pages;
7619
7620
7621
7622
7623
7624
7625
7626
7627
7628 if (spanned_pages > present_pages + (present_pages >> 4) &&
7629 IS_ENABLED(CONFIG_SPARSEMEM))
7630 pages = present_pages;
7631
7632 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7633 }
7634
7635 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7636 static void pgdat_init_split_queue(struct pglist_data *pgdat)
7637 {
7638 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7639
7640 spin_lock_init(&ds_queue->split_queue_lock);
7641 INIT_LIST_HEAD(&ds_queue->split_queue);
7642 ds_queue->split_queue_len = 0;
7643 }
7644 #else
7645 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7646 #endif
7647
7648 #ifdef CONFIG_COMPACTION
7649 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7650 {
7651 init_waitqueue_head(&pgdat->kcompactd_wait);
7652 }
7653 #else
7654 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7655 #endif
7656
7657 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7658 {
7659 int i;
7660
7661 pgdat_resize_init(pgdat);
7662
7663 pgdat_init_split_queue(pgdat);
7664 pgdat_init_kcompactd(pgdat);
7665
7666 init_waitqueue_head(&pgdat->kswapd_wait);
7667 init_waitqueue_head(&pgdat->pfmemalloc_wait);
7668
7669 for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
7670 init_waitqueue_head(&pgdat->reclaim_wait[i]);
7671
7672 pgdat_page_ext_init(pgdat);
7673 lruvec_init(&pgdat->__lruvec);
7674 }
7675
7676 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7677 unsigned long remaining_pages)
7678 {
7679 atomic_long_set(&zone->managed_pages, remaining_pages);
7680 zone_set_nid(zone, nid);
7681 zone->name = zone_names[idx];
7682 zone->zone_pgdat = NODE_DATA(nid);
7683 spin_lock_init(&zone->lock);
7684 zone_seqlock_init(zone);
7685 zone_pcp_init(zone);
7686 }
7687
7688
7689
7690
7691
7692
7693
7694
7695 #ifdef CONFIG_MEMORY_HOTPLUG
7696 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
7697 {
7698 int nid = pgdat->node_id;
7699 enum zone_type z;
7700 int cpu;
7701
7702 pgdat_init_internals(pgdat);
7703
7704 if (pgdat->per_cpu_nodestats == &boot_nodestats)
7705 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
7706
7707
7708
7709
7710
7711
7712 pgdat->nr_zones = 0;
7713 pgdat->kswapd_order = 0;
7714 pgdat->kswapd_highest_zoneidx = 0;
7715 pgdat->node_start_pfn = 0;
7716 for_each_online_cpu(cpu) {
7717 struct per_cpu_nodestat *p;
7718
7719 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
7720 memset(p, 0, sizeof(*p));
7721 }
7722
7723 for (z = 0; z < MAX_NR_ZONES; z++)
7724 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7725 }
7726 #endif
7727
7728
7729
7730
7731
7732
7733
7734
7735
7736
7737 static void __init free_area_init_core(struct pglist_data *pgdat)
7738 {
7739 enum zone_type j;
7740 int nid = pgdat->node_id;
7741
7742 pgdat_init_internals(pgdat);
7743 pgdat->per_cpu_nodestats = &boot_nodestats;
7744
7745 for (j = 0; j < MAX_NR_ZONES; j++) {
7746 struct zone *zone = pgdat->node_zones + j;
7747 unsigned long size, freesize, memmap_pages;
7748
7749 size = zone->spanned_pages;
7750 freesize = zone->present_pages;
7751
/*
 * Adjust freesize so that it accounts for how much memory
 * is used by this zone for memmap. This affects the watermark
 * and per-cpu initialisations.
 */
7757 memmap_pages = calc_memmap_size(size, freesize);
7758 if (!is_highmem_idx(j)) {
7759 if (freesize >= memmap_pages) {
7760 freesize -= memmap_pages;
7761 if (memmap_pages)
7762 pr_debug(" %s zone: %lu pages used for memmap\n",
7763 zone_names[j], memmap_pages);
7764 } else
7765 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
7766 zone_names[j], memmap_pages, freesize);
7767 }
7768
7769
7770 if (j == 0 && freesize > dma_reserve) {
7771 freesize -= dma_reserve;
7772 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
7773 }
7774
7775 if (!is_highmem_idx(j))
7776 nr_kernel_pages += freesize;
7777
7778 else if (nr_kernel_pages > memmap_pages * 2)
7779 nr_kernel_pages -= memmap_pages;
7780 nr_all_pages += freesize;
7781
7782
7783
7784
7785
7786
7787 zone_init_internals(zone, j, nid, freesize);
7788
7789 if (!size)
7790 continue;
7791
7792 set_pageblock_order();
7793 setup_usemap(zone);
7794 init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7795 }
7796 }
7797
7798 #ifdef CONFIG_FLATMEM
7799 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
7800 {
7801 unsigned long __maybe_unused start = 0;
7802 unsigned long __maybe_unused offset = 0;
7803
7804
7805 if (!pgdat->node_spanned_pages)
7806 return;
7807
7808 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7809 offset = pgdat->node_start_pfn - start;
7810
7811 if (!pgdat->node_mem_map) {
7812 unsigned long size, end;
7813 struct page *map;
7814
7815 /*
7816  * The zone's endpoints aren't required to be MAX_ORDER
7817  * aligned but the node_mem_map endpoints must be in order
7818  * for the buddy allocator to function correctly.
7819  */
7820 end = pgdat_end_pfn(pgdat);
7821 end = ALIGN(end, MAX_ORDER_NR_PAGES);
7822 size = (end - start) * sizeof(struct page);
7823 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
7824 pgdat->node_id, false);
7825 if (!map)
7826 panic("Failed to allocate %ld bytes for node %d memory map\n",
7827 size, pgdat->node_id);
7828 pgdat->node_mem_map = map + offset;
7829 }
7830 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7831 __func__, pgdat->node_id, (unsigned long)pgdat,
7832 (unsigned long)pgdat->node_mem_map);
7833 #ifndef CONFIG_NUMA
7834
7835
7836
7837 if (pgdat == NODE_DATA(0)) {
7838 mem_map = NODE_DATA(0)->node_mem_map;
7839 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7840 mem_map -= offset;
7841 }
7842 #endif
7843 }
7844 #else
7845 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
7846 #endif
7847
7848 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7849 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7850 {
7851 pgdat->first_deferred_pfn = ULONG_MAX;
7852 }
7853 #else
7854 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7855 #endif
7856
7857 static void __init free_area_init_node(int nid)
7858 {
7859 pg_data_t *pgdat = NODE_DATA(nid);
7860 unsigned long start_pfn = 0;
7861 unsigned long end_pfn = 0;
7862
7863
7864 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7865
7866 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7867
7868 pgdat->node_id = nid;
7869 pgdat->node_start_pfn = start_pfn;
7870 pgdat->per_cpu_nodestats = NULL;
7871
7872 if (start_pfn != end_pfn) {
7873 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7874 (u64)start_pfn << PAGE_SHIFT,
7875 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7876 } else {
7877 pr_info("Initmem setup node %d as memoryless\n", nid);
7878 }
7879
7880 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7881
7882 alloc_node_mem_map(pgdat);
7883 pgdat_set_deferred_range(pgdat);
7884
7885 free_area_init_core(pgdat);
7886 }
7887
7888 static void __init free_area_init_memoryless_node(int nid)
7889 {
7890 free_area_init_node(nid);
7891 }
7892
7893 #if MAX_NUMNODES > 1
7894 /*
7895  * Figure out the number of possible node ids.
7896  */
7897 void __init setup_nr_node_ids(void)
7898 {
7899 unsigned int highest;
7900
7901 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7902 nr_node_ids = highest + 1;
7903 }
7904 #endif
7905
7906 /**
7907  * node_map_pfn_alignment - determine the maximum internode alignment
7908  *
7909  * This function should be called after node map is populated and sorted.
7910  * It calculates the maximum power of two alignment which can distinguish
7911  * all the nodes.
7912  *
7913  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7914  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7915  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7916  * shifted, 1GiB is enough and this function will indicate so.
7917  *
7918  * This is used to test whether pfn -> nid mapping of the chosen memory
7919  * model has fine enough granularity to avoid incorrect mapping for the
7920  * populated node map.
7921  *
7922  * Return: the determined alignment in pfn's.  0 if there is no alignment
7923  * requirement (single node).
7924  */
7925 unsigned long __init node_map_pfn_alignment(void)
7926 {
7927 unsigned long accl_mask = 0, last_end = 0;
7928 unsigned long start, end, mask;
7929 int last_nid = NUMA_NO_NODE;
7930 int i, nid;
7931
7932 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7933 if (!start || last_nid < 0 || last_nid == nid) {
7934 last_nid = nid;
7935 last_end = end;
7936 continue;
7937 }
7938
7939
7940
7941
7942
7943
7944 mask = ~((1 << __ffs(start)) - 1);
7945 while (mask && last_end <= (start & (mask << 1)))
7946 mask <<= 1;
7947
7948
7949 accl_mask |= mask;
7950 }
7951
7952
7953 return ~accl_mask + 1;
7954 }
7955
7956 /**
7957  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7958  *
7959  * Return: the minimum PFN based on information provided via
7960  * memblock_set_node().
7961  */
7962 unsigned long __init find_min_pfn_with_active_regions(void)
7963 {
7964 return PHYS_PFN(memblock_start_of_DRAM());
7965 }
7966
7967 /*
7968  * early_calculate_totalpages()
7969  * Sum pages in active regions for movable zone.
7970  * Populate N_MEMORY for calculating usable_nodes.
7971  */
7972 static unsigned long __init early_calculate_totalpages(void)
7973 {
7974 unsigned long totalpages = 0;
7975 unsigned long start_pfn, end_pfn;
7976 int i, nid;
7977
7978 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7979 unsigned long pages = end_pfn - start_pfn;
7980
7981 totalpages += pages;
7982 if (pages)
7983 node_set_state(nid, N_MEMORY);
7984 }
7985 return totalpages;
7986 }
7987
7988 /*
7989  * Find the PFN the Movable zone begins in each node. Kernel memory
7990  * is spread evenly between nodes as long as the nodes have enough
7991  * memory. When they don't, some nodes will have more kernelcore than
7992  * others.
7993  */
7994 static void __init find_zone_movable_pfns_for_nodes(void)
7995 {
7996 int i, nid;
7997 unsigned long usable_startpfn;
7998 unsigned long kernelcore_node, kernelcore_remaining;
7999
8000 nodemask_t saved_node_state = node_states[N_MEMORY];
8001 unsigned long totalpages = early_calculate_totalpages();
8002 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
8003 struct memblock_region *r;
8004
8005
8006 find_usable_zone_for_movable();
8007
8008
8009
8010
8011
8012 if (movable_node_is_enabled()) {
8013 for_each_mem_region(r) {
8014 if (!memblock_is_hotpluggable(r))
8015 continue;
8016
8017 nid = memblock_get_region_node(r);
8018
8019 usable_startpfn = PFN_DOWN(r->base);
8020 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8021 min(usable_startpfn, zone_movable_pfn[nid]) :
8022 usable_startpfn;
8023 }
8024
8025 goto out2;
8026 }
8027
8028
8029
8030
8031 if (mirrored_kernelcore) {
8032 bool mem_below_4gb_not_mirrored = false;
8033
8034 for_each_mem_region(r) {
8035 if (memblock_is_mirror(r))
8036 continue;
8037
8038 nid = memblock_get_region_node(r);
8039
8040 usable_startpfn = memblock_region_memory_base_pfn(r);
8041
8042 if (usable_startpfn < PHYS_PFN(SZ_4G)) {
8043 mem_below_4gb_not_mirrored = true;
8044 continue;
8045 }
8046
8047 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8048 min(usable_startpfn, zone_movable_pfn[nid]) :
8049 usable_startpfn;
8050 }
8051
8052 if (mem_below_4gb_not_mirrored)
8053 pr_warn("This configuration results in unmirrored kernel memory.\n");
8054
8055 goto out2;
8056 }
8057
8058
8059
8060
8061
8062 if (required_kernelcore_percent)
8063 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
8064 10000UL;
8065 if (required_movablecore_percent)
8066 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
8067 10000UL;
8068
8069
8070
8071
8072
8073
8074
8075
8076
8077 if (required_movablecore) {
8078 unsigned long corepages;
8079
8080
8081
8082
8083
8084 required_movablecore =
8085 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
8086 required_movablecore = min(totalpages, required_movablecore);
8087 corepages = totalpages - required_movablecore;
8088
8089 required_kernelcore = max(required_kernelcore, corepages);
8090 }
8091
8092
8093
8094
8095
8096 if (!required_kernelcore || required_kernelcore >= totalpages)
8097 goto out;
8098
8099
8100 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
8101
8102 restart:
8103
8104 kernelcore_node = required_kernelcore / usable_nodes;
8105 for_each_node_state(nid, N_MEMORY) {
8106 unsigned long start_pfn, end_pfn;
8107
8108
8109
8110
8111
8112
8113 if (required_kernelcore < kernelcore_node)
8114 kernelcore_node = required_kernelcore / usable_nodes;
8115
8116
8117
8118
8119
8120
8121 kernelcore_remaining = kernelcore_node;
8122
8123
8124 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
8125 unsigned long size_pages;
8126
8127 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
8128 if (start_pfn >= end_pfn)
8129 continue;
8130
8131
8132 if (start_pfn < usable_startpfn) {
8133 unsigned long kernel_pages;
8134 kernel_pages = min(end_pfn, usable_startpfn)
8135 - start_pfn;
8136
8137 kernelcore_remaining -= min(kernel_pages,
8138 kernelcore_remaining);
8139 required_kernelcore -= min(kernel_pages,
8140 required_kernelcore);
8141
8142
8143 if (end_pfn <= usable_startpfn) {
8144
8145
8146
8147
8148
8149
8150
8151 zone_movable_pfn[nid] = end_pfn;
8152 continue;
8153 }
8154 start_pfn = usable_startpfn;
8155 }
8156
8157
8158
8159
8160
8161
8162 size_pages = end_pfn - start_pfn;
8163 if (size_pages > kernelcore_remaining)
8164 size_pages = kernelcore_remaining;
8165 zone_movable_pfn[nid] = start_pfn + size_pages;
8166
8167
8168
8169
8170
8171
8172 required_kernelcore -= min(required_kernelcore,
8173 size_pages);
8174 kernelcore_remaining -= size_pages;
8175 if (!kernelcore_remaining)
8176 break;
8177 }
8178 }
8179
8180
8181
8182
8183
8184
8185
8186 usable_nodes--;
8187 if (usable_nodes && required_kernelcore > usable_nodes)
8188 goto restart;
8189
8190 out2:
8191
8192 for (nid = 0; nid < MAX_NUMNODES; nid++) {
8193 unsigned long start_pfn, end_pfn;
8194
8195 zone_movable_pfn[nid] =
8196 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
8197
8198 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8199 if (zone_movable_pfn[nid] >= end_pfn)
8200 zone_movable_pfn[nid] = 0;
8201 }
8202
8203 out:
8204
8205 node_states[N_MEMORY] = saved_node_state;
8206 }
8207
8208
8209 static void check_for_memory(pg_data_t *pgdat, int nid)
8210 {
8211 enum zone_type zone_type;
8212
8213 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
8214 struct zone *zone = &pgdat->node_zones[zone_type];
8215 if (populated_zone(zone)) {
8216 if (IS_ENABLED(CONFIG_HIGHMEM))
8217 node_set_state(nid, N_HIGH_MEMORY);
8218 if (zone_type <= ZONE_NORMAL)
8219 node_set_state(nid, N_NORMAL_MEMORY);
8220 break;
8221 }
8222 }
8223 }
8224
8225 /*
8226  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
8227  * such cases we allow max_zone_pfn sorted in the descending order.
8228  */
8229 bool __weak arch_has_descending_max_zone_pfns(void)
8230 {
8231 return false;
8232 }
8233
8234 /**
8235  * free_area_init - Initialise all pg_data_t and zone data
8236  * @max_zone_pfn: an array of max PFNs for each zone
8237  *
8238  * This will call free_area_init_node() for each active node in the system.
8239  * Using the page ranges provided by memblock_set_node(), the size of each
8240  * zone in each node and their holes is calculated. If the maximum PFN
8241  * between two adjacent zones match, it is assumed that the zone is empty.
8242  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
8243  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
8244  * starts where the previous one ended. For example, ZONE_DMA32 starts
8245  * at arch_max_dma_pfn.
8246  */
8247 void __init free_area_init(unsigned long *max_zone_pfn)
8248 {
8249 unsigned long start_pfn, end_pfn;
8250 int i, nid, zone;
8251 bool descending;
8252
8253
8254 memset(arch_zone_lowest_possible_pfn, 0,
8255 sizeof(arch_zone_lowest_possible_pfn));
8256 memset(arch_zone_highest_possible_pfn, 0,
8257 sizeof(arch_zone_highest_possible_pfn));
8258
8259 start_pfn = find_min_pfn_with_active_regions();
8260 descending = arch_has_descending_max_zone_pfns();
8261
8262 for (i = 0; i < MAX_NR_ZONES; i++) {
8263 if (descending)
8264 zone = MAX_NR_ZONES - i - 1;
8265 else
8266 zone = i;
8267
8268 if (zone == ZONE_MOVABLE)
8269 continue;
8270
8271 end_pfn = max(max_zone_pfn[zone], start_pfn);
8272 arch_zone_lowest_possible_pfn[zone] = start_pfn;
8273 arch_zone_highest_possible_pfn[zone] = end_pfn;
8274
8275 start_pfn = end_pfn;
8276 }
8277
8278
8279 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
8280 find_zone_movable_pfns_for_nodes();
8281
8282
8283 pr_info("Zone ranges:\n");
8284 for (i = 0; i < MAX_NR_ZONES; i++) {
8285 if (i == ZONE_MOVABLE)
8286 continue;
8287 pr_info(" %-8s ", zone_names[i]);
8288 if (arch_zone_lowest_possible_pfn[i] ==
8289 arch_zone_highest_possible_pfn[i])
8290 pr_cont("empty\n");
8291 else
8292 pr_cont("[mem %#018Lx-%#018Lx]\n",
8293 (u64)arch_zone_lowest_possible_pfn[i]
8294 << PAGE_SHIFT,
8295 ((u64)arch_zone_highest_possible_pfn[i]
8296 << PAGE_SHIFT) - 1);
8297 }
8298
8299
8300 pr_info("Movable zone start for each node\n");
8301 for (i = 0; i < MAX_NUMNODES; i++) {
8302 if (zone_movable_pfn[i])
8303 pr_info(" Node %d: %#018Lx\n", i,
8304 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
8305 }
8306
8307
8308
8309
8310
8311
8312 pr_info("Early memory node ranges\n");
8313 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8314 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
8315 (u64)start_pfn << PAGE_SHIFT,
8316 ((u64)end_pfn << PAGE_SHIFT) - 1);
8317 subsection_map_init(start_pfn, end_pfn - start_pfn);
8318 }
8319
8320
8321 mminit_verify_pageflags_layout();
8322 setup_nr_node_ids();
8323 for_each_node(nid) {
8324 pg_data_t *pgdat;
8325
8326 if (!node_online(nid)) {
8327 pr_info("Initializing node %d as memoryless\n", nid);
8328
8329
8330 pgdat = arch_alloc_nodedata(nid);
8331 if (!pgdat) {
8332 pr_err("Cannot allocate %zuB for node %d.\n",
8333 sizeof(*pgdat), nid);
8334 continue;
8335 }
8336 arch_refresh_nodedata(nid, pgdat);
8337 free_area_init_memoryless_node(nid);
8338
8339
8340
8341
8342
8343
8344
8345
8346
8347
8348
8349 continue;
8350 }
8351
8352 pgdat = NODE_DATA(nid);
8353 free_area_init_node(nid);
8354
8355
8356 if (pgdat->node_present_pages)
8357 node_set_state(nid, N_MEMORY);
8358 check_for_memory(pgdat, nid);
8359 }
8360
8361 memmap_init();
8362 }
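/*
 * Illustrative sketch (simplified, not part of this file): an architecture's
 * early boot code fills in the max-PFN array and hands it to free_area_init().
 * MAX_DMA_PFN/MAX_DMA32_PFN and max_low_pfn/max_pfn below stand in for
 * whatever limits the architecture actually computes.
 *
 *	void __init zone_sizes_init(void)
 *	{
 *		unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *	#ifdef CONFIG_ZONE_DMA
 *		max_zone_pfns[ZONE_DMA]     = min(MAX_DMA_PFN, max_low_pfn);
 *	#endif
 *	#ifdef CONFIG_ZONE_DMA32
 *		max_zone_pfns[ZONE_DMA32]   = min(MAX_DMA32_PFN, max_low_pfn);
 *	#endif
 *		max_zone_pfns[ZONE_NORMAL]  = max_low_pfn;
 *	#ifdef CONFIG_HIGHMEM
 *		max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
 *	#endif
 *
 *		free_area_init(max_zone_pfns);
 *	}
 */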
8363
8364 static int __init cmdline_parse_core(char *p, unsigned long *core,
8365 unsigned long *percent)
8366 {
8367 unsigned long long coremem;
8368 char *endptr;
8369
8370 if (!p)
8371 return -EINVAL;
8372
8373
8374 coremem = simple_strtoull(p, &endptr, 0);
8375 if (*endptr == '%') {
8376
8377 WARN_ON(coremem > 100);
8378
8379 *percent = coremem;
8380 } else {
8381 coremem = memparse(p, &p);
8382
8383 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
8384
8385 *core = coremem >> PAGE_SHIFT;
8386 *percent = 0UL;
8387 }
8388 return 0;
8389 }
8390
8391 /*
8392  * kernelcore=size sets the amount of memory for use for allocations that
8393  * cannot be reclaimed or migrated.
8394  */
8395 static int __init cmdline_parse_kernelcore(char *p)
8396 {
8397
8398 if (parse_option_str(p, "mirror")) {
8399 mirrored_kernelcore = true;
8400 return 0;
8401 }
8402
8403 return cmdline_parse_core(p, &required_kernelcore,
8404 &required_kernelcore_percent);
8405 }
8406
8407 /*
8408  * movablecore=size sets the amount of memory for use for allocations that
8409  * can be reclaimed or migrated.
8410  */
8411 static int __init cmdline_parse_movablecore(char *p)
8412 {
8413 return cmdline_parse_core(p, &required_movablecore,
8414 &required_movablecore_percent);
8415 }
8416
8417 early_param("kernelcore", cmdline_parse_kernelcore);
8418 early_param("movablecore", cmdline_parse_movablecore);
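/*
 * Example command lines accepted by the parsers above (the sizes are
 * arbitrary):
 *
 *	kernelcore=512M		reserve 512 MiB for non-movable allocations
 *	kernelcore=25%		reserve 25% of total memory
 *	kernelcore=mirror	place kernel memory only in mirrored regions
 *	movablecore=2G		make 2 GiB available as ZONE_MOVABLE
 */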
8419
8420 void adjust_managed_page_count(struct page *page, long count)
8421 {
8422 atomic_long_add(count, &page_zone(page)->managed_pages);
8423 totalram_pages_add(count);
8424 #ifdef CONFIG_HIGHMEM
8425 if (PageHighMem(page))
8426 totalhigh_pages_add(count);
8427 #endif
8428 }
8429 EXPORT_SYMBOL(adjust_managed_page_count);
8430
8431 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
8432 {
8433 void *pos;
8434 unsigned long pages = 0;
8435
8436 start = (void *)PAGE_ALIGN((unsigned long)start);
8437 end = (void *)((unsigned long)end & PAGE_MASK);
8438 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
8439 struct page *page = virt_to_page(pos);
8440 void *direct_map_addr;
8441
8442
8443
8444
8445
8446
8447
8448
8449 direct_map_addr = page_address(page);
8450
8451
8452
8453
8454 direct_map_addr = kasan_reset_tag(direct_map_addr);
8455 if ((unsigned int)poison <= 0xFF)
8456 memset(direct_map_addr, poison, PAGE_SIZE);
8457
8458 free_reserved_page(page);
8459 }
8460
8461 if (pages && s)
8462 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
8463
8464 return pages;
8465 }
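/*
 * Sketch of a typical caller (in the spirit of free_initmem_default(); the
 * poison value is an assumption): release the kernel's __init sections back to
 * the page allocator once boot is complete.
 *
 *	extern char __init_begin[], __init_end[];
 *
 *	unsigned long freed = free_reserved_area(&__init_begin, &__init_end,
 *						 POISON_FREE_INITMEM,
 *						 "unused kernel image (initmem)");
 */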
8466
8467 void __init mem_init_print_info(void)
8468 {
8469 unsigned long physpages, codesize, datasize, rosize, bss_size;
8470 unsigned long init_code_size, init_data_size;
8471
8472 physpages = get_num_physpages();
8473 codesize = _etext - _stext;
8474 datasize = _edata - _sdata;
8475 rosize = __end_rodata - __start_rodata;
8476 bss_size = __bss_stop - __bss_start;
8477 init_data_size = __init_end - __init_begin;
8478 init_code_size = _einittext - _sinittext;
8479
8480
8481
8482
8483
8484
8485
8486
8487 #define adj_init_size(start, end, size, pos, adj) \
8488 do { \
8489 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
8490 size -= adj; \
8491 } while (0)
8492
8493 adj_init_size(__init_begin, __init_end, init_data_size,
8494 _sinittext, init_code_size);
8495 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8496 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8497 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8498 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8499
8500 #undef adj_init_size
8501
8502 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
8503 #ifdef CONFIG_HIGHMEM
8504 ", %luK highmem"
8505 #endif
8506 ")\n",
8507 K(nr_free_pages()), K(physpages),
8508 codesize >> 10, datasize >> 10, rosize >> 10,
8509 (init_data_size + init_code_size) >> 10, bss_size >> 10,
8510 K(physpages - totalram_pages() - totalcma_pages),
8511 K(totalcma_pages)
8512 #ifdef CONFIG_HIGHMEM
8513 , K(totalhigh_pages())
8514 #endif
8515 );
8516 }
8517
8518 /**
8519  * set_dma_reserve - set the specified number of pages reserved in the first zone
8520  * @new_dma_reserve: The number of pages to mark reserved
8521  *
8522  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8523  * In the DMA zone, a significant percentage may be consumed by kernel image
8524  * and other unfreeable allocations which can skew the watermarks badly. This
8525  * function may optionally be used to account for unfreeable pages in the
8526  * first zone (e.g., ZONE_DMA). The effect will be a lower watermark and
8527  * smaller per-cpu batchsize.
8528  */
8529 void __init set_dma_reserve(unsigned long new_dma_reserve)
8530 {
8531 dma_reserve = new_dma_reserve;
8532 }
8533
8534 static int page_alloc_cpu_dead(unsigned int cpu)
8535 {
8536 struct zone *zone;
8537
8538 lru_add_drain_cpu(cpu);
8539 mlock_page_drain_remote(cpu);
8540 drain_pages(cpu);
8541
8542
8543
8544
8545
8546
8547
8548 vm_events_fold_cpu(cpu);
8549
8550
8551
8552
8553
8554
8555
8556
8557 cpu_vm_stats_fold(cpu);
8558
8559 for_each_populated_zone(zone)
8560 zone_pcp_update(zone, 0);
8561
8562 return 0;
8563 }
8564
8565 static int page_alloc_cpu_online(unsigned int cpu)
8566 {
8567 struct zone *zone;
8568
8569 for_each_populated_zone(zone)
8570 zone_pcp_update(zone, 1);
8571 return 0;
8572 }
8573
8574 #ifdef CONFIG_NUMA
8575 int hashdist = HASHDIST_DEFAULT;
8576
8577 static int __init set_hashdist(char *str)
8578 {
8579 if (!str)
8580 return 0;
8581 hashdist = simple_strtoul(str, &str, 0);
8582 return 1;
8583 }
8584 __setup("hashdist=", set_hashdist);
8585 #endif
8586
8587 void __init page_alloc_init(void)
8588 {
8589 int ret;
8590
8591 #ifdef CONFIG_NUMA
8592 if (num_node_state(N_MEMORY) == 1)
8593 hashdist = 0;
8594 #endif
8595
8596 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8597 "mm/page_alloc:pcp",
8598 page_alloc_cpu_online,
8599 page_alloc_cpu_dead);
8600 WARN_ON(ret < 0);
8601 }
8602
8603 /*
8604  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8605  * or min_free_kbytes changes.
8606  */
8607 static void calculate_totalreserve_pages(void)
8608 {
8609 struct pglist_data *pgdat;
8610 unsigned long reserve_pages = 0;
8611 enum zone_type i, j;
8612
8613 for_each_online_pgdat(pgdat) {
8614
8615 pgdat->totalreserve_pages = 0;
8616
8617 for (i = 0; i < MAX_NR_ZONES; i++) {
8618 struct zone *zone = pgdat->node_zones + i;
8619 long max = 0;
8620 unsigned long managed_pages = zone_managed_pages(zone);
8621
8622
8623 for (j = i; j < MAX_NR_ZONES; j++) {
8624 if (zone->lowmem_reserve[j] > max)
8625 max = zone->lowmem_reserve[j];
8626 }
8627
8628
8629 max += high_wmark_pages(zone);
8630
8631 if (max > managed_pages)
8632 max = managed_pages;
8633
8634 pgdat->totalreserve_pages += max;
8635
8636 reserve_pages += max;
8637 }
8638 }
8639 totalreserve_pages = reserve_pages;
8640 }
8641
8642 /*
8643  * setup_per_zone_lowmem_reserve - called whenever
8644  * sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
8645  * has a correct pages reserved value, so an adequate number of
8646  * pages are left in the zone after a successful __alloc_pages().
8647  */
8648 static void setup_per_zone_lowmem_reserve(void)
8649 {
8650 struct pglist_data *pgdat;
8651 enum zone_type i, j;
8652
8653 for_each_online_pgdat(pgdat) {
8654 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8655 struct zone *zone = &pgdat->node_zones[i];
8656 int ratio = sysctl_lowmem_reserve_ratio[i];
8657 bool clear = !ratio || !zone_managed_pages(zone);
8658 unsigned long managed_pages = 0;
8659
8660 for (j = i + 1; j < MAX_NR_ZONES; j++) {
8661 struct zone *upper_zone = &pgdat->node_zones[j];
8662
8663 managed_pages += zone_managed_pages(upper_zone);
8664
8665 if (clear)
8666 zone->lowmem_reserve[j] = 0;
8667 else
8668 zone->lowmem_reserve[j] = managed_pages / ratio;
8669 }
8670 }
8671 }
8672
8673
8674 calculate_totalreserve_pages();
8675 }
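/*
 * Worked example with made-up sizes: on a node whose ZONE_DMA32 manages
 * 1,000,000 pages and whose ZONE_NORMAL manages 3,000,000 pages, the default
 * sysctl_lowmem_reserve_ratio of 256 for DMA32 gives
 *
 *	DMA32->lowmem_reserve[ZONE_NORMAL] = 3,000,000 / 256 =~ 11718 pages
 *
 * so an allocation that could have been satisfied from ZONE_NORMAL must leave
 * roughly 11718 extra free pages (~46 MiB with 4 KiB pages) in ZONE_DMA32
 * before the watermark check lets it fall back into that zone.
 */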
8676
8677 static void __setup_per_zone_wmarks(void)
8678 {
8679 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8680 unsigned long lowmem_pages = 0;
8681 struct zone *zone;
8682 unsigned long flags;
8683
8684
8685 for_each_zone(zone) {
8686 if (!is_highmem(zone))
8687 lowmem_pages += zone_managed_pages(zone);
8688 }
8689
8690 for_each_zone(zone) {
8691 u64 tmp;
8692
8693 spin_lock_irqsave(&zone->lock, flags);
8694 tmp = (u64)pages_min * zone_managed_pages(zone);
8695 do_div(tmp, lowmem_pages);
8696 if (is_highmem(zone)) {
8697
8698
8699
8700
8701
8702
8703
8704
8705
8706 unsigned long min_pages;
8707
8708 min_pages = zone_managed_pages(zone) / 1024;
8709 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8710 zone->_watermark[WMARK_MIN] = min_pages;
8711 } else {
8712
8713
8714
8715
8716 zone->_watermark[WMARK_MIN] = tmp;
8717 }
8718
8719
8720
8721
8722
8723
8724 tmp = max_t(u64, tmp >> 2,
8725 mult_frac(zone_managed_pages(zone),
8726 watermark_scale_factor, 10000));
8727
8728 zone->watermark_boost = 0;
8729 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
8730 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
8731 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
8732
8733 spin_unlock_irqrestore(&zone->lock, flags);
8734 }
8735
8736
8737 calculate_totalreserve_pages();
8738 }
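/*
 * Worked example with hypothetical numbers: min_free_kbytes = 65536 and 4 KiB
 * pages give pages_min = 16384. For a non-highmem zone managing 2,000,000 of
 * 4,000,000 total lowmem pages, with the default watermark_scale_factor of 10:
 *
 *	WMARK_MIN   = 16384 * 2,000,000 / 4,000,000          =  8192 pages
 *	gap         = max(8192 / 4, 2,000,000 * 10 / 10000)  =  2048 pages
 *	WMARK_LOW   = 8192  + 2048                           = 10240 pages
 *	WMARK_HIGH  = 10240 + 2048                           = 12288 pages
 *	WMARK_PROMO = 12288 + 2048                           = 14336 pages
 */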
8739
8740 /**
8741  * setup_per_zone_wmarks - called when min_free_kbytes changes
8742  * or when memory is hot-{added|removed}
8743  *
8744  * Ensures that the watermark[min,low,high] values for each zone are set
8745  * correctly with respect to min_free_kbytes.
8746  */
8747 void setup_per_zone_wmarks(void)
8748 {
8749 struct zone *zone;
8750 static DEFINE_SPINLOCK(lock);
8751
8752 spin_lock(&lock);
8753 __setup_per_zone_wmarks();
8754 spin_unlock(&lock);
8755
8756
8757
8758
8759
8760 for_each_zone(zone)
8761 zone_pcp_update(zone, 0);
8762 }
8763
8764 /*
8765  * Initialise min_free_kbytes.
8766  *
8767  * For small machines we want it small (128k min).  For large machines
8768  * we want it large (256MB max).  But it is not linear, because network
8769  * bandwidth does not increase linearly with machine size.  We use
8770  *
8771  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8772  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
8773  *
8774  * which yields
8775  *
8776  * 16MB:	512k
8777  * 32MB:	724k
8778  * 64MB:	1024k
8779  * 128MB:	1448k
8780  * 256MB:	2048k
8781  * 512MB:	2896k
8782  * 1024MB:	4096k
8783  * 2048MB:	5792k
8784  * 4096MB:	8192k
8785  * 8192MB:	11584k
8786  * 16384MB:	16384k
8787  */
8788 void calculate_min_free_kbytes(void)
8789 {
8790 unsigned long lowmem_kbytes;
8791 int new_min_free_kbytes;
8792
8793 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8794 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8795
8796 if (new_min_free_kbytes > user_min_free_kbytes)
8797 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
8798 else
8799 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8800 new_min_free_kbytes, user_min_free_kbytes);
8801
8802 }
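/*
 * Extending the table above with a hypothetical larger machine: 64 GiB of
 * lowmem gives lowmem_kbytes = 67,108,864, so new_min_free_kbytes =
 * int_sqrt(67,108,864 * 16) = 32768 (32 MiB) -- still well below the
 * 262144 (256 MiB) clamp ceiling.
 */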
8803
8804 int __meminit init_per_zone_wmark_min(void)
8805 {
8806 calculate_min_free_kbytes();
8807 setup_per_zone_wmarks();
8808 refresh_zone_stat_thresholds();
8809 setup_per_zone_lowmem_reserve();
8810
8811 #ifdef CONFIG_NUMA
8812 setup_min_unmapped_ratio();
8813 setup_min_slab_ratio();
8814 #endif
8815
8816 khugepaged_min_free_kbytes_update();
8817
8818 return 0;
8819 }
8820 postcore_initcall(init_per_zone_wmark_min)
8821
8822 /*
8823  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8824  * that we can call two helper functions whenever min_free_kbytes
8825  * changes.
8826  */
8827 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8828 void *buffer, size_t *length, loff_t *ppos)
8829 {
8830 int rc;
8831
8832 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8833 if (rc)
8834 return rc;
8835
8836 if (write) {
8837 user_min_free_kbytes = min_free_kbytes;
8838 setup_per_zone_wmarks();
8839 }
8840 return 0;
8841 }
8842
8843 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8844 void *buffer, size_t *length, loff_t *ppos)
8845 {
8846 int rc;
8847
8848 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8849 if (rc)
8850 return rc;
8851
8852 if (write)
8853 setup_per_zone_wmarks();
8854
8855 return 0;
8856 }
8857
8858 #ifdef CONFIG_NUMA
8859 static void setup_min_unmapped_ratio(void)
8860 {
8861 pg_data_t *pgdat;
8862 struct zone *zone;
8863
8864 for_each_online_pgdat(pgdat)
8865 pgdat->min_unmapped_pages = 0;
8866
8867 for_each_zone(zone)
8868 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8869 sysctl_min_unmapped_ratio) / 100;
8870 }
8871
8872
8873 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8874 void *buffer, size_t *length, loff_t *ppos)
8875 {
8876 int rc;
8877
8878 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8879 if (rc)
8880 return rc;
8881
8882 setup_min_unmapped_ratio();
8883
8884 return 0;
8885 }
8886
8887 static void setup_min_slab_ratio(void)
8888 {
8889 pg_data_t *pgdat;
8890 struct zone *zone;
8891
8892 for_each_online_pgdat(pgdat)
8893 pgdat->min_slab_pages = 0;
8894
8895 for_each_zone(zone)
8896 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8897 sysctl_min_slab_ratio) / 100;
8898 }
8899
8900 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8901 void *buffer, size_t *length, loff_t *ppos)
8902 {
8903 int rc;
8904
8905 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8906 if (rc)
8907 return rc;
8908
8909 setup_min_slab_ratio();
8910
8911 return 0;
8912 }
8913 #endif
8914
8915 /*
8916  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8917  * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8918  * whenever sysctl_lowmem_reserve_ratio changes.
8919  *
8920  * The reserve ratio obviously has absolutely no relation with the
8921  * minimum watermarks. The lowmem reserve ratio can only make sense
8922  * when it is increased.
8923  */
8924 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8925 void *buffer, size_t *length, loff_t *ppos)
8926 {
8927 int i;
8928
8929 proc_dointvec_minmax(table, write, buffer, length, ppos);
8930
8931 for (i = 0; i < MAX_NR_ZONES; i++) {
8932 if (sysctl_lowmem_reserve_ratio[i] < 1)
8933 sysctl_lowmem_reserve_ratio[i] = 0;
8934 }
8935
8936 setup_per_zone_lowmem_reserve();
8937 return 0;
8938 }
8939
8940 /*
8941  * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8942  * cpu. It is the fraction of total pages in each zone that a hot per cpu
8943  * pagelist can have before it gets flushed back to the buddy allocator.
8944  */
8945 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8946 int write, void *buffer, size_t *length, loff_t *ppos)
8947 {
8948 struct zone *zone;
8949 int old_percpu_pagelist_high_fraction;
8950 int ret;
8951
8952 mutex_lock(&pcp_batch_high_lock);
8953 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
8954
8955 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8956 if (!write || ret < 0)
8957 goto out;
8958
8959
8960 if (percpu_pagelist_high_fraction &&
8961 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8962 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
8963 ret = -EINVAL;
8964 goto out;
8965 }
8966
8967
8968 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
8969 goto out;
8970
8971 for_each_populated_zone(zone)
8972 zone_set_pageset_high_and_batch(zone, 0);
8973 out:
8974 mutex_unlock(&pcp_batch_high_lock);
8975 return ret;
8976 }
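/*
 * From userspace this handler is reached via
 * /proc/sys/vm/percpu_pagelist_high_fraction. For example,
 * "echo 8 > /proc/sys/vm/percpu_pagelist_high_fraction" lets each zone's
 * per-CPU pagelists collectively hold at most 1/8 of the zone's pages
 * (8 is the smallest accepted non-zero value); writing 0 returns to the
 * default heuristic based on the low watermark.
 */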
8977
8978 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8979
8980
8981
8982
8983 static unsigned long __init arch_reserved_kernel_pages(void)
8984 {
8985 return 0;
8986 }
8987 #endif
8988
8989 /*
8990  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8991  * machines. As memory size is increased the scale is also increased but at
8992  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8993  * quadruples the scale is increased by one, which means the size of hash table
8994  * only doubles, instead of quadrupling as well.
8995  * Because 32-bit systems cannot have large physical memory, where this scaling
8996  * makes sense, it is disabled on such platforms.
8997  */
8998 #if __BITS_PER_LONG > 32
8999 #define ADAPT_SCALE_BASE (64ul << 30)
9000 #define ADAPT_SCALE_SHIFT 2
9001 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
9002 #endif
9003
9004 /*
9005  * allocate a large system hash table from bootmem
9006  * - it is assumed that the hash table must contain an exact power-of-2
9007  *   quantity of entries
9008  * - limit is the number of hash buckets, not the total allocation size
9009  */
9010 void *__init alloc_large_system_hash(const char *tablename,
9011 unsigned long bucketsize,
9012 unsigned long numentries,
9013 int scale,
9014 int flags,
9015 unsigned int *_hash_shift,
9016 unsigned int *_hash_mask,
9017 unsigned long low_limit,
9018 unsigned long high_limit)
9019 {
9020 unsigned long long max = high_limit;
9021 unsigned long log2qty, size;
9022 void *table = NULL;
9023 gfp_t gfp_flags;
9024 bool virt;
9025 bool huge;
9026
9027
9028 if (!numentries) {
9029
9030 numentries = nr_kernel_pages;
9031 numentries -= arch_reserved_kernel_pages();
9032
9033
9034 if (PAGE_SHIFT < 20)
9035 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
9036
9037 #if __BITS_PER_LONG > 32
9038 if (!high_limit) {
9039 unsigned long adapt;
9040
9041 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
9042 adapt <<= ADAPT_SCALE_SHIFT)
9043 scale++;
9044 }
9045 #endif
9046
9047
9048 if (scale > PAGE_SHIFT)
9049 numentries >>= (scale - PAGE_SHIFT);
9050 else
9051 numentries <<= (PAGE_SHIFT - scale);
9052
9053
9054 if (unlikely(flags & HASH_SMALL)) {
9055
9056 WARN_ON(!(flags & HASH_EARLY));
9057 if (!(numentries >> *_hash_shift)) {
9058 numentries = 1UL << *_hash_shift;
9059 BUG_ON(!numentries);
9060 }
9061 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9062 numentries = PAGE_SIZE / bucketsize;
9063 }
9064 numentries = roundup_pow_of_two(numentries);
9065
9066
9067 if (max == 0) {
9068 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
9069 do_div(max, bucketsize);
9070 }
9071 max = min(max, 0x80000000ULL);
9072
9073 if (numentries < low_limit)
9074 numentries = low_limit;
9075 if (numentries > max)
9076 numentries = max;
9077
9078 log2qty = ilog2(numentries);
9079
9080 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
9081 do {
9082 virt = false;
9083 size = bucketsize << log2qty;
9084 if (flags & HASH_EARLY) {
9085 if (flags & HASH_ZERO)
9086 table = memblock_alloc(size, SMP_CACHE_BYTES);
9087 else
9088 table = memblock_alloc_raw(size,
9089 SMP_CACHE_BYTES);
9090 } else if (get_order(size) >= MAX_ORDER || hashdist) {
9091 table = vmalloc_huge(size, gfp_flags);
9092 virt = true;
9093 if (table)
9094 huge = is_vm_area_hugepages(table);
9095 } else {
9096
9097
9098
9099
9100
9101 table = alloc_pages_exact(size, gfp_flags);
9102 kmemleak_alloc(table, size, 1, gfp_flags);
9103 }
9104 } while (!table && size > PAGE_SIZE && --log2qty);
9105
9106 if (!table)
9107 panic("Failed to allocate %s hash table\n", tablename);
9108
9109 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
9110 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
9111 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
9112
9113 if (_hash_shift)
9114 *_hash_shift = log2qty;
9115 if (_hash_mask)
9116 *_hash_mask = (1 << log2qty) - 1;
9117
9118 return table;
9119 }
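/*
 * Usage sketch (modelled on how the dentry and inode caches size their hash
 * tables; the names and the scale/limit values here are illustrative only):
 *
 *	static struct hlist_head *my_hashtable __ro_after_init;
 *	static unsigned int my_hash_shift __ro_after_init;
 *
 *	my_hashtable = alloc_large_system_hash("MyCache",
 *					       sizeof(struct hlist_head),
 *					       0,		// size from memory
 *					       14,		// scale
 *					       HASH_EARLY | HASH_ZERO,
 *					       &my_hash_shift,
 *					       NULL,		// no mask wanted
 *					       0, 0);		// no explicit limits
 */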
9120
9121 #ifdef CONFIG_CONTIG_ALLOC
9122 #if defined(CONFIG_DYNAMIC_DEBUG) || \
9123 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
9124
9125 static void alloc_contig_dump_pages(struct list_head *page_list)
9126 {
9127 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
9128
9129 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
9130 struct page *page;
9131
9132 dump_stack();
9133 list_for_each_entry(page, page_list, lru)
9134 dump_page(page, "migration failure");
9135 }
9136 }
9137 #else
9138 static inline void alloc_contig_dump_pages(struct list_head *page_list)
9139 {
9140 }
9141 #endif
9142
9143
9144 int __alloc_contig_migrate_range(struct compact_control *cc,
9145 unsigned long start, unsigned long end)
9146 {
9147
9148 unsigned int nr_reclaimed;
9149 unsigned long pfn = start;
9150 unsigned int tries = 0;
9151 int ret = 0;
9152 struct migration_target_control mtc = {
9153 .nid = zone_to_nid(cc->zone),
9154 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
9155 };
9156
9157 lru_cache_disable();
9158
9159 while (pfn < end || !list_empty(&cc->migratepages)) {
9160 if (fatal_signal_pending(current)) {
9161 ret = -EINTR;
9162 break;
9163 }
9164
9165 if (list_empty(&cc->migratepages)) {
9166 cc->nr_migratepages = 0;
9167 ret = isolate_migratepages_range(cc, pfn, end);
9168 if (ret && ret != -EAGAIN)
9169 break;
9170 pfn = cc->migrate_pfn;
9171 tries = 0;
9172 } else if (++tries == 5) {
9173 ret = -EBUSY;
9174 break;
9175 }
9176
9177 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
9178 &cc->migratepages);
9179 cc->nr_migratepages -= nr_reclaimed;
9180
9181 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
9182 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
9183
9184
9185
9186
9187
9188 if (ret == -ENOMEM)
9189 break;
9190 }
9191
9192 lru_cache_enable();
9193 if (ret < 0) {
9194 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
9195 alloc_contig_dump_pages(&cc->migratepages);
9196 putback_movable_pages(&cc->migratepages);
9197 return ret;
9198 }
9199 return 0;
9200 }
9201
9202 /**
9203  * alloc_contig_range() -- tries to allocate given range of pages
9204  * @start:	start PFN to allocate
9205  * @end:	one-past-the-last PFN to allocate
9206  * @migratetype:	migratetype of the underlying pageblocks (either
9207  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
9208  *			in range must have the same migratetype and it must
9209  *			be either of the two.
9210  * @gfp_mask:	GFP mask to use during compaction
9211  *
9212  * The PFN range does not have to be pageblock aligned. The PFN range must
9213  * belong to a single zone.
9214  *
9215  * The first thing this routine does is attempt to set all pageblocks in the
9216  * range to MIGRATE_ISOLATE. Once isolated, the pageblocks should not be
9217  * modified by others.
9218  *
9219  * Return: zero on success or negative error code.  On success all
9220  * pages which PFN is in [start, end) are allocated for the caller and
9221  * need to be freed with free_contig_range().
9222  */
9223 int alloc_contig_range(unsigned long start, unsigned long end,
9224 unsigned migratetype, gfp_t gfp_mask)
9225 {
9226 unsigned long outer_start, outer_end;
9227 int order;
9228 int ret = 0;
9229
9230 struct compact_control cc = {
9231 .nr_migratepages = 0,
9232 .order = -1,
9233 .zone = page_zone(pfn_to_page(start)),
9234 .mode = MIGRATE_SYNC,
9235 .ignore_skip_hint = true,
9236 .no_set_skip_hint = true,
9237 .gfp_mask = current_gfp_context(gfp_mask),
9238 .alloc_contig = true,
9239 };
9240 INIT_LIST_HEAD(&cc.migratepages);
9241
9242
9243
9244
9245
9246
9247
9248
9249
9250
9251
9252
9253
9254
9255
9256
9257
9258
9259
9260
9261
9262
9263 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
9264 if (ret)
9265 goto done;
9266
9267 drain_all_pages(cc.zone);
9268
9269
9270
9271
9272
9273
9274
9275
9276
9277
9278
9279 ret = __alloc_contig_migrate_range(&cc, start, end);
9280 if (ret && ret != -EBUSY)
9281 goto done;
9282 ret = 0;
9283
9284
9285
9286
9287
9288
9289
9290
9291
9292
9293
9294
9295
9296
9297
9298
9299
9300
9301 order = 0;
9302 outer_start = start;
9303 while (!PageBuddy(pfn_to_page(outer_start))) {
9304 if (++order >= MAX_ORDER) {
9305 outer_start = start;
9306 break;
9307 }
9308 outer_start &= ~0UL << order;
9309 }
9310
9311 if (outer_start != start) {
9312 order = buddy_order(pfn_to_page(outer_start));
9313
9314
9315
9316
9317
9318
9319
9320 if (outer_start + (1UL << order) <= start)
9321 outer_start = start;
9322 }
9323
9324
9325 if (test_pages_isolated(outer_start, end, 0)) {
9326 ret = -EBUSY;
9327 goto done;
9328 }
9329
9330
9331 outer_end = isolate_freepages_range(&cc, outer_start, end);
9332 if (!outer_end) {
9333 ret = -EBUSY;
9334 goto done;
9335 }
9336
9337
9338 if (start != outer_start)
9339 free_contig_range(outer_start, start - outer_start);
9340 if (end != outer_end)
9341 free_contig_range(end, outer_end - end);
9342
9343 done:
9344 undo_isolate_page_range(start, end, migratetype);
9345 return ret;
9346 }
9347 EXPORT_SYMBOL(alloc_contig_range);
9348
9349 static int __alloc_contig_pages(unsigned long start_pfn,
9350 unsigned long nr_pages, gfp_t gfp_mask)
9351 {
9352 unsigned long end_pfn = start_pfn + nr_pages;
9353
9354 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9355 gfp_mask);
9356 }
9357
9358 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9359 unsigned long nr_pages)
9360 {
9361 unsigned long i, end_pfn = start_pfn + nr_pages;
9362 struct page *page;
9363
9364 for (i = start_pfn; i < end_pfn; i++) {
9365 page = pfn_to_online_page(i);
9366 if (!page)
9367 return false;
9368
9369 if (page_zone(page) != z)
9370 return false;
9371
9372 if (PageReserved(page))
9373 return false;
9374 }
9375 return true;
9376 }
9377
9378 static bool zone_spans_last_pfn(const struct zone *zone,
9379 unsigned long start_pfn, unsigned long nr_pages)
9380 {
9381 unsigned long last_pfn = start_pfn + nr_pages - 1;
9382
9383 return zone_spans_pfn(zone, last_pfn);
9384 }
9385
9386 /**
9387  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9388  * @nr_pages:	Number of contiguous pages to allocate
9389  * @gfp_mask:	GFP mask to limit search and used during compaction
9390  * @nid:	Target node
9391  * @nodemask:	Mask for other possible nodes
9392  *
9393  * This routine is a wrapper around alloc_contig_range(). It scans over zones
9394  * on an applicable zonelist to find a contiguous pfn range which can then be
9395  * tried for allocation with alloc_contig_range(). This routine is intended
9396  * for allocation requests which can not be fulfilled with the buddy allocator.
9397  *
9398  * The allocated memory is always aligned to a page boundary. If nr_pages is a
9399  * power of two, then the allocated range is also guaranteed to be aligned to
9400  * nr_pages (e.g. a 1GB request would be aligned to 1GB).
9401  *
9402  * Allocated pages can be freed with free_contig_range() or by manual call to
9403  * __free_page() on each allocated page.
9404  *
9405  * Return: pointer to contiguous pages on success, or NULL if not successful.
9406  */
9407 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9408 int nid, nodemask_t *nodemask)
9409 {
9410 unsigned long ret, pfn, flags;
9411 struct zonelist *zonelist;
9412 struct zone *zone;
9413 struct zoneref *z;
9414
9415 zonelist = node_zonelist(nid, gfp_mask);
9416 for_each_zone_zonelist_nodemask(zone, z, zonelist,
9417 gfp_zone(gfp_mask), nodemask) {
9418 spin_lock_irqsave(&zone->lock, flags);
9419
9420 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9421 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9422 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9423
9424
9425
9426
9427
9428
9429
9430 spin_unlock_irqrestore(&zone->lock, flags);
9431 ret = __alloc_contig_pages(pfn, nr_pages,
9432 gfp_mask);
9433 if (!ret)
9434 return pfn_to_page(pfn);
9435 spin_lock_irqsave(&zone->lock, flags);
9436 }
9437 pfn += nr_pages;
9438 }
9439 spin_unlock_irqrestore(&zone->lock, flags);
9440 }
9441 return NULL;
9442 }
9443 #endif
9444
9445 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9446 {
9447 unsigned long count = 0;
9448
9449 for (; nr_pages--; pfn++) {
9450 struct page *page = pfn_to_page(pfn);
9451
9452 count += page_count(page) != 1;
9453 __free_page(page);
9454 }
9455 WARN(count != 0, "%lu pages are still in use!\n", count);
9456 }
9457 EXPORT_SYMBOL(free_contig_range);
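/*
 * Usage sketch (hypothetical driver, CONFIG_CONTIG_ALLOC assumed): grab 16 MiB
 * of physically contiguous memory on the local node and release it again.
 *
 *	unsigned long nr_pages = SZ_16M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = alloc_contig_pages(nr_pages, GFP_KERNEL, numa_node_id(), NULL);
 *	if (!page)
 *		return -ENOMEM;
 *	// ... use [page_to_phys(page), page_to_phys(page) + SZ_16M) ...
 *	free_contig_range(page_to_pfn(page), nr_pages);
 */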
9458
9459 /*
9460  * The zone indicated has a new number of managed_pages; batch sizes and percpu
9461  * page high values need to be recalculated.
9462  */
9463 void zone_pcp_update(struct zone *zone, int cpu_online)
9464 {
9465 mutex_lock(&pcp_batch_high_lock);
9466 zone_set_pageset_high_and_batch(zone, cpu_online);
9467 mutex_unlock(&pcp_batch_high_lock);
9468 }
9469
9470 /*
9471  * Effectively disable pcplists for the zone by setting the high limit to 0
9472  * and draining all cpus. A concurrent page freeing on another CPU that's about
9473  * to put the page on the pcplist will either finish before the drain and the
9474  * page will be drained, or after the drain and the page will be freed directly
9475  * to the buddy allocator. pcp_batch_high_lock is held until the matching
9476  * zone_pcp_enable() call, so the pcplists stay disabled in between.
9477  */
9478 void zone_pcp_disable(struct zone *zone)
9479 {
9480 mutex_lock(&pcp_batch_high_lock);
9481 __zone_set_pageset_high_and_batch(zone, 0, 1);
9482 __drain_all_pages(zone, true);
9483 }
9484
9485 void zone_pcp_enable(struct zone *zone)
9486 {
9487 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9488 mutex_unlock(&pcp_batch_high_lock);
9489 }
9490
9491 void zone_pcp_reset(struct zone *zone)
9492 {
9493 int cpu;
9494 struct per_cpu_zonestat *pzstats;
9495
9496 if (zone->per_cpu_pageset != &boot_pageset) {
9497 for_each_online_cpu(cpu) {
9498 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9499 drain_zonestat(zone, pzstats);
9500 }
9501 free_percpu(zone->per_cpu_pageset);
9502 free_percpu(zone->per_cpu_zonestats);
9503 zone->per_cpu_pageset = &boot_pageset;
9504 zone->per_cpu_zonestats = &boot_zonestats;
9505 }
9506 }
9507
9508 #ifdef CONFIG_MEMORY_HOTREMOVE
9509 /*
9510  * All pages in the range must be in a single zone, must not contain holes,
9511  * must span full sections, and must be isolated before calling this function.
9512  */
9513 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9514 {
9515 unsigned long pfn = start_pfn;
9516 struct page *page;
9517 struct zone *zone;
9518 unsigned int order;
9519 unsigned long flags;
9520
9521 offline_mem_sections(pfn, end_pfn);
9522 zone = page_zone(pfn_to_page(pfn));
9523 spin_lock_irqsave(&zone->lock, flags);
9524 while (pfn < end_pfn) {
9525 page = pfn_to_page(pfn);
9526
9527
9528
9529
9530 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9531 pfn++;
9532 continue;
9533 }
9534
9535
9536
9537
9538 if (PageOffline(page)) {
9539 BUG_ON(page_count(page));
9540 BUG_ON(PageBuddy(page));
9541 pfn++;
9542 continue;
9543 }
9544
9545 BUG_ON(page_count(page));
9546 BUG_ON(!PageBuddy(page));
9547 order = buddy_order(page);
9548 del_page_from_free_list(page, zone, order);
9549 pfn += (1 << order);
9550 }
9551 spin_unlock_irqrestore(&zone->lock, flags);
9552 }
9553 #endif
9554
9555 /*
9556  * This function returns a stable result only if page is frozen or isolated.
9557  */
9558 bool is_free_buddy_page(struct page *page)
9559 {
9560 unsigned long pfn = page_to_pfn(page);
9561 unsigned int order;
9562
9563 for (order = 0; order < MAX_ORDER; order++) {
9564 struct page *page_head = page - (pfn & ((1 << order) - 1));
9565
9566 if (PageBuddy(page_head) &&
9567 buddy_order_unsafe(page_head) >= order)
9568 break;
9569 }
9570
9571 return order < MAX_ORDER;
9572 }
9573 EXPORT_SYMBOL(is_free_buddy_page);
9574
9575 #ifdef CONFIG_MEMORY_FAILURE
9576 /*
9577  * Break down a higher-order page in sub-pages, and keep our target out of
9578  * the buddy allocator.
9579  */
9580 static void break_down_buddy_pages(struct zone *zone, struct page *page,
9581 struct page *target, int low, int high,
9582 int migratetype)
9583 {
9584 unsigned long size = 1 << high;
9585 struct page *current_buddy, *next_page;
9586
9587 while (high > low) {
9588 high--;
9589 size >>= 1;
9590
9591 if (target >= &page[size]) {
9592 next_page = page + size;
9593 current_buddy = page;
9594 } else {
9595 next_page = page;
9596 current_buddy = page + size;
9597 }
9598
9599 if (set_page_guard(zone, current_buddy, high, migratetype))
9600 continue;
9601
9602 if (current_buddy != target) {
9603 add_to_free_list(current_buddy, zone, high, migratetype);
9604 set_buddy_order(current_buddy, high);
9605 page = next_page;
9606 }
9607 }
9608 }
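/*
 * Worked example: for an order-2 buddy block covering pages P0..P3 with P2 as
 * the target, the loop first splits at order 1 -- P0..P1 (the half without the
 * target) goes back on the order-1 free list -- and then at order 0, where P3
 * is returned to the order-0 free list. P2 itself is never re-added, so the
 * caller ends up holding just the page being poisoned (guard pages under
 * debug_pagealloc are the exception handled by set_page_guard() above).
 */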
9609
9610 /*
9611  * Take a page that will be marked as poisoned off the buddy allocator.
9612  */
9613 bool take_page_off_buddy(struct page *page)
9614 {
9615 struct zone *zone = page_zone(page);
9616 unsigned long pfn = page_to_pfn(page);
9617 unsigned long flags;
9618 unsigned int order;
9619 bool ret = false;
9620
9621 spin_lock_irqsave(&zone->lock, flags);
9622 for (order = 0; order < MAX_ORDER; order++) {
9623 struct page *page_head = page - (pfn & ((1 << order) - 1));
9624 int page_order = buddy_order(page_head);
9625
9626 if (PageBuddy(page_head) && page_order >= order) {
9627 unsigned long pfn_head = page_to_pfn(page_head);
9628 int migratetype = get_pfnblock_migratetype(page_head,
9629 pfn_head);
9630
9631 del_page_from_free_list(page_head, zone, page_order);
9632 break_down_buddy_pages(zone, page_head, page, 0,
9633 page_order, migratetype);
9634 SetPageHWPoisonTakenOff(page);
9635 if (!is_migrate_isolate(migratetype))
9636 __mod_zone_freepage_state(zone, -1, migratetype);
9637 ret = true;
9638 break;
9639 }
9640 if (page_count(page_head) > 0)
9641 break;
9642 }
9643 spin_unlock_irqrestore(&zone->lock, flags);
9644 return ret;
9645 }
9646
9647 /*
9648  * Cancel takeoff done by take_page_off_buddy().
9649  */
9650 bool put_page_back_buddy(struct page *page)
9651 {
9652 struct zone *zone = page_zone(page);
9653 unsigned long pfn = page_to_pfn(page);
9654 unsigned long flags;
9655 int migratetype = get_pfnblock_migratetype(page, pfn);
9656 bool ret = false;
9657
9658 spin_lock_irqsave(&zone->lock, flags);
9659 if (put_page_testzero(page)) {
9660 ClearPageHWPoisonTakenOff(page);
9661 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
9662 if (TestClearPageHWPoison(page)) {
9663 ret = true;
9664 }
9665 }
9666 spin_unlock_irqrestore(&zone->lock, flags);
9667
9668 return ret;
9669 }
9670 #endif
9671
9672 #ifdef CONFIG_ZONE_DMA
9673 bool has_managed_dma(void)
9674 {
9675 struct pglist_data *pgdat;
9676
9677 for_each_online_pgdat(pgdat) {
9678 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
9679
9680 if (managed_zone(zone))
9681 return true;
9682 }
9683 return false;
9684 }
9685 #endif