/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service: allocations above it are expected to fail more often
 * and callers should be prepared to handle that.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

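/*
 * Page mobility / migrate types used by the page allocator and by
 * compaction.  MIGRATE_PCPTYPES is the number of types that get their own
 * per-CPU page lists; MIGRATE_HIGHATOMIC is a pageblock reserve kept for
 * high-order atomic allocations.
 */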
enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of such a pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync with "enum migratetype" above */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

/*
 * Check whether a migratetype can be merged with another migratetype.
 *
 * It is only mergeable when it can fall back to other migratetypes for
 * allocation.  See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
 */
static inline bool migratetype_is_mergeable(int mt)
{
	return mt < MIGRATE_PCPTYPES;
}

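/* Walk every (order, migratetype) pair used by the buddy free lists. */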
#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)

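/*
 * A buddy free area: one free list per migratetype for this order, plus a
 * count of free pages in the area.
 */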
struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

struct pglist_data;

/*
 * Add a wild amount of padding here to ensure data fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_EVENT_ITEMS
};
#else
#define NR_VM_NUMA_EVENT_ITEMS 0
#endif

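/* Per-zone vmstat counters, kept in zone->vm_stat[]. */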
enum zone_stat_item {
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,
	NR_MLOCK,
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

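/*
 * Per-node vmstat counters.  Items whose name ends in _B are tracked in
 * bytes rather than pages (see vmstat_item_in_bytes() below).
 */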
enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,
	NR_ISOLATED_FILE,
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,
	NR_DIRTIED,
	NR_WRITTEN,
	NR_THROTTLED_WRITTEN,
	NR_KERNEL_MISC_RECLAIMABLE,
	NR_FOLL_PIN_ACQUIRED,
	NR_FOLL_PIN_RELEASED,
	NR_KERNEL_STACK_KB,
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,
#endif
	NR_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,
#endif
	NR_VM_NODE_STAT_ITEMS
};

/*
 * Returns true if the item should be printed in THPs (/proc/vmstat
 * currently prints number of anon, file and shmem THPs. But the item
 * is charged in pages).
 */
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return item == NR_ANON_THPS ||
	       item == NR_FILE_THPS ||
	       item == NR_SHMEM_THPS ||
	       item == NR_SHMEM_PMDMAPPED ||
	       item == NR_FILE_PMDMAPPED;
}

/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages).  This defines the API part, the internal
 * representation might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE.
	 * Internally values are stored in pages.
	 *
	 * Per-memcg and per-lruvec counters track memory, consumed
	 * by individual slab objects.  These counters are actually
	 * byte-precise.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

enum vmscan_throttle_state {
	VMSCAN_THROTTLE_WRITEBACK,
	VMSCAN_THROTTLE_ISOLATED,
	VMSCAN_THROTTLE_NOPROGRESS,
	VMSCAN_THROTTLE_CONGESTED,
	NR_VMSCAN_THROTTLE,
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

#define ANON_AND_FILE 2

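/*
 * Per-node (or, with memcg, per-memcg per-node) LRU state: the LRU lists
 * themselves plus the reclaim cost accounting used to balance anon vs file
 * scanning.  lruvec->flags carries the enum lruvec_flags bits below.
 */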
enum lruvec_flags {
	LRUVEC_CONGESTED,	/* lruvec has many dirty pages at the
				 * tail of its LRU lists
				 */
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	/* per lruvec lru_lock for memcg */
	spinlock_t lru_lock;
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other.  As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long anon_cost;
	unsigned long file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

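/*
 * Zone watermarks.  kswapd is woken when free pages drop below the low
 * watermark and sleeps once they reach the high watermark; only allocations
 * that may dip into the reserves can go below the min watermark.
 */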
enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	WMARK_PROMO,
	NR_WMARK
};

/*
 * One pcp list per migratetype for each page allocation order up to and
 * including PAGE_ALLOC_COSTLY_ORDER, plus one extra list for THP-sized
 * pages when transparent hugepages are enabled.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 1
#else
#define NR_PCP_THP 0
#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)

/*
 * Shift to encode migratetype and order in the same integer, with order
 * in the least significant bits.
 */
#define NR_PCP_ORDER_WIDTH 8
#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)

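/* Watermark accessors: each watermark is raised by any active boost. */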
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)

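/*
 * Per-CPU page cache ("pcp lists"): pages are kept on per-CPU lists so that
 * most order-0 allocations and frees avoid taking the zone lock.
 */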
struct per_cpu_pages {
	spinlock_t lock;	/* Protects lists field */
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	short free_factor;	/* batch scaling factor during free */
#ifdef CONFIG_NUMA
	short expire;		/* When 0, remote pagesets are drained */
#endif

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[NR_PCP_LISTS];
} ____cacheline_aligned_in_smp;

struct per_cpu_zonestat {
#ifdef CONFIG_SMP
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
	s8 stat_threshold;
#endif
#ifdef CONFIG_NUMA
	/*
	 * Low priority inaccurate counters that are only folded
	 * on demand.  Use a large type to avoid the overhead of
	 * folding during refresh cycles.
	 */
	unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

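/*
 * The zone types.  On most machines only a subset is actually compiled in,
 * depending on the CONFIG_ZONE_* options and on the architecture's DMA
 * addressing limitations.
 */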
enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals that
	 * cannot DMA into all of addressable memory.  The lower bound of
	 * the DMA zones is architecture specific.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can
	 * be done on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for example
	 * used by i386 to allow the kernel to address memory beyond the
	 * direct mapping.  Access requires kmap().
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * (mostly) only movable pages.  Its main use cases are to make
	 * memory offlining more likely to succeed and to allow CMA and
	 * gigantic page allocation to work reliably; it is populated via
	 * kernelcore=/movablecore= or by onlining hotplugged memory to
	 * ZONE_MOVABLE.  Some unmovable allocations (memblock allocations
	 * done before the zone exists, pinned pages, unmovable PG_offline
	 * pages from memory ballooning) can still end up or linger here.
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	/*
	 * ZONE_DEVICE covers device memory (e.g. pmem or HMM/GPU memory)
	 * that gets struct pages via devm_memremap_pages() so it can be
	 * referenced like ordinary memory, but is never put on the buddy
	 * free lists or touched by kswapd.
	 */
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

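/*
 * The zone structure.  Read-mostly fields come first; fields written on
 * every allocation/free (free areas, lock, vmstats) are kept on their own
 * cachelines via ZONE_PADDING.
 */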
struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid
	 * totally wasting several GB of ram we must reserve some of the
	 * lower zone memory (otherwise we risk to run OOM on the lower
	 * zones despite there being tons of freeable ram on the higher
	 * zones).  This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data *zone_pgdat;
	struct per_cpu_pages __percpu *per_cpu_pageset;
	struct per_cpu_zonestat __percpu *per_cpu_zonestats;
	/*
	 * the high and batch values are copied to individual pagesets for
	 * faster access
	 */
	int pageset_high;
	int pageset_batch;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block.  See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes; present_pages is physical pages existing within the zone;
	 * managed_pages is present pages managed by the buddy system, i.e.
	 * present_pages - reserved_pages.  managed_pages is atomic because
	 * it can be read outside of zone span_seqlock protection.
	 */
	atomic_long_t managed_pages;
	unsigned long spanned_pages;
	unsigned long present_pages;
#if defined(CONFIG_MEMORY_HOTPLUG)
	unsigned long present_early_pages;
#endif
#ifdef CONFIG_CMA
	unsigned long cma_pages;
#endif

	const char *name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock.  Protected by zone->lock.
	 */
	unsigned long nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long flags;

	/* Primarily protects free_area */
	spinlock_t lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long compact_init_migrate_pfn;
	unsigned long compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions are
	 * skipped before trying again.  The number attempted since last
	 * failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool compact_blockskip_flush;
#endif

	bool contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,		/* reclaim scanning has recently found
				 * many dirty file pages at the tail
				 * of the LRU.
				 */
	PGDAT_WRITEBACK,	/* reclaim scanning has recently found
				 * many pages under writeback
				 */
	PGDAT_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,	/* zone recently boosted watermarks.
				 * Cleared when kswapd is woken.
				 */
	ZONE_RECLAIM_ACTIVE,	/* kswapd may be scanning the zone. */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
	return zone->cma_pages;
#else
	return 0;
#endif
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}


#ifndef BUILD_VDSO32_64
/*
 * page->flags layout:
 *
 * page->flags packs the zone, node (or section) and, depending on the
 * configuration, last_cpupid and KASAN tag information into the upper
 * bits of the word; see page-flags-layout.h for the possible layouts.
 */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
					SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
					NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

static inline enum zone_type folio_zonenum(const struct folio *folio)
{
	return page_zonenum(&folio->page);
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline bool folio_is_zone_device(const struct folio *folio)
{
	return is_zone_device_page(&folio->page);
}

static inline bool is_zone_movable_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_MOVABLE;
}
#endif /* !BUILD_VDSO32_64 */

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}


/*
 * The "priority" of VM scanning is how much of the queues we will scan in
 * one go.  A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist.  It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist.  A zonelist is a list of
 * zones, the first one is the 'goal' of the allocation, the other zones are
 * fallback zones, in decreasing priority.  Each zoneref also caches the
 * zone index of its entry so that zonelist_zone_idx() never has to touch
 * the zone itself.
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

/*
 * The array of struct pages for flatmem.
 * It must be declared for SPARSEMEM as well because there are configurations
 * that rely on that.
 */
extern struct page *mem_map;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

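/*
 * On NUMA machines, each NUMA node has a pg_data_t describing its memory
 * layout.  On UMA machines there is a single pg_data_t (contig_page_data)
 * describing the whole memory.  The node_zones and node_zonelists below are
 * the entry points the page allocator uses for that node.
 */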
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node.  Not all of the
	 * zones may be populated, but it is the full list.  It is referenced
	 * by this node's node_zonelists as well as other node's
	 * node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLATMEM	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay
	 * constant.  Also synchronizes pgdat->first_deferred_pfn during
	 * deferred page initialisation.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;

	/* workqueues for throttling reclaim for different reasons. */
	wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];

	atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
	unsigned long nr_reclaim_start;	/* nr pages written while throttled
					 * when throttling started. */
	struct task_struct *kswapd;	/* Protected by mem_hotplug_begin/done() */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
	bool proactive_compact_trigger;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec __lruvec;

	unsigned long flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
/*
 * Memory initialization context, use to differentiate memory added by
 * the platform statically or via memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone().  If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 * Return: 1 for a highmem zone, 0 otherwise
 */
static inline int is_highmem(struct zone *zone)
{
	return is_highmem_idx(zone_idx(zone));
}

#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void);
#else
static inline bool has_managed_dma(void)
{
	return false;
}
#endif

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NUMA

extern struct pglist_data contig_page_data;
static inline struct pglist_data *NODE_DATA(int nid)
{
	return &contig_page_data;
}

#else /* CONFIG_NUMA */

#include <asm/mmzone.h>

#endif /* !CONFIG_NUMA */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

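/* Accessors for the cached fields of a struct zoneref. */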
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx
 *			 within the allowed nodemask using a cursor within a
 *			 zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search.  The zoneref returned is a cursor that represents the current zone
 * being examined.  It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx
 *			  within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * Return: Zoneref pointer for the first suitable zone found.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL).  This may happen either genuinely, or due to concurrent
 * nodemask update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
					highest_zoneidx, nodes);
}

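/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones
 *	in a zonelist at or below a given zone index and within a nodemask
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->_zonerefs being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 * @nodemask: Nodemask allowed by the allocator
 *
 * Typical use is the allocator fallback walk, e.g.:
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask)
 *		if (zone_watermark_ok(zone, order, mark, highidx, 0))
 *			break;
 */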
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a
 *			    zonelist at or below a given zone index
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->zones being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

/* Whether the allowed nodes have only ZONE_MOVABLE (and maybe ZONE_DEVICE) memory */
static inline bool movable_only_nodes(nodemask_t *nodes)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	int nid;

	if (nodes_empty(*nodes))
		return false;

	/*
	 * The fallback zonelist of any node references the zones of all
	 * nodes, so checking the first node of the nodemask (with the
	 * nodemask applied as a filter) is sufficient.
	 */
	nid = first_node(*nodes);
	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
	return (!z->zone) ? true : false;
}


#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif


#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

struct mem_section_usage {
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	/* See declaration of similar field in struct zone */
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * [see sparse.c::sparse_init_one_section()]
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer.  We use
	 * section.  (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	unsigned long root = SECTION_NR_TO_ROOT(nr);

	if (unlikely(root >= NR_SECTION_ROOTS))
		return NULL;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section || !mem_section[root])
		return NULL;
#endif
	return &mem_section[root][nr & SECTION_ROOT_MASK];
}
extern size_t mem_section_usage_size(void);

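/*
 * The lower bits of section_mem_map carry the section state flags defined
 * below; the upper bits hold the encoded memmap pointer (recovered with
 * SECTION_MAP_MASK) and, during early boot before the memmap is installed,
 * the section's node id shifted by SECTION_NID_SHIFT.
 */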
enum {
	SECTION_MARKED_PRESENT_BIT,
	SECTION_HAS_MEM_MAP_BIT,
	SECTION_IS_ONLINE_BIT,
	SECTION_IS_EARLY_BIT,
#ifdef CONFIG_ZONE_DEVICE
	SECTION_TAINT_ZONE_DEVICE_BIT,
#endif
	SECTION_MAP_LAST_BIT,
};

#define SECTION_MARKED_PRESENT		BIT(SECTION_MARKED_PRESENT_BIT)
#define SECTION_HAS_MEM_MAP		BIT(SECTION_HAS_MEM_MAP_BIT)
#define SECTION_IS_ONLINE		BIT(SECTION_IS_ONLINE_BIT)
#define SECTION_IS_EARLY		BIT(SECTION_IS_EARLY_BIT)
#ifdef CONFIG_ZONE_DEVICE
#define SECTION_TAINT_ZONE_DEVICE	BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
#endif
#define SECTION_MAP_MASK		(~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT		SECTION_MAP_LAST_BIT

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

#ifdef CONFIG_ZONE_DEVICE
static inline int online_device_section(struct mem_section *section)
{
	unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;

	return section && ((section->section_mem_map & flags) == flags);
}
#else
static inline int online_device_section(struct mem_section *section)
{
	return 0;
}
#endif

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
 * pfn_valid - check if there is a valid memory map entry for a PFN
 * @pfn: the page frame number to check
 *
 * Check if there is a valid memory map entry aka struct page for the @pfn.
 * Note, that availability of the memory map entry does not imply that
 * there is actual usable memory at that @pfn.  The struct page may
 * represent a hole or an unusable page frame.
 *
 * Return: 1 for PFNs that have memory map entries and 0 otherwise
 */
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the
	 * pfn.  Else it might lead to false positives when
	 * some of the upper bits are set, but the lower bits
	 * match a valid pfn.
	 */
	if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __pfn_to_section(pfn);
	if (!valid_section(ms))
		return 0;
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__pfn_to_section(pfn));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */