// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS           128
#define INIT_PHYSMEM_REGIONS            4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS     INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS        INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * The memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
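/*
 * A minimal, hypothetical sketch of the setup sequence described above,
 * for illustration only; example_arch_memblock_setup() and the address
 * constants below are made up and exist nowhere in the tree:
 *
 *	static void __init example_arch_memblock_setup(void)
 *	{
 *		void *buf;
 *
 *		memblock_add(0x80000000, SZ_512M);	(register RAM, UMA)
 *		memblock_reserve(0x80000000, SZ_1M);	(firmware carve-out)
 *		memblock_allow_resize();		(arrays may now grow)
 *		buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
 *	}
 *
 * memblock_alloc() returns a zeroed buffer at a virtual address, or NULL
 * when no suitable range exists below memblock.current_limit.
 */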

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
    .memory.regions     = memblock_memory_init_regions,
    .memory.cnt     = 1,    /* empty dummy entry */
    .memory.max     = INIT_MEMBLOCK_MEMORY_REGIONS,
    .memory.name        = "memory",

    .reserved.regions   = memblock_reserved_init_regions,
    .reserved.cnt       = 1,    /* empty dummy entry */
    .reserved.max       = INIT_MEMBLOCK_RESERVED_REGIONS,
    .reserved.name      = "reserved",

    .bottom_up      = false,
    .current_limit      = MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
    .regions        = memblock_physmem_init_regions,
    .cnt            = 1,    /* empty dummy entry */
    .max            = INIT_PHYSMEM_REGIONS,
    .name           = "physmem",
};
#endif

/*
 * Keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)           \
    for (i = 0, rgn = &memblock_type->regions[0];           \
         i < memblock_type->cnt;                    \
         i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)                      \
    do {                                \
        if (memblock_debug)                 \
            pr_info(fmt, ##__VA_ARGS__);            \
    } while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
    return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
    return *size = min(*size, PHYS_ADDR_MAX - base);
}
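/*
 * For example (hypothetical values): with base == PHYS_ADDR_MAX - 4 and
 * *size == 16, the size is capped to 4 so that base + *size lands exactly
 * at PHYS_ADDR_MAX instead of wrapping past zero.
 */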

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                       phys_addr_t base2, phys_addr_t size2)
{
    return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
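/*
 * Illustration (made-up addresses): [0x1000, 0x2000) and [0x1800, 0x2800)
 * overlap because 0x1000 < 0x2800 and 0x1800 < 0x2000; half-open ranges
 * that merely touch, such as [0x1000, 0x2000) and [0x2000, 0x3000), do not.
 */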

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
                    phys_addr_t base, phys_addr_t size)
{
    unsigned long i;

    memblock_cap_size(base, &size);

    for (i = 0; i < type->cnt; i++)
        if (memblock_addrs_overlap(base, size, type->regions[i].base,
                       type->regions[i].size))
            break;
    return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), finds free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                phys_addr_t size, phys_addr_t align, int nid,
                enum memblock_flags flags)
{
    phys_addr_t this_start, this_end, cand;
    u64 i;

    for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
        this_start = clamp(this_start, start, end);
        this_end = clamp(this_end, start, end);

        cand = round_up(this_start, align);
        if (cand < this_end && this_end - cand >= size)
            return cand;
    }

    return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), finds free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                   phys_addr_t size, phys_addr_t align, int nid,
                   enum memblock_flags flags)
{
    phys_addr_t this_start, this_end, cand;
    u64 i;

    for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                    NULL) {
        this_start = clamp(this_start, start, end);
        this_end = clamp(this_end, start, end);

        if (this_end < size)
            continue;

        cand = round_down(this_end - size, align);
        if (cand >= this_start)
            return cand;
    }

    return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                    phys_addr_t align, phys_addr_t start,
                    phys_addr_t end, int nid,
                    enum memblock_flags flags)
{
    /* pump up @end */
    if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
        end == MEMBLOCK_ALLOC_NOLEAKTRACE)
        end = memblock.current_limit;

    /* avoid allocating the first page */
    start = max_t(phys_addr_t, start, PAGE_SIZE);
    end = max(start, end);

    if (memblock_bottom_up())
        return __memblock_find_range_bottom_up(start, end, size, align,
                               nid, flags);
    else
        return __memblock_find_range_top_down(start, end, size, align,
                              nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                    phys_addr_t end, phys_addr_t size,
                    phys_addr_t align)
{
    phys_addr_t ret;
    enum memblock_flags flags = choose_memblock_flags();

again:
    ret = memblock_find_in_range_node(size, align, start, end,
                        NUMA_NO_NODE, flags);

    if (!ret && (flags & MEMBLOCK_MIRROR)) {
        pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
            &size);
        flags &= ~MEMBLOCK_MIRROR;
        goto again;
    }

    return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
    type->total_size -= type->regions[r].size;
    memmove(&type->regions[r], &type->regions[r + 1],
        (type->cnt - (r + 1)) * sizeof(type->regions[r]));
    type->cnt--;

    /* Special case for empty arrays */
    if (type->cnt == 0) {
        WARN_ON(type->total_size != 0);
        type->cnt = 1;
        type->regions[0].base = 0;
        type->regions[0].size = 0;
        type->regions[0].flags = 0;
        memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
    }
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
    phys_addr_t addr, size;

    if (memblock.reserved.regions != memblock_reserved_init_regions) {
        addr = __pa(memblock.reserved.regions);
        size = PAGE_ALIGN(sizeof(struct memblock_region) *
                  memblock.reserved.max);
        if (memblock_reserved_in_slab)
            kfree(memblock.reserved.regions);
        else
            memblock_free_late(addr, size);
    }

    if (memblock.memory.regions != memblock_memory_init_regions) {
        addr = __pa(memblock.memory.regions);
        size = PAGE_ALIGN(sizeof(struct memblock_region) *
                  memblock.memory.max);
        if (memblock_memory_in_slab)
            kfree(memblock.memory.regions);
        else
            memblock_free_late(addr, size);
    }

    memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                        phys_addr_t new_area_start,
                        phys_addr_t new_area_size)
{
    struct memblock_region *new_array, *old_array;
    phys_addr_t old_alloc_size, new_alloc_size;
    phys_addr_t old_size, new_size, addr, new_end;
    int use_slab = slab_is_available();
    int *in_slab;

    /* We don't allow resizing until we know about the reserved regions
     * of memory that aren't suitable for allocation
     */
    if (!memblock_can_resize)
        return -1;

    /* Calculate new doubled size */
    old_size = type->max * sizeof(struct memblock_region);
    new_size = old_size << 1;
    /*
     * We need to allocate the new array aligned to PAGE_SIZE,
     * so we can free it completely later.
     */
    old_alloc_size = PAGE_ALIGN(old_size);
    new_alloc_size = PAGE_ALIGN(new_size);

    /* Retrieve the slab flag */
    if (type == &memblock.memory)
        in_slab = &memblock_memory_in_slab;
    else
        in_slab = &memblock_reserved_in_slab;

    /* Try to find some space for it */
    if (use_slab) {
        new_array = kmalloc(new_size, GFP_KERNEL);
        addr = new_array ? __pa(new_array) : 0;
    } else {
        /* only exclude range when trying to double reserved.regions */
        if (type != &memblock.reserved)
            new_area_start = new_area_size = 0;

        addr = memblock_find_in_range(new_area_start + new_area_size,
                        memblock.current_limit,
                        new_alloc_size, PAGE_SIZE);
        if (!addr && new_area_size)
            addr = memblock_find_in_range(0,
                min(new_area_start, memblock.current_limit),
                new_alloc_size, PAGE_SIZE);

        new_array = addr ? __va(addr) : NULL;
    }
    if (!addr) {
        pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
               type->name, type->max, type->max * 2);
        return -1;
    }

    new_end = addr + new_size - 1;
    memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
            type->name, type->max * 2, &addr, &new_end);

    /*
     * Found space, we now need to move the array over before we add the
     * reserved region since it may be our reserved array itself that is
     * full.
     */
    memcpy(new_array, type->regions, old_size);
    memset(new_array + type->max, 0, old_size);
    old_array = type->regions;
    type->regions = new_array;
    type->max <<= 1;

    /* Free old array. We needn't free it if the array is the static one */
    if (*in_slab)
        kfree(old_array);
    else if (old_array != memblock_memory_init_regions &&
         old_array != memblock_reserved_init_regions)
        memblock_free(old_array, old_alloc_size);

    /*
     * Reserve the new array if that comes from the memblock.  Otherwise, we
     * needn't do it
     */
    if (!use_slab)
        BUG_ON(memblock_reserve(addr, new_alloc_size));

    /* Update slab flag */
    *in_slab = use_slab;

    return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
    int i = 0;

    /* cnt never goes below 1 */
    while (i < type->cnt - 1) {
        struct memblock_region *this = &type->regions[i];
        struct memblock_region *next = &type->regions[i + 1];

        if (this->base + this->size != next->base ||
            memblock_get_region_node(this) !=
            memblock_get_region_node(next) ||
            this->flags != next->flags) {
            BUG_ON(this->base + this->size > next->base);
            i++;
            continue;
        }

        this->size += next->size;
        /* move forward from next + 1, index of which is i + 2 */
        memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
        type->cnt--;
    }
}
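/*
 * For instance (made-up ranges): [0x1000, 0x2000) and [0x2000, 0x3000) on
 * the same node with equal flags become the single region [0x1000, 0x3000);
 * adjacent regions that differ in node id or flags are left untouched.
 */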

/**
 * memblock_insert_region - insert new memblock region
 * @type:   memblock type to insert into
 * @idx:    index for the insertion point
 * @base:   base address of the new region
 * @size:   size of the new region
 * @nid:    node id of the new region
 * @flags:  flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                           int idx, phys_addr_t base,
                           phys_addr_t size,
                           int nid,
                           enum memblock_flags flags)
{
    struct memblock_region *rgn = &type->regions[idx];

    BUG_ON(type->cnt >= type->max);
    memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
    rgn->base = base;
    rgn->size = size;
    rgn->flags = flags;
    memblock_set_region_node(rgn, nid);
    type->cnt++;
    type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
                phys_addr_t base, phys_addr_t size,
                int nid, enum memblock_flags flags)
{
    bool insert = false;
    phys_addr_t obase = base;
    phys_addr_t end = base + memblock_cap_size(base, &size);
    int idx, nr_new;
    struct memblock_region *rgn;

    if (!size)
        return 0;

    /* special case for empty array */
    if (type->regions[0].size == 0) {
        WARN_ON(type->cnt != 1 || type->total_size);
        type->regions[0].base = base;
        type->regions[0].size = size;
        type->regions[0].flags = flags;
        memblock_set_region_node(&type->regions[0], nid);
        type->total_size = size;
        return 0;
    }

    /*
     * The worst case is when the new range overlaps all existing regions,
     * then we'll need type->cnt + 1 empty regions in @type. So if
     * type->cnt * 2 + 1 is less than type->max, we know
     * that there are enough empty regions in @type, and we can insert
     * regions directly.
     */
    if (type->cnt * 2 + 1 < type->max)
        insert = true;

repeat:
    /*
     * The following is executed twice.  Once with %false @insert and
     * then with %true.  The first counts the number of regions needed
     * to accommodate the new area.  The second actually inserts them.
     */
    base = obase;
    nr_new = 0;

    for_each_memblock_type(idx, type, rgn) {
        phys_addr_t rbase = rgn->base;
        phys_addr_t rend = rbase + rgn->size;

        if (rbase >= end)
            break;
        if (rend <= base)
            continue;
        /*
         * @rgn overlaps.  If it separates the lower part of new
         * area, insert that portion.
         */
        if (rbase > base) {
#ifdef CONFIG_NUMA
            WARN_ON(nid != memblock_get_region_node(rgn));
#endif
            WARN_ON(flags != rgn->flags);
            nr_new++;
            if (insert)
                memblock_insert_region(type, idx++, base,
                               rbase - base, nid,
                               flags);
        }
        /* area below @rend is dealt with, forget about it */
        base = min(rend, end);
    }

    /* insert the remaining portion */
    if (base < end) {
        nr_new++;
        if (insert)
            memblock_insert_region(type, idx, base, end - base,
                           nid, flags);
    }

    if (!nr_new)
        return 0;

    /*
     * If this was the first round, resize array and repeat for actual
     * insertions; otherwise, merge and return.
     */
    if (!insert) {
        while (type->cnt + nr_new > type->max)
            if (memblock_double_array(type, obase, size) < 0)
                return -ENOMEM;
        insert = true;
        goto repeat;
    } else {
        memblock_merge_regions(type);
        return 0;
    }
}
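/*
 * A worked illustration (hypothetical addresses): adding [0x1000, 0x4000)
 * to a type that already holds [0x2000, 0x3000) with the same nid and flags
 * inserts the lower piece [0x1000, 0x2000) and the remaining piece
 * [0x3000, 0x4000); the merge pass then collapses all three into a single
 * region [0x1000, 0x4000).
 */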

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                      int nid, enum memblock_flags flags)
{
    phys_addr_t end = base + size - 1;

    memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
             &base, &end, nid, flags, (void *)_RET_IP_);

    return memblock_add_range(&memblock.memory, base, size, nid, flags);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
    phys_addr_t end = base + size - 1;

    memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
             &base, &end, (void *)_RET_IP_);

    return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index of the
 * first region after the range in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                    phys_addr_t base, phys_addr_t size,
                    int *start_rgn, int *end_rgn)
{
    phys_addr_t end = base + memblock_cap_size(base, &size);
    int idx;
    struct memblock_region *rgn;

    *start_rgn = *end_rgn = 0;

    if (!size)
        return 0;

    /* we'll create at most two more regions */
    while (type->cnt + 2 > type->max)
        if (memblock_double_array(type, base, size) < 0)
            return -ENOMEM;

    for_each_memblock_type(idx, type, rgn) {
        phys_addr_t rbase = rgn->base;
        phys_addr_t rend = rbase + rgn->size;

        if (rbase >= end)
            break;
        if (rend <= base)
            continue;

        if (rbase < base) {
            /*
             * @rgn intersects from below.  Split and continue
             * to process the next region - the new top half.
             */
            rgn->base = base;
            rgn->size -= base - rbase;
            type->total_size -= base - rbase;
            memblock_insert_region(type, idx, rbase, base - rbase,
                           memblock_get_region_node(rgn),
                           rgn->flags);
        } else if (rend > end) {
            /*
             * @rgn intersects from above.  Split and redo the
             * current region - the new bottom half.
             */
            rgn->base = end;
            rgn->size -= end - rbase;
            type->total_size -= end - rbase;
            memblock_insert_region(type, idx--, rbase, end - rbase,
                           memblock_get_region_node(rgn),
                           rgn->flags);
        } else {
            /* @rgn is fully contained, record it */
            if (!*end_rgn)
                *start_rgn = idx;
            *end_rgn = idx + 1;
        }
    }

    return 0;
}
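/*
 * Worked example (hypothetical addresses): isolating [0x2000, 0x3000) from
 * a type holding the single region [0x1000, 0x4000) leaves three regions
 * [0x1000, 0x2000), [0x2000, 0x3000) and [0x3000, 0x4000), with
 * *start_rgn == 1 and *end_rgn == 2.
 */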

static int __init_memblock memblock_remove_range(struct memblock_type *type,
                      phys_addr_t base, phys_addr_t size)
{
    int start_rgn, end_rgn;
    int i, ret;

    ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
    if (ret)
        return ret;

    for (i = end_rgn - 1; i >= start_rgn; i--)
        memblock_remove_region(type, i);
    return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
    phys_addr_t end = base + size - 1;

    memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
             &base, &end, (void *)_RET_IP_);

    return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
    if (ptr)
        memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
    phys_addr_t end = base + size - 1;

    memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
             &base, &end, (void *)_RET_IP_);

    kmemleak_free_part_phys(base, size);
    return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
    phys_addr_t end = base + size - 1;

    memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
             &base, &end, (void *)_RET_IP_);

    return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
    phys_addr_t end = base + size - 1;

    memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
             &base, &end, (void *)_RET_IP_);

    return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                phys_addr_t size, int set, int flag)
{
    struct memblock_type *type = &memblock.memory;
    int i, ret, start_rgn, end_rgn;

    ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
    if (ret)
        return ret;

    for (i = start_rgn; i < end_rgn; i++) {
        struct memblock_region *r = &type->regions[i];

        if (set)
            r->flags |= flag;
        else
            r->flags &= ~flag;
    }

    memblock_merge_regions(type);
    return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
    return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
    return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
    if (!mirrored_kernelcore)
        return 0;

    system_has_some_mirror = true;

    return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
    return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
    return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
                   struct memblock_region *m,
                   int nid, int flags)
{
    int m_nid = memblock_get_region_node(m);

    /* we never skip regions when iterating memblock.reserved or physmem */
    if (type != memblock_memory)
        return false;

    /* only memory regions are associated with nodes, check it */
    if (nid != NUMA_NO_NODE && nid != m_nid)
        return true;

    /* skip hotpluggable memory regions if needed */
    if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
        !(flags & MEMBLOCK_HOTPLUG))
        return true;

    /* if we want mirror memory skip non-mirror memory regions */
    if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
        return true;

    /* skip nomap memory unless we were asked for it explicitly */
    if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
        return true;

    /* skip driver-managed memory unless we were asked for it explicitly */
    if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
        return true;

    return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *  0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *  0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
              struct memblock_type *type_a,
              struct memblock_type *type_b, phys_addr_t *out_start,
              phys_addr_t *out_end, int *out_nid)
{
    int idx_a = *idx & 0xffffffff;
    int idx_b = *idx >> 32;

    if (WARN_ONCE(nid == MAX_NUMNODES,
    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
        nid = NUMA_NO_NODE;

    for (; idx_a < type_a->cnt; idx_a++) {
        struct memblock_region *m = &type_a->regions[idx_a];

        phys_addr_t m_start = m->base;
        phys_addr_t m_end = m->base + m->size;
        int     m_nid = memblock_get_region_node(m);

        if (should_skip_region(type_a, m, nid, flags))
            continue;

        if (!type_b) {
            if (out_start)
                *out_start = m_start;
            if (out_end)
                *out_end = m_end;
            if (out_nid)
                *out_nid = m_nid;
            idx_a++;
            *idx = (u32)idx_a | (u64)idx_b << 32;
            return;
        }

        /* scan areas before each reservation */
        for (; idx_b < type_b->cnt + 1; idx_b++) {
            struct memblock_region *r;
            phys_addr_t r_start;
            phys_addr_t r_end;

            r = &type_b->regions[idx_b];
            r_start = idx_b ? r[-1].base + r[-1].size : 0;
            r_end = idx_b < type_b->cnt ?
                r->base : PHYS_ADDR_MAX;

            /*
             * if idx_b advanced past idx_a,
             * break out to advance idx_a
             */
            if (r_start >= m_end)
                break;
            /* if the two regions intersect, we're done */
            if (m_start < r_end) {
                if (out_start)
                    *out_start =
                        max(m_start, r_start);
                if (out_end)
                    *out_end = min(m_end, r_end);
                if (out_nid)
                    *out_nid = m_nid;
                /*
                 * The region which ends first is
                 * advanced for the next iteration.
                 */
                if (m_end <= r_end)
                    idx_a++;
                else
                    idx_b++;
                *idx = (u32)idx_a | (u64)idx_b << 32;
                return;
            }
        }
    }

    /* signal end of iteration */
    *idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
                      enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b,
                      phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid)
{
    int idx_a = *idx & 0xffffffff;
    int idx_b = *idx >> 32;

    if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
        nid = NUMA_NO_NODE;

    if (*idx == (u64)ULLONG_MAX) {
        idx_a = type_a->cnt - 1;
        if (type_b != NULL)
            idx_b = type_b->cnt;
        else
            idx_b = 0;
    }

    for (; idx_a >= 0; idx_a--) {
        struct memblock_region *m = &type_a->regions[idx_a];

        phys_addr_t m_start = m->base;
        phys_addr_t m_end = m->base + m->size;
        int m_nid = memblock_get_region_node(m);

        if (should_skip_region(type_a, m, nid, flags))
            continue;

        if (!type_b) {
            if (out_start)
                *out_start = m_start;
            if (out_end)
                *out_end = m_end;
            if (out_nid)
                *out_nid = m_nid;
            idx_a--;
            *idx = (u32)idx_a | (u64)idx_b << 32;
            return;
        }

        /* scan areas before each reservation */
        for (; idx_b >= 0; idx_b--) {
            struct memblock_region *r;
            phys_addr_t r_start;
            phys_addr_t r_end;

            r = &type_b->regions[idx_b];
            r_start = idx_b ? r[-1].base + r[-1].size : 0;
            r_end = idx_b < type_b->cnt ?
                r->base : PHYS_ADDR_MAX;

            /*
             * if idx_b advanced past idx_a,
             * break out to advance idx_a
             */
            if (r_end <= m_start)
                break;
            /* if the two regions intersect, we're done */
            if (m_end > r_start) {
                if (out_start)
                    *out_start = max(m_start, r_start);
                if (out_end)
                    *out_end = min(m_end, r_end);
                if (out_nid)
                    *out_nid = m_nid;
                if (m_start >= r_start)
                    idx_a--;
                else
                    idx_b--;
                *idx = (u32)idx_a | (u64)idx_b << 32;
                return;
            }
        }
    }
    /* signal end of iteration */
    *idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                unsigned long *out_start_pfn,
                unsigned long *out_end_pfn, int *out_nid)
{
    struct memblock_type *type = &memblock.memory;
    struct memblock_region *r;
    int r_nid;

    while (++*idx < type->cnt) {
        r = &type->regions[*idx];
        r_nid = memblock_get_region_node(r);

        if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
            continue;
        if (nid == MAX_NUMNODES || nid == r_nid)
            break;
    }
    if (*idx >= type->cnt) {
        *idx = -1;
        return;
    }

    if (out_start_pfn)
        *out_start_pfn = PFN_UP(r->base);
    if (out_end_pfn)
        *out_end_pfn = PFN_DOWN(r->base + r->size);
    if (out_nid)
        *out_nid = r_nid;
}
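/*
 * Hypothetical illustration of the iterator built on this helper (the
 * loop body is made up):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("PFNs [%lx, %lx) on node %d\n",
 *			start_pfn, end_pfn, nid);
 */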

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
    int start_rgn, end_rgn;
    int i, ret;

    ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
    if (ret)
        return ret;

    for (i = start_rgn; i < end_rgn; i++)
        memblock_set_region_node(&type->regions[i], nid);

    memblock_merge_regions(type);
#endif
    return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. It is used by the deferred memory
 * init routines, which used to duplicate much of this logic throughout
 * the code; centralizing it in one iterator gives them everything they
 * need in a single place.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                 unsigned long *out_spfn, unsigned long *out_epfn)
{
    int zone_nid = zone_to_nid(zone);
    phys_addr_t spa, epa;

    __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
             &memblock.memory, &memblock.reserved,
             &spa, &epa, NULL);

    while (*idx != U64_MAX) {
        unsigned long epfn = PFN_DOWN(epa);
        unsigned long spfn = PFN_UP(spa);

        /*
         * Verify the end is at least past the start of the zone and
         * that we have at least one PFN to initialize.
         */
        if (zone->zone_start_pfn < epfn && spfn < epfn) {
            /* if we went too far just stop searching */
            if (zone_end_pfn(zone) <= spfn) {
                *idx = U64_MAX;
                break;
            }

            if (out_spfn)
                *out_spfn = max(zone->zone_start_pfn, spfn);
            if (out_epfn)
                *out_epfn = min(zone_end_pfn(zone), epfn);

            return;
        }

        __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
                 &memblock.memory, &memblock.reserved,
                 &spa, &epa, NULL);
    }

    /* signal end of iteration */
    if (out_spfn)
        *out_spfn = ULONG_MAX;
    if (out_epfn)
        *out_epfn = 0;
}
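/*
 * A sketch of the loop this enables in the deferred init path; zone and
 * init_pfn_range() here are hypothetical stand-ins:
 *
 *	unsigned long spfn, epfn;
 *	u64 i;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		init_pfn_range(spfn, epfn);	(made-up callback)
 */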

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function uses kmemleak_alloc_phys() on the allocated
 * boot memory block, so it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                    phys_addr_t align, phys_addr_t start,
                    phys_addr_t end, int nid,
                    bool exact_nid)
{
    enum memblock_flags flags = choose_memblock_flags();
    phys_addr_t found;

    if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
        nid = NUMA_NO_NODE;

    if (!align) {
        /* Can't use WARNs this early in boot on powerpc */
        dump_stack();
        align = SMP_CACHE_BYTES;
    }

again:
    found = memblock_find_in_range_node(size, align, start, end, nid,
                        flags);
    if (found && !memblock_reserve(found, size))
        goto done;

    if (nid != NUMA_NO_NODE && !exact_nid) {
        found = memblock_find_in_range_node(size, align, start,
                            end, NUMA_NO_NODE,
                            flags);
        if (found && !memblock_reserve(found, size))
            goto done;
    }

    if (flags & MEMBLOCK_MIRROR) {
        flags &= ~MEMBLOCK_MIRROR;
        pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
            &size);
        goto again;
    }

    return 0;

done:
    /*
     * Skip kmemleak for those places like kasan_init() and
     * early_pgtable_alloc() due to high volume.
     */
    if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
        /*
         * Memblock allocated blocks are never reported as
         * leaks. This is because many of these blocks are
         * only referred via the physical address which is
         * not looked up by kmemleak.
         */
        kmemleak_alloc_phys(found, size, 0);

    return found;
}

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
                         phys_addr_t align,
                         phys_addr_t start,
                         phys_addr_t end)
{
    memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
             __func__, (u64)size, (u64)align, &start, &end,
             (void *)_RET_IP_);
    return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                    false);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
    return memblock_alloc_range_nid(size, align, 0,
                    MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
1471 
1472 /**
1473  * memblock_alloc_internal - allocate boot memory block
1474  * @size: size of memory block to be allocated in bytes
1475  * @align: alignment of the region and block's size
1476  * @min_addr: the lower bound of the memory region to allocate (phys address)
1477  * @max_addr: the upper bound of the memory region to allocate (phys address)
1478  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1479  * @exact_nid: control the allocation fall back to other nodes
1480  *
1481  * Allocates memory block using memblock_alloc_range_nid() and
1482  * converts the returned physical address to virtual.
1483  *
1484  * The @min_addr limit is dropped if it can not be satisfied and the allocation
1485  * will fall back to memory below @min_addr. Other constraints, such
1486  * as node and mirrored memory will be handled again in
1487  * memblock_alloc_range_nid().
1488  *
1489  * Return:
1490  * Virtual address of allocated memory block on success, NULL on failure.
1491  */
1492 static void * __init memblock_alloc_internal(
1493                 phys_addr_t size, phys_addr_t align,
1494                 phys_addr_t min_addr, phys_addr_t max_addr,
1495                 int nid, bool exact_nid)
1496 {
1497     phys_addr_t alloc;
1498 
1499     /*
1500      * Detect any accidental use of these APIs after slab is ready; by
1501      * then memblock may already be deinitialized and its internal
1502      * data destroyed (after execution of memblock_free_all()).
1503      */
1504     if (WARN_ON_ONCE(slab_is_available()))
1505         return kzalloc_node(size, GFP_NOWAIT, nid);
1506 
1507     if (max_addr > memblock.current_limit)
1508         max_addr = memblock.current_limit;
1509 
1510     alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1511                     exact_nid);
1512 
1513     /* retry allocation without lower limit */
1514     if (!alloc && min_addr)
1515         alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1516                         exact_nid);
1517 
1518     if (!alloc)
1519         return NULL;
1520 
1521     return phys_to_virt(alloc);
1522 }
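
/*
 * The retry above makes @min_addr a preference rather than a hard floor.
 * Illustrative sketch (values are made up): on a machine whose free
 * ranges all sit below 16 MiB, the first pass over [16 MiB, limit)
 * fails and the second pass over [0, limit) satisfies the request:
 *
 *	ptr = memblock_alloc_try_nid(SZ_4K, SZ_4K, SZ_16M,
 *			MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 */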
1523 
1524 /**
1525  * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1526  * without zeroing memory
1527  * @size: size of memory block to be allocated in bytes
1528  * @align: alignment of the region and block's size
1529  * @min_addr: the lower bound of the memory region from where the allocation
1530  *    is preferred (phys address)
1531  * @max_addr: the upper bound of the memory region from where the allocation
1532  *        is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1533  *        allocate only from memory limited by memblock.current_limit value
1534  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1535  *
1536  * Public function, provides additional debug information (including caller
1537  * info), if enabled. Does not zero allocated memory.
1538  *
1539  * Return:
1540  * Virtual address of allocated memory block on success, NULL on failure.
1541  */
1542 void * __init memblock_alloc_exact_nid_raw(
1543             phys_addr_t size, phys_addr_t align,
1544             phys_addr_t min_addr, phys_addr_t max_addr,
1545             int nid)
1546 {
1547     memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1548              __func__, (u64)size, (u64)align, nid, &min_addr,
1549              &max_addr, (void *)_RET_IP_);
1550 
1551     return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1552                        true);
1553 }
1554 
1555 /**
1556  * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1557  * memory and without panicking
1558  * @size: size of memory block to be allocated in bytes
1559  * @align: alignment of the region and block's size
1560  * @min_addr: the lower bound of the memory region from where the allocation
1561  *    is preferred (phys address)
1562  * @max_addr: the upper bound of the memory region from where the allocation
1563  *        is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1564  *        allocate only from memory limited by memblock.current_limit value
1565  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1566  *
1567  * Public function, provides additional debug information (including caller
1568  * info), if enabled. Does not zero allocated memory, does not panic if request
1569  * cannot be satisfied.
1570  *
1571  * Return:
1572  * Virtual address of allocated memory block on success, NULL on failure.
1573  */
1574 void * __init memblock_alloc_try_nid_raw(
1575             phys_addr_t size, phys_addr_t align,
1576             phys_addr_t min_addr, phys_addr_t max_addr,
1577             int nid)
1578 {
1579     memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1580              __func__, (u64)size, (u64)align, nid, &min_addr,
1581              &max_addr, (void *)_RET_IP_);
1582 
1583     return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1584                        false);
1585 }
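
/*
 * Usage sketch (illustrative; struct entry, nr and init_entry() are
 * hypothetical names): the _raw variant suits callers that overwrite
 * every byte anyway, so the memset done by memblock_alloc_try_nid()
 * would be pure overhead.
 *
 *	struct entry *tbl;
 *	int i;
 *
 *	tbl = memblock_alloc_try_nid_raw(nr * sizeof(*tbl), SMP_CACHE_BYTES,
 *			0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (!tbl)
 *		return -ENOMEM;
 *	for (i = 0; i < nr; i++)
 *		init_entry(&tbl[i]);
 */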
1586 
1587 /**
1588  * memblock_alloc_try_nid - allocate boot memory block
1589  * @size: size of memory block to be allocated in bytes
1590  * @align: alignment of the region and block's size
1591  * @min_addr: the lower bound of the memory region from where the allocation
1592  *    is preferred (phys address)
1593  * @max_addr: the upper bound of the memory region from where the allocation
1594  *        is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1595  *        allocate only from memory limited by memblock.current_limit value
1596  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1597  *
1598  * Public function, provides additional debug information (including caller
1599  * info), if enabled. This function zeroes the allocated memory.
1600  *
1601  * Return:
1602  * Virtual address of allocated memory block on success, NULL on failure.
1603  */
1604 void * __init memblock_alloc_try_nid(
1605             phys_addr_t size, phys_addr_t align,
1606             phys_addr_t min_addr, phys_addr_t max_addr,
1607             int nid)
1608 {
1609     void *ptr;
1610 
1611     memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1612              __func__, (u64)size, (u64)align, nid, &min_addr,
1613              &max_addr, (void *)_RET_IP_);
1614     ptr = memblock_alloc_internal(size, align,
1615                        min_addr, max_addr, nid, false);
1616     if (ptr)
1617         memset(ptr, 0, size);
1618 
1619     return ptr;
1620 }
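
/*
 * Most callers reach this through the memblock_alloc() wrapper from
 * <linux/memblock.h>, which passes MEMBLOCK_LOW_LIMIT,
 * MEMBLOCK_ALLOC_ACCESSIBLE and NUMA_NO_NODE. A typical boot-time
 * pattern (illustrative):
 *
 *	table = memblock_alloc(size, SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: Failed to allocate %zu bytes\n", __func__, size);
 */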
1621 
1622 /**
1623  * memblock_free_late - free pages directly to buddy allocator
1624  * @base: phys starting address of the boot memory block
1625  * @size: size of the boot memory block in bytes
1626  *
1627  * This is only useful when the memblock allocator has already been torn
1628  * down, but we are still initializing the system.  Pages are released directly
1629  * to the buddy allocator.
1630  */
1631 void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
1632 {
1633     phys_addr_t cursor, end;
1634 
1635     end = base + size - 1;
1636     memblock_dbg("%s: [%pa-%pa] %pS\n",
1637              __func__, &base, &end, (void *)_RET_IP_);
1638     kmemleak_free_part_phys(base, size);
1639     cursor = PFN_UP(base);
1640     end = PFN_DOWN(base + size);
1641 
1642     for (; cursor < end; cursor++) {
1643         memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1644         totalram_pages_inc();
1645     }
1646 }
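
/*
 * Usage sketch (illustrative; the base/size variables are placeholders):
 * hand back a region that was reserved for boot-time use once it is no
 * longer needed, after memblock itself has been torn down. x86 frees
 * leftover EFI boot services memory this way.
 *
 *	memblock_free_late(boot_data_base, boot_data_size);
 */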
1647 
1648 /*
1649  * Remaining API functions
1650  */
1651 
1652 phys_addr_t __init_memblock memblock_phys_mem_size(void)
1653 {
1654     return memblock.memory.total_size;
1655 }
1656 
1657 phys_addr_t __init_memblock memblock_reserved_size(void)
1658 {
1659     return memblock.reserved.total_size;
1660 }
1661 
1662 /* lowest address */
1663 phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1664 {
1665     return memblock.memory.regions[0].base;
1666 }
1667 
1668 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1669 {
1670     int idx = memblock.memory.cnt - 1;
1671 
1672     return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1673 }
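
/*
 * Usage sketch (illustrative): arch setup code commonly derives its pfn
 * limits from the DRAM span reported by the two helpers above, e.g.:
 *
 *	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
 *	max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
 */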
1674 
1675 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1676 {
1677     phys_addr_t max_addr = PHYS_ADDR_MAX;
1678     struct memblock_region *r;
1679 
1680     /*
1681      * Translate the memory @limit size into the max address within one of
1682      * the memblock memory regions. If @limit exceeds the total size of
1683      * those regions, max_addr keeps its original value of PHYS_ADDR_MAX.
1684      */
1685     for_each_mem_region(r) {
1686         if (limit <= r->size) {
1687             max_addr = r->base + limit;
1688             break;
1689         }
1690         limit -= r->size;
1691     }
1692 
1693     return max_addr;
1694 }
1695 
1696 void __init memblock_enforce_memory_limit(phys_addr_t limit)
1697 {
1698     phys_addr_t max_addr;
1699 
1700     if (!limit)
1701         return;
1702 
1703     max_addr = __find_max_addr(limit);
1704 
1705     /* @limit exceeds the total size of the memory, do nothing */
1706     if (max_addr == PHYS_ADDR_MAX)
1707         return;
1708 
1709     /* truncate both memory and reserved regions */
1710     memblock_remove_range(&memblock.memory, max_addr,
1711                   PHYS_ADDR_MAX);
1712     memblock_remove_range(&memblock.reserved, max_addr,
1713                   PHYS_ADDR_MAX);
1714 }
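
/*
 * Usage sketch (illustrative): roughly how an architecture wires the
 * "mem=" command-line parameter to this function; the handler below
 * mirrors the arm64-style code and is not part of this file.
 *
 *	static phys_addr_t memory_limit = PHYS_ADDR_MAX;
 *
 *	static int __init early_mem(char *p)
 *	{
 *		if (!p)
 *			return 1;
 *		memory_limit = memparse(p, &p) & PAGE_MASK;
 *		return 0;
 *	}
 *	early_param("mem", early_mem);
 *
 * and later, from the arch memblock init:
 *
 *	memblock_enforce_memory_limit(memory_limit);
 */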
1715 
1716 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1717 {
1718     int start_rgn, end_rgn;
1719     int i, ret;
1720 
1721     if (!size)
1722         return;
1723 
1724     if (!memblock_memory->total_size) {
1725         pr_warn("%s: No memory registered yet\n", __func__);
1726         return;
1727     }
1728 
1729     ret = memblock_isolate_range(&memblock.memory, base, size,
1730                         &start_rgn, &end_rgn);
1731     if (ret)
1732         return;
1733 
1734     /* remove all the MAP (i.e. non-NOMAP) regions outside the range */
1735     for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1736         if (!memblock_is_nomap(&memblock.memory.regions[i]))
1737             memblock_remove_region(&memblock.memory, i);
1738 
1739     for (i = start_rgn - 1; i >= 0; i--)
1740         if (!memblock_is_nomap(&memblock.memory.regions[i]))
1741             memblock_remove_region(&memblock.memory, i);
1742 
1743     /* truncate the reserved regions */
1744     memblock_remove_range(&memblock.reserved, 0, base);
1745     memblock_remove_range(&memblock.reserved,
1746             base + size, PHYS_ADDR_MAX);
1747 }
1748 
1749 void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1750 {
1751     phys_addr_t max_addr;
1752 
1753     if (!limit)
1754         return;
1755 
1756     max_addr = __find_max_addr(limit);
1757 
1758     /* @limit exceeds the total size of the memory, do nothing */
1759     if (max_addr == PHYS_ADDR_MAX)
1760         return;
1761 
1762     memblock_cap_memory_range(0, max_addr);
1763 }
1764 
1765 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1766 {
1767     unsigned int left = 0, right = type->cnt;
1768 
1769     do {
1770         unsigned int mid = (right + left) / 2;
1771 
1772         if (addr < type->regions[mid].base)
1773             right = mid;
1774         else if (addr >= (type->regions[mid].base +
1775                   type->regions[mid].size))
1776             left = mid + 1;
1777         else
1778             return mid;
1779     } while (left < right);
1780     return -1;
1781 }
1782 
1783 bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1784 {
1785     return memblock_search(&memblock.reserved, addr) != -1;
1786 }
1787 
1788 bool __init_memblock memblock_is_memory(phys_addr_t addr)
1789 {
1790     return memblock_search(&memblock.memory, addr) != -1;
1791 }
1792 
1793 bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1794 {
1795     int i = memblock_search(&memblock.memory, addr);
1796 
1797     if (i == -1)
1798         return false;
1799     return !memblock_is_nomap(&memblock.memory.regions[i]);
1800 }
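
/*
 * Usage sketch (illustrative): roughly how arm64 decides whether a pfn
 * is covered by the linear map:
 *
 *	int pfn_is_map_memory(unsigned long pfn)
 *	{
 *		phys_addr_t addr = PFN_PHYS(pfn);
 *
 *		if (PHYS_PFN(addr) != pfn)
 *			return 0;
 *		return memblock_is_map_memory(addr);
 *	}
 */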
1801 
1802 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1803              unsigned long *start_pfn, unsigned long *end_pfn)
1804 {
1805     struct memblock_type *type = &memblock.memory;
1806     int mid = memblock_search(type, PFN_PHYS(pfn));
1807 
1808     if (mid == -1)
1809         return -1;
1810 
1811     *start_pfn = PFN_DOWN(type->regions[mid].base);
1812     *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1813 
1814     return memblock_get_region_node(&type->regions[mid]);
1815 }
1816 
1817 /**
1818  * memblock_is_region_memory - check if a region is a subset of memory
1819  * @base: base of region to check
1820  * @size: size of region to check
1821  *
1822  * Check if the region [@base, @base + @size) is a subset of a memory block.
1823  *
1824  * Return:
1825  * true if the region is a subset of a memory block, false otherwise.
1826  */
1827 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1828 {
1829     int idx = memblock_search(&memblock.memory, base);
1830     phys_addr_t end = base + memblock_cap_size(base, &size);
1831 
1832     if (idx == -1)
1833         return false;
1834     return (memblock.memory.regions[idx].base +
1835          memblock.memory.regions[idx].size) >= end;
1836 }
1837 
1838 /**
1839  * memblock_is_region_reserved - check if a region intersects reserved memory
1840  * @base: base of region to check
1841  * @size: size of region to check
1842  *
1843  * Check if the region [@base, @base + @size) intersects a reserved
1844  * memory block.
1845  *
1846  * Return:
1847  * True if they intersect, false if not.
1848  */
1849 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1850 {
1851     return memblock_overlaps_region(&memblock.reserved, base, size);
1852 }
1853 
1854 void __init_memblock memblock_trim_memory(phys_addr_t align)
1855 {
1856     phys_addr_t start, end, orig_start, orig_end;
1857     struct memblock_region *r;
1858 
1859     for_each_mem_region(r) {
1860         orig_start = r->base;
1861         orig_end = r->base + r->size;
1862         start = round_up(orig_start, align);
1863         end = round_down(orig_end, align);
1864 
1865         if (start == orig_start && end == orig_end)
1866             continue;
1867 
1868         if (start < end) {
1869             r->base = start;
1870             r->size = end - start;
1871         } else {
1872             memblock_remove_region(&memblock.memory,
1873                            r - memblock.memory.regions);
1874             r--;
1875         }
1876     }
1877 }
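
/*
 * Worked example (illustrative numbers): with @align = SZ_4K, a region
 * spanning [0x1200, 0x5e00) is trimmed to [0x2000, 0x5000); a region
 * smaller than one aligned block, say [0x1200, 0x1e00), rounds to an
 * empty range (start >= end) and is removed outright. x86, for
 * instance, calls
 *
 *	memblock_trim_memory(PAGE_SIZE);
 *
 * to drop partial pages reported by the firmware memory map.
 */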
1878 
1879 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1880 {
1881     memblock.current_limit = limit;
1882 }
1883 
1884 phys_addr_t __init_memblock memblock_get_current_limit(void)
1885 {
1886     return memblock.current_limit;
1887 }
1888 
1889 static void __init_memblock memblock_dump(struct memblock_type *type)
1890 {
1891     phys_addr_t base, end, size;
1892     enum memblock_flags flags;
1893     int idx;
1894     struct memblock_region *rgn;
1895 
1896     pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
1897 
1898     for_each_memblock_type(idx, type, rgn) {
1899         char nid_buf[32] = "";
1900 
1901         base = rgn->base;
1902         size = rgn->size;
1903         end = base + size - 1;
1904         flags = rgn->flags;
1905 #ifdef CONFIG_NUMA
1906         if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1907             snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1908                  memblock_get_region_node(rgn));
1909 #endif
1910         pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1911             type->name, idx, &base, &end, &size, nid_buf, flags);
1912     }
1913 }
1914 
1915 static void __init_memblock __memblock_dump_all(void)
1916 {
1917     pr_info("MEMBLOCK configuration:\n");
1918     pr_info(" memory size = %pa reserved size = %pa\n",
1919         &memblock.memory.total_size,
1920         &memblock.reserved.total_size);
1921 
1922     memblock_dump(&memblock.memory);
1923     memblock_dump(&memblock.reserved);
1924 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1925     memblock_dump(&physmem);
1926 #endif
1927 }
1928 
1929 void __init_memblock memblock_dump_all(void)
1930 {
1931     if (memblock_debug)
1932         __memblock_dump_all();
1933 }
1934 
1935 void __init memblock_allow_resize(void)
1936 {
1937     memblock_can_resize = 1;
1938 }
1939 
1940 static int __init early_memblock(char *p)
1941 {
1942     if (p && strstr(p, "debug"))
1943         memblock_debug = 1;
1944     return 0;
1945 }
1946 early_param("memblock", early_memblock);
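
/*
 * Example: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, so every memblock_dbg() call above (and the final
 * memblock_dump_all()) prints to the kernel log, producing lines of
 * the form (addresses illustrative):
 *
 *	memblock_reserve: [0x0000000002c00000-0x0000000002ffffff] ...
 */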
1947 
1948 static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1949 {
1950     struct page *start_pg, *end_pg;
1951     phys_addr_t pg, pgend;
1952 
1953     /*
1954      * Convert start_pfn/end_pfn to struct page pointers.
1955      */
1956     start_pg = pfn_to_page(start_pfn - 1) + 1;
1957     end_pg = pfn_to_page(end_pfn - 1) + 1;
1958 
1959     /*
1960      * Convert to physical addresses, and round start upwards and end
1961      * downwards.
1962      */
1963     pg = PAGE_ALIGN(__pa(start_pg));
1964     pgend = __pa(end_pg) & PAGE_MASK;
1965 
1966     /*
1967      * If there are free pages between these, free the section of the
1968      * memmap array.
1969      */
1970     if (pg < pgend)
1971         memblock_phys_free(pg, pgend - pg);
1972 }
1973 
1974 /*
1975  * The mem_map array can get very big.  Free the unused area of the memory map.
1976  */
1977 static void __init free_unused_memmap(void)
1978 {
1979     unsigned long start, end, prev_end = 0;
1980     int i;
1981 
1982     if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1983         IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1984         return;
1985 
1986     /*
1987      * This relies on each bank being in address order.
1988      * The banks are sorted previously in bootmem_init().
1989      */
1990     for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1991 #ifdef CONFIG_SPARSEMEM
1992         /*
1993          * Take care not to free memmap entries that don't exist
1994          * due to SPARSEMEM sections which aren't present.
1995          */
1996         start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1997 #endif
1998         /*
1999          * Align down here since many operations in the VM subsystem
2000          * presume that there are no holes in the memory map inside
2001          * a pageblock.
2002          */
2003         start = round_down(start, pageblock_nr_pages);
2004 
2005         /*
2006          * If we had a previous bank, and there is space
2007          * between the current bank and the previous one, free it.
2008          */
2009         if (prev_end && prev_end < start)
2010             free_memmap(prev_end, start);
2011 
2012         /*
2013          * Align up here since many operations in the VM subsystem
2014          * presume that there are no holes in the memory map inside
2015          * a pageblock.
2016          */
2017         prev_end = ALIGN(end, pageblock_nr_pages);
2018     }
2019 
2020 #ifdef CONFIG_SPARSEMEM
2021     if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
2022         prev_end = ALIGN(end, pageblock_nr_pages);
2023         free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
2024     }
2025 #endif
2026 }
2027 
2028 static void __init __free_pages_memory(unsigned long start, unsigned long end)
2029 {
2030     int order;
2031 
2032     while (start < end) {
2033         order = min(MAX_ORDER - 1UL, __ffs(start));
2034 
2035         while (start + (1UL << order) > end)
2036             order--;
2037 
2038         memblock_free_pages(pfn_to_page(start), start, order);
2039 
2040         start += (1UL << order);
2041     }
2042 }
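
/*
 * Worked example (illustrative pfns): for start = 3, end = 16 the loop
 * frees one order-0 page at pfn 3 (__ffs(3) = 0), then an order-2 block
 * at pfn 4, then an order-3 block at pfn 8. Each step hands the buddy
 * allocator the largest naturally aligned power-of-two block that still
 * fits, capped at MAX_ORDER - 1.
 */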
2043 
2044 static unsigned long __init __free_memory_core(phys_addr_t start,
2045                  phys_addr_t end)
2046 {
2047     unsigned long start_pfn = PFN_UP(start);
2048     unsigned long end_pfn = min_t(unsigned long,
2049                       PFN_DOWN(end), max_low_pfn);
2050 
2051     if (start_pfn >= end_pfn)
2052         return 0;
2053 
2054     __free_pages_memory(start_pfn, end_pfn);
2055 
2056     return end_pfn - start_pfn;
2057 }
2058 
2059 static void __init memmap_init_reserved_pages(void)
2060 {
2061     struct memblock_region *region;
2062     phys_addr_t start, end;
2063     u64 i;
2064 
2065     /* initialize struct pages for the reserved regions */
2066     for_each_reserved_mem_range(i, &start, &end)
2067         reserve_bootmem_region(start, end);
2068 
2069     /* and also treat struct pages for the NOMAP regions as PageReserved */
2070     for_each_mem_region(region) {
2071         if (memblock_is_nomap(region)) {
2072             start = region->base;
2073             end = start + region->size;
2074             reserve_bootmem_region(start, end);
2075         }
2076     }
2077 }
2078 
2079 static unsigned long __init free_low_memory_core_early(void)
2080 {
2081     unsigned long count = 0;
2082     phys_addr_t start, end;
2083     u64 i;
2084 
2085     memblock_clear_hotplug(0, -1);
2086 
2087     memmap_init_reserved_pages();
2088 
2089     /*
2090      * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2091      * because in some cases, such as when Node 0 has no RAM
2092      * installed, low RAM will be on Node 1.
2093      */
2094     for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2095                 NULL)
2096         count += __free_memory_core(start, end);
2097 
2098     return count;
2099 }
2100 
2101 static int reset_managed_pages_done __initdata;
2102 
2103 void reset_node_managed_pages(pg_data_t *pgdat)
2104 {
2105     struct zone *z;
2106 
2107     for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2108         atomic_long_set(&z->managed_pages, 0);
2109 }
2110 
2111 void __init reset_all_zones_managed_pages(void)
2112 {
2113     struct pglist_data *pgdat;
2114 
2115     if (reset_managed_pages_done)
2116         return;
2117 
2118     for_each_online_pgdat(pgdat)
2119         reset_node_managed_pages(pgdat);
2120 
2121     reset_managed_pages_done = 1;
2122 }
2123 
2124 /**
2125  * memblock_free_all - release free pages to the buddy allocator
2126  */
2127 void __init memblock_free_all(void)
2128 {
2129     unsigned long pages;
2130 
2131     free_unused_memmap();
2132     reset_all_zones_managed_pages();
2133 
2134     pages = free_low_memory_core_early();
2135     totalram_pages_add(pages);
2136 }
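
/*
 * Usage sketch (illustrative): this is the hand-off point from memblock
 * to the buddy allocator; arch code calls it once from mem_init():
 *
 *	void __init mem_init(void)
 *	{
 *		...
 *		memblock_free_all();
 *	}
 *
 * After this, allocations must use the page/slab allocators; the
 * slab_is_available() check in memblock_alloc_internal() guards
 * against stragglers.
 */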
2137 
2138 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2139 
2140 static int memblock_debug_show(struct seq_file *m, void *private)
2141 {
2142     struct memblock_type *type = m->private;
2143     struct memblock_region *reg;
2144     int i;
2145     phys_addr_t end;
2146 
2147     for (i = 0; i < type->cnt; i++) {
2148         reg = &type->regions[i];
2149         end = reg->base + reg->size - 1;
2150 
2151         seq_printf(m, "%4d: ", i);
2152         seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2153     }
2154     return 0;
2155 }
2156 DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2157 
2158 static int __init memblock_init_debugfs(void)
2159 {
2160     struct dentry *root = debugfs_create_dir("memblock", NULL);
2161 
2162     debugfs_create_file("memory", 0444, root,
2163                 &memblock.memory, &memblock_debug_fops);
2164     debugfs_create_file("reserved", 0444, root,
2165                 &memblock.reserved, &memblock_debug_fops);
2166 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2167     debugfs_create_file("physmem", 0444, root, &physmem,
2168                 &memblock_debug_fops);
2169 #endif
2170 
2171     return 0;
2172 }
2173 __initcall(memblock_init_debugfs);
2174 
2175 #endif /* CONFIG_DEBUG_FS */
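
/*
 * Example: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK enabled,
 * the region lists can be inspected at runtime; each line is emitted by
 * memblock_debug_show() in the "%4d: %pa..%pa" format above
 * (addresses illustrative):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff
 */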