/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/bootmem.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
    .bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
    bootmem_debug = 1;
    return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({             \
    if (unlikely(bootmem_debug))            \
        pr_info("bootmem::%s " fmt,     \
            __func__, ## args);     \
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
    unsigned long bytes = DIV_ROUND_UP(pages, 8);

    return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 *
 * Returns the number of pages needed to hold the bitmap.
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
    unsigned long bytes = bootmap_bytes(pages);

    return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
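
/*
 * Worked example (illustrative, assuming 4 KiB pages and 64-bit longs):
 * a node of 262144 pages (1 GiB) needs DIV_ROUND_UP(262144, 8) = 32768
 * bitmap bytes, which is already long-aligned, and
 * bootmem_bootmap_pages() turns that into
 * PAGE_ALIGN(32768) >> PAGE_SHIFT = 8 bitmap pages.
 */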

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
    bootmem_data_t *ent;

    list_for_each_entry(ent, &bdata_list, list) {
        if (bdata->node_min_pfn < ent->node_min_pfn) {
            list_add_tail(&bdata->list, &ent->list);
            return;
        }
    }

    list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
    unsigned long mapstart, unsigned long start, unsigned long end)
{
    unsigned long mapsize;

    mminit_validate_memmodel_limits(&start, &end);
    bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
    bdata->node_min_pfn = start;
    bdata->node_low_pfn = end;
    link_bootmem(bdata);

    /*
     * Initially all pages are reserved - setup_arch() has to
     * register free RAM areas explicitly.
     */
    mapsize = bootmap_bytes(end - start);
    memset(bdata->node_bootmem_map, 0xff, mapsize);

    bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
        bdata - bootmem_node_data, start, mapstart, end, mapsize);

    return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                unsigned long startpfn, unsigned long endpfn)
{
    return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
    max_low_pfn = pages;
    min_low_pfn = start;
    return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
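
/*
 * Usage sketch (hypothetical; the function name and all PFN values are
 * made up for illustration).  Because init_bootmem() starts out with
 * every page reserved, a typical UMA setup_arch() registers the range,
 * frees the RAM it knows to be usable, and then re-reserves whatever is
 * already in use, such as the allocator's own bitmap:
 */
#if 0	/* illustrative only, not built */
static void __init example_register_memory(void)
{
    unsigned long bitmap_pfn = 0x400;	/* made-up bitmap placement */
    unsigned long total_pages = 0x20000;	/* 512 MiB of 4 KiB pages */
    unsigned long bitmap_bytes;

    bitmap_bytes = init_bootmem(bitmap_pfn, total_pages);

    /* hand the usable RAM back to the allocator */
    free_bootmem(PFN_PHYS(0x100), PFN_PHYS(total_pages - 0x100));

    /* keep the allocator's own bitmap out of circulation */
    reserve_bootmem(PFN_PHYS(bitmap_pfn), bitmap_bytes, BOOTMEM_DEFAULT);
}
#endif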

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
    unsigned long cursor, end;

    kmemleak_free_part_phys(physaddr, size);

    cursor = PFN_UP(physaddr);
    end = PFN_DOWN(physaddr + size);

    for (; cursor < end; cursor++) {
        __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
        totalram_pages++;
    }
}

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
    struct page *page;
    unsigned long *map, start, end, pages, cur, count = 0;

    if (!bdata->node_bootmem_map)
        return 0;

    map = bdata->node_bootmem_map;
    start = bdata->node_min_pfn;
    end = bdata->node_low_pfn;

    bdebug("nid=%td start=%lx end=%lx\n",
        bdata - bootmem_node_data, start, end);

    while (start < end) {
        unsigned long idx, vec;
        unsigned shift;

        idx = start - bdata->node_min_pfn;
        shift = idx & (BITS_PER_LONG - 1);
        /*
         * vec holds at most BITS_PER_LONG map bits,
         * bit 0 corresponds to start.
         */
        vec = ~map[idx / BITS_PER_LONG];

        if (shift) {
            vec >>= shift;
            if (end - start >= BITS_PER_LONG)
                vec |= ~map[idx / BITS_PER_LONG + 1] <<
                    (BITS_PER_LONG - shift);
        }
        /*
         * If we have a properly aligned and fully unreserved
         * BITS_PER_LONG block of pages in front of us, free
         * it in one go.
         */
        if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
            int order = ilog2(BITS_PER_LONG);

            __free_pages_bootmem(pfn_to_page(start), start, order);
            count += BITS_PER_LONG;
            start += BITS_PER_LONG;
        } else {
            cur = start;

            start = ALIGN(start + 1, BITS_PER_LONG);
            while (vec && cur != start) {
                if (vec & 1) {
                    page = pfn_to_page(cur);
                    __free_pages_bootmem(page, cur, 0);
                    count++;
                }
                vec >>= 1;
                ++cur;
            }
        }
    }

    cur = bdata->node_min_pfn;
    page = virt_to_page(bdata->node_bootmem_map);
    pages = bdata->node_low_pfn - bdata->node_min_pfn;
    pages = bootmem_bootmap_pages(pages);
    count += pages;
    while (pages--)
        __free_pages_bootmem(page++, cur++, 0);
    bdata->node_bootmem_map = NULL;

    bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

    return count;
}
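
/*
 * Worked example for the bitmap walk above (illustrative, assuming
 * BITS_PER_LONG == 64 and node_min_pfn == 0): for start == 70 we get
 * idx == 70 and shift == 6, so vec starts as ~map[1] >> 6, putting the
 * bit for pfn 70 at bit 0; the missing top 6 bits are then stitched in
 * from ~map[2] << 58.  Only when start is 64-page aligned and vec is
 * all ones does the fast path free the whole block with a single
 * order-6 (ilog2(64)) call instead of 64 order-0 calls.
 */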

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
    struct zone *z;

    for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
        z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
    struct pglist_data *pgdat;

    if (reset_managed_pages_done)
        return;

    for_each_online_pgdat(pgdat)
        reset_node_managed_pages(pgdat);

    reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
    unsigned long total_pages = 0;
    bootmem_data_t *bdata;

    reset_all_zones_managed_pages();

    list_for_each_entry(bdata, &bdata_list, list)
        total_pages += free_all_bootmem_core(bdata);

    totalram_pages += total_pages;

    return total_pages;
}

static void __init __free(bootmem_data_t *bdata,
            unsigned long sidx, unsigned long eidx)
{
    unsigned long idx;

    bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
        sidx + bdata->node_min_pfn,
        eidx + bdata->node_min_pfn);

    if (WARN_ON(bdata->node_bootmem_map == NULL))
        return;

    if (bdata->hint_idx > sidx)
        bdata->hint_idx = sidx;

    for (idx = sidx; idx < eidx; idx++)
        if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
            BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
            unsigned long eidx, int flags)
{
    unsigned long idx;
    int exclusive = flags & BOOTMEM_EXCLUSIVE;

    bdebug("nid=%td start=%lx end=%lx flags=%x\n",
        bdata - bootmem_node_data,
        sidx + bdata->node_min_pfn,
        eidx + bdata->node_min_pfn,
        flags);

    if (WARN_ON(bdata->node_bootmem_map == NULL))
        return 0;

    for (idx = sidx; idx < eidx; idx++)
        if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
            if (exclusive) {
                __free(bdata, sidx, idx);
                return -EBUSY;
            }
            bdebug("silent double reserve of PFN %lx\n",
                idx + bdata->node_min_pfn);
        }
    return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
                unsigned long start, unsigned long end,
                int reserve, int flags)
{
    unsigned long sidx, eidx;

    bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
        bdata - bootmem_node_data, start, end, reserve, flags);

    BUG_ON(start < bdata->node_min_pfn);
    BUG_ON(end > bdata->node_low_pfn);

    sidx = start - bdata->node_min_pfn;
    eidx = end - bdata->node_min_pfn;

    if (reserve)
        return __reserve(bdata, sidx, eidx, flags);
    else
        __free(bdata, sidx, eidx);
    return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
                int reserve, int flags)
{
    unsigned long pos;
    bootmem_data_t *bdata;

    pos = start;
    list_for_each_entry(bdata, &bdata_list, list) {
        int err;
        unsigned long max;

        if (pos < bdata->node_min_pfn ||
            pos >= bdata->node_low_pfn) {
            BUG_ON(pos != start);
            continue;
        }

        max = min(bdata->node_low_pfn, end);

        err = mark_bootmem_node(bdata, pos, max, reserve, flags);
        if (reserve && err) {
            mark_bootmem(start, pos, 0, 0);
            return err;
        }

        if (max == end)
            return 0;
        pos = bdata->node_low_pfn;
    }
    BUG();
}
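
/*
 * Worked example (illustrative PFN layout): reserving pfns
 * 0xf00-0x1100 with node 0 covering 0x0-0x1000 and node 1 covering
 * 0x1000-0x2000, the loop marks 0xf00-0x1000 on node 0, advances pos
 * to 0x1000, and marks 0x1000-0x1100 on node 1.  If an exclusive
 * reservation fails partway through, the recursive
 * mark_bootmem(start, pos, 0, 0) call rolls back what was already
 * reserved before the error is returned.
 */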

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                  unsigned long size)
{
    unsigned long start, end;

    kmemleak_free_part_phys(physaddr, size);

    start = PFN_UP(physaddr);
    end = PFN_DOWN(physaddr + size);

    mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
    unsigned long start, end;

    kmemleak_free_part_phys(physaddr, size);

    start = PFN_UP(physaddr);
    end = PFN_DOWN(physaddr + size);

    mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                 unsigned long size, int flags)
{
    unsigned long start, end;

    start = PFN_DOWN(physaddr);
    end = PFN_UP(physaddr + size);

    return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                int flags)
{
    unsigned long start, end;

    start = PFN_DOWN(addr);
    end = PFN_UP(addr + size);

    return mark_bootmem(start, end, 1, flags);
}

static unsigned long __init align_idx(struct bootmem_data *bdata,
                      unsigned long idx, unsigned long step)
{
    unsigned long base = bdata->node_min_pfn;

    /*
     * Align the index with respect to the node start so that the
     * combination of both satisfies the requested alignment.
     */

    return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
                      unsigned long off, unsigned long align)
{
    unsigned long base = PFN_PHYS(bdata->node_min_pfn);

    /* Same as align_idx for byte offsets */

    return ALIGN(base + off, align) - base;
}
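
/*
 * Worked example (illustrative): bitmap indices are node-relative, so
 * aligning them directly would be wrong on nodes that do not start at
 * pfn 0.  With node_min_pfn == 5 and step == 4,
 * align_idx(bdata, 1, 4) == ALIGN(5 + 1, 4) - 5 == 3, and index 3 is
 * indeed the first index whose absolute pfn (8) is 4-page aligned.
 */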

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                    unsigned long size, unsigned long align,
                    unsigned long goal, unsigned long limit)
{
    unsigned long fallback = 0;
    unsigned long min, max, start, sidx, midx, step;

    bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
        bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
        align, goal, limit);

    BUG_ON(!size);
    BUG_ON(align & (align - 1));
    BUG_ON(limit && goal + size > limit);

    if (!bdata->node_bootmem_map)
        return NULL;

    min = bdata->node_min_pfn;
    max = bdata->node_low_pfn;

    goal >>= PAGE_SHIFT;
    limit >>= PAGE_SHIFT;

    if (limit && max > limit)
        max = limit;
    if (max <= min)
        return NULL;

    step = max(align >> PAGE_SHIFT, 1UL);

    if (goal && min < goal && goal < max)
        start = ALIGN(goal, step);
    else
        start = ALIGN(min, step);

    sidx = start - bdata->node_min_pfn;
    midx = max - bdata->node_min_pfn;

    if (bdata->hint_idx > sidx) {
        /*
         * Handle the valid case of sidx being zero and still
         * catch the fallback below.
         */
        fallback = sidx + 1;
        sidx = align_idx(bdata, bdata->hint_idx, step);
    }

    while (1) {
        int merge;
        void *region;
        unsigned long eidx, i, start_off, end_off;
find_block:
        sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
        sidx = align_idx(bdata, sidx, step);
        eidx = sidx + PFN_UP(size);

        if (sidx >= midx || eidx > midx)
            break;

        for (i = sidx; i < eidx; i++)
            if (test_bit(i, bdata->node_bootmem_map)) {
                sidx = align_idx(bdata, i, step);
                if (sidx == i)
                    sidx += step;
                goto find_block;
            }

        if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                PFN_DOWN(bdata->last_end_off) + 1 == sidx)
            start_off = align_off(bdata, bdata->last_end_off, align);
        else
            start_off = PFN_PHYS(sidx);

        merge = PFN_DOWN(start_off) < sidx;
        end_off = start_off + size;

        bdata->last_end_off = end_off;
        bdata->hint_idx = PFN_UP(end_off);

        /*
         * Reserve the area now:
         */
        if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
            BUG();

        region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                start_off);
        memset(region, 0, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(region, size, 0, 0);
        return region;
    }

    if (fallback) {
        sidx = align_idx(bdata, fallback - 1, step);
        fallback = 0;
        goto find_block;
    }

    return NULL;
}
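
/*
 * Worked example for the merge logic above (illustrative, assuming
 * 4 KiB pages and an alignment that 0x1100 already satisfies): if the
 * previous allocation ended at byte offset 0x1100, i.e. 0x100 bytes
 * into page 1, and the next fit starts at sidx == 2, then
 * PFN_DOWN(0x1100) + 1 == 2 holds and start_off is taken from
 * last_end_off rather than PFN_PHYS(sidx).  merge is true since
 * PFN_DOWN(start_off) == 1 < sidx, so __reserve() starts at page 2 and
 * skips page 1, which is already reserved; the tail of page 1 is
 * reused instead of being wasted.
 */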

static void * __init alloc_bootmem_core(unsigned long size,
                    unsigned long align,
                    unsigned long goal,
                    unsigned long limit)
{
    bootmem_data_t *bdata;
    void *region;

    if (WARN_ON_ONCE(slab_is_available()))
        return kzalloc(size, GFP_NOWAIT);

    list_for_each_entry(bdata, &bdata_list, list) {
        if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
            continue;
        if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
            break;

        region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
        if (region)
            return region;
    }

    return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                          unsigned long align,
                          unsigned long goal,
                          unsigned long limit)
{
    void *ptr;

restart:
    ptr = alloc_bootmem_core(size, align, goal, limit);
    if (ptr)
        return ptr;
    if (goal) {
        goal = 0;
        goto restart;
    }

    return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                    unsigned long goal)
{
    unsigned long limit = 0;

    return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                    unsigned long goal, unsigned long limit)
{
    void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

    if (mem)
        return mem;
    /*
     * Whoops, we cannot satisfy the allocation request.
     */
    pr_alert("bootmem alloc of %lu bytes failed!\n", size);
    panic("Out of memory");
    return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                  unsigned long goal)
{
    unsigned long limit = 0;

    return ___alloc_bootmem(size, align, goal, limit);
}
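
/*
 * Usage sketch (hypothetical; the function name and sizes are made
 * up).  Callers use the panicking variant for memory that boot cannot
 * proceed without, and the _nopanic variant when they can degrade
 * gracefully:
 */
#if 0	/* illustrative only, not built */
static void __init example_alloc_tables(void)
{
    void *required, *optional;

    /* boot cannot continue without this one; failure panics */
    required = __alloc_bootmem(64 * 1024, SMP_CACHE_BYTES, 0);

    /* purely an optimization; fall back to a smaller setup on NULL */
    optional = __alloc_bootmem_nopanic(1024 * 1024, PAGE_SIZE, 0);
    if (!optional)
        pr_info("example: running without the big table\n");
}
#endif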

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                unsigned long size, unsigned long align,
                unsigned long goal, unsigned long limit)
{
    void *ptr;

    if (WARN_ON_ONCE(slab_is_available()))
        return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
again:

    /* do not panic in alloc_bootmem_bdata() */
    if (limit && goal + size > limit)
        limit = 0;

    ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
    if (ptr)
        return ptr;

    ptr = alloc_bootmem_core(size, align, goal, limit);
    if (ptr)
        return ptr;

    if (goal) {
        goal = 0;
        goto again;
    }

    return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                   unsigned long align, unsigned long goal)
{
    return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                    unsigned long align, unsigned long goal,
                    unsigned long limit)
{
    void *ptr;

    ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
    if (ptr)
        return ptr;

    pr_alert("bootmem alloc of %lu bytes failed!\n", size);
    panic("Out of memory");
    return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                   unsigned long align, unsigned long goal)
{
    if (WARN_ON_ONCE(slab_is_available()))
        return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

    return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
    unsigned long end_pfn;

    if (WARN_ON_ONCE(slab_is_available()))
        return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

    /* update goal according to MAX_DMA32_PFN */
    end_pfn = pgdat_end_pfn(pgdat);

    if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
        (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
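        /*
         * Note: with 4 KiB pages the slack term above is
         * 128 >> (20 - 12) == 0, so the check effectively reduces
         * to end_pfn > MAX_DMA32_PFN.
         */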
        void *ptr;
        unsigned long new_goal;

        new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                         new_goal, 0);
        if (ptr)
            return ptr;
    }
#endif

    return __alloc_bootmem_node(pgdat, size, align, goal);
}

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                  unsigned long goal)
{
    return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
                      unsigned long align,
                      unsigned long goal)
{
    return ___alloc_bootmem_nopanic(size, align, goal,
                    ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                       unsigned long align, unsigned long goal)
{
    if (WARN_ON_ONCE(slab_is_available()))
        return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

    return ___alloc_bootmem_node(pgdat, size, align,
                     goal, ARCH_LOW_ADDRESS_LIMIT);
}