// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is the boot-time memory allocator. It tracks physical memory as
 * collections of contiguous regions grouped into "memblock types":
 *
 * * %memblock.memory - physical memory available to the kernel
 * * %memblock.reserved - ranges that were allocated or must not be touched
 * * %physmem - all physical memory reported by firmware, regardless of
 *   usability (only with %CONFIG_HAVE_MEMBLOCK_PHYS_MAP)
 *
 * Each region has a base address, a size, per-region flags and, on NUMA
 * systems, a node id. The regions of a type are kept sorted and
 * non-overlapping, and adjacent compatible regions are merged.
 *
 * Early architecture code registers memory with memblock_add() or
 * memblock_add_node() and marks busy ranges with memblock_reserve().
 * Boot-time allocations are served by the memblock_alloc*() family until
 * the page allocator takes over; memblock_free_all() then releases the
 * free ranges to the buddy allocator. Unless %CONFIG_ARCH_KEEP_MEMBLOCK
 * is selected, most memblock data and code is discarded after
 * initialization completes.
 */

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif
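
/*
 * Example (illustrative, not part of this file): a typical early
 * architecture setup path feeds firmware-provided RAM ranges into the
 * "memory" type and marks the kernel image and initrd as "reserved".
 * The addresses, sizes and the example_setup_memory() name below are
 * invented for the sketch; only the memblock calls are real API.
 *
 *	void __init example_setup_memory(void)
 *	{
 *		memblock_add(0x80000000, SZ_512M);
 *		memblock_add(0xc0000000, SZ_256M);
 *
 *		memblock_reserve(__pa_symbol(_text),
 *				 __pa_symbol(_end) - __pa_symbol(_text));
 *		if (initrd_start)
 *			memblock_reserve(__pa(initrd_start),
 *					 initrd_end - initrd_start);
 *	}
 */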

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers. For architectures that do not
 * keep memblock data after init, this pointer will be reset to NULL at
 * memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
0213 static phys_addr_t __init_memblock
0214 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
0215 phys_addr_t size, phys_addr_t align, int nid,
0216 enum memblock_flags flags)
0217 {
0218 phys_addr_t this_start, this_end, cand;
0219 u64 i;
0220
0221 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
0222 this_start = clamp(this_start, start, end);
0223 this_end = clamp(this_end, start, end);
0224
0225 cand = round_up(this_start, align);
0226 if (cand < this_end && this_end - cand >= size)
0227 return cand;
0228 }
0229
0230 return 0;
0231 }

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
0248 static phys_addr_t __init_memblock
0249 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
0250 phys_addr_t size, phys_addr_t align, int nid,
0251 enum memblock_flags flags)
0252 {
0253 phys_addr_t this_start, this_end, cand;
0254 u64 i;
0255
0256 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
0257 NULL) {
0258 this_start = clamp(this_start, start, end);
0259 this_end = clamp(this_end, start, end);
0260
0261 if (this_end < size)
0262 continue;
0263
0264 cand = round_down(this_end - size, align);
0265 if (cand >= this_start)
0266 return cand;
0267 }
0268
0269 return 0;
0270 }

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 * The search is top-down unless memblock_set_bottom_up() enabled the
 * bottom-up direction.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
0287 static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
0288 phys_addr_t align, phys_addr_t start,
0289 phys_addr_t end, int nid,
0290 enum memblock_flags flags)
0291 {
0292
0293 if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
0294 end == MEMBLOCK_ALLOC_NOLEAKTRACE)
0295 end = memblock.current_limit;
0296
0297
0298 start = max_t(phys_addr_t, start, PAGE_SIZE);
0299 end = max(start, end);
0300
0301 if (memblock_bottom_up())
0302 return __memblock_find_range_bottom_up(start, end, size, align,
0303 nid, flags);
0304 else
0305 return __memblock_find_range_top_down(start, end, size, align,
0306 nid, flags);
0307 }
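
/*
 * Illustrative sketch (not part of this file): the search direction used
 * above is controlled by memblock_set_bottom_up(). Memory-hotplug aware
 * setups may enable it so that early allocations land near the kernel
 * image instead of at the top of memory, e.g.:
 *
 *	memblock_set_bottom_up(true);
 *	addr = memblock_phys_alloc(SZ_2M, SZ_2M);
 *	memblock_set_bottom_up(false);
 */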

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range. If the
 * search in mirrored memory fails, retry in any memory.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
0322 static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
0323 phys_addr_t end, phys_addr_t size,
0324 phys_addr_t align)
0325 {
0326 phys_addr_t ret;
0327 enum memblock_flags flags = choose_memblock_flags();
0328
0329 again:
0330 ret = memblock_find_in_range_node(size, align, start, end,
0331 NUMA_NO_NODE, flags);
0332
0333 if (!ret && (flags & MEMBLOCK_MIRROR)) {
0334 pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
0335 &size);
0336 flags &= ~MEMBLOCK_MIRROR;
0337 goto again;
0338 }
0339
0340 return ret;
0341 }
0342
0343 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
0344 {
0345 type->total_size -= type->regions[r].size;
0346 memmove(&type->regions[r], &type->regions[r + 1],
0347 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
0348 type->cnt--;
0349
0350
0351 if (type->cnt == 0) {
0352 WARN_ON(type->total_size != 0);
0353 type->cnt = 1;
0354 type->regions[0].base = 0;
0355 type->regions[0].size = 0;
0356 type->regions[0].flags = 0;
0357 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
0358 }
0359 }
0360
0361 #ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were
 * allocated
 */
0365 void __init memblock_discard(void)
0366 {
0367 phys_addr_t addr, size;
0368
0369 if (memblock.reserved.regions != memblock_reserved_init_regions) {
0370 addr = __pa(memblock.reserved.regions);
0371 size = PAGE_ALIGN(sizeof(struct memblock_region) *
0372 memblock.reserved.max);
0373 if (memblock_reserved_in_slab)
0374 kfree(memblock.reserved.regions);
0375 else
0376 memblock_free_late(addr, size);
0377 }
0378
0379 if (memblock.memory.regions != memblock_memory_init_regions) {
0380 addr = __pa(memblock.memory.regions);
0381 size = PAGE_ALIGN(sizeof(struct memblock_region) *
0382 memblock.memory.max);
0383 if (memblock_memory_in_slab)
0384 kfree(memblock.memory.regions);
0385 else
0386 memblock_free_late(addr, size);
0387 }
0388
0389 memblock_memory = NULL;
0390 }
0391 #endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
0408 static int __init_memblock memblock_double_array(struct memblock_type *type,
0409 phys_addr_t new_area_start,
0410 phys_addr_t new_area_size)
0411 {
0412 struct memblock_region *new_array, *old_array;
0413 phys_addr_t old_alloc_size, new_alloc_size;
0414 phys_addr_t old_size, new_size, addr, new_end;
0415 int use_slab = slab_is_available();
0416 int *in_slab;
0417
0418
0419
0420
0421 if (!memblock_can_resize)
0422 return -1;
0423
0424
0425 old_size = type->max * sizeof(struct memblock_region);
0426 new_size = old_size << 1;
0427
0428
0429
0430
0431 old_alloc_size = PAGE_ALIGN(old_size);
0432 new_alloc_size = PAGE_ALIGN(new_size);
0433
0434
0435 if (type == &memblock.memory)
0436 in_slab = &memblock_memory_in_slab;
0437 else
0438 in_slab = &memblock_reserved_in_slab;
0439
0440
0441 if (use_slab) {
0442 new_array = kmalloc(new_size, GFP_KERNEL);
0443 addr = new_array ? __pa(new_array) : 0;
0444 } else {
0445
0446 if (type != &memblock.reserved)
0447 new_area_start = new_area_size = 0;
0448
0449 addr = memblock_find_in_range(new_area_start + new_area_size,
0450 memblock.current_limit,
0451 new_alloc_size, PAGE_SIZE);
0452 if (!addr && new_area_size)
0453 addr = memblock_find_in_range(0,
0454 min(new_area_start, memblock.current_limit),
0455 new_alloc_size, PAGE_SIZE);
0456
0457 new_array = addr ? __va(addr) : NULL;
0458 }
0459 if (!addr) {
0460 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
0461 type->name, type->max, type->max * 2);
0462 return -1;
0463 }
0464
0465 new_end = addr + new_size - 1;
0466 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
0467 type->name, type->max * 2, &addr, &new_end);
0468
0469
0470
0471
0472
0473
0474 memcpy(new_array, type->regions, old_size);
0475 memset(new_array + type->max, 0, old_size);
0476 old_array = type->regions;
0477 type->regions = new_array;
0478 type->max <<= 1;
0479
0480
0481 if (*in_slab)
0482 kfree(old_array);
0483 else if (old_array != memblock_memory_init_regions &&
0484 old_array != memblock_reserved_init_regions)
0485 memblock_free(old_array, old_alloc_size);
0486
0487
0488
0489
0490
0491 if (!use_slab)
0492 BUG_ON(memblock_reserve(addr, new_alloc_size));
0493
0494
0495 *in_slab = use_slab;
0496
0497 return 0;
0498 }

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
0506 static void __init_memblock memblock_merge_regions(struct memblock_type *type)
0507 {
0508 int i = 0;
0509
0510
0511 while (i < type->cnt - 1) {
0512 struct memblock_region *this = &type->regions[i];
0513 struct memblock_region *next = &type->regions[i + 1];
0514
0515 if (this->base + this->size != next->base ||
0516 memblock_get_region_node(this) !=
0517 memblock_get_region_node(next) ||
0518 this->flags != next->flags) {
0519 BUG_ON(this->base + this->size > next->base);
0520 i++;
0521 continue;
0522 }
0523
0524 this->size += next->size;
0525
0526 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
0527 type->cnt--;
0528 }
0529 }

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
0543 static void __init_memblock memblock_insert_region(struct memblock_type *type,
0544 int idx, phys_addr_t base,
0545 phys_addr_t size,
0546 int nid,
0547 enum memblock_flags flags)
0548 {
0549 struct memblock_region *rgn = &type->regions[idx];
0550
0551 BUG_ON(type->cnt >= type->max);
0552 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
0553 rgn->base = base;
0554 rgn->size = size;
0555 rgn->flags = flags;
0556 memblock_set_region_node(rgn, nid);
0557 type->cnt++;
0558 type->total_size += size;
0559 }

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
0577 static int __init_memblock memblock_add_range(struct memblock_type *type,
0578 phys_addr_t base, phys_addr_t size,
0579 int nid, enum memblock_flags flags)
0580 {
0581 bool insert = false;
0582 phys_addr_t obase = base;
0583 phys_addr_t end = base + memblock_cap_size(base, &size);
0584 int idx, nr_new;
0585 struct memblock_region *rgn;
0586
0587 if (!size)
0588 return 0;
0589
0590
0591 if (type->regions[0].size == 0) {
0592 WARN_ON(type->cnt != 1 || type->total_size);
0593 type->regions[0].base = base;
0594 type->regions[0].size = size;
0595 type->regions[0].flags = flags;
0596 memblock_set_region_node(&type->regions[0], nid);
0597 type->total_size = size;
0598 return 0;
0599 }
0600
0601
0602
0603
0604
0605
0606
0607
0608 if (type->cnt * 2 + 1 < type->max)
0609 insert = true;
0610
0611 repeat:
0612
0613
0614
0615
0616
0617 base = obase;
0618 nr_new = 0;
0619
0620 for_each_memblock_type(idx, type, rgn) {
0621 phys_addr_t rbase = rgn->base;
0622 phys_addr_t rend = rbase + rgn->size;
0623
0624 if (rbase >= end)
0625 break;
0626 if (rend <= base)
0627 continue;
0628
0629
0630
0631
0632 if (rbase > base) {
0633 #ifdef CONFIG_NUMA
0634 WARN_ON(nid != memblock_get_region_node(rgn));
0635 #endif
0636 WARN_ON(flags != rgn->flags);
0637 nr_new++;
0638 if (insert)
0639 memblock_insert_region(type, idx++, base,
0640 rbase - base, nid,
0641 flags);
0642 }
0643
0644 base = min(rend, end);
0645 }
0646
0647
0648 if (base < end) {
0649 nr_new++;
0650 if (insert)
0651 memblock_insert_region(type, idx, base, end - base,
0652 nid, flags);
0653 }
0654
0655 if (!nr_new)
0656 return 0;
0657
0658
0659
0660
0661
0662 if (!insert) {
0663 while (type->cnt + nr_new > type->max)
0664 if (memblock_double_array(type, obase, size) < 0)
0665 return -ENOMEM;
0666 insert = true;
0667 goto repeat;
0668 } else {
0669 memblock_merge_regions(type);
0670 return 0;
0671 }
0672 }
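
/*
 * Worked example (hypothetical addresses): two overlapping registrations
 *
 *	memblock_add(0x1000, 0x2000);	memory: [0x1000-0x2fff]
 *	memblock_add(0x2000, 0x2000);	memory: [0x1000-0x3fff]
 *
 * The second call only inserts the not-yet-covered tail [0x3000-0x3fff];
 * memblock_merge_regions() then folds it into its neighbour, so the type
 * still holds a single region.
 */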

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
0687 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
0688 int nid, enum memblock_flags flags)
0689 {
0690 phys_addr_t end = base + size - 1;
0691
0692 memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
0693 &base, &end, nid, flags, (void *)_RET_IP_);
0694
0695 return memblock_add_range(&memblock.memory, base, size, nid, flags);
0696 }

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
0709 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
0710 {
0711 phys_addr_t end = base + size - 1;
0712
0713 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
0714 &base, &end, (void *)_RET_IP_);
0715
0716 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
0717 }

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index of the
 * first region after the range is returned in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
0735 static int __init_memblock memblock_isolate_range(struct memblock_type *type,
0736 phys_addr_t base, phys_addr_t size,
0737 int *start_rgn, int *end_rgn)
0738 {
0739 phys_addr_t end = base + memblock_cap_size(base, &size);
0740 int idx;
0741 struct memblock_region *rgn;
0742
0743 *start_rgn = *end_rgn = 0;
0744
0745 if (!size)
0746 return 0;
0747
0748
0749 while (type->cnt + 2 > type->max)
0750 if (memblock_double_array(type, base, size) < 0)
0751 return -ENOMEM;
0752
0753 for_each_memblock_type(idx, type, rgn) {
0754 phys_addr_t rbase = rgn->base;
0755 phys_addr_t rend = rbase + rgn->size;
0756
0757 if (rbase >= end)
0758 break;
0759 if (rend <= base)
0760 continue;
0761
0762 if (rbase < base) {
0763
0764
0765
0766
0767 rgn->base = base;
0768 rgn->size -= base - rbase;
0769 type->total_size -= base - rbase;
0770 memblock_insert_region(type, idx, rbase, base - rbase,
0771 memblock_get_region_node(rgn),
0772 rgn->flags);
0773 } else if (rend > end) {
0774
0775
0776
0777
0778 rgn->base = end;
0779 rgn->size -= end - rbase;
0780 type->total_size -= end - rbase;
0781 memblock_insert_region(type, idx--, rbase, end - rbase,
0782 memblock_get_region_node(rgn),
0783 rgn->flags);
0784 } else {
0785
0786 if (!*end_rgn)
0787 *start_rgn = idx;
0788 *end_rgn = idx + 1;
0789 }
0790 }
0791
0792 return 0;
0793 }
0794
0795 static int __init_memblock memblock_remove_range(struct memblock_type *type,
0796 phys_addr_t base, phys_addr_t size)
0797 {
0798 int start_rgn, end_rgn;
0799 int i, ret;
0800
0801 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
0802 if (ret)
0803 return ret;
0804
0805 for (i = end_rgn - 1; i >= start_rgn; i--)
0806 memblock_remove_region(type, i);
0807 return 0;
0808 }
0809
0810 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
0811 {
0812 phys_addr_t end = base + size - 1;
0813
0814 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
0815 &base, &end, (void *)_RET_IP_);
0816
0817 return memblock_remove_range(&memblock.memory, base, size);
0818 }

/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory is not released to the buddy allocator.
 */
0828 void __init_memblock memblock_free(void *ptr, size_t size)
0829 {
0830 if (ptr)
0831 memblock_phys_free(__pa(ptr), size);
0832 }

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory is not released to the buddy allocator.
 */
0842 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
0843 {
0844 phys_addr_t end = base + size - 1;
0845
0846 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
0847 &base, &end, (void *)_RET_IP_);
0848
0849 kmemleak_free_part_phys(base, size);
0850 return memblock_remove_range(&memblock.reserved, base, size);
0851 }
0852
0853 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
0854 {
0855 phys_addr_t end = base + size - 1;
0856
0857 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
0858 &base, &end, (void *)_RET_IP_);
0859
0860 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
0861 }
0862
0863 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
0864 int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
0865 {
0866 phys_addr_t end = base + size - 1;
0867
0868 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
0869 &base, &end, (void *)_RET_IP_);
0870
0871 return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
0872 }
0873 #endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
0886 static int __init_memblock memblock_setclr_flag(phys_addr_t base,
0887 phys_addr_t size, int set, int flag)
0888 {
0889 struct memblock_type *type = &memblock.memory;
0890 int i, ret, start_rgn, end_rgn;
0891
0892 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
0893 if (ret)
0894 return ret;
0895
0896 for (i = start_rgn; i < end_rgn; i++) {
0897 struct memblock_region *r = &type->regions[i];
0898
0899 if (set)
0900 r->flags |= flag;
0901 else
0902 r->flags &= ~flag;
0903 }
0904
0905 memblock_merge_regions(type);
0906 return 0;
0907 }

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
0916 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
0917 {
0918 return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
0919 }

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
0928 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
0929 {
0930 return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
0931 }

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
0940 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
0941 {
0942 if (!mirrored_kernelcore)
0943 return 0;
0944
0945 system_has_some_mirror = true;
0946
0947 return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
0948 }

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory.
 *
 * Return: 0 on success, -errno on failure.
 */
0965 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
0966 {
0967 return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
0968 }

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
0977 int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
0978 {
0979 return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
0980 }
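
/*
 * Example (illustrative): firmware ranges that must stay out of the linear
 * map, such as EFI runtime services regions on some architectures, are
 * typically kept in "memory" but flagged so they are never mapped or
 * handed out:
 *
 *	memblock_mark_nomap(md_paddr, md_size);
 *
 * md_paddr/md_size are placeholders for a firmware memory descriptor.
 * The range still appears in memblock.memory, but the free-range
 * iterators below skip it unless MEMBLOCK_NOMAP is passed in @flags.
 */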
0981
0982 static bool should_skip_region(struct memblock_type *type,
0983 struct memblock_region *m,
0984 int nid, int flags)
0985 {
0986 int m_nid = memblock_get_region_node(m);
0987
0988
0989 if (type != memblock_memory)
0990 return false;
0991
0992
0993 if (nid != NUMA_NO_NODE && nid != m_nid)
0994 return true;
0995
0996
0997 if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
0998 !(flags & MEMBLOCK_HOTPLUG))
0999 return true;
1000
1001
1002 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
1003 return true;
1004
1005
1006 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
1007 return true;
1008
1009
1010 if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
1011 return true;
1012
1013 return false;
1014 }

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
1042 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
1043 struct memblock_type *type_a,
1044 struct memblock_type *type_b, phys_addr_t *out_start,
1045 phys_addr_t *out_end, int *out_nid)
1046 {
1047 int idx_a = *idx & 0xffffffff;
1048 int idx_b = *idx >> 32;
1049
1050 if (WARN_ONCE(nid == MAX_NUMNODES,
1051 "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1052 nid = NUMA_NO_NODE;
1053
1054 for (; idx_a < type_a->cnt; idx_a++) {
1055 struct memblock_region *m = &type_a->regions[idx_a];
1056
1057 phys_addr_t m_start = m->base;
1058 phys_addr_t m_end = m->base + m->size;
1059 int m_nid = memblock_get_region_node(m);
1060
1061 if (should_skip_region(type_a, m, nid, flags))
1062 continue;
1063
1064 if (!type_b) {
1065 if (out_start)
1066 *out_start = m_start;
1067 if (out_end)
1068 *out_end = m_end;
1069 if (out_nid)
1070 *out_nid = m_nid;
1071 idx_a++;
1072 *idx = (u32)idx_a | (u64)idx_b << 32;
1073 return;
1074 }
1075
1076
1077 for (; idx_b < type_b->cnt + 1; idx_b++) {
1078 struct memblock_region *r;
1079 phys_addr_t r_start;
1080 phys_addr_t r_end;
1081
1082 r = &type_b->regions[idx_b];
1083 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1084 r_end = idx_b < type_b->cnt ?
1085 r->base : PHYS_ADDR_MAX;
1086
1087
1088
1089
1090
1091 if (r_start >= m_end)
1092 break;
1093
1094 if (m_start < r_end) {
1095 if (out_start)
1096 *out_start =
1097 max(m_start, r_start);
1098 if (out_end)
1099 *out_end = min(m_end, r_end);
1100 if (out_nid)
1101 *out_nid = m_nid;
1102
1103
1104
1105
1106 if (m_end <= r_end)
1107 idx_a++;
1108 else
1109 idx_b++;
1110 *idx = (u32)idx_a | (u64)idx_b << 32;
1111 return;
1112 }
1113 }
1114 }
1115
1116
1117 *idx = ULLONG_MAX;
1118 }
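
/*
 * Example (sketch): the iterator above backs for_each_free_mem_range().
 * Counting free boot memory could look like the snippet below;
 * "free_bytes" is a local name invented for the example.
 *
 *	phys_addr_t start, end, free_bytes = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free_bytes += end - start;
 */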

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
1137 void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1138 enum memblock_flags flags,
1139 struct memblock_type *type_a,
1140 struct memblock_type *type_b,
1141 phys_addr_t *out_start,
1142 phys_addr_t *out_end, int *out_nid)
1143 {
1144 int idx_a = *idx & 0xffffffff;
1145 int idx_b = *idx >> 32;
1146
1147 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1148 nid = NUMA_NO_NODE;
1149
1150 if (*idx == (u64)ULLONG_MAX) {
1151 idx_a = type_a->cnt - 1;
1152 if (type_b != NULL)
1153 idx_b = type_b->cnt;
1154 else
1155 idx_b = 0;
1156 }
1157
1158 for (; idx_a >= 0; idx_a--) {
1159 struct memblock_region *m = &type_a->regions[idx_a];
1160
1161 phys_addr_t m_start = m->base;
1162 phys_addr_t m_end = m->base + m->size;
1163 int m_nid = memblock_get_region_node(m);
1164
1165 if (should_skip_region(type_a, m, nid, flags))
1166 continue;
1167
1168 if (!type_b) {
1169 if (out_start)
1170 *out_start = m_start;
1171 if (out_end)
1172 *out_end = m_end;
1173 if (out_nid)
1174 *out_nid = m_nid;
1175 idx_a--;
1176 *idx = (u32)idx_a | (u64)idx_b << 32;
1177 return;
1178 }
1179
1180
1181 for (; idx_b >= 0; idx_b--) {
1182 struct memblock_region *r;
1183 phys_addr_t r_start;
1184 phys_addr_t r_end;
1185
1186 r = &type_b->regions[idx_b];
1187 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1188 r_end = idx_b < type_b->cnt ?
1189 r->base : PHYS_ADDR_MAX;
1190
1191
1192
1193
1194
1195 if (r_end <= m_start)
1196 break;
1197
1198 if (m_end > r_start) {
1199 if (out_start)
1200 *out_start = max(m_start, r_start);
1201 if (out_end)
1202 *out_end = min(m_end, r_end);
1203 if (out_nid)
1204 *out_nid = m_nid;
1205 if (m_start >= r_start)
1206 idx_a--;
1207 else
1208 idx_b--;
1209 *idx = (u32)idx_a | (u64)idx_b << 32;
1210 return;
1211 }
1212 }
1213 }
1214
1215 *idx = ULLONG_MAX;
1216 }

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
1221 void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1222 unsigned long *out_start_pfn,
1223 unsigned long *out_end_pfn, int *out_nid)
1224 {
1225 struct memblock_type *type = &memblock.memory;
1226 struct memblock_region *r;
1227 int r_nid;
1228
1229 while (++*idx < type->cnt) {
1230 r = &type->regions[*idx];
1231 r_nid = memblock_get_region_node(r);
1232
1233 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1234 continue;
1235 if (nid == MAX_NUMNODES || nid == r_nid)
1236 break;
1237 }
1238 if (*idx >= type->cnt) {
1239 *idx = -1;
1240 return;
1241 }
1242
1243 if (out_start_pfn)
1244 *out_start_pfn = PFN_UP(r->base);
1245 if (out_end_pfn)
1246 *out_end_pfn = PFN_DOWN(r->base + r->size);
1247 if (out_nid)
1248 *out_nid = r_nid;
1249 }
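
/*
 * Example (sketch): walking memory as PFN ranges per node, as paging init
 * code typically does when sizing per-node structures. The pr_info() body
 * is illustrative only.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns %lx-%lx\n", nid, start_pfn, end_pfn);
 */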

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
1264 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1265 struct memblock_type *type, int nid)
1266 {
1267 #ifdef CONFIG_NUMA
1268 int start_rgn, end_rgn;
1269 int i, ret;
1270
1271 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1272 if (ret)
1273 return ret;
1274
1275 for (i = start_rgn; i < end_rgn; i++)
1276 memblock_set_region_node(&type->regions[i], nid);
1277
1278 memblock_merge_regions(type);
1279 #endif
1280 return 0;
1281 }
1282
1283 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. It is used by the deferred memory
 * init routines; centralizing the logic here avoids duplicating it at
 * every call site.
 */
1299 void __init_memblock
1300 __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
1301 unsigned long *out_spfn, unsigned long *out_epfn)
1302 {
1303 int zone_nid = zone_to_nid(zone);
1304 phys_addr_t spa, epa;
1305
1306 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1307 &memblock.memory, &memblock.reserved,
1308 &spa, &epa, NULL);
1309
1310 while (*idx != U64_MAX) {
1311 unsigned long epfn = PFN_DOWN(epa);
1312 unsigned long spfn = PFN_UP(spa);
1313
1314
1315
1316
1317
1318 if (zone->zone_start_pfn < epfn && spfn < epfn) {
1319
1320 if (zone_end_pfn(zone) <= spfn) {
1321 *idx = U64_MAX;
1322 break;
1323 }
1324
1325 if (out_spfn)
1326 *out_spfn = max(zone->zone_start_pfn, spfn);
1327 if (out_epfn)
1328 *out_epfn = min(zone_end_pfn(zone), epfn);
1329
1330 return;
1331 }
1332
1333 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1334 &memblock.memory, &memblock.reserved,
1335 &spa, &epa, NULL);
1336 }
1337
1338
1339 if (out_spfn)
1340 *out_spfn = ULONG_MAX;
1341 if (out_epfn)
1342 *out_epfn = 0;
1343 }
1344
1345 #endif

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, newly allocated blocks are registered with kmemleak so
 * they are never reported as leaks.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
1372 phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1373 phys_addr_t align, phys_addr_t start,
1374 phys_addr_t end, int nid,
1375 bool exact_nid)
1376 {
1377 enum memblock_flags flags = choose_memblock_flags();
1378 phys_addr_t found;
1379
1380 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1381 nid = NUMA_NO_NODE;
1382
1383 if (!align) {
1384
1385 dump_stack();
1386 align = SMP_CACHE_BYTES;
1387 }
1388
1389 again:
1390 found = memblock_find_in_range_node(size, align, start, end, nid,
1391 flags);
1392 if (found && !memblock_reserve(found, size))
1393 goto done;
1394
1395 if (nid != NUMA_NO_NODE && !exact_nid) {
1396 found = memblock_find_in_range_node(size, align, start,
1397 end, NUMA_NO_NODE,
1398 flags);
1399 if (found && !memblock_reserve(found, size))
1400 goto done;
1401 }
1402
1403 if (flags & MEMBLOCK_MIRROR) {
1404 flags &= ~MEMBLOCK_MIRROR;
1405 pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
1406 &size);
1407 goto again;
1408 }
1409
1410 return 0;
1411
1412 done:
1413
1414
1415
1416
1417 if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
1418
1419
1420
1421
1422
1423
1424 kmemleak_alloc_phys(found, size, 0);
1425
1426 return found;
1427 }

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
1441 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1442 phys_addr_t align,
1443 phys_addr_t start,
1444 phys_addr_t end)
1445 {
1446 memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1447 __func__, (u64)size, (u64)align, &start, &end,
1448 (void *)_RET_IP_);
1449 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1450 false);
1451 }
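
/*
 * Example (illustrative): a DMA-limited early allocation, e.g. a bounce
 * buffer that must sit below 4 GiB. The size, alignment and the upper
 * bound expression are invented for the sketch.
 *
 *	phys_addr_t buf = memblock_phys_alloc_range(SZ_1M, PAGE_SIZE,
 *						    0, DMA_BIT_MASK(32));
 *	if (!buf)
 *		panic("cannot allocate low bounce buffer\n");
 */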

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
1466 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1467 {
1468 return memblock_alloc_range_nid(size, align, 0,
1469 MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1470 }

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory, are handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
1492 static void * __init memblock_alloc_internal(
1493 phys_addr_t size, phys_addr_t align,
1494 phys_addr_t min_addr, phys_addr_t max_addr,
1495 int nid, bool exact_nid)
1496 {
1497 phys_addr_t alloc;
1498
1499
1500
1501
1502
1503
1504 if (WARN_ON_ONCE(slab_is_available()))
1505 return kzalloc_node(size, GFP_NOWAIT, nid);
1506
1507 if (max_addr > memblock.current_limit)
1508 max_addr = memblock.current_limit;
1509
1510 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1511 exact_nid);
1512
1513
1514 if (!alloc && min_addr)
1515 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1516 exact_nid);
1517
1518 if (!alloc)
1519 return NULL;
1520
1521 return phys_to_virt(alloc);
1522 }

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
1542 void * __init memblock_alloc_exact_nid_raw(
1543 phys_addr_t size, phys_addr_t align,
1544 phys_addr_t min_addr, phys_addr_t max_addr,
1545 int nid)
1546 {
1547 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1548 __func__, (u64)size, (u64)align, nid, &min_addr,
1549 &max_addr, (void *)_RET_IP_);
1550
1551 return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1552 true);
1553 }

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
1574 void * __init memblock_alloc_try_nid_raw(
1575 phys_addr_t size, phys_addr_t align,
1576 phys_addr_t min_addr, phys_addr_t max_addr,
1577 int nid)
1578 {
1579 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1580 __func__, (u64)size, (u64)align, nid, &min_addr,
1581 &max_addr, (void *)_RET_IP_);
1582
1583 return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1584 false);
1585 }

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
1604 void * __init memblock_alloc_try_nid(
1605 phys_addr_t size, phys_addr_t align,
1606 phys_addr_t min_addr, phys_addr_t max_addr,
1607 int nid)
1608 {
1609 void *ptr;
1610
1611 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1612 __func__, (u64)size, (u64)align, nid, &min_addr,
1613 &max_addr, (void *)_RET_IP_);
1614 ptr = memblock_alloc_internal(size, align,
1615 min_addr, max_addr, nid, false);
1616 if (ptr)
1617 memset(ptr, 0, size);
1618
1619 return ptr;
1620 }
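
/*
 * Example (sketch): most callers use the memblock_alloc() wrapper from
 * memblock.h, which ends up here with the whole accessible range and
 * NUMA_NO_NODE and returns zeroed, directly-mapped memory. "example_table"
 * and "nr" are hypothetical names for the sketch.
 *
 *	struct example_table *tbl;
 *
 *	tbl = memblock_alloc(array_size(nr, sizeof(*tbl)), SMP_CACHE_BYTES);
 *	if (!tbl)
 *		panic("%s: failed to allocate table\n", __func__);
 */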

/**
 * memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages released through
 * this function will not be marked reserved.
 */
1631 void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
1632 {
1633 phys_addr_t cursor, end;
1634
1635 end = base + size - 1;
1636 memblock_dbg("%s: [%pa-%pa] %pS\n",
1637 __func__, &base, &end, (void *)_RET_IP_);
1638 kmemleak_free_part_phys(base, size);
1639 cursor = PFN_UP(base);
1640 end = PFN_DOWN(base + size);
1641
1642 for (; cursor < end; cursor++) {
1643 memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1644 totalram_pages_inc();
1645 }
1646 }

/*
 * Remaining API functions
 */
1652 phys_addr_t __init_memblock memblock_phys_mem_size(void)
1653 {
1654 return memblock.memory.total_size;
1655 }
1656
1657 phys_addr_t __init_memblock memblock_reserved_size(void)
1658 {
1659 return memblock.reserved.total_size;
1660 }
1661
1662
1663 phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1664 {
1665 return memblock.memory.regions[0].base;
1666 }
1667
1668 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1669 {
1670 int idx = memblock.memory.cnt - 1;
1671
1672 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1673 }
1674
1675 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1676 {
1677 phys_addr_t max_addr = PHYS_ADDR_MAX;
1678 struct memblock_region *r;
1679
1680
1681
1682
1683
1684
1685 for_each_mem_region(r) {
1686 if (limit <= r->size) {
1687 max_addr = r->base + limit;
1688 break;
1689 }
1690 limit -= r->size;
1691 }
1692
1693 return max_addr;
1694 }
1695
1696 void __init memblock_enforce_memory_limit(phys_addr_t limit)
1697 {
1698 phys_addr_t max_addr;
1699
1700 if (!limit)
1701 return;
1702
1703 max_addr = __find_max_addr(limit);
1704
1705
1706 if (max_addr == PHYS_ADDR_MAX)
1707 return;
1708
1709
1710 memblock_remove_range(&memblock.memory, max_addr,
1711 PHYS_ADDR_MAX);
1712 memblock_remove_range(&memblock.reserved, max_addr,
1713 PHYS_ADDR_MAX);
1714 }
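
/*
 * Worked example (illustrative): this helper is used by several
 * architectures to implement the "mem=" boot parameter. With mem=2G on a
 * machine that has 4G of RAM, __find_max_addr() walks the usable regions
 * from the bottom until 2G of region size has been accounted for, and
 * everything above that cutoff is dropped from both the memory and the
 * reserved types.
 */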
1715
1716 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1717 {
1718 int start_rgn, end_rgn;
1719 int i, ret;
1720
1721 if (!size)
1722 return;
1723
1724 if (!memblock_memory->total_size) {
1725 pr_warn("%s: No memory registered yet\n", __func__);
1726 return;
1727 }
1728
1729 ret = memblock_isolate_range(&memblock.memory, base, size,
1730 &start_rgn, &end_rgn);
1731 if (ret)
1732 return;
1733
1734
1735 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1736 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1737 memblock_remove_region(&memblock.memory, i);
1738
1739 for (i = start_rgn - 1; i >= 0; i--)
1740 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1741 memblock_remove_region(&memblock.memory, i);
1742
1743
1744 memblock_remove_range(&memblock.reserved, 0, base);
1745 memblock_remove_range(&memblock.reserved,
1746 base + size, PHYS_ADDR_MAX);
1747 }
1748
1749 void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1750 {
1751 phys_addr_t max_addr;
1752
1753 if (!limit)
1754 return;
1755
1756 max_addr = __find_max_addr(limit);
1757
1758
1759 if (max_addr == PHYS_ADDR_MAX)
1760 return;
1761
1762 memblock_cap_memory_range(0, max_addr);
1763 }
1764
1765 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1766 {
1767 unsigned int left = 0, right = type->cnt;
1768
1769 do {
1770 unsigned int mid = (right + left) / 2;
1771
1772 if (addr < type->regions[mid].base)
1773 right = mid;
1774 else if (addr >= (type->regions[mid].base +
1775 type->regions[mid].size))
1776 left = mid + 1;
1777 else
1778 return mid;
1779 } while (left < right);
1780 return -1;
1781 }
1782
1783 bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1784 {
1785 return memblock_search(&memblock.reserved, addr) != -1;
1786 }
1787
1788 bool __init_memblock memblock_is_memory(phys_addr_t addr)
1789 {
1790 return memblock_search(&memblock.memory, addr) != -1;
1791 }
1792
1793 bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1794 {
1795 int i = memblock_search(&memblock.memory, addr);
1796
1797 if (i == -1)
1798 return false;
1799 return !memblock_is_nomap(&memblock.memory.regions[i]);
1800 }
1801
1802 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1803 unsigned long *start_pfn, unsigned long *end_pfn)
1804 {
1805 struct memblock_type *type = &memblock.memory;
1806 int mid = memblock_search(type, PFN_PHYS(pfn));
1807
1808 if (mid == -1)
1809 return -1;
1810
1811 *start_pfn = PFN_DOWN(type->regions[mid].base);
1812 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1813
1814 return memblock_get_region_node(&type->regions[mid]);
1815 }

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */
1827 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1828 {
1829 int idx = memblock_search(&memblock.memory, base);
1830 phys_addr_t end = base + memblock_cap_size(base, &size);
1831
1832 if (idx == -1)
1833 return false;
1834 return (memblock.memory.regions[idx].base +
1835 memblock.memory.regions[idx].size) >= end;
1836 }

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
1849 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1850 {
1851 return memblock_overlaps_region(&memblock.reserved, base, size);
1852 }
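
/*
 * Example (illustrative): probing for a free window, as crash-kernel or
 * firmware-table placement code typically does. "base" and "size" are
 * placeholders for the candidate range.
 *
 *	bool usable = memblock_is_region_memory(base, size) &&
 *		      !memblock_is_region_reserved(base, size);
 */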
1853
1854 void __init_memblock memblock_trim_memory(phys_addr_t align)
1855 {
1856 phys_addr_t start, end, orig_start, orig_end;
1857 struct memblock_region *r;
1858
1859 for_each_mem_region(r) {
1860 orig_start = r->base;
1861 orig_end = r->base + r->size;
1862 start = round_up(orig_start, align);
1863 end = round_down(orig_end, align);
1864
1865 if (start == orig_start && end == orig_end)
1866 continue;
1867
1868 if (start < end) {
1869 r->base = start;
1870 r->size = end - start;
1871 } else {
1872 memblock_remove_region(&memblock.memory,
1873 r - memblock.memory.regions);
1874 r--;
1875 }
1876 }
1877 }
1878
1879 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1880 {
1881 memblock.current_limit = limit;
1882 }
1883
1884 phys_addr_t __init_memblock memblock_get_current_limit(void)
1885 {
1886 return memblock.current_limit;
1887 }
1888
1889 static void __init_memblock memblock_dump(struct memblock_type *type)
1890 {
1891 phys_addr_t base, end, size;
1892 enum memblock_flags flags;
1893 int idx;
1894 struct memblock_region *rgn;
1895
1896 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
1897
1898 for_each_memblock_type(idx, type, rgn) {
1899 char nid_buf[32] = "";
1900
1901 base = rgn->base;
1902 size = rgn->size;
1903 end = base + size - 1;
1904 flags = rgn->flags;
1905 #ifdef CONFIG_NUMA
1906 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1907 snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1908 memblock_get_region_node(rgn));
1909 #endif
1910 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1911 type->name, idx, &base, &end, &size, nid_buf, flags);
1912 }
1913 }
1914
1915 static void __init_memblock __memblock_dump_all(void)
1916 {
1917 pr_info("MEMBLOCK configuration:\n");
1918 pr_info(" memory size = %pa reserved size = %pa\n",
1919 &memblock.memory.total_size,
1920 &memblock.reserved.total_size);
1921
1922 memblock_dump(&memblock.memory);
1923 memblock_dump(&memblock.reserved);
1924 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1925 memblock_dump(&physmem);
1926 #endif
1927 }
1928
1929 void __init_memblock memblock_dump_all(void)
1930 {
1931 if (memblock_debug)
1932 __memblock_dump_all();
1933 }
1934
1935 void __init memblock_allow_resize(void)
1936 {
1937 memblock_can_resize = 1;
1938 }
1939
1940 static int __init early_memblock(char *p)
1941 {
1942 if (p && strstr(p, "debug"))
1943 memblock_debug = 1;
1944 return 0;
1945 }
1946 early_param("memblock", early_memblock);
1947
1948 static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1949 {
1950 struct page *start_pg, *end_pg;
1951 phys_addr_t pg, pgend;
1952
1953
1954
1955
1956 start_pg = pfn_to_page(start_pfn - 1) + 1;
1957 end_pg = pfn_to_page(end_pfn - 1) + 1;
1958
1959
1960
1961
1962
1963 pg = PAGE_ALIGN(__pa(start_pg));
1964 pgend = __pa(end_pg) & PAGE_MASK;
1965
1966
1967
1968
1969
1970 if (pg < pgend)
1971 memblock_phys_free(pg, pgend - pg);
1972 }

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
1977 static void __init free_unused_memmap(void)
1978 {
1979 unsigned long start, end, prev_end = 0;
1980 int i;
1981
1982 if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1983 IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1984 return;
1985
1986
1987
1988
1989
1990 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1991 #ifdef CONFIG_SPARSEMEM
1992
1993
1994
1995
1996 start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1997 #endif
1998
1999
2000
2001
2002
2003 start = round_down(start, pageblock_nr_pages);
2004
2005
2006
2007
2008
2009 if (prev_end && prev_end < start)
2010 free_memmap(prev_end, start);
2011
2012
2013
2014
2015
2016
2017 prev_end = ALIGN(end, pageblock_nr_pages);
2018 }
2019
2020 #ifdef CONFIG_SPARSEMEM
2021 if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
2022 prev_end = ALIGN(end, pageblock_nr_pages);
2023 free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
2024 }
2025 #endif
2026 }
2027
2028 static void __init __free_pages_memory(unsigned long start, unsigned long end)
2029 {
2030 int order;
2031
2032 while (start < end) {
2033 order = min(MAX_ORDER - 1UL, __ffs(start));
2034
2035 while (start + (1UL << order) > end)
2036 order--;
2037
2038 memblock_free_pages(pfn_to_page(start), start, order);
2039
2040 start += (1UL << order);
2041 }
2042 }
2043
2044 static unsigned long __init __free_memory_core(phys_addr_t start,
2045 phys_addr_t end)
2046 {
2047 unsigned long start_pfn = PFN_UP(start);
2048 unsigned long end_pfn = min_t(unsigned long,
2049 PFN_DOWN(end), max_low_pfn);
2050
2051 if (start_pfn >= end_pfn)
2052 return 0;
2053
2054 __free_pages_memory(start_pfn, end_pfn);
2055
2056 return end_pfn - start_pfn;
2057 }
2058
2059 static void __init memmap_init_reserved_pages(void)
2060 {
2061 struct memblock_region *region;
2062 phys_addr_t start, end;
2063 u64 i;
2064
2065
2066 for_each_reserved_mem_range(i, &start, &end)
2067 reserve_bootmem_region(start, end);
2068
2069
2070 for_each_mem_region(region) {
2071 if (memblock_is_nomap(region)) {
2072 start = region->base;
2073 end = start + region->size;
2074 reserve_bootmem_region(start, end);
2075 }
2076 }
2077 }
2078
2079 static unsigned long __init free_low_memory_core_early(void)
2080 {
2081 unsigned long count = 0;
2082 phys_addr_t start, end;
2083 u64 i;
2084
2085 memblock_clear_hotplug(0, -1);
2086
2087 memmap_init_reserved_pages();
2088
2089
2090
2091
2092
2093
2094 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2095 NULL)
2096 count += __free_memory_core(start, end);
2097
2098 return count;
2099 }
2100
2101 static int reset_managed_pages_done __initdata;
2102
2103 void reset_node_managed_pages(pg_data_t *pgdat)
2104 {
2105 struct zone *z;
2106
2107 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2108 atomic_long_set(&z->managed_pages, 0);
2109 }
2110
2111 void __init reset_all_zones_managed_pages(void)
2112 {
2113 struct pglist_data *pgdat;
2114
2115 if (reset_managed_pages_done)
2116 return;
2117
2118 for_each_online_pgdat(pgdat)
2119 reset_node_managed_pages(pgdat);
2120
2121 reset_managed_pages_done = 1;
2122 }

/**
 * memblock_free_all - release free pages to the buddy allocator
 */
2127 void __init memblock_free_all(void)
2128 {
2129 unsigned long pages;
2130
2131 free_unused_memmap();
2132 reset_all_zones_managed_pages();
2133
2134 pages = free_low_memory_core_early();
2135 totalram_pages_add(pages);
2136 }
2137
2138 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2139
static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
2156 DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2157
2158 static int __init memblock_init_debugfs(void)
2159 {
2160 struct dentry *root = debugfs_create_dir("memblock", NULL);
2161
2162 debugfs_create_file("memory", 0444, root,
2163 &memblock.memory, &memblock_debug_fops);
2164 debugfs_create_file("reserved", 0444, root,
2165 &memblock.reserved, &memblock_debug_fops);
2166 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2167 debugfs_create_file("physmem", 0444, root, &physmem,
2168 &memblock_debug_fops);
2169 #endif
2170
2171 return 0;
2172 }
2173 __initcall(memblock_init_debugfs);
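
/*
 * Note (illustrative): with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK,
 * the files /sys/kernel/debug/memblock/memory and .../reserved expose the
 * region lists in the "index: base..end" format produced by
 * memblock_debug_show() above, one line per region.
 */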
2174
2175 #endif