Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Low level x86 E820 memory map handling functions.
0004  *
0005  * The firmware and bootloader passes us the "E820 table", which is the primary
0006  * physical memory layout description available about x86 systems.
0007  *
0008  * The kernel takes the E820 memory layout and optionally modifies it with
0009  * quirks and other tweaks, and feeds that into the generic Linux memory
0010  * allocation code routines via a platform independent interface (memblock, etc.).
0011  */
0012 #include <linux/crash_dump.h>
0013 #include <linux/memblock.h>
0014 #include <linux/suspend.h>
0015 #include <linux/acpi.h>
0016 #include <linux/firmware-map.h>
0017 #include <linux/sort.h>
0018 #include <linux/memory_hotplug.h>
0019 
0020 #include <asm/e820/api.h>
0021 #include <asm/setup.h>
0022 
0023 /*
0024  * We organize the E820 table into three main data structures:
0025  *
0026  * - 'e820_table_firmware': the original firmware version passed to us by the
0027  *   bootloader - not modified by the kernel. It is composed of two parts:
0028  *   the first 128 E820 memory entries in boot_params.e820_table and the remaining
0029  *   (if any) entries of the SETUP_E820_EXT nodes. We use this to:
0030  *
0031  *       - inform the user about the firmware's notion of memory layout
0032  *         via /sys/firmware/memmap
0033  *
0034  *       - the hibernation code uses it to generate a kernel-independent CRC32
0035  *         checksum of the physical memory layout of a system.
0036  *
0037  * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
0038  *   passed to us by the bootloader - the major difference between
0039  *   e820_table_firmware[] and this one is that, the latter marks the setup_data
0040  *   list created by the EFI boot stub as reserved, so that kexec can reuse the
0041  *   setup_data information in the second kernel. Besides, e820_table_kexec[]
0042  *   might also be modified by the kexec itself to fake a mptable.
0043  *   We use this to:
0044  *
0045  *       - kexec, which is a bootloader in disguise, uses the original E820
0046  *         layout to pass to the kexec-ed kernel. This way the original kernel
0047  *         can have a restricted E820 map while the kexec()-ed kexec-kernel
0048  *         can have access to full memory - etc.
0049  *
0050  * - 'e820_table': this is the main E820 table that is massaged by the
0051  *   low level x86 platform code, or modified by boot parameters, before
0052  *   passed on to higher level MM layers.
0053  *
0054  * Once the E820 map has been converted to the standard Linux memory layout
0055  * information its role stops - modifying it has no effect and does not get
0056  * re-propagated. So its main role is a temporary bootstrap storage of firmware
0057  * specific memory layout data during early bootup.
0058  */
/*
 * Static __initdata backing storage for the three tables. The pointers
 * below are later repointed at right-sized kmalloc() copies by
 * e820__reallocate_tables(), which is why the storage can be __initdata.
 */
static struct e820_table e820_table_init		__initdata;
static struct e820_table e820_table_kexec_init		__initdata;
static struct e820_table e820_table_firmware_init	__initdata;

struct e820_table *e820_table __refdata			= &e820_table_init;
struct e820_table *e820_table_kexec __refdata		= &e820_table_kexec_init;
struct e820_table *e820_table_firmware __refdata	= &e820_table_firmware_init;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;	/* Placeholder; set for real by e820__setup_pci_gap() */
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif
0072 
0073 /*
0074  * This function checks if any part of the range <start,end> is mapped
0075  * with type.
0076  */
0077 static bool _e820__mapped_any(struct e820_table *table,
0078                   u64 start, u64 end, enum e820_type type)
0079 {
0080     int i;
0081 
0082     for (i = 0; i < table->nr_entries; i++) {
0083         struct e820_entry *entry = &table->entries[i];
0084 
0085         if (type && entry->type != type)
0086             continue;
0087         if (entry->addr >= end || entry->addr + entry->size <= start)
0088             continue;
0089         return true;
0090     }
0091     return false;
0092 }
0093 
/*
 * Check whether any part of [start, end) has the given type in the
 * original, unmodified firmware E820 table (type == 0 matches any type).
 */
bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type)
{
	return _e820__mapped_any(e820_table_firmware, start, end, type);
}
EXPORT_SYMBOL_GPL(e820__mapped_raw_any);
0099 
/*
 * Check whether any part of [start, end) has the given type in the
 * main (kernel-modified) E820 table (type == 0 matches any type).
 */
bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
{
	return _e820__mapped_any(e820_table, start, end, type);
}
EXPORT_SYMBOL_GPL(e820__mapped_any);
0105 
0106 /*
0107  * This function checks if the entire <start,end> range is mapped with 'type'.
0108  *
0109  * Note: this function only works correctly once the E820 table is sorted and
0110  * not-overlapping (at least for the range specified), which is the case normally.
0111  */
0112 static struct e820_entry *__e820__mapped_all(u64 start, u64 end,
0113                          enum e820_type type)
0114 {
0115     int i;
0116 
0117     for (i = 0; i < e820_table->nr_entries; i++) {
0118         struct e820_entry *entry = &e820_table->entries[i];
0119 
0120         if (type && entry->type != type)
0121             continue;
0122 
0123         /* Is the region (part) in overlap with the current region? */
0124         if (entry->addr >= end || entry->addr + entry->size <= start)
0125             continue;
0126 
0127         /*
0128          * If the region is at the beginning of <start,end> we move
0129          * 'start' to the end of the region since it's ok until there
0130          */
0131         if (entry->addr <= start)
0132             start = entry->addr + entry->size;
0133 
0134         /*
0135          * If 'start' is now at or beyond 'end', we're done, full
0136          * coverage of the desired range exists:
0137          */
0138         if (start >= end)
0139             return entry;
0140     }
0141 
0142     return NULL;
0143 }
0144 
/*
 * This function checks if the entire range <start,end> is mapped with type
 * (type == 0 matches entries of any type).
 */
bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type)
{
	return __e820__mapped_all(start, end, type);
}
0152 
0153 /*
0154  * This function returns the type associated with the range <start,end>.
0155  */
0156 int e820__get_entry_type(u64 start, u64 end)
0157 {
0158     struct e820_entry *entry = __e820__mapped_all(start, end, 0);
0159 
0160     return entry ? entry->type : -EINVAL;
0161 }
0162 
0163 /*
0164  * Add a memory region to the kernel E820 map.
0165  */
0166 static void __init __e820__range_add(struct e820_table *table, u64 start, u64 size, enum e820_type type)
0167 {
0168     int x = table->nr_entries;
0169 
0170     if (x >= ARRAY_SIZE(table->entries)) {
0171         pr_err("too many entries; ignoring [mem %#010llx-%#010llx]\n",
0172                start, start + size - 1);
0173         return;
0174     }
0175 
0176     table->entries[x].addr = start;
0177     table->entries[x].size = size;
0178     table->entries[x].type = type;
0179     table->nr_entries++;
0180 }
0181 
/* Add [start, start+size) with 'type' to the main kernel E820 table: */
void __init e820__range_add(u64 start, u64 size, enum e820_type type)
{
	__e820__range_add(e820_table, start, size, type);
}
0186 
0187 static void __init e820_print_type(enum e820_type type)
0188 {
0189     switch (type) {
0190     case E820_TYPE_RAM:     /* Fall through: */
0191     case E820_TYPE_RESERVED_KERN:   pr_cont("usable");          break;
0192     case E820_TYPE_RESERVED:    pr_cont("reserved");            break;
0193     case E820_TYPE_SOFT_RESERVED:   pr_cont("soft reserved");       break;
0194     case E820_TYPE_ACPI:        pr_cont("ACPI data");           break;
0195     case E820_TYPE_NVS:     pr_cont("ACPI NVS");            break;
0196     case E820_TYPE_UNUSABLE:    pr_cont("unusable");            break;
0197     case E820_TYPE_PMEM:        /* Fall through: */
0198     case E820_TYPE_PRAM:        pr_cont("persistent (type %u)", type);  break;
0199     default:            pr_cont("type %u", type);       break;
0200     }
0201 }
0202 
0203 void __init e820__print_table(char *who)
0204 {
0205     int i;
0206 
0207     for (i = 0; i < e820_table->nr_entries; i++) {
0208         pr_info("%s: [mem %#018Lx-%#018Lx] ",
0209             who,
0210             e820_table->entries[i].addr,
0211             e820_table->entries[i].addr + e820_table->entries[i].size - 1);
0212 
0213         e820_print_type(e820_table->entries[i].type);
0214         pr_cont("\n");
0215     }
0216 }
0217 
0218 /*
0219  * Sanitize an E820 map.
0220  *
0221  * Some E820 layouts include overlapping entries. The following
0222  * replaces the original E820 map with a new one, removing overlaps,
0223  * and resolving conflicting memory types in favor of highest
0224  * numbered type.
0225  *
0226  * The input parameter 'entries' points to an array of 'struct
0227  * e820_entry' which on entry has elements in the range [0, *nr_entries)
0228  * valid, and which has space for up to max_nr_entries entries.
0229  * On return, the resulting sanitized E820 map entries will be in
0230  * overwritten in the same location, starting at 'entries'.
0231  *
0232  * The integer pointed to by nr_entries must be valid on entry (the
0233  * current number of valid entries located at 'entries'). If the
0234  * sanitizing succeeds the *nr_entries will be updated with the new
0235  * number of valid entries (something no more than max_nr_entries).
0236  *
0237  * The return value from e820__update_table() is zero if it
0238  * successfully 'sanitized' the map entries passed in, and is -1
0239  * if it did nothing, which can happen if either of (1) it was
0240  * only passed one map entry, or (2) any of the input map entries
0241  * were invalid (start + size < start, meaning that the size was
0242  * so big the described memory range wrapped around through zero.)
0243  *
0244  *  Visually we're performing the following
0245  *  (1,2,3,4 = memory types)...
0246  *
0247  *  Sample memory map (w/overlaps):
0248  *     ____22__________________
0249  *     ______________________4_
0250  *     ____1111________________
0251  *     _44_____________________
0252  *     11111111________________
0253  *     ____________________33__
0254  *     ___________44___________
0255  *     __________33333_________
0256  *     ______________22________
0257  *     ___________________2222_
0258  *     _________111111111______
0259  *     _____________________11_
0260  *     _________________4______
0261  *
0262  *  Sanitized equivalent (no overlap):
0263  *     1_______________________
0264  *     _44_____________________
0265  *     ___1____________________
0266  *     ____22__________________
0267  *     ______11________________
0268  *     _________1______________
0269  *     __________3_____________
0270  *     ___________44___________
0271  *     _____________33_________
0272  *     _______________2________
0273  *     ________________1_______
0274  *     _________________4______
0275  *     ___________________2____
0276  *     ____________________33__
0277  *     ______________________4_
0278  */
struct change_member {
	/* Pointer to the original entry: */
	struct e820_entry	*entry;
	/* Address for this change point: */
	unsigned long long	addr;
};

/*
 * Scratch buffers for e820__update_table(). All __initdata: only used
 * from __init code during early boot.
 */
static struct change_member change_point_list[2*E820_MAX_ENTRIES]	__initdata;
static struct change_member *change_point[2*E820_MAX_ENTRIES]	__initdata;
static struct e820_entry	*overlap_list[E820_MAX_ENTRIES]		__initdata;
static struct e820_entry	new_entries[E820_MAX_ENTRIES]		__initdata;
0290 
0291 static int __init cpcompare(const void *a, const void *b)
0292 {
0293     struct change_member * const *app = a, * const *bpp = b;
0294     const struct change_member *ap = *app, *bp = *bpp;
0295 
0296     /*
0297      * Inputs are pointers to two elements of change_point[].  If their
0298      * addresses are not equal, their difference dominates.  If the addresses
0299      * are equal, then consider one that represents the end of its region
0300      * to be greater than one that does not.
0301      */
0302     if (ap->addr != bp->addr)
0303         return ap->addr > bp->addr ? 1 : -1;
0304 
0305     return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr);
0306 }
0307 
0308 static bool e820_nomerge(enum e820_type type)
0309 {
0310     /*
0311      * These types may indicate distinct platform ranges aligned to
0312      * numa node, protection domain, performance domain, or other
0313      * boundaries. Do not merge them.
0314      */
0315     if (type == E820_TYPE_PRAM)
0316         return true;
0317     if (type == E820_TYPE_SOFT_RESERVED)
0318         return true;
0319     return false;
0320 }
0321 
/*
 * Sanitize 'table' in place: sort it, remove overlaps, and resolve
 * conflicting types in favor of the higher-numbered type (see the big
 * comment above for the full algorithm description and diagrams).
 *
 * Returns 0 on success, or -1 when nothing was done: fewer than two
 * entries, or an entry whose range wraps around the address space.
 */
int __init e820__update_table(struct e820_table *table)
{
	struct e820_entry *entries = table->entries;
	u32 max_nr_entries = ARRAY_SIZE(table->entries);
	enum e820_type current_type, last_type;
	unsigned long long last_addr;
	u32 new_nr_entries, overlap_entries;
	u32 i, chg_idx, chg_nr;

	/* If there's only one memory region, don't bother: */
	if (table->nr_entries < 2)
		return -1;

	BUG_ON(table->nr_entries > max_nr_entries);

	/* Bail out if we find any unreasonable addresses in the map: */
	for (i = 0; i < table->nr_entries; i++) {
		if (entries[i].addr + entries[i].size < entries[i].addr)
			return -1;
	}

	/* Create pointers for initial change-point information (for sorting): */
	for (i = 0; i < 2 * table->nr_entries; i++)
		change_point[i] = &change_point_list[i];

	/*
	 * Record all known change-points (starting and ending addresses),
	 * omitting empty memory regions:
	 */
	chg_idx = 0;
	for (i = 0; i < table->nr_entries; i++) {
		if (entries[i].size != 0) {
			change_point[chg_idx]->addr	= entries[i].addr;
			change_point[chg_idx++]->entry	= &entries[i];
			change_point[chg_idx]->addr	= entries[i].addr + entries[i].size;
			change_point[chg_idx++]->entry	= &entries[i];
		}
	}
	chg_nr = chg_idx;

	/* Sort change-point list by memory addresses (low -> high): */
	sort(change_point, chg_nr, sizeof(*change_point), cpcompare, NULL);

	/* Create a new memory map, removing overlaps: */
	overlap_entries = 0;	 /* Number of entries in the overlap table */
	new_nr_entries = 0;	 /* Index for creating new map entries */
	last_type = 0;		 /* Start with undefined memory type */
	last_addr = 0;		 /* Start with 0 as last starting address */

	/* Loop through change-points, determining effect on the new map: */
	for (chg_idx = 0; chg_idx < chg_nr; chg_idx++) {
		/* Keep track of all overlapping entries */
		if (change_point[chg_idx]->addr == change_point[chg_idx]->entry->addr) {
			/* Add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chg_idx]->entry;
		} else {
			/* Remove entry from list (order independent, so swap with last): */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chg_idx]->entry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * If there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++) {
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		}

		/* Continue building up new map based on this information: */
		if (current_type != last_type || e820_nomerge(current_type)) {
			/* Close the previous region at this change point: */
			if (last_type != 0)	 {
				new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr;
				/* Move forward only if the new size was non-zero: */
				if (new_entries[new_nr_entries].size != 0)
					/* No more space left for new entries? */
					if (++new_nr_entries >= max_nr_entries)
						break;
			}
			/* Open a new region here, unless the type became 'hole': */
			if (current_type != 0)	{
				new_entries[new_nr_entries].addr = change_point[chg_idx]->addr;
				new_entries[new_nr_entries].type = current_type;
				last_addr = change_point[chg_idx]->addr;
			}
			last_type = current_type;
		}
	}

	/* Copy the new entries into the original location: */
	memcpy(entries, new_entries, new_nr_entries*sizeof(*entries));
	table->nr_entries = new_nr_entries;

	return 0;
}
0421 
0422 static int __init __append_e820_table(struct boot_e820_entry *entries, u32 nr_entries)
0423 {
0424     struct boot_e820_entry *entry = entries;
0425 
0426     while (nr_entries) {
0427         u64 start = entry->addr;
0428         u64 size = entry->size;
0429         u64 end = start + size - 1;
0430         u32 type = entry->type;
0431 
0432         /* Ignore the entry on 64-bit overflow: */
0433         if (start > end && likely(size))
0434             return -1;
0435 
0436         e820__range_add(start, size, type);
0437 
0438         entry++;
0439         nr_entries--;
0440     }
0441     return 0;
0442 }
0443 
0444 /*
0445  * Copy the BIOS E820 map into a safe place.
0446  *
0447  * Sanity-check it while we're at it..
0448  *
0449  * If we're lucky and live on a modern system, the setup code
0450  * will have given us a memory map that we can use to properly
0451  * set up memory.  If we aren't, we'll fake a memory map.
0452  */
static int __init append_e820_table(struct boot_e820_entry *entries, u32 nr_entries)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_entries < 2)
		return -1;

	return __append_e820_table(entries, nr_entries);
}
0461 
/*
 * Change the type of all 'old_type' (sub)ranges of 'table' intersecting
 * [start, start+size) to 'new_type', splitting entries that straddle the
 * range boundaries. Returns the number of bytes actually retyped.
 */
static u64 __init
__e820__range_update(struct e820_table *table, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
{
	u64 end;
	unsigned int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	/* Clamp the size so that 'start + size' cannot wrap around: */
	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ", start, end - 1);
	e820_print_type(old_type);
	pr_cont(" ==> ");
	e820_print_type(new_type);
	pr_cont("\n");

	for (i = 0; i < table->nr_entries; i++) {
		struct e820_entry *entry = &table->entries[i];
		u64 final_start, final_end;
		u64 entry_end;

		if (entry->type != old_type)
			continue;

		entry_end = entry->addr + entry->size;

		/* Completely covered by new range? Retype the entry whole: */
		if (entry->addr >= start && entry_end <= end) {
			entry->type = new_type;
			real_updated_size += entry->size;
			continue;
		}

		/*
		 * New range is completely inside this entry? Split into
		 * head (old type), middle (new type) and tail (old type):
		 */
		if (entry->addr < start && entry_end > end) {
			__e820__range_add(table, start, size, new_type);
			__e820__range_add(table, end, entry_end - end, entry->type);
			entry->size = start - entry->addr;
			real_updated_size += size;
			continue;
		}

		/* Partially covered: */
		final_start = max(start, entry->addr);
		final_end = min(end, entry_end);
		if (final_start >= final_end)
			continue;

		/* Add the retyped overlap as a fresh entry: */
		__e820__range_add(table, final_start, final_end - final_start, new_type);

		real_updated_size += final_end - final_start;

		/*
		 * Left range could be head or tail, so need to update
		 * its size first:
		 */
		entry->size -= final_end - final_start;
		if (entry->addr < final_start)
			continue;

		entry->addr = final_end;
	}
	return real_updated_size;
}
0529 
/* Retype [start, start+size) entries of 'old_type' in the main E820 table: */
u64 __init e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
{
	return __e820__range_update(e820_table, start, size, old_type, new_type);
}
0534 
/* Retype [start, start+size) entries of 'old_type' in the kexec E820 table: */
static u64 __init e820__range_update_kexec(u64 start, u64 size, enum e820_type old_type, enum e820_type  new_type)
{
	return __e820__range_update(e820_table_kexec, start, size, old_type, new_type);
}
0539 
/*
 * Remove a range of memory from the E820 table. When 'check_type' is set
 * only entries of 'old_type' are affected; otherwise 'old_type' is ignored.
 * Returns the number of bytes actually removed.
 */
u64 __init e820__range_remove(u64 start, u64 size, enum e820_type old_type, bool check_type)
{
	int i;
	u64 end;
	u64 real_removed_size = 0;

	/* Clamp the size so that 'start + size' cannot wrap around: */
	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ", start, end - 1);
	if (check_type)
		e820_print_type(old_type);
	pr_cont("\n");

	for (i = 0; i < e820_table->nr_entries; i++) {
		struct e820_entry *entry = &e820_table->entries[i];
		u64 final_start, final_end;
		u64 entry_end;

		if (check_type && entry->type != old_type)
			continue;

		entry_end = entry->addr + entry->size;

		/* Completely covered? Zap the whole entry: */
		if (entry->addr >= start && entry_end <= end) {
			real_removed_size += entry->size;
			memset(entry, 0, sizeof(*entry));
			continue;
		}

		/*
		 * Is the removed range completely inside this entry?
		 * Keep the head here, add the tail as a new entry:
		 */
		if (entry->addr < start && entry_end > end) {
			e820__range_add(end, entry_end - end, entry->type);
			entry->size = start - entry->addr;
			real_removed_size += size;
			continue;
		}

		/* Partially covered: */
		final_start = max(start, entry->addr);
		final_end = min(end, entry_end);
		if (final_start >= final_end)
			continue;

		real_removed_size += final_end - final_start;

		/*
		 * Left range could be head or tail, so need to update
		 * the size first:
		 */
		entry->size -= final_end - final_start;
		if (entry->addr < final_start)
			continue;

		entry->addr = final_end;
	}
	return real_removed_size;
}
0601 
0602 void __init e820__update_table_print(void)
0603 {
0604     if (e820__update_table(e820_table))
0605         return;
0606 
0607     pr_info("modified physical RAM map:\n");
0608     e820__print_table("modified");
0609 }
0610 
/* Re-sort and de-overlap the kexec E820 table after a modification: */
static void __init e820__update_table_kexec(void)
{
	e820__update_table(e820_table_kexec);
}
0615 
0616 #define MAX_GAP_END 0x100000000ull
0617 
0618 /*
0619  * Search for a gap in the E820 memory space from 0 to MAX_GAP_END (4GB).
0620  */
0621 static int __init e820_search_gap(unsigned long *gapstart, unsigned long *gapsize)
0622 {
0623     unsigned long long last = MAX_GAP_END;
0624     int i = e820_table->nr_entries;
0625     int found = 0;
0626 
0627     while (--i >= 0) {
0628         unsigned long long start = e820_table->entries[i].addr;
0629         unsigned long long end = start + e820_table->entries[i].size;
0630 
0631         /*
0632          * Since "last" is at most 4GB, we know we'll
0633          * fit in 32 bits if this condition is true:
0634          */
0635         if (last > end) {
0636             unsigned long gap = last - end;
0637 
0638             if (gap >= *gapsize) {
0639                 *gapsize = gap;
0640                 *gapstart = end;
0641                 found = 1;
0642             }
0643         }
0644         if (start < last)
0645             last = start;
0646     }
0647     return found;
0648 }
0649 
/*
 * Search for the biggest gap in the low 32 bits of the E820
 * memory space. We pass this space to the PCI subsystem, so
 * that it can assign MMIO resources for hotplug or
 * unconfigured devices in.
 *
 * Hopefully the BIOS let enough space left.
 */
__init void e820__setup_pci_gap(void)
{
	unsigned long gapstart, gapsize;
	int found;

	/* Only accept gaps of at least 4MB: */
	gapsize = 0x400000;
	found  = e820_search_gap(&gapstart, &gapsize);

	if (!found) {
#ifdef CONFIG_X86_64
		/* Fall back to just above the end of RAM plus 1MB: */
		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
		pr_err("Cannot find an available gap in the 32-bit address range\n");
		pr_err("PCI devices with unassigned 32-bit BARs may not work!\n");
#else
		gapstart = 0x10000000;
#endif
	}

	/*
	 * e820__reserve_resources_late() protects stolen RAM already:
	 */
	pci_mem_start = gapstart;

	pr_info("[mem %#010lx-%#010lx] available for PCI devices\n",
		gapstart, gapstart + gapsize - 1);
}
0684 
0685 /*
0686  * Called late during init, in free_initmem().
0687  *
0688  * Initial e820_table and e820_table_kexec are largish __initdata arrays.
0689  *
0690  * Copy them to a (usually much smaller) dynamically allocated area that is
0691  * sized precisely after the number of e820 entries.
0692  *
0693  * This is done after we've performed all the fixes and tweaks to the tables.
0694  * All functions which modify them are __init functions, which won't exist
0695  * after free_initmem().
0696  */
0697 __init void e820__reallocate_tables(void)
0698 {
0699     struct e820_table *n;
0700     int size;
0701 
0702     size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries;
0703     n = kmemdup(e820_table, size, GFP_KERNEL);
0704     BUG_ON(!n);
0705     e820_table = n;
0706 
0707     size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
0708     n = kmemdup(e820_table_kexec, size, GFP_KERNEL);
0709     BUG_ON(!n);
0710     e820_table_kexec = n;
0711 
0712     size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
0713     n = kmemdup(e820_table_firmware, size, GFP_KERNEL);
0714     BUG_ON(!n);
0715     e820_table_firmware = n;
0716 }
0717 
/*
 * Because of the small fixed size of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via boot_params.e820_table,
 * the remaining (if any) entries are passed via the SETUP_E820_EXT node of
 * struct setup_data, which is parsed here.
 */
void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
{
	int entries;
	struct boot_e820_entry *extmap;
	struct setup_data *sdata;

	sdata = early_memremap(phys_addr, data_len);
	/* The setup_data payload is an array of boot_e820_entry records: */
	entries = sdata->len / sizeof(*extmap);
	extmap = (struct boot_e820_entry *)(sdata->data);

	__append_e820_table(extmap, entries);
	e820__update_table(e820_table);

	/* The extended map becomes part of the kexec and firmware views too: */
	memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
	memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));

	early_memunmap(sdata, data_len);
	pr_info("extended physical RAM map:\n");
	e820__print_table("extended");
}
0744 
/*
 * Find the ranges of physical addresses that do not correspond to
 * E820 RAM areas and register the corresponding pages as 'nosave' for
 * hibernation (32-bit) or software suspend and suspend to RAM (64-bit).
 *
 * This function requires the E820 map to be sorted and without any
 * overlapping entries.
 */
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
	int i;
	unsigned long pfn = 0;	/* End PFN (exclusive) of the previous entry */

	for (i = 0; i < e820_table->nr_entries; i++) {
		struct e820_entry *entry = &e820_table->entries[i];

		/* A hole between the previous entry and this one is not saved: */
		if (pfn < PFN_UP(entry->addr))
			register_nosave_region(pfn, PFN_UP(entry->addr));

		pfn = PFN_DOWN(entry->addr + entry->size);

		/* Non-RAM entries themselves are not saved either: */
		if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
			register_nosave_region(PFN_UP(entry->addr), pfn);

		if (pfn >= limit_pfn)
			break;
	}
}
0773 
0774 #ifdef CONFIG_ACPI
0775 /*
0776  * Register ACPI NVS memory regions, so that we can save/restore them during
0777  * hibernation and the subsequent resume:
0778  */
0779 static int __init e820__register_nvs_regions(void)
0780 {
0781     int i;
0782 
0783     for (i = 0; i < e820_table->nr_entries; i++) {
0784         struct e820_entry *entry = &e820_table->entries[i];
0785 
0786         if (entry->type == E820_TYPE_NVS)
0787             acpi_nvs_register(entry->addr, entry->size);
0788     }
0789 
0790     return 0;
0791 }
0792 core_initcall(e820__register_nvs_regions);
0793 #endif
0794 
/*
 * Allocate the requested number of bytes with the requested alignment
 * and return (the physical address) to the caller. Also register this
 * range in the 'kexec' E820 table as a reserved range.
 *
 * This allows kexec to fake a new mptable, as if it came from the real
 * system.
 *
 * Returns 0 if the allocation failed.
 */
u64 __init e820__memblock_alloc_reserved(u64 size, u64 align)
{
	u64 addr;

	addr = memblock_phys_alloc(size, align);
	if (addr) {
		/* Mark the range reserved in the kexec view and re-sanitize it: */
		e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
		pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n");
		e820__update_table_kexec();
	}

	return addr;
}
0816 
0817 #ifdef CONFIG_X86_32
0818 # ifdef CONFIG_X86_PAE
0819 #  define MAX_ARCH_PFN      (1ULL<<(36-PAGE_SHIFT))
0820 # else
0821 #  define MAX_ARCH_PFN      (1ULL<<(32-PAGE_SHIFT))
0822 # endif
0823 #else /* CONFIG_X86_32 */
0824 # define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
0825 #endif
0826 
0827 /*
0828  * Find the highest page frame number we have available
0829  */
0830 static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type type)
0831 {
0832     int i;
0833     unsigned long last_pfn = 0;
0834     unsigned long max_arch_pfn = MAX_ARCH_PFN;
0835 
0836     for (i = 0; i < e820_table->nr_entries; i++) {
0837         struct e820_entry *entry = &e820_table->entries[i];
0838         unsigned long start_pfn;
0839         unsigned long end_pfn;
0840 
0841         if (entry->type != type)
0842             continue;
0843 
0844         start_pfn = entry->addr >> PAGE_SHIFT;
0845         end_pfn = (entry->addr + entry->size) >> PAGE_SHIFT;
0846 
0847         if (start_pfn >= limit_pfn)
0848             continue;
0849         if (end_pfn > limit_pfn) {
0850             last_pfn = limit_pfn;
0851             break;
0852         }
0853         if (end_pfn > last_pfn)
0854             last_pfn = end_pfn;
0855     }
0856 
0857     if (last_pfn > max_arch_pfn)
0858         last_pfn = max_arch_pfn;
0859 
0860     pr_info("last_pfn = %#lx max_arch_pfn = %#lx\n",
0861         last_pfn, max_arch_pfn);
0862     return last_pfn;
0863 }
0864 
/* Highest usable RAM pfn, limited only by the architectural maximum: */
unsigned long __init e820__end_of_ram_pfn(void)
{
    return e820_end_pfn(MAX_ARCH_PFN, E820_TYPE_RAM);
}
0869 
/* Highest usable RAM pfn below 4GB (the 32-bit addressable boundary): */
unsigned long __init e820__end_of_low_ram_pfn(void)
{
    return e820_end_pfn(1UL << (32 - PAGE_SHIFT), E820_TYPE_RAM);
}
0874 
/*
 * Panic with the message also printed via the early console, so it is
 * visible even before the regular console is registered:
 */
static void __init early_panic(char *msg)
{
    early_printk(msg);
    panic(msg);
}
0880 
/* Set when the user overrode the memory map via the 'mem=' or 'memmap=' boot options: */
static int userdef __initdata;
0882 
0883 /* The "mem=nopentium" boot option disables 4MB page tables on 32-bit kernels: */
0884 static int __init parse_memopt(char *p)
0885 {
0886     u64 mem_size;
0887 
0888     if (!p)
0889         return -EINVAL;
0890 
0891     if (!strcmp(p, "nopentium")) {
0892 #ifdef CONFIG_X86_32
0893         setup_clear_cpu_cap(X86_FEATURE_PSE);
0894         return 0;
0895 #else
0896         pr_warn("mem=nopentium ignored! (only supported on x86_32)\n");
0897         return -EINVAL;
0898 #endif
0899     }
0900 
0901     userdef = 1;
0902     mem_size = memparse(p, &p);
0903 
0904     /* Don't remove all memory when getting "mem={invalid}" parameter: */
0905     if (mem_size == 0)
0906         return -EINVAL;
0907 
0908     e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1);
0909 
0910 #ifdef CONFIG_MEMORY_HOTPLUG
0911     max_mem_size = mem_size;
0912 #endif
0913 
0914     return 0;
0915 }
0916 early_param("mem", parse_memopt);
0917 
/*
 * Parse a single comma-separated element of the "memmap=" boot option:
 *
 *   memmap=exactmap  - drop the firmware map, use only user-supplied entries
 *   memmap=nn@ss     - force the region to usable RAM
 *   memmap=nn#ss     - mark the region as ACPI data
 *   memmap=nn$ss     - mark the region as reserved
 *   memmap=nn!ss     - mark the region as persistent memory (legacy)
 *   memmap=nn%ss-o+n - change/add/remove a region by raw numeric E820 type
 *   memmap=nn        - limit usable RAM to nn, like "mem=nn"
 */
static int __init parse_memmap_one(char *p)
{
    char *oldp;
    u64 start_at, mem_size;

    if (!p)
        return -EINVAL;

    if (!strncmp(p, "exactmap", 8)) {
        /* Throw away all firmware-provided entries: */
        e820_table->nr_entries = 0;
        userdef = 1;
        return 0;
    }

    oldp = p;
    mem_size = memparse(p, &p);
    /* memparse() consumed nothing - the size was not a valid number: */
    if (p == oldp)
        return -EINVAL;

    userdef = 1;
    if (*p == '@') {
        start_at = memparse(p+1, &p);
        e820__range_add(start_at, mem_size, E820_TYPE_RAM);
    } else if (*p == '#') {
        start_at = memparse(p+1, &p);
        e820__range_add(start_at, mem_size, E820_TYPE_ACPI);
    } else if (*p == '$') {
        start_at = memparse(p+1, &p);
        e820__range_add(start_at, mem_size, E820_TYPE_RESERVED);
    } else if (*p == '!') {
        start_at = memparse(p+1, &p);
        e820__range_add(start_at, mem_size, E820_TYPE_PRAM);
    } else if (*p == '%') {
        /* Raw numeric types: '-' gives the old type, '+' the new type: */
        enum e820_type from = 0, to = 0;

        start_at = memparse(p + 1, &p);
        if (*p == '-')
            from = simple_strtoull(p + 1, &p, 0);
        if (*p == '+')
            to = simple_strtoull(p + 1, &p, 0);
        if (*p != '\0')
            return -EINVAL;
        if (from && to)
            e820__range_update(start_at, mem_size, from, to);
        else if (to)
            e820__range_add(start_at, mem_size, to);
        else if (from)
            e820__range_remove(start_at, mem_size, from, 1);
        else
            e820__range_remove(start_at, mem_size, 0, 0);
    } else {
        /* No suffix: treat the number as a RAM limit, like "mem=nn": */
        e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1);
    }

    return *p == '\0' ? 0 : -EINVAL;
}
0974 
0975 static int __init parse_memmap_opt(char *str)
0976 {
0977     while (str) {
0978         char *k = strchr(str, ',');
0979 
0980         if (k)
0981             *k++ = 0;
0982 
0983         parse_memmap_one(str);
0984         str = k;
0985     }
0986 
0987     return 0;
0988 }
0989 early_param("memmap", parse_memmap_opt);
0990 
/*
 * Reserve all entries from the bootloader's extensible data nodes list,
 * because if present we are going to use it later on to fetch e820
 * entries from it:
 */
void __init e820__reserve_setup_data(void)
{
    struct setup_indirect *indirect;
    struct setup_data *data;
    u64 pa_data, pa_next;
    u32 len;

    pa_data = boot_params.hdr.setup_data;
    if (!pa_data)
        return;

    while (pa_data) {
        /* Map only the header first - we don't know the payload size yet: */
        data = early_memremap(pa_data, sizeof(*data));
        if (!data) {
            pr_warn("e820: failed to memremap setup_data entry\n");
            return;
        }

        len = sizeof(*data);
        /* Save the chain link now - 'data' may be remapped below: */
        pa_next = data->next;

        e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);

        /*
         * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
         * to be reserved.
         */
        if (data->type != SETUP_EFI && data->type != SETUP_IMA)
            e820__range_update_kexec(pa_data,
                         sizeof(*data) + data->len,
                         E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);

        if (data->type == SETUP_INDIRECT) {
            /* Remap header + payload to read the indirect descriptor: */
            len += data->len;
            early_memunmap(data, sizeof(*data));
            data = early_memremap(pa_data, len);
            if (!data) {
                pr_warn("e820: failed to memremap indirect setup_data\n");
                return;
            }

            indirect = (struct setup_indirect *)data->data;

            /* Also reserve the area the indirect entry points to: */
            if (indirect->type != SETUP_INDIRECT) {
                e820__range_update(indirect->addr, indirect->len,
                           E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
                e820__range_update_kexec(indirect->addr, indirect->len,
                             E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
            }
        }

        pa_data = pa_next;
        early_memunmap(data, len);
    }

    e820__update_table(e820_table);
    e820__update_table(e820_table_kexec);

    pr_info("extended physical RAM map:\n");
    e820__print_table("reserve setup_data");
}
1057 
1058 /*
1059  * Called after parse_early_param(), after early parameters (such as mem=)
1060  * have been processed, in which case we already have an E820 table filled in
1061  * via the parameter callback function(s), but it's not sorted and printed yet:
1062  */
1063 void __init e820__finish_early_params(void)
1064 {
1065     if (userdef) {
1066         if (e820__update_table(e820_table) < 0)
1067             early_panic("Invalid user supplied memory map");
1068 
1069         pr_info("user-defined physical RAM map:\n");
1070         e820__print_table("user");
1071     }
1072 }
1073 
1074 static const char *__init e820_type_to_string(struct e820_entry *entry)
1075 {
1076     switch (entry->type) {
1077     case E820_TYPE_RESERVED_KERN:   /* Fall-through: */
1078     case E820_TYPE_RAM:     return "System RAM";
1079     case E820_TYPE_ACPI:        return "ACPI Tables";
1080     case E820_TYPE_NVS:     return "ACPI Non-volatile Storage";
1081     case E820_TYPE_UNUSABLE:    return "Unusable memory";
1082     case E820_TYPE_PRAM:        return "Persistent Memory (legacy)";
1083     case E820_TYPE_PMEM:        return "Persistent Memory";
1084     case E820_TYPE_RESERVED:    return "Reserved";
1085     case E820_TYPE_SOFT_RESERVED:   return "Soft Reserved";
1086     default:            return "Unknown E820 type";
1087     }
1088 }
1089 
1090 static unsigned long __init e820_type_to_iomem_type(struct e820_entry *entry)
1091 {
1092     switch (entry->type) {
1093     case E820_TYPE_RESERVED_KERN:   /* Fall-through: */
1094     case E820_TYPE_RAM:     return IORESOURCE_SYSTEM_RAM;
1095     case E820_TYPE_ACPI:        /* Fall-through: */
1096     case E820_TYPE_NVS:     /* Fall-through: */
1097     case E820_TYPE_UNUSABLE:    /* Fall-through: */
1098     case E820_TYPE_PRAM:        /* Fall-through: */
1099     case E820_TYPE_PMEM:        /* Fall-through: */
1100     case E820_TYPE_RESERVED:    /* Fall-through: */
1101     case E820_TYPE_SOFT_RESERVED:   /* Fall-through: */
1102     default:            return IORESOURCE_MEM;
1103     }
1104 }
1105 
1106 static unsigned long __init e820_type_to_iores_desc(struct e820_entry *entry)
1107 {
1108     switch (entry->type) {
1109     case E820_TYPE_ACPI:        return IORES_DESC_ACPI_TABLES;
1110     case E820_TYPE_NVS:     return IORES_DESC_ACPI_NV_STORAGE;
1111     case E820_TYPE_PMEM:        return IORES_DESC_PERSISTENT_MEMORY;
1112     case E820_TYPE_PRAM:        return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
1113     case E820_TYPE_RESERVED:    return IORES_DESC_RESERVED;
1114     case E820_TYPE_SOFT_RESERVED:   return IORES_DESC_SOFT_RESERVED;
1115     case E820_TYPE_RESERVED_KERN:   /* Fall-through: */
1116     case E820_TYPE_RAM:     /* Fall-through: */
1117     case E820_TYPE_UNUSABLE:    /* Fall-through: */
1118     default:            return IORES_DESC_NONE;
1119     }
1120 }
1121 
1122 static bool __init do_mark_busy(enum e820_type type, struct resource *res)
1123 {
1124     /* this is the legacy bios/dos rom-shadow + mmio region */
1125     if (res->start < (1ULL<<20))
1126         return true;
1127 
1128     /*
1129      * Treat persistent memory and other special memory ranges like
1130      * device memory, i.e. reserve it for exclusive use of a driver
1131      */
1132     switch (type) {
1133     case E820_TYPE_RESERVED:
1134     case E820_TYPE_SOFT_RESERVED:
1135     case E820_TYPE_PRAM:
1136     case E820_TYPE_PMEM:
1137         return false;
1138     case E820_TYPE_RESERVED_KERN:
1139     case E820_TYPE_RAM:
1140     case E820_TYPE_ACPI:
1141     case E820_TYPE_NVS:
1142     case E820_TYPE_UNUSABLE:
1143     default:
1144         return true;
1145     }
1146 }
1147 
/*
 * Mark E820 reserved areas as busy for the resource manager:
 */

/* One 'struct resource' per E820 entry, allocated by e820__reserve_resources(): */
static struct resource __initdata *e820_res;
1153 
/*
 * Allocate one 'struct resource' per E820 entry, register the busy ones
 * in the iomem resource tree, and expose the original firmware map via
 * /sys/firmware/memmap:
 */
void __init e820__reserve_resources(void)
{
    int i;
    struct resource *res;
    u64 end;

    res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
                 SMP_CACHE_BYTES);
    if (!res)
        panic("%s: Failed to allocate %zu bytes\n", __func__,
              sizeof(*res) * e820_table->nr_entries);
    e820_res = res;

    for (i = 0; i < e820_table->nr_entries; i++) {
        struct e820_entry *entry = e820_table->entries + i;

        end = entry->addr + entry->size - 1;
        /*
         * Skip entries whose end doesn't fit in resource_size_t, but
         * still advance 'res' so e820_res[] stays index-parallel to
         * the E820 table (e820__reserve_resources_late() relies on it):
         */
        if (end != (resource_size_t)end) {
            res++;
            continue;
        }
        res->start = entry->addr;
        res->end   = end;
        res->name  = e820_type_to_string(entry);
        res->flags = e820_type_to_iomem_type(entry);
        res->desc  = e820_type_to_iores_desc(entry);

        /*
         * Don't register the region that could be conflicted with
         * PCI device BAR resources and insert them later in
         * pcibios_resource_survey():
         */
        if (do_mark_busy(entry->type, res)) {
            res->flags |= IORESOURCE_BUSY;
            insert_resource(&iomem_resource, res);
        }
        res++;
    }

    /* Expose the bootloader-provided memory layout to the sysfs. */
    for (i = 0; i < e820_table_firmware->nr_entries; i++) {
        struct e820_entry *entry = e820_table_firmware->entries + i;

        firmware_map_add_early(entry->addr, entry->addr + entry->size, e820_type_to_string(entry));
    }
}
1200 
1201 /*
1202  * How much should we pad the end of RAM, depending on where it is?
1203  */
1204 static unsigned long __init ram_alignment(resource_size_t pos)
1205 {
1206     unsigned long mb = pos >> 20;
1207 
1208     /* To 64kB in the first megabyte */
1209     if (!mb)
1210         return 64*1024;
1211 
1212     /* To 1MB in the first 16MB */
1213     if (mb < 16)
1214         return 1024*1024;
1215 
1216     /* To 64MB for anything above that */
1217     return 64*1024*1024;
1218 }
1219 
#define MAX_RESOURCE_SIZE ((resource_size_t)-1)

/*
 * Second pass over e820_res[]: insert the resources that were skipped by
 * e820__reserve_resources() (so PCI BAR assignment could run first), then
 * pad the end of RAM regions to alignment boundaries to keep firmware-
 * stolen RAM from being handed out:
 */
void __init e820__reserve_resources_late(void)
{
    int i;
    struct resource *res;

    res = e820_res;
    for (i = 0; i < e820_table->nr_entries; i++) {
        /* Only resources not yet in the tree (and not the truncated-skip slots): */
        if (!res->parent && res->end)
            insert_resource_expand_to_fit(&iomem_resource, res);
        res++;
    }

    /*
     * Try to bump up RAM regions to reasonable boundaries, to
     * avoid stolen RAM:
     */
    for (i = 0; i < e820_table->nr_entries; i++) {
        struct e820_entry *entry = &e820_table->entries[i];
        u64 start, end;

        if (entry->type != E820_TYPE_RAM)
            continue;

        /* Reserve from the end of this RAM entry up to the next alignment boundary: */
        start = entry->addr + entry->size;
        end = round_up(start, ram_alignment(start)) - 1;
        if (end > MAX_RESOURCE_SIZE)
            end = MAX_RESOURCE_SIZE;
        if (start >= end)
            continue;

        printk(KERN_DEBUG "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n", start, end);
        reserve_region_with_split(&iomem_resource, start, end, "RAM buffer");
    }
}
1256 
1257 /*
1258  * Pass the firmware (bootloader) E820 map to the kernel and process it:
1259  */
1260 char *__init e820__memory_setup_default(void)
1261 {
1262     char *who = "BIOS-e820";
1263 
1264     /*
1265      * Try to copy the BIOS-supplied E820-map.
1266      *
1267      * Otherwise fake a memory map; one section from 0k->640k,
1268      * the next section from 1mb->appropriate_mem_k
1269      */
1270     if (append_e820_table(boot_params.e820_table, boot_params.e820_entries) < 0) {
1271         u64 mem_size;
1272 
1273         /* Compare results from other methods and take the one that gives more RAM: */
1274         if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
1275             mem_size = boot_params.screen_info.ext_mem_k;
1276             who = "BIOS-88";
1277         } else {
1278             mem_size = boot_params.alt_mem_k;
1279             who = "BIOS-e801";
1280         }
1281 
1282         e820_table->nr_entries = 0;
1283         e820__range_add(0, LOWMEMSIZE(), E820_TYPE_RAM);
1284         e820__range_add(HIGH_MEMORY, mem_size << 10, E820_TYPE_RAM);
1285     }
1286 
1287     /* We just appended a lot of ranges, sanitize the table: */
1288     e820__update_table(e820_table);
1289 
1290     return who;
1291 }
1292 
1293 /*
1294  * Calls e820__memory_setup_default() in essence to pick up the firmware/bootloader
1295  * E820 map - with an optional platform quirk available for virtual platforms
1296  * to override this method of boot environment processing:
1297  */
1298 void __init e820__memory_setup(void)
1299 {
1300     char *who;
1301 
1302     /* This is a firmware interface ABI - make sure we don't break it: */
1303     BUILD_BUG_ON(sizeof(struct boot_e820_entry) != 20);
1304 
1305     who = x86_init.resources.memory_setup();
1306 
1307     memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
1308     memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));
1309 
1310     pr_info("BIOS-provided physical RAM map:\n");
1311     e820__print_table(who);
1312 }
1313 
/* Feed the sanitized E820 table into the memblock allocator: */
void __init e820__memblock_setup(void)
{
    int i;
    u64 end;

    /*
     * The bootstrap memblock region count maximum is 128 entries
     * (INIT_MEMBLOCK_REGIONS), but EFI might pass us more E820 entries
     * than that - so allow memblock resizing.
     *
     * This is safe, because this call happens pretty late during x86 setup,
     * so we know about reserved memory regions already. (This is important
     * so that memblock resizing does no stomp over reserved areas.)
     */
    memblock_allow_resize();

    for (i = 0; i < e820_table->nr_entries; i++) {
        struct e820_entry *entry = &e820_table->entries[i];

        /* Skip entries whose end address does not fit in resource_size_t: */
        end = entry->addr + entry->size;
        if (end != (resource_size_t)end)
            continue;

        /* Soft-reserved ranges are registered, but as reserved memory: */
        if (entry->type == E820_TYPE_SOFT_RESERVED)
            memblock_reserve(entry->addr, entry->size);

        /* Only RAM and kernel-reserved RAM become allocatable memory: */
        if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
            continue;

        memblock_add(entry->addr, entry->size);
    }

    /* Throw away partial pages: */
    memblock_trim_memory(PAGE_SIZE);

    memblock_dump_all();
}