0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  Copyright (C) 1995  Linus Torvalds
0004  *
0005  * This file contains the setup_arch() code, which handles the architecture-dependent
0006  * parts of early kernel initialization.
0007  */
0008 #include <linux/acpi.h>
0009 #include <linux/console.h>
0010 #include <linux/crash_dump.h>
0011 #include <linux/dma-map-ops.h>
0012 #include <linux/dmi.h>
0013 #include <linux/efi.h>
0014 #include <linux/ima.h>
0015 #include <linux/init_ohci1394_dma.h>
0016 #include <linux/initrd.h>
0017 #include <linux/iscsi_ibft.h>
0018 #include <linux/memblock.h>
0019 #include <linux/panic_notifier.h>
0020 #include <linux/pci.h>
0021 #include <linux/root_dev.h>
0022 #include <linux/hugetlb.h>
0023 #include <linux/tboot.h>
0024 #include <linux/usb/xhci-dbgp.h>
0025 #include <linux/static_call.h>
0026 #include <linux/swiotlb.h>
0027 #include <linux/random.h>
0028 
0029 #include <uapi/linux/mount.h>
0030 
0031 #include <xen/xen.h>
0032 
0033 #include <asm/apic.h>
0034 #include <asm/numa.h>
0035 #include <asm/bios_ebda.h>
0036 #include <asm/bugs.h>
0037 #include <asm/cpu.h>
0038 #include <asm/efi.h>
0039 #include <asm/gart.h>
0040 #include <asm/hypervisor.h>
0041 #include <asm/io_apic.h>
0042 #include <asm/kasan.h>
0043 #include <asm/kaslr.h>
0044 #include <asm/mce.h>
0045 #include <asm/memtype.h>
0046 #include <asm/mtrr.h>
0047 #include <asm/realmode.h>
0048 #include <asm/olpc_ofw.h>
0049 #include <asm/pci-direct.h>
0050 #include <asm/prom.h>
0051 #include <asm/proto.h>
0052 #include <asm/thermal.h>
0053 #include <asm/unwind.h>
0054 #include <asm/vsyscall.h>
0055 #include <linux/vmalloc.h>
0056 
0057 /*
0058  * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
0059  * max_pfn_mapped:     highest directly mapped pfn > 4 GB
0060  *
0061  * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and gaps are
0062  * represented by pfn_mapped[].
0063  */
0064 unsigned long max_low_pfn_mapped;
0065 unsigned long max_pfn_mapped;
0066 
0067 #ifdef CONFIG_DMI
0068 RESERVE_BRK(dmi_alloc, 65536);
0069 #endif
0070 
0071 
0072 unsigned long _brk_start = (unsigned long)__brk_base;
0073 unsigned long _brk_end   = (unsigned long)__brk_base;
0074 
0075 struct boot_params boot_params;
0076 
0077 /*
0078  * These are the four main kernel memory regions; we put them into
0079  * the resource tree so that kdump tools and other debugging tools
0080  * can recover them:
0081  */
0082 
0083 static struct resource rodata_resource = {
0084     .name   = "Kernel rodata",
0085     .start  = 0,
0086     .end    = 0,
0087     .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
0088 };
0089 
0090 static struct resource data_resource = {
0091     .name   = "Kernel data",
0092     .start  = 0,
0093     .end    = 0,
0094     .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
0095 };
0096 
0097 static struct resource code_resource = {
0098     .name   = "Kernel code",
0099     .start  = 0,
0100     .end    = 0,
0101     .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
0102 };
0103 
0104 static struct resource bss_resource = {
0105     .name   = "Kernel bss",
0106     .start  = 0,
0107     .end    = 0,
0108     .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
0109 };
0110 
0111 
0112 #ifdef CONFIG_X86_32
0113 /* CPU data as detected by the assembly code in head_32.S */
0114 struct cpuinfo_x86 new_cpu_data;
0115 
0116 /* Common CPU data for all CPUs */
0117 struct cpuinfo_x86 boot_cpu_data __read_mostly;
0118 EXPORT_SYMBOL(boot_cpu_data);
0119 
0120 unsigned int def_to_bigsmp;
0121 
0122 struct apm_info apm_info;
0123 EXPORT_SYMBOL(apm_info);
0124 
0125 #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
0126     defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
0127 struct ist_info ist_info;
0128 EXPORT_SYMBOL(ist_info);
0129 #else
0130 struct ist_info ist_info;
0131 #endif
0132 
0133 #else
0134 struct cpuinfo_x86 boot_cpu_data __read_mostly;
0135 EXPORT_SYMBOL(boot_cpu_data);
0136 #endif
0137 
0138 
0139 #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
0140 __visible unsigned long mmu_cr4_features __ro_after_init;
0141 #else
0142 __visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
0143 #endif
0144 
0145 #ifdef CONFIG_IMA
0146 static phys_addr_t ima_kexec_buffer_phys;
0147 static size_t ima_kexec_buffer_size;
0148 #endif
0149 
0150 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
0151 int bootloader_type, bootloader_version;
0152 
0153 /*
0154  * Setup options
0155  */
0156 struct screen_info screen_info;
0157 EXPORT_SYMBOL(screen_info);
0158 struct edid_info edid_info;
0159 EXPORT_SYMBOL_GPL(edid_info);
0160 
0161 extern int root_mountflags;
0162 
0163 unsigned long saved_video_mode;
0164 
0165 #define RAMDISK_IMAGE_START_MASK    0x07FF
0166 #define RAMDISK_PROMPT_FLAG     0x8000
0167 #define RAMDISK_LOAD_FLAG       0x4000
0168 
0169 static char __initdata command_line[COMMAND_LINE_SIZE];
0170 #ifdef CONFIG_CMDLINE_BOOL
0171 static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
0172 #endif
0173 
0174 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
0175 struct edd edd;
0176 #ifdef CONFIG_EDD_MODULE
0177 EXPORT_SYMBOL(edd);
0178 #endif
0179 /**
0180  * copy_edd() - Copy the BIOS EDD information
0181  *              from boot_params into a safe place.
0182  *
0183  */
0184 static inline void __init copy_edd(void)
0185 {
0186      memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
0187         sizeof(edd.mbr_signature));
0188      memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
0189      edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
0190      edd.edd_info_nr = boot_params.eddbuf_entries;
0191 }
0192 #else
0193 static inline void __init copy_edd(void)
0194 {
0195 }
0196 #endif
0197 
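     /*
      * Hand out a zeroed, 'align'-aligned chunk from the early brk area
      * between __brk_base and __brk_limit. Only usable until reserve_brk()
      * sets _brk_start to 0.
      */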
0198 void * __init extend_brk(size_t size, size_t align)
0199 {
0200     size_t mask = align - 1;
0201     void *ret;
0202 
0203     BUG_ON(_brk_start == 0);
0204     BUG_ON(align & mask);
0205 
0206     _brk_end = (_brk_end + mask) & ~mask;
0207     BUG_ON((char *)(_brk_end + size) > __brk_limit);
0208 
0209     ret = (void *)_brk_end;
0210     _brk_end += size;
0211 
0212     memset(ret, 0, size);
0213 
0214     return ret;
0215 }
0216 
0217 #ifdef CONFIG_X86_32
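     /*
      * No-op on 32-bit: setup_arch() calls cleanup_highmap() unconditionally,
      * and the real work is only needed on 64-bit.
      */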
0218 static void __init cleanup_highmap(void)
0219 {
0220 }
0221 #endif
0222 
0223 static void __init reserve_brk(void)
0224 {
0225     if (_brk_end > _brk_start)
0226         memblock_reserve(__pa_symbol(_brk_start),
0227                  _brk_end - _brk_start);
0228 
0229     /* Mark brk area as locked down and no longer taking any
0230        new allocations */
0231     _brk_start = 0;
0232 }
0233 
0234 u64 relocated_ramdisk;
0235 
0236 #ifdef CONFIG_BLK_DEV_INITRD
0237 
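     /*
      * The 64-bit initrd address/size are split across the legacy 32-bit
      * header fields and the ext_ramdisk_* fields; fall back to
      * phys_initrd_start/phys_initrd_size if the boot protocol fields are zero.
      */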
0238 static u64 __init get_ramdisk_image(void)
0239 {
0240     u64 ramdisk_image = boot_params.hdr.ramdisk_image;
0241 
0242     ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;
0243 
0244     if (ramdisk_image == 0)
0245         ramdisk_image = phys_initrd_start;
0246 
0247     return ramdisk_image;
0248 }
0249 static u64 __init get_ramdisk_size(void)
0250 {
0251     u64 ramdisk_size = boot_params.hdr.ramdisk_size;
0252 
0253     ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;
0254 
0255     if (ramdisk_size == 0)
0256         ramdisk_size = phys_initrd_size;
0257 
0258     return ramdisk_size;
0259 }
0260 
0261 static void __init relocate_initrd(void)
0262 {
0263     /* Assume only end is not page aligned */
0264     u64 ramdisk_image = get_ramdisk_image();
0265     u64 ramdisk_size  = get_ramdisk_size();
0266     u64 area_size     = PAGE_ALIGN(ramdisk_size);
0267 
0268     /* We need to move the initrd down into directly mapped mem */
0269     relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
0270                               PFN_PHYS(max_pfn_mapped));
0271     if (!relocated_ramdisk)
0272         panic("Cannot find place for new RAMDISK of size %lld\n",
0273               ramdisk_size);
0274 
0275     initrd_start = relocated_ramdisk + PAGE_OFFSET;
0276     initrd_end   = initrd_start + ramdisk_size;
0277     printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
0278            relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
0279 
0280     copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
0281 
0282     printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
0283         " [mem %#010llx-%#010llx]\n",
0284         ramdisk_image, ramdisk_image + ramdisk_size - 1,
0285         relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
0286 }
0287 
0288 static void __init early_reserve_initrd(void)
0289 {
0290     /* Assume only end is not page aligned */
0291     u64 ramdisk_image = get_ramdisk_image();
0292     u64 ramdisk_size  = get_ramdisk_size();
0293     u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
0294 
0295     if (!boot_params.hdr.type_of_loader ||
0296         !ramdisk_image || !ramdisk_size)
0297         return;     /* No initrd provided by bootloader */
0298 
0299     memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
0300 }
0301 
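     /*
      * If the boot loader placed the initrd in memory already covered by the
      * direct mapping, just record initrd_start/initrd_end. Otherwise
      * relocate_initrd() copies it into mapped memory and the original
      * region is handed back to memblock.
      */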
0302 static void __init reserve_initrd(void)
0303 {
0304     /* Assume only end is not page aligned */
0305     u64 ramdisk_image = get_ramdisk_image();
0306     u64 ramdisk_size  = get_ramdisk_size();
0307     u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
0308 
0309     if (!boot_params.hdr.type_of_loader ||
0310         !ramdisk_image || !ramdisk_size)
0311         return;     /* No initrd provided by bootloader */
0312 
0313     initrd_start = 0;
0314 
0315     printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
0316             ramdisk_end - 1);
0317 
0318     if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
0319                 PFN_DOWN(ramdisk_end))) {
0320         /* All are mapped, easy case */
0321         initrd_start = ramdisk_image + PAGE_OFFSET;
0322         initrd_end = initrd_start + ramdisk_size;
0323         return;
0324     }
0325 
0326     relocate_initrd();
0327 
0328     memblock_phys_free(ramdisk_image, ramdisk_end - ramdisk_image);
0329 }
0330 
0331 #else
0332 static void __init early_reserve_initrd(void)
0333 {
0334 }
0335 static void __init reserve_initrd(void)
0336 {
0337 }
0338 #endif /* CONFIG_BLK_DEV_INITRD */
0339 
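     /*
      * Reserve the IMA measurement-list buffer passed via a SETUP_IMA
      * setup_data entry (e.g. across kexec) so it can later be retrieved
      * with ima_get_kexec_buffer().
      */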
0340 static void __init add_early_ima_buffer(u64 phys_addr)
0341 {
0342 #ifdef CONFIG_IMA
0343     struct ima_setup_data *data;
0344 
0345     data = early_memremap(phys_addr + sizeof(struct setup_data), sizeof(*data));
0346     if (!data) {
0347         pr_warn("setup: failed to memremap ima_setup_data entry\n");
0348         return;
0349     }
0350 
0351     if (data->size) {
0352         memblock_reserve(data->addr, data->size);
0353         ima_kexec_buffer_phys = data->addr;
0354         ima_kexec_buffer_size = data->size;
0355     }
0356 
0357     early_memunmap(data, sizeof(*data));
0358 #else
0359     pr_warn("Passed IMA kexec data, but CONFIG_IMA not set. Ignoring.\n");
0360 #endif
0361 }
0362 
0363 #if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
0364 int __init ima_free_kexec_buffer(void)
0365 {
0366     int rc;
0367 
0368     if (!ima_kexec_buffer_size)
0369         return -ENOENT;
0370 
0371     rc = memblock_phys_free(ima_kexec_buffer_phys,
0372                 ima_kexec_buffer_size);
0373     if (rc)
0374         return rc;
0375 
0376     ima_kexec_buffer_phys = 0;
0377     ima_kexec_buffer_size = 0;
0378 
0379     return 0;
0380 }
0381 
0382 int __init ima_get_kexec_buffer(void **addr, size_t *size)
0383 {
0384     if (!ima_kexec_buffer_size)
0385         return -ENOENT;
0386 
0387     *addr = __va(ima_kexec_buffer_phys);
0388     *size = ima_kexec_buffer_size;
0389 
0390     return 0;
0391 }
0392 #endif
0393 
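     /*
      * Walk the boot loader's setup_data singly-linked list
      * (boot_params.hdr.setup_data) and dispatch each entry by type. The
      * entries may live anywhere in physical memory, so each one is
      * temporarily mapped with early_memremap().
      */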
0394 static void __init parse_setup_data(void)
0395 {
0396     struct setup_data *data;
0397     u64 pa_data, pa_next;
0398 
0399     pa_data = boot_params.hdr.setup_data;
0400     while (pa_data) {
0401         u32 data_len, data_type;
0402 
0403         data = early_memremap(pa_data, sizeof(*data));
0404         data_len = data->len + sizeof(struct setup_data);
0405         data_type = data->type;
0406         pa_next = data->next;
0407         early_memunmap(data, sizeof(*data));
0408 
0409         switch (data_type) {
0410         case SETUP_E820_EXT:
0411             e820__memory_setup_extended(pa_data, data_len);
0412             break;
0413         case SETUP_DTB:
0414             add_dtb(pa_data);
0415             break;
0416         case SETUP_EFI:
0417             parse_efi_setup(pa_data, data_len);
0418             break;
0419         case SETUP_IMA:
0420             add_early_ima_buffer(pa_data);
0421             break;
0422         case SETUP_RNG_SEED:
0423             data = early_memremap(pa_data, data_len);
0424             add_bootloader_randomness(data->data, data->len);
0425             /* Zero seed for forward secrecy. */
0426             memzero_explicit(data->data, data->len);
0427             /* Zero length in case we find ourselves back here by accident. */
0428             memzero_explicit(&data->len, sizeof(data->len));
0429             early_memunmap(data, data_len);
0430             break;
0431         default:
0432             break;
0433         }
0434         pa_data = pa_next;
0435     }
0436 }
0437 
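     /*
      * Reserve the memory backing every setup_data entry (and, for
      * SETUP_INDIRECT entries, the payload they point to) so that early
      * memblock allocations cannot overwrite them.
      */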
0438 static void __init memblock_x86_reserve_range_setup_data(void)
0439 {
0440     struct setup_indirect *indirect;
0441     struct setup_data *data;
0442     u64 pa_data, pa_next;
0443     u32 len;
0444 
0445     pa_data = boot_params.hdr.setup_data;
0446     while (pa_data) {
0447         data = early_memremap(pa_data, sizeof(*data));
0448         if (!data) {
0449             pr_warn("setup: failed to memremap setup_data entry\n");
0450             return;
0451         }
0452 
0453         len = sizeof(*data);
0454         pa_next = data->next;
0455 
0456         memblock_reserve(pa_data, sizeof(*data) + data->len);
0457 
0458         if (data->type == SETUP_INDIRECT) {
0459             len += data->len;
0460             early_memunmap(data, sizeof(*data));
0461             data = early_memremap(pa_data, len);
0462             if (!data) {
0463                 pr_warn("setup: failed to memremap indirect setup_data\n");
0464                 return;
0465             }
0466 
0467             indirect = (struct setup_indirect *)data->data;
0468 
0469             if (indirect->type != SETUP_INDIRECT)
0470                 memblock_reserve(indirect->addr, indirect->len);
0471         }
0472 
0473         pa_data = pa_next;
0474         early_memunmap(data, len);
0475     }
0476 }
0477 
0478 /*
0479  * --------- Crashkernel reservation ------------------------------
0480  */
0481 
0482 /* 16M alignment for crash kernel regions */
0483 #define CRASH_ALIGN     SZ_16M
0484 
0485 /*
0486  * Keep the crash kernel below this limit.
0487  *
0488  * Earlier 32-bit kernels would limit the kernel to the low 512 MB range
0489  * due to mapping restrictions.
0490  *
0491  * 64-bit kdump kernels need to be restricted to be under 64 TB, which is
0492  * the upper limit of system RAM in 4-level paging mode. Since the kdump
0493  * jump could be from 5-level paging to 4-level paging, the jump will fail if
0494  * the kernel is put above 64 TB, and during the 1st kernel bootup there's
0495  * no good way to detect the paging mode of the target kernel which will be
0496  * loaded for dumping.
0497  */
0498 #ifdef CONFIG_X86_32
0499 # define CRASH_ADDR_LOW_MAX SZ_512M
0500 # define CRASH_ADDR_HIGH_MAX    SZ_512M
0501 #else
0502 # define CRASH_ADDR_LOW_MAX SZ_4G
0503 # define CRASH_ADDR_HIGH_MAX    SZ_64T
0504 #endif
0505 
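     /*
      * On 64-bit, reserve an additional region below 4 GB so that a kdump
      * kernel loaded above 4 GB still has memory for swiotlb and 32-bit DMA.
      * The size comes from crashkernel=...,low or a swiotlb-based default.
      */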
0506 static int __init reserve_crashkernel_low(void)
0507 {
0508 #ifdef CONFIG_X86_64
0509     unsigned long long base, low_base = 0, low_size = 0;
0510     unsigned long low_mem_limit;
0511     int ret;
0512 
0513     low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX);
0514 
0515     /* crashkernel=Y,low */
0516     ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base);
0517     if (ret) {
0518         /*
0519          * two parts from kernel/dma/swiotlb.c:
0520          * -swiotlb size: user-specified with swiotlb= or default.
0521          *
0522          * -swiotlb overflow buffer: now hardcoded to 32k. We round it
0523          * to 8M for other buffers that may need to stay low too. Also
0524          * make sure we allocate enough extra low memory so that we
0525          * don't run out of DMA buffers for 32-bit devices.
0526          */
0527         low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
0528     } else {
0529         /* passed with crashkernel=0,low ? */
0530         if (!low_size)
0531             return 0;
0532     }
0533 
0534     low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
0535     if (!low_base) {
0536         pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
0537                (unsigned long)(low_size >> 20));
0538         return -ENOMEM;
0539     }
0540 
0541     pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n",
0542         (unsigned long)(low_size >> 20),
0543         (unsigned long)(low_base >> 20),
0544         (unsigned long)(low_mem_limit >> 20));
0545 
0546     crashk_low_res.start = low_base;
0547     crashk_low_res.end   = low_base + low_size - 1;
0548     insert_resource(&iomem_resource, &crashk_low_res);
0549 #endif
0550     return 0;
0551 }
0552 
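     /*
      * Parse crashkernel= (plain or ",high", with an optional ",low"
      * companion) and carve the requested region out of memblock,
      * registering it as crashk_res for kexec/kdump.
      */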
0553 static void __init reserve_crashkernel(void)
0554 {
0555     unsigned long long crash_size, crash_base, total_mem;
0556     bool high = false;
0557     int ret;
0558 
0559     if (!IS_ENABLED(CONFIG_KEXEC_CORE))
0560         return;
0561 
0562     total_mem = memblock_phys_mem_size();
0563 
0564     /* crashkernel=XM */
0565     ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
0566     if (ret != 0 || crash_size <= 0) {
0567         /* crashkernel=X,high */
0568         ret = parse_crashkernel_high(boot_command_line, total_mem,
0569                          &crash_size, &crash_base);
0570         if (ret != 0 || crash_size <= 0)
0571             return;
0572         high = true;
0573     }
0574 
0575     if (xen_pv_domain()) {
0576         pr_info("Ignoring crashkernel for a Xen PV domain\n");
0577         return;
0578     }
0579 
0580     /* 0 means: find the address automatically */
0581     if (!crash_base) {
0582         /*
0583          * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
0584          * crashkernel=x,high reserves memory over 4G, also allocates
0585          * 256M extra low memory for DMA buffers and swiotlb.
0586          * But the extra memory is not required for all machines.
0587          * So try low memory first and fall back to high memory
0588          * unless "crashkernel=size[KMG],high" is specified.
0589          */
0590         if (!high)
0591             crash_base = memblock_phys_alloc_range(crash_size,
0592                         CRASH_ALIGN, CRASH_ALIGN,
0593                         CRASH_ADDR_LOW_MAX);
0594         if (!crash_base)
0595             crash_base = memblock_phys_alloc_range(crash_size,
0596                         CRASH_ALIGN, CRASH_ALIGN,
0597                         CRASH_ADDR_HIGH_MAX);
0598         if (!crash_base) {
0599             pr_info("crashkernel reservation failed - No suitable area found.\n");
0600             return;
0601         }
0602     } else {
0603         unsigned long long start;
0604 
0605         start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
0606                           crash_base + crash_size);
0607         if (start != crash_base) {
0608             pr_info("crashkernel reservation failed - memory is in use.\n");
0609             return;
0610         }
0611     }
0612 
0613     if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
0614         memblock_phys_free(crash_base, crash_size);
0615         return;
0616     }
0617 
0618     pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
0619         (unsigned long)(crash_size >> 20),
0620         (unsigned long)(crash_base >> 20),
0621         (unsigned long)(total_mem >> 20));
0622 
0623     crashk_res.start = crash_base;
0624     crashk_res.end   = crash_base + crash_size - 1;
0625     insert_resource(&iomem_resource, &crashk_res);
0626 }
0627 
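     /*
      * Legacy PC I/O ports: DMA controllers, PICs, timers, keyboard
      * controller, DMA page registers and the FPU error port.
      */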
0628 static struct resource standard_io_resources[] = {
0629     { .name = "dma1", .start = 0x00, .end = 0x1f,
0630         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0631     { .name = "pic1", .start = 0x20, .end = 0x21,
0632         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0633     { .name = "timer0", .start = 0x40, .end = 0x43,
0634         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0635     { .name = "timer1", .start = 0x50, .end = 0x53,
0636         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0637     { .name = "keyboard", .start = 0x60, .end = 0x60,
0638         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0639     { .name = "keyboard", .start = 0x64, .end = 0x64,
0640         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0641     { .name = "dma page reg", .start = 0x80, .end = 0x8f,
0642         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0643     { .name = "pic2", .start = 0xa0, .end = 0xa1,
0644         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0645     { .name = "dma2", .start = 0xc0, .end = 0xdf,
0646         .flags = IORESOURCE_BUSY | IORESOURCE_IO },
0647     { .name = "fpu", .start = 0xf0, .end = 0xff,
0648         .flags = IORESOURCE_BUSY | IORESOURCE_IO }
0649 };
0650 
0651 void __init reserve_standard_io_resources(void)
0652 {
0653     int i;
0654 
0655     /* request I/O space for devices used on all i[345]86 PCs */
0656     for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
0657         request_resource(&ioport_resource, &standard_io_resources[i]);
0658 
0659 }
0660 
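     /*
      * True when the boot display controller at 0000:00:02.0 is one of the
      * affected Sandy Bridge IGD device IDs, probed via early PCI config
      * space access.
      */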
0661 static bool __init snb_gfx_workaround_needed(void)
0662 {
0663 #ifdef CONFIG_PCI
0664     int i;
0665     u16 vendor, devid;
0666     static const __initconst u16 snb_ids[] = {
0667         0x0102,
0668         0x0112,
0669         0x0122,
0670         0x0106,
0671         0x0116,
0672         0x0126,
0673         0x010a,
0674     };
0675 
0676     /* Assume no if something weird is going on with PCI */
0677     if (!early_pci_allowed())
0678         return false;
0679 
0680     vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
0681     if (vendor != 0x8086)
0682         return false;
0683 
0684     devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
0685     for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
0686         if (devid == snb_ids[i])
0687             return true;
0688 #endif
0689 
0690     return false;
0691 }
0692 
0693 /*
0694  * Sandy Bridge graphics has trouble with certain ranges, exclude
0695  * them from allocation.
0696  */
0697 static void __init trim_snb_memory(void)
0698 {
0699     static const __initconst unsigned long bad_pages[] = {
0700         0x20050000,
0701         0x20110000,
0702         0x20130000,
0703         0x20138000,
0704         0x40004000,
0705     };
0706     int i;
0707 
0708     if (!snb_gfx_workaround_needed())
0709         return;
0710 
0711     printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
0712 
0713     /*
0714      * SandyBridge integrated graphics devices have a bug that prevents
0715      * them from accessing certain memory ranges, namely anything below
0716      * 1M and in the pages listed in bad_pages[] above.
0717      *
0718      * To avoid these pages being ever accessed by SNB gfx devices reserve
0719      * bad_pages that have not already been reserved at boot time.
0720      * All memory below the 1 MB mark is anyway reserved later during
0721      * setup_arch(), so there is no need to reserve it here.
0722      */
0723 
0724     for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
0725         if (memblock_reserve(bad_pages[i], PAGE_SIZE))
0726             printk(KERN_WARNING "failed to reserve 0x%08lx\n",
0727                    bad_pages[i]);
0728     }
0729 }
0730 
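     /*
      * Mark the first page reserved and drop the legacy 640K-1M BIOS window
      * from E820_TYPE_RAM, then re-sanitize the E820 table.
      */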
0731 static void __init trim_bios_range(void)
0732 {
0733     /*
0734      * A special case is the first 4Kb of memory:
0735      * this is a BIOS-owned area, not kernel RAM, but generally
0736      * not listed as such in the E820 table.
0737      *
0738      * This typically reserves additional memory (64KiB by default)
0739      * since some BIOSes are known to corrupt low memory.  See the
0740      * Kconfig help text for X86_RESERVE_LOW.
0741      */
0742     e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
0743 
0744     /*
0745      * Special case: some BIOSes report the PC BIOS
0746      * area (640Kb -> 1Mb) as RAM even though it is not.
0747      * Take it out.
0748      */
0749     e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);
0750 
0751     e820__update_table(e820_table);
0752 }
0753 
0754 /* Called before trim_bios_range() so we don't need an extra e820 sanitize pass */
0755 static void __init e820_add_kernel_range(void)
0756 {
0757     u64 start = __pa_symbol(_text);
0758     u64 size = __pa_symbol(_end) - start;
0759 
0760     /*
0761      * Complain if .text .data and .bss are not marked as E820_TYPE_RAM and
0762      * attempt to fix it by adding the range. We may have a confused BIOS,
0763      * or the user may have used memmap=exactmap or memmap=xxM$yyM to
0764      * exclude the kernel range. If we really are running on top of non-RAM,
0765      * we will crash later anyway.
0766      */
0767     if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
0768         return;
0769 
0770     pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
0771     e820__range_remove(start, size, E820_TYPE_RAM, 0);
0772     e820__range_add(start, size, E820_TYPE_RAM);
0773 }
0774 
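     /*
      * Memblock reservations that must happen before any memory is handed to
      * the early allocator: the kernel image, the low 64K, the initrd, the
      * setup_data blobs, the iBFT, BIOS regions and the SNB graphics
      * workaround pages.
      */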
0775 static void __init early_reserve_memory(void)
0776 {
0777     /*
0778      * Reserve the memory occupied by the kernel between _text and
0779      * __end_of_kernel_reserve symbols. Any kernel sections after the
0780      * __end_of_kernel_reserve symbol must be explicitly reserved with a
0781      * separate memblock_reserve() or they will be discarded.
0782      */
0783     memblock_reserve(__pa_symbol(_text),
0784              (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
0785 
0786     /*
0787      * The first 4Kb of memory is a BIOS owned area, but generally it is
0788      * not listed as such in the E820 table.
0789      *
0790      * Reserve the first 64K of memory since some BIOSes are known to
0791      * corrupt low memory. After the real mode trampoline is allocated the
0792      * rest of the memory below 640k is reserved.
0793      *
0794      * In addition, make sure page 0 is always reserved because on
0795      * systems with L1TF its contents can be leaked to user processes.
0796      */
0797     memblock_reserve(0, SZ_64K);
0798 
0799     early_reserve_initrd();
0800 
0801     memblock_x86_reserve_range_setup_data();
0802 
0803     reserve_ibft_region();
0804     reserve_bios_regions();
0805     trim_snb_memory();
0806 }
0807 
0808 /*
0809  * Dump out kernel offset information on panic.
0810  */
0811 static int
0812 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
0813 {
0814     if (kaslr_enabled()) {
0815         pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
0816              kaslr_offset(),
0817              __START_KERNEL,
0818              __START_KERNEL_map,
0819              MODULES_VADDR-1);
0820     } else {
0821         pr_emerg("Kernel Offset: disabled\n");
0822     }
0823 
0824     return 0;
0825 }
0826 
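     /*
      * Propagate CPU NX support into __supported_pte_mask so that later
      * page-table setup uses (or avoids) _PAGE_NX.
      */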
0827 void x86_configure_nx(void)
0828 {
0829     if (boot_cpu_has(X86_FEATURE_NX))
0830         __supported_pte_mask |= _PAGE_NX;
0831     else
0832         __supported_pte_mask &= ~_PAGE_NX;
0833 }
0834 
0835 static void __init x86_report_nx(void)
0836 {
0837     if (!boot_cpu_has(X86_FEATURE_NX)) {
0838         printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
0839                "missing in CPU!\n");
0840     } else {
0841 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
0842         printk(KERN_INFO "NX (Execute Disable) protection: active\n");
0843 #else
0844         /* 32bit non-PAE kernel, NX cannot be used */
0845         printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
0846                "cannot be enabled: non-PAE kernel!\n");
0847 #endif
0848     }
0849 }
0850 
0851 /*
0852  * Determine if we were loaded by an EFI loader.  If so, then we have also been
0853  * passed the efi memmap, systab, etc., so we should use these data structures
0854  * for initialization.  Note, the efi init code path is determined by the
0855  * global efi_enabled. This allows the same kernel image to be used on existing
0856  * systems (with a traditional BIOS) as well as on EFI systems.
0857  */
0858 /*
0859  * setup_arch - architecture-specific boot-time initializations
0860  *
0861  * Note: On x86_64, fixmaps are ready for use even before this is called.
0862  */
0863 
0864 void __init setup_arch(char **cmdline_p)
0865 {
0866 #ifdef CONFIG_X86_32
0867     memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
0868 
0869     /*
0870      * copy kernel address range established so far and switch
0871      * to the proper swapper page table
0872      */
0873     clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
0874             initial_page_table + KERNEL_PGD_BOUNDARY,
0875             KERNEL_PGD_PTRS);
0876 
0877     load_cr3(swapper_pg_dir);
0878     /*
0879      * Note: Quark X1000 CPUs advertise PGE incorrectly and require
0880      * a cr3 based tlb flush, so the following __flush_tlb_all()
0881      * will not flush anything because the CPU quirk which clears
0882      * X86_FEATURE_PGE has not been invoked yet. Though due to the
0883      * load_cr3() above the TLB has been flushed already. The
0884      * quirk is invoked before subsequent calls to __flush_tlb_all()
0885      * so proper operation is guaranteed.
0886      */
0887     __flush_tlb_all();
0888 #else
0889     printk(KERN_INFO "Command line: %s\n", boot_command_line);
0890     boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
0891 #endif
0892 
0893     /*
0894      * If we have OLPC OFW, we might end up relocating the fixmap due to
0895      * reserve_top(), so do this before touching the ioremap area.
0896      */
0897     olpc_ofw_detect();
0898 
0899     idt_setup_early_traps();
0900     early_cpu_init();
0901     jump_label_init();
0902     static_call_init();
0903     early_ioremap_init();
0904 
0905     setup_olpc_ofw_pgd();
0906 
0907     ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
0908     screen_info = boot_params.screen_info;
0909     edid_info = boot_params.edid_info;
0910 #ifdef CONFIG_X86_32
0911     apm_info.bios = boot_params.apm_bios_info;
0912     ist_info = boot_params.ist_info;
0913 #endif
0914     saved_video_mode = boot_params.hdr.vid_mode;
0915     bootloader_type = boot_params.hdr.type_of_loader;
0916     if ((bootloader_type >> 4) == 0xe) {
0917         bootloader_type &= 0xf;
0918         bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
0919     }
0920     bootloader_version  = bootloader_type & 0xf;
0921     bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
0922 
0923 #ifdef CONFIG_BLK_DEV_RAM
0924     rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
0925 #endif
0926 #ifdef CONFIG_EFI
0927     if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
0928              EFI32_LOADER_SIGNATURE, 4)) {
0929         set_bit(EFI_BOOT, &efi.flags);
0930     } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
0931              EFI64_LOADER_SIGNATURE, 4)) {
0932         set_bit(EFI_BOOT, &efi.flags);
0933         set_bit(EFI_64BIT, &efi.flags);
0934     }
0935 #endif
0936 
0937     x86_init.oem.arch_setup();
0938 
0939     /*
0940      * Do some memory reservations *before* memory is added to memblock, so
0941      * memblock allocations won't overwrite it.
0942      *
0943      * After this point, everything still needed from the boot loader or
0944      * firmware or kernel text should be early reserved or marked not RAM in
0945      * e820. All other memory is free game.
0946      *
0947      * This call needs to happen before e820__memory_setup() which calls the
0948      * xen_memory_setup() on Xen dom0 which relies on the fact that those
0949      * early reservations have happened already.
0950      */
0951     early_reserve_memory();
0952 
0953     iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
0954     e820__memory_setup();
0955     parse_setup_data();
0956 
0957     copy_edd();
0958 
0959     if (!boot_params.hdr.root_flags)
0960         root_mountflags &= ~MS_RDONLY;
0961     setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);
0962 
0963     code_resource.start = __pa_symbol(_text);
0964     code_resource.end = __pa_symbol(_etext)-1;
0965     rodata_resource.start = __pa_symbol(__start_rodata);
0966     rodata_resource.end = __pa_symbol(__end_rodata)-1;
0967     data_resource.start = __pa_symbol(_sdata);
0968     data_resource.end = __pa_symbol(_edata)-1;
0969     bss_resource.start = __pa_symbol(__bss_start);
0970     bss_resource.end = __pa_symbol(__bss_stop)-1;
0971 
0972 #ifdef CONFIG_CMDLINE_BOOL
0973 #ifdef CONFIG_CMDLINE_OVERRIDE
0974     strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
0975 #else
0976     if (builtin_cmdline[0]) {
0977         /* append boot loader cmdline to builtin */
0978         strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
0979         strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
0980         strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
0981     }
0982 #endif
0983 #endif
0984 
0985     strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
0986     *cmdline_p = command_line;
0987 
0988     /*
0989      * x86_configure_nx() is called before parse_early_param() to detect
0990      * whether hardware doesn't support NX (so that the early EHCI debug
0991      * console setup can safely call set_fixmap()).
0992      */
0993     x86_configure_nx();
0994 
0995     parse_early_param();
0996 
0997     if (efi_enabled(EFI_BOOT))
0998         efi_memblock_x86_reserve_range();
0999 
1000 #ifdef CONFIG_MEMORY_HOTPLUG
1001     /*
1002      * Memory used by the kernel cannot be hot-removed because Linux
1003      * cannot migrate the kernel pages. When memory hotplug is
1004      * enabled, we should prevent memblock from allocating memory
1005      * for the kernel.
1006      *
1007      * ACPI SRAT records all hotpluggable memory ranges. But before
1008      * SRAT is parsed, we don't know about it.
1009      *
1010      * The kernel image is loaded into memory at a very early time. We
1011      * cannot prevent this anyway. So on NUMA systems, we set any
1012      * node the kernel resides in as un-hotpluggable.
1013      *
1014      * Since on modern servers, one node could have double-digit
1015      * gigabytes of memory, we can assume the memory around the kernel
1016      * image is also un-hotpluggable. So before SRAT is parsed, just
1017      * allocate memory near the kernel image to try our best to keep
1018      * the kernel away from hotpluggable memory.
1019      */
1020     if (movable_node_is_enabled())
1021         memblock_set_bottom_up(true);
1022 #endif
1023 
1024     x86_report_nx();
1025 
1026     if (acpi_mps_check()) {
1027 #ifdef CONFIG_X86_LOCAL_APIC
1028         disable_apic = 1;
1029 #endif
1030         setup_clear_cpu_cap(X86_FEATURE_APIC);
1031     }
1032 
1033     e820__reserve_setup_data();
1034     e820__finish_early_params();
1035 
1036     if (efi_enabled(EFI_BOOT))
1037         efi_init();
1038 
1039     dmi_setup();
1040 
1041     /*
1042      * VMware detection requires dmi to be available, so this
1043      * needs to be done after dmi_setup(), for the boot CPU.
1044      */
1045     init_hypervisor_platform();
1046 
1047     tsc_early_init();
1048     x86_init.resources.probe_roms();
1049 
1050     /* after parse_early_param, so could debug it */
1051     insert_resource(&iomem_resource, &code_resource);
1052     insert_resource(&iomem_resource, &rodata_resource);
1053     insert_resource(&iomem_resource, &data_resource);
1054     insert_resource(&iomem_resource, &bss_resource);
1055 
1056     e820_add_kernel_range();
1057     trim_bios_range();
1058 #ifdef CONFIG_X86_32
1059     if (ppro_with_ram_bug()) {
1060         e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
1061                   E820_TYPE_RESERVED);
1062         e820__update_table(e820_table);
1063         printk(KERN_INFO "fixed physical RAM map:\n");
1064         e820__print_table("bad_ppro");
1065     }
1066 #else
1067     early_gart_iommu_check();
1068 #endif
1069 
1070     /*
1071      * partially used pages are not usable - thus
1072      * we are rounding upwards:
1073      */
1074     max_pfn = e820__end_of_ram_pfn();
1075 
1076     /* update e820 for memory not covered by WB MTRRs */
1077     if (IS_ENABLED(CONFIG_MTRR))
1078         mtrr_bp_init();
1079     else
1080         pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
1081 
1082     if (mtrr_trim_uncached_memory(max_pfn))
1083         max_pfn = e820__end_of_ram_pfn();
1084 
1085     max_possible_pfn = max_pfn;
1086 
1087     /*
1088      * This call is required when the CPU does not support PAT. If
1089      * mtrr_bp_init() invoked it already via pat_init() the call has no
1090      * effect.
1091      */
1092     init_cache_modes();
1093 
1094     /*
1095      * Define random base addresses for memory sections after max_pfn is
1096      * defined and before each memory section base is used.
1097      */
1098     kernel_randomize_memory();
1099 
1100 #ifdef CONFIG_X86_32
1101     /* max_low_pfn get updated here */
1102     find_low_pfn_range();
1103 #else
1104     check_x2apic();
1105 
1106     /* How many end-of-memory variables you have, grandma! */
1107     /* need this before calling reserve_initrd */
1108     if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
1109         max_low_pfn = e820__end_of_low_ram_pfn();
1110     else
1111         max_low_pfn = max_pfn;
1112 
1113     high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
1114 #endif
1115 
1116     /*
1117      * Find and reserve possible boot-time SMP configuration:
1118      */
1119     find_smp_config();
1120 
1121     early_alloc_pgt_buf();
1122 
1123     /*
1124      * Need to conclude brk before e820__memblock_setup():
1125      * it could use memblock_find_in_range(), which could overlap
1126      * with the brk area.
1127      */
1128     reserve_brk();
1129 
1130     cleanup_highmap();
1131 
1132     memblock_set_current_limit(ISA_END_ADDRESS);
1133     e820__memblock_setup();
1134 
1135     /*
1136      * Needs to run after memblock setup because it needs the physical
1137      * memory size.
1138      */
1139     sev_setup_arch();
1140 
1141     efi_fake_memmap();
1142     efi_find_mirror();
1143     efi_esrt_init();
1144     efi_mokvar_table_init();
1145 
1146     /*
1147      * The EFI specification says that boot service code won't be
1148      * called after ExitBootServices(). This is, in fact, a lie.
1149      */
1150     efi_reserve_boot_services();
1151 
1152     /* preallocate 4k for mptable mpc */
1153     e820__memblock_alloc_reserved_mpc_new();
1154 
1155 #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
1156     setup_bios_corruption_check();
1157 #endif
1158 
1159 #ifdef CONFIG_X86_32
1160     printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
1161             (max_pfn_mapped<<PAGE_SHIFT) - 1);
1162 #endif
1163 
1164     /*
1165      * Find free memory for the real mode trampoline and place it there. If
1166      * there is not enough free memory under 1M, on EFI-enabled systems
1167      * there will be additional attempt to reclaim the memory for the real
1168      * mode trampoline at efi_free_boot_services().
1169      *
1170      * Unconditionally reserve the entire first 1M of RAM because BIOSes
1171      * are known to corrupt low memory, and several hundred kilobytes are not
1172      * worth the complexity of detecting what memory gets clobbered. Windows does the
1173      * same thing for very similar reasons.
1174      *
1175      * Moreover, on machines with SandyBridge graphics or in setups that use
1176      * crashkernel the entire 1M is reserved anyway.
1177      */
1178     reserve_real_mode();
1179 
1180     init_mem_mapping();
1181 
1182     idt_setup_early_pf();
1183 
1184     /*
1185      * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
1186      * with the current CR4 value.  This may not be necessary, but
1187      * auditing all the early-boot CR4 manipulation would be needed to
1188      * rule it out.
1189      *
1190      * Mask off features that don't work outside long mode (just
1191      * PCIDE for now).
1192      */
1193     mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;
1194 
1195     memblock_set_current_limit(get_max_mapped());
1196 
1197     /*
1198      * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
1199      */
1200 
1201 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
1202     if (init_ohci1394_dma_early)
1203         init_ohci1394_dma_on_all_controllers();
1204 #endif
1205     /* Allocate bigger log buffer */
1206     setup_log_buf(1);
1207 
1208     if (efi_enabled(EFI_BOOT)) {
1209         switch (boot_params.secure_boot) {
1210         case efi_secureboot_mode_disabled:
1211             pr_info("Secure boot disabled\n");
1212             break;
1213         case efi_secureboot_mode_enabled:
1214             pr_info("Secure boot enabled\n");
1215             break;
1216         default:
1217             pr_info("Secure boot could not be determined\n");
1218             break;
1219         }
1220     }
1221 
1222     reserve_initrd();
1223 
1224     acpi_table_upgrade();
1225     /* Look for ACPI tables and reserve memory occupied by them. */
1226     acpi_boot_table_init();
1227 
1228     vsmp_init();
1229 
1230     io_delay_init();
1231 
1232     early_platform_quirks();
1233 
1234     early_acpi_boot_init();
1235 
1236     initmem_init();
1237     dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
1238 
1239     if (boot_cpu_has(X86_FEATURE_GBPAGES))
1240         hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
1241 
1242     /*
1243      * Reserve memory for crash kernel after SRAT is parsed so that it
1244      * won't consume hotpluggable memory.
1245      */
1246     reserve_crashkernel();
1247 
1248     memblock_find_dma_reserve();
1249 
1250     if (!early_xdbc_setup_hardware())
1251         early_xdbc_register_console();
1252 
1253     x86_init.paging.pagetable_init();
1254 
1255     kasan_init();
1256 
1257     /*
1258      * Sync back kernel address range.
1259      *
1260      * FIXME: Can the later sync in setup_cpu_entry_areas() replace
1261      * this call?
1262      */
1263     sync_initial_page_table();
1264 
1265     tboot_probe();
1266 
1267     map_vsyscall();
1268 
1269     generic_apic_probe();
1270 
1271     early_quirks();
1272 
1273     /*
1274      * Read APIC and some other early information from ACPI tables.
1275      */
1276     acpi_boot_init();
1277     x86_dtb_init();
1278 
1279     /*
1280      * get boot-time SMP configuration:
1281      */
1282     get_smp_config();
1283 
1284     /*
1285      * Systems w/o ACPI and MP tables might not have the local APIC
1286      * mapped yet, but prefill_possible_map() might need to access it.
1287      */
1288     init_apic_mappings();
1289 
1290     prefill_possible_map();
1291 
1292     init_cpu_to_node();
1293     init_gi_nodes();
1294 
1295     io_apic_init_mappings();
1296 
1297     x86_init.hyper.guest_late_init();
1298 
1299     e820__reserve_resources();
1300     e820__register_nosave_regions(max_pfn);
1301 
1302     x86_init.resources.reserve_resources();
1303 
1304     e820__setup_pci_gap();
1305 
1306 #ifdef CONFIG_VT
1307 #if defined(CONFIG_VGA_CONSOLE)
1308     if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
1309         conswitchp = &vga_con;
1310 #endif
1311 #endif
1312     x86_init.oem.banner();
1313 
1314     x86_init.timers.wallclock_init();
1315 
1316     /*
1317      * This needs to run before setup_local_APIC() which soft-disables the
1318      * local APIC temporarily and that masks the thermal LVT interrupt,
1319      * leading to softlockups on machines which have configured SMI
1320      * interrupt delivery.
1321      */
1322     therm_lvt_init();
1323 
1324     mcheck_init();
1325 
1326     register_refined_jiffies(CLOCK_TICK_RATE);
1327 
1328 #ifdef CONFIG_EFI
1329     if (efi_enabled(EFI_BOOT))
1330         efi_apply_memmap_quirks();
1331 #endif
1332 
1333     unwind_init();
1334 }
1335 
1336 #ifdef CONFIG_X86_32
1337 
1338 static struct resource video_ram_resource = {
1339     .name   = "Video RAM area",
1340     .start  = 0xa0000,
1341     .end    = 0xbffff,
1342     .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
1343 };
1344 
1345 void __init i386_reserve_resources(void)
1346 {
1347     request_resource(&iomem_resource, &video_ram_resource);
1348     reserve_standard_io_resources();
1349 }
1350 
1351 #endif /* CONFIG_X86_32 */
1352 
1353 static struct notifier_block kernel_offset_notifier = {
1354     .notifier_call = dump_kernel_offset
1355 };
1356 
1357 static int __init register_kernel_offset_dumper(void)
1358 {
1359     atomic_notifier_chain_register(&panic_notifier_list,
1360                     &kernel_offset_notifier);
1361     return 0;
1362 }
1363 __initcall(register_kernel_offset_dumper);