0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  prepare to run common code
0004  *
0005  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
0006  */
0007 
0008 #define DISABLE_BRANCH_PROFILING
0009 
0010 /* cpu_feature_enabled() cannot be used this early */
0011 #define USE_EARLY_PGTABLE_L5
0012 
0013 #include <linux/init.h>
0014 #include <linux/linkage.h>
0015 #include <linux/types.h>
0016 #include <linux/kernel.h>
0017 #include <linux/string.h>
0018 #include <linux/percpu.h>
0019 #include <linux/start_kernel.h>
0020 #include <linux/io.h>
0021 #include <linux/memblock.h>
0022 #include <linux/cc_platform.h>
0023 #include <linux/pgtable.h>
0024 
0025 #include <asm/processor.h>
0026 #include <asm/proto.h>
0027 #include <asm/smp.h>
0028 #include <asm/setup.h>
0029 #include <asm/desc.h>
0030 #include <asm/tlbflush.h>
0031 #include <asm/sections.h>
0032 #include <asm/kdebug.h>
0033 #include <asm/e820/api.h>
0034 #include <asm/bios_ebda.h>
0035 #include <asm/bootparam_utils.h>
0036 #include <asm/microcode.h>
0037 #include <asm/kasan.h>
0038 #include <asm/fixmap.h>
0039 #include <asm/realmode.h>
0040 #include <asm/extable.h>
0041 #include <asm/trapnr.h>
0042 #include <asm/sev.h>
0043 #include <asm/tdx.h>
0044 
0045 /*
0046  * Manage page tables very early on.
0047  */
0048 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
0049 static unsigned int __initdata next_early_pgt;
0050 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
0051 
0052 #ifdef CONFIG_X86_5LEVEL
0053 unsigned int __pgtable_l5_enabled __ro_after_init;
0054 unsigned int pgdir_shift __ro_after_init = 39;
0055 EXPORT_SYMBOL(pgdir_shift);
0056 unsigned int ptrs_per_p4d __ro_after_init = 1;
0057 EXPORT_SYMBOL(ptrs_per_p4d);
0058 #endif
0059 
0060 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
0061 unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
0062 EXPORT_SYMBOL(page_offset_base);
0063 unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
0064 EXPORT_SYMBOL(vmalloc_base);
0065 unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
0066 EXPORT_SYMBOL(vmemmap_base);
0067 #endif
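     /*
      * The layout values above default to their 4-level paging settings; when
      * 5-level paging is enabled, check_la57_support() rewrites them at the
      * very start of __startup_64().
      */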
0068 
0069 /*
0070  * GDT used on the boot CPU before switching to virtual addresses.
0071  */
0072 static struct desc_struct startup_gdt[GDT_ENTRIES] = {
0073     [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
0074     [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
0075     [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
0076 };
0077 
0078 /*
0079  * Address needs to be set at runtime because it references the startup_gdt
0080  * while the kernel still uses a direct mapping.
0081  */
0082 static struct desc_ptr startup_gdt_descr = {
0083     .size = sizeof(startup_gdt),
0084     .address = 0,
0085 };
0086 
0087 #define __head  __section(".head.text")
0088 
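     /*
      * fixup_pointer() converts the link-time virtual address of a global into
      * the address at which it is actually reachable while the CPU still runs
      * from the identity mapping: the object's offset from _text is rebased
      * onto the physical address the kernel was loaded at (physaddr).
      */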
0089 static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
0090 {
0091     return ptr - (void *)_text + (void *)physaddr;
0092 }
0093 
0094 static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
0095 {
0096     return fixup_pointer(ptr, physaddr);
0097 }
0098 
0099 #ifdef CONFIG_X86_5LEVEL
0100 static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
0101 {
0102     return fixup_pointer(ptr, physaddr);
0103 }
0104 
0105 static bool __head check_la57_support(unsigned long physaddr)
0106 {
0107     /*
0108      * 5-level paging is detected and enabled at kernel decompression
0109      * stage. Only check if it has been enabled there.
0110      */
0111     if (!(native_read_cr4() & X86_CR4_LA57))
0112         return false;
0113 
0114     *fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
0115     *fixup_int(&pgdir_shift, physaddr) = 48;
0116     *fixup_int(&ptrs_per_p4d, physaddr) = 512;
0117     *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
0118     *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
0119     *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;
0120 
0121     return true;
0122 }
0123 #else
0124 static bool __head check_la57_support(unsigned long physaddr)
0125 {
0126     return false;
0127 }
0128 #endif
0129 
0130 static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
0131 {
0132     unsigned long vaddr, vaddr_end;
0133     int i;
0134 
0135     /* Encrypt the kernel and related data (if SME is active) */
0136     sme_encrypt_kernel(bp);
0137 
0138     /*
0139      * Clear the memory encryption mask from the .bss..decrypted section.
0140      * The bss section will be memset to zero later in the initialization so
0141      * there is no need to zero it after changing the memory encryption
0142      * attribute.
0143      */
0144     if (sme_get_me_mask()) {
0145         vaddr = (unsigned long)__start_bss_decrypted;
0146         vaddr_end = (unsigned long)__end_bss_decrypted;
0147 
0148         for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
0149             /*
0150              * On SNP, transition the page to shared in the RMP table so that
0151              * it is consistent with the page table attribute change.
0152              *
0153              * __start_bss_decrypted has a virtual address in the high range
0154              * mapping (kernel .text). PVALIDATE, by way of
0155              * early_snp_set_memory_shared(), requires a valid virtual
0156              * address but the kernel is currently running off of the identity
0157              * mapping so use __pa() to get a *currently* valid virtual address.
0158              */
0159             early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);
0160 
0161             i = pmd_index(vaddr);
0162             pmd[i] -= sme_get_me_mask();
0163         }
0164     }
0165 
0166     /*
0167      * Return the SME encryption mask (if SME is active) to be used as a
0168      * modifier for the initial pgdir entry programmed into CR3.
0169      */
0170     return sme_get_me_mask();
0171 }
0172 
0173 /* Code in __startup_64() can be relocated during execution, but the compiler
0174  * doesn't have to generate PC-relative relocations when accessing globals from
0175  * that function. Clang actually does not generate them, which leads to
0176  * boot-time crashes. To work around this problem, every global pointer must
0177  * be adjusted using fixup_pointer().
0178  */
0179 unsigned long __head __startup_64(unsigned long physaddr,
0180                   struct boot_params *bp)
0181 {
0182     unsigned long load_delta, *p;
0183     unsigned long pgtable_flags;
0184     pgdval_t *pgd;
0185     p4dval_t *p4d;
0186     pudval_t *pud;
0187     pmdval_t *pmd, pmd_entry;
0188     pteval_t *mask_ptr;
0189     bool la57;
0190     int i;
0191     unsigned int *next_pgt_ptr;
0192 
0193     la57 = check_la57_support(physaddr);
0194 
0195     /* Is the address too large? */
0196     if (physaddr >> MAX_PHYSMEM_BITS)
0197         for (;;);
0198 
0199     /*
0200      * Compute the delta between the address I am compiled to run at
0201      * and the address I am actually running at.
0202      */
0203     load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
0204 
0205     /* Is the address not 2M aligned? */
0206     if (load_delta & ~PMD_PAGE_MASK)
0207         for (;;);
0208 
0209     /* Include the SME encryption mask in the fixup value */
0210     load_delta += sme_get_me_mask();
0211 
0212     /* Fixup the physical addresses in the page table */
0213 
0214     pgd = fixup_pointer(&early_top_pgt, physaddr);
0215     p = pgd + pgd_index(__START_KERNEL_map);
0216     if (la57)
0217         *p = (unsigned long)level4_kernel_pgt;
0218     else
0219         *p = (unsigned long)level3_kernel_pgt;
0220     *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;
0221 
0222     if (la57) {
0223         p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
0224         p4d[511] += load_delta;
0225     }
0226 
0227     pud = fixup_pointer(&level3_kernel_pgt, physaddr);
0228     pud[510] += load_delta;
0229     pud[511] += load_delta;
0230 
0231     pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
0232     for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
0233         pmd[i] += load_delta;
0234 
0235     /*
0236      * Set up the identity mapping for the switchover.  These
0237      * entries should *NOT* have the global bit set!  This also
0238      * creates a bunch of nonsense entries but that is fine --
0239      * it avoids problems around wraparound.
0240      */
0241 
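         /*
          * Two adjacent entries are populated at each level below so that the
          * identity mapping still covers the whole kernel image even if the
          * image straddles the boundary between two entries at that level.
          */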
0242     next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
0243     pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
0244     pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
0245 
0246     pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
0247 
0248     if (la57) {
0249         p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
0250                     physaddr);
0251 
0252         i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
0253         pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
0254         pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
0255 
0256         i = physaddr >> P4D_SHIFT;
0257         p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
0258         p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
0259     } else {
0260         i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
0261         pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
0262         pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
0263     }
0264 
0265     i = physaddr >> PUD_SHIFT;
0266     pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
0267     pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
0268 
0269     pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
0270     /* Filter out unsupported __PAGE_KERNEL_* bits: */
0271     mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
0272     pmd_entry &= *mask_ptr;
0273     pmd_entry += sme_get_me_mask();
0274     pmd_entry += physaddr;
0275 
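         /*
          * Identity-map the kernel image's physical footprint (_end - _text
          * bytes starting at physaddr) with 2M pages, beginning at the PMD
          * slot that corresponds to physaddr and wrapping modulo PTRS_PER_PMD.
          */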
0276     for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
0277         int idx = i + (physaddr >> PMD_SHIFT);
0278 
0279         pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
0280     }
0281 
0282     /*
0283      * Fix up the kernel text+data virtual addresses. Note that we
0284      * might write invalid pmds; when the kernel is relocated,
0285      * cleanup_highmap() fixes this up along with the mappings
0286      * beyond _end.
0287      *
0288      * Only the region occupied by the kernel image has so far
0289      * been checked against the table of usable memory regions
0290      * provided by the firmware, so invalidate pages outside that
0291      * region. A page table entry that maps to a reserved area of
0292      * memory would allow processor speculation into that area,
0293      * and on some hardware (particularly the UV platform) even
0294      * speculative access to some reserved areas is caught as an
0295      * error, causing the BIOS to halt the system.
0296      */
0297 
0298     pmd = fixup_pointer(level2_kernel_pgt, physaddr);
0299 
0300     /* invalidate pages before the kernel image */
0301     for (i = 0; i < pmd_index((unsigned long)_text); i++)
0302         pmd[i] &= ~_PAGE_PRESENT;
0303 
0304     /* fixup pages that are part of the kernel image */
0305     for (; i <= pmd_index((unsigned long)_end); i++)
0306         if (pmd[i] & _PAGE_PRESENT)
0307             pmd[i] += load_delta;
0308 
0309     /* invalidate pages after the kernel image */
0310     for (; i < PTRS_PER_PMD; i++)
0311         pmd[i] &= ~_PAGE_PRESENT;
0312 
0313     /*
0314      * Fixup phys_base - remove the memory encryption mask to obtain
0315      * the true physical address.
0316      */
0317     *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();
0318 
0319     return sme_postprocess_startup(bp, pmd);
0320 }
0321 
0322 /* Wipe all early page tables except for the kernel symbol map */
0323 static void __init reset_early_page_tables(void)
0324 {
0325     memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
0326     next_early_pgt = 0;
0327     write_cr3(__sme_pa_nodebug(early_top_pgt));
0328 }
0329 
0330 /* Create a new PMD entry */
0331 bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
0332 {
0333     unsigned long physaddr = address - __PAGE_OFFSET;
0334     pgdval_t pgd, *pgd_p;
0335     p4dval_t p4d, *p4d_p;
0336     pudval_t pud, *pud_p;
0337     pmdval_t *pmd_p;
0338 
0339     /* Invalid address, or are the early page tables no longer in use? */
0340     if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
0341         return false;
0342 
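         /*
          * If the static pool of early page tables is exhausted while walking,
          * reset_early_page_tables() below wipes all of them and the walk
          * restarts from scratch; any mappings discarded that way are simply
          * recreated on demand by subsequent early page faults.
          */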
0343 again:
0344     pgd_p = &early_top_pgt[pgd_index(address)].pgd;
0345     pgd = *pgd_p;
0346 
0347     /*
0348      * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
0349      * critical -- __PAGE_OFFSET would point us back into the dynamic
0350      * range and we might end up looping forever...
0351      */
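         /*
          * Without 5-level paging the p4d level is folded into the pgd, so the
          * pgd slot is used directly as the p4d entry.
          */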
0352     if (!pgtable_l5_enabled())
0353         p4d_p = pgd_p;
0354     else if (pgd)
0355         p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
0356     else {
0357         if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
0358             reset_early_page_tables();
0359             goto again;
0360         }
0361 
0362         p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
0363         memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
0364         *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
0365     }
0366     p4d_p += p4d_index(address);
0367     p4d = *p4d_p;
0368 
0369     if (p4d)
0370         pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
0371     else {
0372         if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
0373             reset_early_page_tables();
0374             goto again;
0375         }
0376 
0377         pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
0378         memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
0379         *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
0380     }
0381     pud_p += pud_index(address);
0382     pud = *pud_p;
0383 
0384     if (pud)
0385         pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
0386     else {
0387         if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
0388             reset_early_page_tables();
0389             goto again;
0390         }
0391 
0392         pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
0393         memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
0394         *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
0395     }
0396     pmd_p[pmd_index(address)] = pmd;
0397 
0398     return true;
0399 }
0400 
0401 static bool __init early_make_pgtable(unsigned long address)
0402 {
0403     unsigned long physaddr = address - __PAGE_OFFSET;
0404     pmdval_t pmd;
0405 
0406     pmd = (physaddr & PMD_MASK) + early_pmd_flags;
0407 
0408     return __early_make_pgtable(address, pmd);
0409 }
0410 
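     /*
      * Early exception dispatch: a #PF on a direct-mapping address gets a 2M
      * mapping built on demand, #VC is handed to the SEV-ES boot GHCB handler,
      * #VE to the TDX handler, and anything left over goes through the
      * exception table fixup.
      */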
0411 void __init do_early_exception(struct pt_regs *regs, int trapnr)
0412 {
0413     if (trapnr == X86_TRAP_PF &&
0414         early_make_pgtable(native_read_cr2()))
0415         return;
0416 
0417     if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
0418         trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
0419         return;
0420 
0421     if (trapnr == X86_TRAP_VE && tdx_early_handle_ve(regs))
0422         return;
0423 
0424     early_fixup_exception(regs, trapnr);
0425 }
0426 
0427 /* Don't add a printk() in here: printk() relies on the PDA, which is not
0428    initialized yet. */
0429 void __init clear_bss(void)
0430 {
0431     memset(__bss_start, 0,
0432            (unsigned long) __bss_stop - (unsigned long) __bss_start);
0433     memset(__brk_base, 0,
0434            (unsigned long) __brk_limit - (unsigned long) __brk_base);
0435 }
0436 
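     /*
      * The boot protocol splits the 64-bit command line pointer in two: the
      * low 32 bits sit in the setup header (hdr.cmd_line_ptr) and the upper
      * 32 bits in boot_params.ext_cmd_line_ptr; recombine them here.
      */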
0437 static unsigned long get_cmd_line_ptr(void)
0438 {
0439     unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;
0440 
0441     cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;
0442 
0443     return cmd_line_ptr;
0444 }
0445 
0446 static void __init copy_bootdata(char *real_mode_data)
0447 {
0448     char * command_line;
0449     unsigned long cmd_line_ptr;
0450 
0451     /*
0452      * If SME is active, this will create decrypted mappings of the
0453      * boot data in advance of the copy operations.
0454      */
0455     sme_map_bootdata(real_mode_data);
0456 
0457     memcpy(&boot_params, real_mode_data, sizeof(boot_params));
0458     sanitize_boot_params(&boot_params);
0459     cmd_line_ptr = get_cmd_line_ptr();
0460     if (cmd_line_ptr) {
0461         command_line = __va(cmd_line_ptr);
0462         memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
0463     }
0464 
0465     /*
0466      * The old boot data is no longer needed and won't be reserved,
0467      * freeing up that memory for use by the system. If SME is active,
0468      * we need to remove the mappings that were created so that the
0469      * memory doesn't remain mapped as decrypted.
0470      */
0471     sme_unmap_bootdata(real_mode_data);
0472 }
0473 
0474 asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
0475 {
0476     /*
0477      * Build-time sanity checks on the kernel image and module
0478      * area mappings. (These checks are purely build-time and produce no code.)
0479      */
0480     BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
0481     BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
0482     BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
0483     BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
0484     BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
0485     BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
0486     MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
0487                 (__START_KERNEL & PGDIR_MASK)));
0488     BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
0489 
0490     cr4_init_shadow();
0491 
0492     /* Kill off the identity-map trampoline */
0493     reset_early_page_tables();
0494 
0495     clear_bss();
0496 
0497     /*
0498      * This needs to happen *before* kasan_early_init() because the latter
0499      * installs mappings into that page.
0500      */
0501     clear_page(init_top_pgt);
0502 
0503     /*
0504      * SME support may update early_pmd_flags to include the memory
0505      * encryption mask, so it needs to be called before anything
0506      * that may generate a page fault.
0507      */
0508     sme_early_init();
0509 
0510     kasan_early_init();
0511 
0512     /*
0513      * Flush global TLB entries which could be left over from the trampoline page
0514      * table.
0515      *
0516      * This needs to happen *after* kasan_early_init() as KASAN-enabled .configs
0517      * instrument native_write_cr4() so KASAN must be initialized for that
0518      * instrumentation to work.
0519      */
0520     __native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));
0521 
0522     idt_setup_early_handler();
0523 
0524     /* Needed before cc_platform_has() can be used for TDX */
0525     tdx_early_init();
0526 
0527     copy_bootdata(__va(real_mode_data));
0528 
0529     /*
0530      * Load microcode early on BSP.
0531      */
0532     load_ucode_bsp();
0533 
0534     /* Set up the kernel high mapping in init_top_pgt */
0535     init_top_pgt[511] = early_top_pgt[511];
0536 
0537     x86_64_start_reservations(real_mode_data);
0538 }
0539 
0540 void __init x86_64_start_reservations(char *real_mode_data)
0541 {
0542     /* version is never zero once the boot data has been copied */
0543     if (!boot_params.hdr.version)
0544         copy_bootdata(__va(real_mode_data));
0545 
0546     x86_early_init_platform_quirks();
0547 
0548     switch (boot_params.hdr.hardware_subarch) {
0549     case X86_SUBARCH_INTEL_MID:
0550         x86_intel_mid_early_setup();
0551         break;
0552     default:
0553         break;
0554     }
0555 
0556     start_kernel();
0557 }
0558 
0559 /*
0560  * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
0561  * used until the idt_table takes over. On the boot CPU this happens in
0562  * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
0563  * this happens in the functions called from head_64.S.
0564  *
0565  * The idt_table can't be used that early because all the code modifying it is
0566  * in idt.c and can be instrumented by tracing or KASAN, which both don't work
0567  * during early CPU bringup. Also the idt_table has the runtime vectors
0568  * configured which require certain CPU state to be set up already (like TSS),
0569  * which also hasn't happened yet in early CPU bringup.
0570  */
0571 static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
0572 
0573 static struct desc_ptr bringup_idt_descr = {
0574     .size       = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
0575     .address    = 0, /* Set at runtime */
0576 };
0577 
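     /*
      * The bringup IDT only ever carries the #VC handler needed by SEV-ES/SNP
      * guests, so without CONFIG_AMD_MEM_ENCRYPT this helper is a no-op.
      */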
0578 static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
0579 {
0580 #ifdef CONFIG_AMD_MEM_ENCRYPT
0581     struct idt_data data;
0582     gate_desc desc;
0583 
0584     init_idt_data(&data, n, handler);
0585     idt_init_desc(&desc, &data);
0586     native_write_idt_entry(idt, n, &desc);
0587 #endif
0588 }
0589 
0590 /* This runs while still in the direct mapping */
0591 static void startup_64_load_idt(unsigned long physbase)
0592 {
0593     struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
0594     gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);
0595 
0596 
0597     if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
0598         void *handler;
0599 
0600         /* VMM Communication Exception */
0601         handler = fixup_pointer(vc_no_ghcb, physbase);
0602         set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
0603     }
0604 
0605     desc->address = (unsigned long)idt;
0606     native_load_idt(desc);
0607 }
0608 
0609 /* This is used when running on kernel addresses */
0610 void early_setup_idt(void)
0611 {
0612     /* VMM Communication Exception */
0613     if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
0614         setup_ghcb();
0615         set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);
0616     }
0617 
0618     bringup_idt_descr.address = (unsigned long)bringup_idt_table;
0619     native_load_idt(&bringup_idt_descr);
0620 }
0621 
0622 /*
0623  * Setup boot CPU state needed before kernel switches to virtual addresses.
0624  */
0625 void __head startup_64_setup_env(unsigned long physbase)
0626 {
0627     /* Load GDT */
0628     startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
0629     native_load_gdt(&startup_gdt_descr);
0630 
0631     /* New GDT is live - reload data segment registers */
0632     asm volatile("movl %%eax, %%ds\n"
0633              "movl %%eax, %%ss\n"
0634              "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
0635 
0636     startup_64_load_idt(physbase);
0637 }