// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/sev.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

void load_trampoline_pgtable(void)
{
#ifdef CONFIG_X86_32
    load_cr3(initial_page_table);
#else
    /*
     * This function is called before exiting to real-mode and that will
     * fail with CR4.PCIDE still set.
     */
    if (boot_cpu_has(X86_FEATURE_PCID))
        cr4_clear_bits(X86_CR4_PCIDE);

    write_cr3(real_mode_header->trampoline_pgd);
#endif

    /*
     * The CR3 write above will not flush global TLB entries.
     * Stale, global entries from previous page tables may still be
     * present.  Flush those stale entries.
     *
     * This ensures that memory accessed while running with
     * trampoline_pgd is *actually* mapped into trampoline_pgd.
     */
    __flush_tlb_all();
}

void __init reserve_real_mode(void)
{
    phys_addr_t mem;
    size_t size = real_mode_size_needed();

    if (!size)
        return;

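    /* This runs before slab is up; memblock must still be the allocator. */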
    WARN_ON(slab_is_available());

    /* Has to be under 1M so we can execute real-mode AP code. */
    mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
    if (!mem)
        pr_info("No sub-1M memory is available for the trampoline\n");
    else
        set_real_mode_mem(mem);

    /*
     * Unconditionally reserve the entire first 1M, see comment in
     * setup_arch().
     */
    memblock_reserve(0, SZ_1M);
}

static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
    if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
        th->flags |= TH_FLAGS_SME_ACTIVE;

    if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
        /*
         * Skip the call to verify_cpu() in secondary_startup_64 as it
         * will cause #VC exceptions when the AP can't handle them yet.
         */
        th->start = (u64) secondary_startup_64_no_verify;

        if (sev_es_setup_ap_jump_table(real_mode_header))
            panic("Failed to get/update SEV-ES AP Jump Table");
    }
#endif
}

static void __init setup_real_mode(void)
{
    u16 real_mode_seg;
    const u32 *rel;
    u32 count;
    unsigned char *base;
    unsigned long phys_base;
    struct trampoline_header *trampoline_header;
    size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
    u64 *trampoline_pgd;
    u64 efer;
    int i;
#endif

    base = (unsigned char *)real_mode_header;

    /*
     * If SME is active, the trampoline area will need to be in
     * decrypted memory in order to bring up other processors
     * successfully. This is not needed for SEV.
     */
    if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
        set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

    memcpy(base, real_mode_blob, size);

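    /*
     * Real-mode segments address memory in 16-byte paragraphs, so the
     * segment value for the trampoline is its physical base shifted
     * right by four.
     */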
    phys_base = __pa(base);
    real_mode_seg = phys_base >> 4;

    rel = (u32 *) real_mode_relocs;

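    /*
     * The relocation data is two counted lists of 32-bit offsets into
     * the copied image; each list starts with its entry count.
     */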
    /* 16-bit segment relocations. */
    count = *rel++;
    while (count--) {
        u16 *seg = (u16 *) (base + *rel++);
        *seg = real_mode_seg;
    }

    /* 32-bit linear relocations. */
    count = *rel++;
    while (count--) {
        u32 *ptr = (u32 *) (base + *rel++);
        *ptr += phys_base;
    }

    /* Must be performed *after* relocation. */
    trampoline_header = (struct trampoline_header *)
        __va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
    trampoline_header->start = __pa_symbol(startup_32_smp);
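    /*
     * The GDT limit is the offset of the last valid byte, so
     * __BOOT_DS + 7 covers the descriptors up to and including the
     * boot data segment.
     */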
    trampoline_header->gdt_limit = __BOOT_DS + 7;
    trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
    /*
     * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
     * so we need to mask it out.
     */
    rdmsrl(MSR_EFER, efer);
    trampoline_header->efer = efer & ~EFER_LMA;

    trampoline_header->start = (u64) secondary_startup_64;
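    /*
     * Export a pointer to the trampoline's CR4 image so that later
     * updates to mmu_cr4_features can be propagated to it as well.
     */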
    trampoline_cr4_features = &trampoline_header->cr4;
    *trampoline_cr4_features = mmu_cr4_features;

    trampoline_header->flags = 0;

    trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

    /* Map the real mode stub as virtual == physical */
    trampoline_pgd[0] = trampoline_pgd_entry.pgd;

    /*
     * Include the entirety of the kernel mapping into the trampoline
     * PGD.  This way, all mappings present in the normal kernel page
     * tables are usable while running on trampoline_pgd.
     */
    for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
        trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

    sme_sev_setup_real_mode(trampoline_header);
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by APs, so it
 * needs to be marked executable no later than do_pre_smp_initcalls();
 * thus run this as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
    unsigned char *base = (unsigned char *) real_mode_header;
    size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

    size_t ro_size =
        PAGE_ALIGN(real_mode_header->ro_end) -
        __pa(base);

    size_t text_size =
        PAGE_ALIGN(real_mode_header->ro_end) -
        real_mode_header->text_start;

    unsigned long text_start =
        (unsigned long) __va(real_mode_header->text_start);

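    /*
     * Default the whole blob to non-executable, make everything up to
     * ro_end read-only, and re-enable execute only for the real-mode
     * .text that the APs will run.
     */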
    set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
    set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
    set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

static int __init init_real_mode(void)
{
    if (!real_mode_header)
        panic("Real mode trampoline was not allocated");

    setup_real_mode();
    set_real_mode_permissions();

    return 0;
}
early_initcall(init_real_mode);