/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 *  Trampoline.S    Derived from Setup.S by Linus Torvalds
 *
 *  4 Jan 1997 Michael Chastain: changed to gnu as.
 *  15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 *  Entry: CS:IP point to the start of our code.  We are in
 *  real mode with no stack, but we have the rest of the
 *  trampoline page to make our stack; everything else about
 *  the environment is a mystery.
 *
 *  On entry to trampoline_start, the processor is in real mode
 *  with 16-bit addressing and 16-bit data.  CS has some value
 *  and IP is zero.  Thus, data addresses need to be absolute
 *  (no relocation) and are taken with regard to r_base.
 *
 *  With the addition of trampoline_level4_pgt this code can
 *  now enter a 64bit kernel that lives at arbitrary 64bit
 *  physical addresses.
 *
 *  If you work on this file, check the object module with objdump
 *  --full-contents --reloc to make sure there are no relocation
 *  entries.
 */

#include <linux/linkage.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
#include <asm/realmode.h>
#include "realmode.h"

    .text
    .code16

    .balign PAGE_SIZE
SYM_CODE_START(trampoline_start)
    cli         # We should be safe anyway
    wbinvd

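    # LJMPW_RM() (a macro from realmode.h) emits a real-mode far jump;
    # it flushes the prefetch queue and reloads %cs with the runtime
    # segment, so %cs can seed the data segment registers below.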
    LJMPW_RM(1f)
1:
    mov %cs, %ax    # Code and data in the same place
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %ss

    # Setup stack
    movl    $rm_stack_end, %esp

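    # verify_cpu (included below from ../kernel/verify_cpu.S) returns
    # 0 in %eax when the CPU supports long mode, non-zero otherwise.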
    call    verify_cpu      # Verify the cpu supports long mode
    testl   %eax, %eax      # Check for return code
    jnz no_longmode

.Lswitch_to_protected:
    /*
     * With the kernel at a non-default location, the GDT can live
     * beyond 16MB, and lgdt cannot load such an address because the
     * default operand size in real mode is 16 bits.  Use lgdtl
     * instead to force a 32-bit operand size.
     */

    lidtl   tr_idt  # load idt with 0, 0
    lgdtl   tr_gdt  # load gdt with whatever is appropriate

    movw    $__KERNEL_DS, %dx   # Data segment descriptor

    # Enable protected mode
    movl    $(CR0_STATE & ~X86_CR0_PG), %eax
    movl    %eax, %cr0      # into protected mode

    # flush prefetch and jump to startup_32
    ljmpl   $__KERNEL32_CS, $pa_startup_32

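    # No long mode: park the CPU.  The loop guards against a stray
    # wakeup (e.g. an NMI or SMI resuming execution after hlt).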
no_longmode:
    hlt
    jmp no_longmode
SYM_CODE_END(trampoline_start)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* SEV-ES supports non-zero IP for entry points - no alignment needed */
SYM_CODE_START(sev_es_trampoline_start)
    cli         # We should be safe anyway

    LJMPW_RM(1f)
1:
    mov %cs, %ax    # Code and data in the same place
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %ss

    # Setup stack
    movl    $rm_stack_end, %esp

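    # Note: no verify_cpu here.  A CPU running with SEV-ES active is
    # long-mode capable by definition, and the CPUID instructions that
    # verify_cpu executes would raise #VC exceptions the trampoline
    # cannot handle.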
    jmp .Lswitch_to_protected
SYM_CODE_END(sev_es_trampoline_start)
#endif  /* CONFIG_AMD_MEM_ENCRYPT */

#include "../kernel/verify_cpu.S"

    .section ".text32","ax"
    .code32
    .balign 4
SYM_CODE_START(startup_32)
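    /*
     * %edx still holds __KERNEL_DS, loaded before the far jump into
     * protected mode.  Point the data segment registers at it and
     * rebase the stack pointer from a real-mode offset to a linear
     * address.
     */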
    movl    %edx, %ss
    addl    $pa_real_mode_base, %esp
    movl    %edx, %ds
    movl    %edx, %es
    movl    %edx, %fs
    movl    %edx, %gs

    /*
     * Check for memory encryption support. This is a safety net in
     * case BIOS hasn't done the necessary step of setting the bit in
     * the MSR for this AP. If SME is active and we've gotten this far
     * then it is safe for us to set the MSR bit and continue. If we
     * don't we'll eventually crash trying to execute encrypted
     * instructions.
     */
    btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
    jnc .Ldone
    movl    $MSR_AMD64_SYSCFG, %ecx
    rdmsr
    bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
    jc  .Ldone

    /*
     * Memory encryption is enabled but the SME enable bit for this
     * CPU has not been set.  It is safe to set it, so do so.
     */
    wrmsr
.Ldone:

    movl    pa_tr_cr4, %eax
    movl    %eax, %cr4      # Enable PAE mode

    # Setup trampoline 4 level pagetables
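    # The trampoline PGD identity-maps low memory so that the
    # instructions following the paging enable below can still be
    # fetched at their physical addresses.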
    movl    $pa_trampoline_pgd, %eax
    movl    %eax, %cr3

    # Set up EFER
    movl    $MSR_EFER, %ecx
    rdmsr
    /*
     * Skip writing to EFER if the register already has the desired
     * value (to avoid a #VE in a TDX guest).
     */
    cmp pa_tr_efer, %eax
    jne .Lwrite_efer
    cmp pa_tr_efer + 4, %edx
    je  .Ldone_efer
.Lwrite_efer:
    movl    pa_tr_efer, %eax
    movl    pa_tr_efer + 4, %edx
    wrmsr

.Ldone_efer:
    # Enable paging and in turn activate Long Mode.
    movl    $CR0_STATE, %eax
    movl    %eax, %cr0

    /*
     * At this point we're in long mode, but in 32-bit compatibility
     * mode with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
     * EFER.LMA = 1).  To jump into 64-bit mode, we use the new
     * gdt/idt, whose __KERNEL_CS has CS.L = 1.
     */
    ljmpl   $__KERNEL_CS, $pa_startup_64
SYM_CODE_END(startup_32)

SYM_CODE_START(pa_trampoline_compat)
    /*
     * In compatibility mode.  Prep ESP and DX for startup_32, then disable
     * paging and complete the switch to legacy 32-bit mode.
     */
    movl    $rm_stack_end, %esp
    movw    $__KERNEL_DS, %dx

    movl    $(CR0_STATE & ~X86_CR0_PG), %eax
    movl    %eax, %cr0
    ljmpl   $__KERNEL32_CS, $pa_startup_32
SYM_CODE_END(pa_trampoline_compat)

    .section ".text64","ax"
    .code64
    .balign 4
SYM_CODE_START(startup_64)
    # Now jump into the kernel using virtual addresses
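    # tr_start is filled in at boot by the kernel (via
    # trampoline_header) with the 64-bit entry point for this CPU.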
    jmpq    *tr_start(%rip)
SYM_CODE_END(startup_64)

SYM_CODE_START(trampoline_start64)
    /*
     * APs start here on a direct transfer from 64-bit BIOS with identity
     * mapped page tables.  Load the kernel's GDT in order to gear down to
     * 32-bit mode (to handle 4-level vs. 5-level paging), and to (re)load
     * segment registers.  Load the zero IDT so any fault triggers a
     * shutdown instead of jumping back into BIOS.
     */
    lidt    tr_idt(%rip)
    lgdt    tr_gdt64(%rip)

    ljmpl   *tr_compat(%rip)
SYM_CODE_END(trampoline_start64)

    .section ".rodata","a"
    # Duplicate the global descriptor table
    # so the kernel can live anywhere
    .balign 16
SYM_DATA_START(tr_gdt)
    .short  tr_gdt_end - tr_gdt - 1 # gdt limit
    .long   pa_tr_gdt
    .short  0
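    # The limit/base words above occupy the slot of the mandatory null
    # descriptor, so tr_gdt doubles as its own lgdt pseudo-descriptor.
    # The three entries below are flat 0-4GB segments: 32-bit code,
    # 64-bit code (L bit set), and writable data.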
    .quad   0x00cf9b000000ffff  # __KERNEL32_CS
    .quad   0x00af9b000000ffff  # __KERNEL_CS
    .quad   0x00cf93000000ffff  # __KERNEL_DS
SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)

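    # tr_gdt64: a ten-byte pseudo-descriptor (16-bit limit plus 64-bit
    # base, high half zero) for the 64-bit lgdt in trampoline_start64.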
SYM_DATA_START(tr_gdt64)
    .short  tr_gdt_end - tr_gdt - 1 # gdt limit
    .long   pa_tr_gdt
    .long   0
SYM_DATA_END(tr_gdt64)

SYM_DATA_START(tr_compat)
    .long   pa_trampoline_compat
    .short  __KERNEL32_CS
SYM_DATA_END(tr_compat)

    .bss
    .balign PAGE_SIZE
SYM_DATA(trampoline_pgd, .space PAGE_SIZE)

    .balign 8
SYM_DATA_START(trampoline_header)
    SYM_DATA_LOCAL(tr_start,    .space 8)
    SYM_DATA(tr_efer,       .space 8)
    SYM_DATA(tr_cr4,        .space 4)
    SYM_DATA(tr_flags,      .space 4)
SYM_DATA_END(trampoline_header)
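    # This layout must match struct trampoline_header in
    # arch/x86/include/asm/realmode.h; the kernel fills in these
    # fields before waking the AP.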

#include "trampoline_common.S"