/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

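/*
 * Byte offsets into the @regs array passed in from C.  Each __VCPU_REGS_*
 * value (from asm/kvm_vcpu_regs.h) is the register's index into the
 * vcpu->arch.regs array, so the offset is simply index * word size.
 */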
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX    __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX    __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX    __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP    __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI    __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI    __VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8     __VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9     __VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10    __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11    __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12    __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13    __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14    __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15    __VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:    unsigned long physical address of the VMCB to run
 * @regs:       unsigned long * (pointer to the guest register array)
 */
SYM_FUNC_START(__svm_vcpu_run)
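    /*
     * Preserve the host's callee-saved registers (per the kernel's C
     * calling convention); they are about to be loaded with guest values.
     */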
    push %_ASM_BP
#ifdef CONFIG_X86_64
    push %r15
    push %r14
    push %r13
    push %r12
#else
    push %edi
    push %esi
#endif
    push %_ASM_BX

    /* Save @regs. */
    push %_ASM_ARG2

    /* Save @vmcb. */
    push %_ASM_ARG1

    /* Move @regs to RAX. */
    mov %_ASM_ARG2, %_ASM_AX

    /* Load guest registers. */
    mov VCPU_RCX(%_ASM_AX), %_ASM_CX
    mov VCPU_RDX(%_ASM_AX), %_ASM_DX
    mov VCPU_RBX(%_ASM_AX), %_ASM_BX
    mov VCPU_RBP(%_ASM_AX), %_ASM_BP
    mov VCPU_RSI(%_ASM_AX), %_ASM_SI
    mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
    mov VCPU_R8 (%_ASM_AX),  %r8
    mov VCPU_R9 (%_ASM_AX),  %r9
    mov VCPU_R10(%_ASM_AX), %r10
    mov VCPU_R11(%_ASM_AX), %r11
    mov VCPU_R12(%_ASM_AX), %r12
    mov VCPU_R13(%_ASM_AX), %r13
    mov VCPU_R14(%_ASM_AX), %r14
    mov VCPU_R15(%_ASM_AX), %r15
#endif

    /* "POP" @vmcb to RAX. */
    pop %_ASM_AX

    /* Enter guest mode */
    sti

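    /*
     * VMRUN takes the physical address of the VMCB in RAX.  On #VMEXIT the
     * CPU stores guest state back into the VMCB, reloads host RAX and RSP
     * from the host save area, and resumes at the instruction following
     * VMRUN.
     */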
1:  vmrun %_ASM_AX

2:  cli

#ifdef CONFIG_RETPOLINE
    /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
    FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

    /* "POP" @regs to RAX. */
    pop %_ASM_AX

    /* Save all guest registers. */
    mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
    mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
    mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
    mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
    mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
    mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
    mov %r8,  VCPU_R8 (%_ASM_AX)
    mov %r9,  VCPU_R9 (%_ASM_AX)
    mov %r10, VCPU_R10(%_ASM_AX)
    mov %r11, VCPU_R11(%_ASM_AX)
    mov %r12, VCPU_R12(%_ASM_AX)
    mov %r13, VCPU_R13(%_ASM_AX)
    mov %r14, VCPU_R14(%_ASM_AX)
    mov %r15, VCPU_R15(%_ASM_AX)
#endif

    /*
     * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
     * untrained as soon as we exit the VM and are back to the
     * kernel. This should be done before re-enabling interrupts
     * because interrupt handlers won't sanitize 'ret' if the return is
     * from the kernel.
     */
    UNTRAIN_RET

    /*
     * Clear all general purpose registers except RSP and RAX to prevent
     * speculative use of the guest's values, even those that are reloaded
     * via the stack.  In theory, an L1 cache miss when restoring registers
     * could lead to speculative execution with the guest's values.
     * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
     * free.  RSP and RAX are exempt as they are restored by hardware
     * during VM-Exit.
     */
    xor %ecx, %ecx
    xor %edx, %edx
    xor %ebx, %ebx
    xor %ebp, %ebp
    xor %esi, %esi
    xor %edi, %edi
#ifdef CONFIG_X86_64
    xor %r8d,  %r8d
    xor %r9d,  %r9d
    xor %r10d, %r10d
    xor %r11d, %r11d
    xor %r12d, %r12d
    xor %r13d, %r13d
    xor %r14d, %r14d
    xor %r15d, %r15d
#endif

    pop %_ASM_BX

#ifdef CONFIG_X86_64
    pop %r12
    pop %r13
    pop %r14
    pop %r15
#else
    pop %esi
    pop %edi
#endif
    pop %_ASM_BP
    RET

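/*
 * VMRUN at 1: faulted, most likely because SVM was disabled while the
 * system is rebooting.  The exception table entry below redirects the
 * fault here: if kvm_rebooting is set, swallow the fault and resume the
 * exit path at 2:, otherwise report a fatal error via ud2.
 */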
3:  cmpb $0, kvm_rebooting
    jne 2b
    ud2

    _ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:    unsigned long physical address of the VMCB to run
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
    push %_ASM_BP
#ifdef CONFIG_X86_64
    push %r15
    push %r14
    push %r13
    push %r12
#else
    push %edi
    push %esi
#endif
    push %_ASM_BX

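    /*
     * Unlike __svm_vcpu_run, no guest GPRs are loaded or saved here: for
     * SEV-ES guests the register state lives in the encrypted VMSA and is
     * context switched by hardware on VMRUN/#VMEXIT.
     */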
    /* Move @vmcb to RAX. */
    mov %_ASM_ARG1, %_ASM_AX

    /* Enter guest mode */
    sti

1:  vmrun %_ASM_AX

2:  cli

#ifdef CONFIG_RETPOLINE
    /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
    FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

    /*
     * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
     * untrained as soon as we exit the VM and are back to the
     * kernel. This should be done before re-enabling interrupts
     * because interrupt handlers won't sanitize RET if the return is
     * from the kernel.
     */
    UNTRAIN_RET

    pop %_ASM_BX

#ifdef CONFIG_X86_64
    pop %r12
    pop %r13
    pop %r14
    pop %r15
#else
    pop %esi
    pop %edi
#endif
    pop %_ASM_BP
    RET

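/* VMRUN fault fixup, same handling as in __svm_vcpu_run above. */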
3:  cmpb $0, kvm_rebooting
    jne 2b
    ud2

    _ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)