/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"
#include "../x86.h"

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
                             bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

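/*
 * VMCS field encodings (Intel SDM Vol. 3, Appendix B) carry the field
 * width in bits 14:13 (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural
 * width) and the access type in bit 0 (1 = high 32 bits of a 64-bit
 * field).  The checks below mask those bits (0x6000/0x6001) so that a
 * mismatched accessor on a compile-time-constant field becomes a build
 * error instead of silent VMCS corruption at runtime.
 */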
static __always_inline void vmcs_check16(unsigned long field)
{
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
             "16-bit accessor invalid for 64-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
             "16-bit accessor invalid for 64-bit high field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
             "16-bit accessor invalid for 32-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
             "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
             "32-bit accessor invalid for 16-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
             "32-bit accessor invalid for 64-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
             "32-bit accessor invalid for 64-bit high field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
             "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
             "64-bit accessor invalid for 16-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
             "64-bit accessor invalid for 64-bit high field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
             "64-bit accessor invalid for 32-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
             "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
             "Natural width accessor invalid for 16-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
             "Natural width accessor invalid for 64-bit field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
             "Natural width accessor invalid for 64-bit high field");
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
             "Natural width accessor invalid for 32-bit field");
}

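/*
 * Raw VMREAD wrapper.  With asm goto output support in the compiler,
 * the VM-fail and fault paths are plain C labels.  Without it, a VMREAD
 * failure bounces through vmread_error_trampoline(), which preserves
 * volatile registers before calling vmread_error() and zeros the pushed
 * result so the read returns 0 on error.
 */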
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
    unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

    asm_volatile_goto("1: vmread %[field], %[output]\n\t"
              "jna %l[do_fail]\n\t"

              _ASM_EXTABLE(1b, %l[do_exception])

              : [output] "=r" (value)
              : [field] "r" (field)
              : "cc"
              : do_fail, do_exception);

    return value;

do_fail:
    WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field);
    pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field);
    return 0;

do_exception:
    kvm_spurious_fault();
    return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

    asm volatile("1: vmread %2, %1\n\t"
             ".byte 0x3e\n\t" /* branch taken hint */
             "ja 3f\n\t"

             /*
              * VMREAD failed.  Push '0' for @fault, push the failing
              * @field, and bounce through the trampoline to preserve
              * volatile registers.
              */
             "xorl %k1, %k1\n\t"
             "2:\n\t"
             "push %1\n\t"
             "push %2\n\t"
             "call vmread_error_trampoline\n\t"

             /*
              * Unwind the stack.  Note, the trampoline zeros out the
              * memory for @fault so that the result is '0' on error.
              */
             "pop %2\n\t"
             "pop %1\n\t"
             "3:\n\t"

             /* VMREAD faulted.  As above, except push '1' for @fault. */
             _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)

             : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
    return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

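/*
 * Width-checked read accessors.  Each validates the field encoding at
 * compile time and, when enlightened VMCS is active, reads the eVMCS
 * page instead of executing VMREAD.  On 32-bit kernels a 64-bit field
 * is read as two halves: the base field and field+1, its "high"
 * encoding.  Illustrative use (field encodings from asm/vmx.h):
 *
 *	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
 *	unsigned long rip = vmcs_readl(GUEST_RIP);
 */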
static __always_inline u16 vmcs_read16(unsigned long field)
{
    vmcs_check16(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_read16(field);
    return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
    vmcs_check32(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_read32(field);
    return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
    vmcs_check64(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_read64(field);
#ifdef CONFIG_X86_64
    return __vmcs_readl(field);
#else
    return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
    vmcs_checkl(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_read64(field);
    return __vmcs_readl(field);
}

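/*
 * vmx_asm1()/vmx_asm2() wrap a one- or two-operand VMX instruction with
 * its three possible outcomes: success falls through and returns,
 * VM-fail (CF or ZF set, hence "jna") jumps to the error label and
 * reports through the matching <insn>_error() helper, and a hardware
 * fault (e.g. #UD after VMX has been torn down) reaches the fault label
 * via the exception table and triggers kvm_spurious_fault().
 */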
#define vmx_asm1(insn, op1, error_args...)              \
do {                                    \
    asm_volatile_goto("1: " __stringify(insn) " %0\n\t"     \
              ".byte 0x2e\n\t" /* branch not taken hint */  \
              "jna %l[error]\n\t"               \
              _ASM_EXTABLE(1b, %l[fault])           \
              : : op1 : "cc" : error, fault);       \
    return;                             \
error:                                  \
    instrumentation_begin();                    \
    insn##_error(error_args);                   \
    instrumentation_end();                      \
    return;                             \
fault:                                  \
    kvm_spurious_fault();                       \
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)             \
do {                                    \
    asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
              ".byte 0x2e\n\t" /* branch not taken hint */  \
              "jna %l[error]\n\t"               \
              _ASM_EXTABLE(1b, %l[fault])           \
              : : op1, op2 : "cc" : error, fault);      \
    return;                             \
error:                                  \
    instrumentation_begin();                    \
    insn##_error(error_args);                   \
    instrumentation_end();                      \
    return;                             \
fault:                                  \
    kvm_spurious_fault();                       \
} while (0)

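/*
 * Raw VMWRITE wrapper.  On VM-fail, vmx_asm2() reports the offending
 * field and value through vmwrite_error().
 */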
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
    vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
    vmcs_check16(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_write16(field, value);

    __vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
    vmcs_check32(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_write32(field, value);

    __vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
    vmcs_check64(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_write64(field, value);

    __vmcs_writel(field, value);
#ifndef CONFIG_X86_64
    __vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
    vmcs_checkl(field);
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_write64(field, value);

    __vmcs_writel(field, value);
}

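/*
 * Read-modify-write helpers for 32-bit and natural width control
 * fields.  64-bit fields are rejected at build time: @mask is only a
 * u32, so a 64-bit read-modify-write could silently drop the upper
 * half.  An illustrative call (field and bit names from asm/vmx.h):
 *
 *	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_RDTSC_EXITING);
 */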
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
             "vmcs_clear_bits does not support 64-bit fields");
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_write32(field, evmcs_read32(field) & ~mask);

    __vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
    BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
             "vmcs_set_bits does not support 64-bit fields");
    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_write32(field, evmcs_read32(field) | mask);

    __vmcs_writel(field, __vmcs_readl(field) | mask);
}

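/*
 * VMCLEAR and VMPTRLD take the 64-bit physical address of the VMCS as
 * a memory operand, hence the "m"(phys_addr) constraint below.
 */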
static inline void vmcs_clear(struct vmcs *vmcs)
{
    u64 phys_addr = __pa(vmcs);

    vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
    u64 phys_addr = __pa(vmcs);

    if (static_branch_unlikely(&enable_evmcs))
        return evmcs_load(phys_addr);

    vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

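/*
 * INVVPID takes a 128-bit in-memory descriptor: the VPID in bits 15:0,
 * reserved bits that must be zero, and the guest linear address used
 * by the individual-address variant.
 */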
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
    struct {
        u64 vpid : 16;
        u64 rsvd : 48;
        u64 gva;
    } operand = { vpid, 0, gva };

    vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

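/*
 * INVEPT's descriptor is an EPT pointer followed by a second u64 that
 * the currently defined extents treat as reserved; callers here always
 * pass 0 for @gpa.
 */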
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
    struct {
        u64 eptp, gpa;
    } operand = {eptp, gpa};

    vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

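/*
 * VPID TLB flush helpers.  VPID 0 is reserved for the host, so the
 * single-context paths are no-ops for it; when single-context
 * invalidation isn't supported, fall back to flushing all VPIDs.
 */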
static inline void vpid_sync_vcpu_single(int vpid)
{
    if (vpid == 0)
        return;

    __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
    __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
    if (cpu_has_vmx_invvpid_single())
        vpid_sync_vcpu_single(vpid);
    else if (vpid != 0)
        vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
    if (vpid == 0)
        return;

    if (cpu_has_vmx_invvpid_individual_addr())
        __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
    else
        vpid_sync_context(vpid);
}

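/*
 * EPT TLB flush helpers, mirroring the VPID ones: prefer a
 * single-context invalidation of @eptp and fall back to a global flush
 * when the CPU only supports the global extent.
 */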
static inline void ept_sync_global(void)
{
    __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
    if (cpu_has_vmx_invept_context())
        __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
    else
        ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */