/* SPDX-License-Identifier: GPL-2.0-only */
/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
 * VMX functions:
 */

static inline int cpu_has_vmx(void)
{
    unsigned long ecx = cpuid_ecx(1);
    return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}
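
/*
 * Usage sketch (illustrative, not part of this header): probe for VT-x
 * before attempting any VMX setup, e.g.
 *
 *     if (!cpu_has_vmx())
 *         return -ENODEV;
 */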

/**
 * cpu_vmxoff() - Disable VMX on the current CPU
 *
 * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
 *
 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
 * atomically track post-VMXON state, e.g. this may be called in NMI context.
 * Eat all faults, as all other faults on VMXOFF are mode related, i.e. faults
 * are guaranteed to be due to the !post-VMXON check unless the CPU is
 * magically in RM, VM86, compat mode, or at CPL>0.
 */
static inline int cpu_vmxoff(void)
{
    asm_volatile_goto("1: vmxoff\n\t"
              _ASM_EXTABLE(1b, %l[fault])
              ::: "cc", "memory" : fault);

    cr4_clear_bits(X86_CR4_VMXE);
    return 0;

fault:
    cr4_clear_bits(X86_CR4_VMXE);
    return -EIO;
}
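
/*
 * Illustrative caller (not part of this header): the return value says
 * whether VMXOFF actually executed (0) or faulted because the CPU was
 * not post-VMXON (-EIO), e.g.
 *
 *     if (cpu_vmxoff())
 *         pr_debug("CPU was not post-VMXON\n");
 *
 * Either way, CR4.VMXE is clear on return.
 */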

static inline int cpu_vmx_enabled(void)
{
    return __read_cr4() & X86_CR4_VMXE;
}

/** Disable VMX if it is enabled on the current CPU
 *
 * You shouldn't call this if cpu_has_vmx() returns 0.
 */
static inline void __cpu_emergency_vmxoff(void)
{
    if (cpu_vmx_enabled())
        cpu_vmxoff();
}

/** Disable VMX if it is supported and enabled on the current CPU
 */
static inline void cpu_emergency_vmxoff(void)
{
    if (cpu_has_vmx())
        __cpu_emergency_vmxoff();
}
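
/*
 * Illustrative use (not part of this header): emergency reboot paths
 * want this run on every CPU before starting a new kernel, because a
 * CPU in VMX root operation ignores INIT signals.  A sketch, with
 * example_virt_shutdown() as a hypothetical per-CPU callback:
 *
 *     static void example_virt_shutdown(void *unused)
 *     {
 *         cpu_emergency_vmxoff();
 *     }
 */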

/*
 * SVM functions:
 */

/** Check if the CPU has SVM support
 *
 * You can use the 'msg' arg to get a message describing the problem,
 * if the function returns zero. Simply pass NULL if you are not interested
 * in the messages; gcc should take care of not generating code for
 * the messages in this case.
 */
static inline int cpu_has_svm(const char **msg)
{
    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
        boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
        if (msg)
            *msg = "not amd or hygon";
        return 0;
    }

    if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
        if (msg)
            *msg = "can't execute cpuid_8000000a";
        return 0;
    }

    if (!boot_cpu_has(X86_FEATURE_SVM)) {
        if (msg)
            *msg = "svm not available";
        return 0;
    }
    return 1;
}
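
/*
 * Usage sketch (illustrative, not part of this header): report why SVM
 * is unusable by passing a msg pointer, e.g.
 *
 *     const char *msg;
 *
 *     if (!cpu_has_svm(&msg)) {
 *         pr_err("SVM unavailable: %s\n", msg);
 *         return -ENODEV;
 *     }
 */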

/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
    uint64_t efer;

    /* Clear the host save area address, then turn off EFER.SVME. */
    wrmsrl(MSR_VM_HSAVE_PA, 0);
    rdmsrl(MSR_EFER, efer);
    wrmsrl(MSR_EFER, efer & ~EFER_SVME);
}
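
/*
 * For contrast (illustrative, not in this header), enabling SVM is the
 * mirror image: set EFER.SVME and point MSR_VM_HSAVE_PA at a host save
 * area, e.g.
 *
 *     rdmsrl(MSR_EFER, efer);
 *     wrmsrl(MSR_EFER, efer | EFER_SVME);
 *     wrmsrl(MSR_VM_HSAVE_PA, hsave_pa);    (hsave_pa: hypothetical)
 *
 * With SVME clear, VMRUN and the other SVM instructions raise #UD.
 */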

/** Make sure SVM is disabled, if it is supported on the CPU
 */
static inline void cpu_emergency_svm_disable(void)
{
    if (cpu_has_svm(NULL))
        cpu_svm_disable();
}

#endif /* _ASM_X86_VIRTEX_H */