/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"

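/*
 * Wrappers for SVM instructions: each macro emits the instruction via
 * asm goto and tags it with an exception table entry (_ASM_EXTABLE), so an
 * unexpected fault on the instruction lands at the 'fault' label and is
 * reported via kvm_spurious_fault() instead of bringing down the host.
 */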
#define svm_asm(insn, clobber...)				\
do {								\
	asm_volatile_goto("1: " __stringify(insn) "\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  ::: clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  :: op1 : clobber : fault);		\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  :: op1, op2 : clobber : fault);		\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

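/* Clear the Global Interrupt Flag (GIF); physical interrupts are held pending. */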
static inline void clgi(void)
{
	svm_asm(clgi);
}

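/* Set the Global Interrupt Flag (GIF); interrupt delivery is re-enabled. */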
static inline void stgi(void)
{
	svm_asm(stgi);
}

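/* Invalidate TLB entries for the virtual address in rAX, tagged with the ASID in ECX. */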
static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}

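/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */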
static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}

static __always_inline void vmload(unsigned long pa)
{
	svm_asm1(vmload, "a" (pa), "memory");
}

#endif /* __KVM_X86_SVM_OPS_H */