/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>

#include <asm/tdx.h>

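/*
 * kvm_check_and_clear_guest_paused() reports whether the host stopped this
 * vCPU (PVCLOCK_GUEST_STOPPED) and clears the flag; watchdogs use it so a
 * guest that was paused by the host does not report spurious soft lockups
 * on resume.
 */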
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#define KVM_HYPERCALL \
	ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)

/*
 * For KVM hypercalls, a three-byte sequence of either the vmcall or the
 * vmmcall instruction.  The hypervisor may replace it with something else,
 * but only the instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

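/*
 * KVM_HYPERCALL emits vmcall and is patched to vmmcall on AMD CPUs
 * (X86_FEATURE_VMMCALL) by the alternatives mechanism at boot.  TDX guests
 * cannot execute either instruction directly, so the wrappers below divert
 * to tdx_kvm_hypercall() instead.
 *
 * A minimal usage sketch, assuming the host advertises KVM_HC_KICK_CPU
 * (from <uapi/linux/kvm_para.h>) and that flags/apicid are set up as in
 * the PV unhalt path:
 *
 *	long ret = kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 *	if (ret < 0)
 *		pr_warn("KICK_CPU failed: %ld\n", ret);
 */
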
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, 0, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}

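/*
 * SEV(-ES) guests run only on AMD hardware, so this helper hardcodes
 * vmmcall rather than using the ALTERNATIVE above; that also keeps it
 * usable before alternatives have been applied.
 */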
static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
				      unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

#ifdef CONFIG_KVM_GUEST
void kvmclock_init(void);
void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);

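/*
 * A minimal feature-detection sketch for guest code, assuming
 * KVM_FEATURE_PV_UNHALT from <uapi/asm/kvm_para.h> and the generic
 * kvm_para_has_feature() helper from <linux/kvm_para.h>:
 *
 *	if (kvm_para_available() &&
 *	    kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 *		kvm_spinlock_init();
 */
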
DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

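/*
 * Called early in the page-fault path; when async page faults were never
 * negotiated with the host, the static branch keeps this check nearly
 * free.
 */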
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	if (static_branch_unlikely(&kvm_async_pf_enabled))
		return __kvm_handle_async_pf(regs, token);
	else
		return false;
}

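/*
 * With CONFIG_PARAVIRT_SPINLOCKS, kvm_spinlock_init() installs the PV
 * spinlock wait/kick callbacks (the kick side uses the KVM_HC_KICK_CPU
 * hypercall sketched above).
 */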
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait_schedule(T) do { } while (0)
#define kvm_async_pf_task_wake(T) do { } while (0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_apf_flags(void)
{
	return 0;
}

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#endif /* _ASM_X86_KVM_PARA_H */