/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS               \
    (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
     | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

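/*
 * X86_CR{0,4}_PDPTR_BITS are the bits whose modification requires the
 * PDPTEs to be reloaded under PAE paging; X86_CR4_TLBFLUSH_BITS are the
 * CR4 bits whose modification requires a TLB flush.  KVM must observe
 * every write to a PDPTR-affecting CR0 bit, so no such bit may ever be
 * guest-owned:
 */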
static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

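/*
 * Generate trivial read/write accessors for the cached GPRs, e.g.
 * kvm_rax_read() and kvm_rax_write().  These access vcpu->arch.regs
 * directly, with none of the avail/dirty tracking used below for
 * lazily-cached registers.
 */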
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)                     \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{                                         \
    return vcpu->arch.regs[VCPU_REGS_##uname];                \
}                                         \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,        \
                        unsigned long val)        \
{                                         \
    vcpu->arch.regs[VCPU_REGS_##uname] = val;                 \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
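
/*
 * For reference, BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to:
 *
 *    static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *    {
 *        return vcpu->arch.regs[VCPU_REGS_RAX];
 *    }
 *    static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *                                              unsigned long val)
 *    {
 *        vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *    }
 */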

/*
 * avail  dirty
 * 0      0   register in VMCS/VMCB
 * 0      1   *INVALID*
 * 1      0   register in vcpu->arch
 * 1      1   register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                         enum kvm_reg reg)
{
    return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
                     enum kvm_reg reg)
{
    return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
                           enum kvm_reg reg)
{
    __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
                       enum kvm_reg reg)
{
    /* A dirty register is by definition also available (see table above). */
    __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
    __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
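
/*
 * Typical usage, as a hypothetical sketch (not code from this file):
 * after reading a lazily-cached register out of hardware state, a
 * vendor module publishes it to the cache, e.g. on VMX:
 *
 *    vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
 *    kvm_register_mark_available(vcpu, VCPU_REGS_RIP);
 *
 * while a software write, e.g. from the emulator, must also flag the
 * register so it is written back to the VMCS/VMCB before VM-Enter:
 *
 *    vcpu->arch.regs[VCPU_REGS_RIP] = new_rip;
 *    kvm_register_mark_dirty(vcpu, VCPU_REGS_RIP);
 */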

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
    if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
        return 0;

    /* Lazily pull the register out of the VMCS/VMCB if it's stale. */
    if (!kvm_register_is_available(vcpu, reg))
        static_call(kvm_x86_cache_reg)(vcpu, reg);

    return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
                      unsigned long val)
{
    if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
        return;

    vcpu->arch.regs[reg] = val;
    kvm_register_mark_dirty(vcpu, reg);
}
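
/*
 * Rough sketch (for illustration; the mode-aware wrappers live in x86.h,
 * not in this header): kvm_register_read() builds on the raw helper and
 * truncates the value to 32 bits when the vCPU is not in 64-bit mode:
 *
 *    val = kvm_register_read_raw(vcpu, reg);
 *    if (!is_64_bit_mode(vcpu))
 *        val = (u32)val;
 */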

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
    return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
    kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
    return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
    kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

/*
 * PDPTEs for PAE paging; cached from the guest's page tables and lazily
 * loaded via the vendor callback, same as the registers above.
 */
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
    might_sleep();  /* on svm */

    if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
        static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

    return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
    vcpu->arch.walk_mmu->pdptrs[index] = value;
}
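
/*
 * E.g. (illustrative sketch, loosely based on VMX's EPT handling), a
 * vendor module can propagate a cached PDPTE into hardware state with:
 *
 *    vmcs_write64(GUEST_PDPTR0, kvm_pdptr_read(vcpu, 0));
 */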

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
    ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

    /*
     * Decache CR0 from hardware only if the caller wants bits that may
     * be guest-owned, i.e. bits for which KVM's copy may be stale.
     */
    if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
        !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
        static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
    return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
    return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
    ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

    /* Same logic as kvm_read_cr0_bits(), applied to CR4. */
    if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
        !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
        static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
    return vcpu->arch.cr4 & mask;
}
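
/*
 * E.g. (illustrative) a caller that only cares about CR4.TSD can query
 * just that bit and avoid a full CR4 decache:
 *
 *    if (kvm_read_cr4_bits(vcpu, X86_CR4_TSD))
 *        ...
 */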

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
    if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
        static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
    return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
    return kvm_read_cr4_bits(vcpu, ~0UL);
}

/* Read the guest's EDX:EAX register pair as a single 64-bit value. */
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
    return (kvm_rax_read(vcpu) & -1u)
        | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
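
/*
 * E.g. (illustrative) WRMSR emulation assembles its 64-bit payload from
 * the guest's EDX:EAX pair:
 *
 *    u64 data = kvm_read_edx_eax(vcpu);
 */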

/* Enter "guest mode", i.e. start running a nested (L2) guest. */
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
    vcpu->arch.hflags |= HF_GUEST_MASK;
    vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
    vcpu->arch.hflags &= ~HF_GUEST_MASK;

    /* Process any EOI-exitmap update that was deferred while in L2. */
    if (vcpu->arch.load_eoi_exitmap_pending) {
        vcpu->arch.load_eoi_exitmap_pending = false;
        kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
    }

    vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
    return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
    return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif