/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

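/*
 * kvm_cpu_caps holds KVM's view of the host's CPUID feature words, adjusted
 * for features KVM emulates in software or deliberately hides from guests.
 * It is indexed by reverse-CPUID leaf (see reverse_cpuid.h) and populated
 * during setup by kvm_set_cpu_caps().
 */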
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

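/*
 * Compute the size in bytes of the XSAVE area needed to hold the xfeatures
 * enabled in @xstate_bv, in either standard or compacted format.
 */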
u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

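/* Return the guest's MAXPHYADDR, as cached when the guest's CPUID was set. */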
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

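/*
 * A GPA is legal if it sets none of the bits that are reserved from the
 * guest's perspective, i.e. none of the address bits at or above the guest's
 * MAXPHYADDR (cached in vcpu->arch.reserved_gpa_bits).
 */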
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

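/*
 * Replace the output register of @entry selected by reverse-CPUID @leaf with
 * KVM's capabilities, i.e. constrain a CPUID entry to the features KVM can
 * actually expose.
 */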
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

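/*
 * Translate an X86_FEATURE_* flag to its CPUID function, index and output
 * register via the reverse-CPUID table, then return a pointer to that
 * register within the vCPU's CPUID entries, or NULL if the guest's CPUID
 * lacks the corresponding leaf.
 */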
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

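/*
 * Check whether the guest's CPUID, as set by userspace, advertises
 * @x86_feature; contrast with kvm_cpu_cap_has(), which asks whether KVM
 * itself supports the feature.  Typical usage (illustrative):
 *
 *	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
 *		return 1;	// reject an attempt to set CR4.OSXSAVE
 */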
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

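/*
 * Vendor checks based on the guest's CPUID.0 vendor string (EBX/EDX/ECX),
 * e.g. "GenuineIntel" or "AuthenticAMD", which userspace controls.
 */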
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

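/*
 * Decode family/model/stepping from the guest's CPUID.1.EAX; x86_family()
 * and x86_model() fold in the extended family/model fields.  Each helper
 * returns -1 if the guest's CPUID lacks leaf 0x1.
 */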
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

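/*
 * Check that the guest's family/model matches the host's, e.g. to gate
 * features that are only safe to virtualize on the same microarchitecture.
 */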
static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86 == guest_cpuid_family(vcpu) &&
	       boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

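/*
 * The speculation-control MSRs are visible to the guest if any one of the
 * Intel or AMD feature flags is set: IBRS, STIBP and SSBD are all controlled
 * through MSR_IA32_SPEC_CTRL, and IBPB is requested via MSR_IA32_PRED_CMD.
 */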
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

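/*
 * Accessors for kvm_cpu_caps, i.e. for the features KVM can expose to its
 * guests.  reverse_cpuid_check() statically asserts that @x86_feature maps
 * to a known capability word.  Typical usage in vendor setup code
 * (illustrative):
 *
 *	if (!cpu_has_vmx_invpcid())
 *		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
 */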
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

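/*
 * Check whether the guest may use the paravirtual feature @kvm_feature
 * (a KVM_FEATURE_* value).  Unless userspace has opted into enforcement
 * (KVM_CAP_ENFORCE_PV_FEATURE_CPUID), all PV features are allowed.
 */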
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

#endif /* ARCH_X86_KVM_CPUID_H */