/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "vmcs12.h"
#include "vmx.h"

/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};
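
/*
 * Illustrative sketch only, not part of this header: a caller such as the
 * VMLAUNCH/VMRESUME emulation path is expected to dispatch on this status
 * roughly as follows (the local variable here is hypothetical):
 *
 *	enum nvmx_vmentry_status status;
 *
 *	status = nested_vmx_enter_non_root_mode(vcpu, true);
 *	switch (status) {
 *	case NVMX_VMENTRY_SUCCESS:		// run L2 on the next VM entry
 *		break;
 *	case NVMX_VMENTRY_VMFAIL:		// report VMFail to L1
 *	case NVMX_VMENTRY_VMEXIT:		// reflect a VM-exit into L1
 *	case NVMX_VMENTRY_KVM_INTERNAL_ERROR:	// bail out to userspace
 *		break;
 *	}
 */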

void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry);
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size);

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

/*
 * Note: the same condition is checked against the state provided by userspace
 * in vmx_set_nested_state; if it is satisfied, the nested state must include
 * the VMCS12.
 */
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
	return vmx->nested.current_vmptr != -1ull ||
	       vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID;
}

/* Use the VPID allocated for L2 if there is one, else fall back to L1's. */
static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}
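
/*
 * Worked example (illustrative values): if cr0_guest_host_mask has only
 * X86_CR0_TS set, L1 owns CR0.TS and L2 reads that bit from cr0_read_shadow;
 * every other bit comes from guest_cr0. With guest_cr0 = 0x8000003b (TS set)
 * and cr0_read_shadow = 0x80000033 (TS clear), L2 observes CR0.TS == 0 even
 * though the CR0 actually running L2 has TS == 1.
 */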

static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}

static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
		CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}
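
/*
 * Note: a secondary control only takes effect when the "activate secondary
 * controls" primary control is also set, which is what nested_cpu_has2()
 * checks. Illustrative: if vmcs12 sets SECONDARY_EXEC_ENABLE_EPT but not
 * CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, then
 * nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT) is false, matching
 * hardware, which ignores the secondary controls field in that case.
 */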

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
{
	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

static inline bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always be true.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
}

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}
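
/*
 * Worked example: with fixed0 = 0x1 (bit 0 must be 1) and fixed1 = ~0x8ull
 * (bit 3 must be 0):
 *	fixed_bits_valid(0x1, ...) -> true   ((0x1 & ~0x8) | 0x1 == 0x1)
 *	fixed_bits_valid(0x9, ...) -> false  (bit 3 set, but fixed1 forbids it)
 *	fixed_bits_valid(0x0, ...) -> false  (bit 0 clear, but fixed0 demands it)
 */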

static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}
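
/*
 * Example: the CR0 fixed-bit MSRs normally force CR0.PE and CR0.PG to 1 in
 * VMX operation, so a guest CR0 of 0 would fail the check above. When both
 * L1's VMX capability MSRs and vmcs12 enable "unrestricted guest", those two
 * bits are carved out of fixed0, and an L2 running in real mode
 * (PE = PG = 0) becomes a valid configuration.
 */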

static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1) &&
	       __kvm_is_valid_cr4(vcpu, val);
}

/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

extern struct kvm_x86_nested_ops vmx_nested_ops;

#endif /* __KVM_X86_VMX_NESTED_H */