/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
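/*
 * Worked example of the conversion: an x2APIC register's MMIO offset is
 * divided by 16 to get its MSR index (registers are 16 bytes apart in MMIO
 * space, but consecutive in MSR space).  With APIC_BASE_MSR == 0x800:
 *
 *	X2APIC_MSR(APIC_ID)      == 0x800 + (0x20 >> 4) == 0x802  (IA32_X2APIC_APICID)
 *	X2APIC_MSR(APIC_TASKPRI) == 0x800 + (0x80 >> 4) == 0x808  (IA32_X2APIC_TPR)
 */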

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};
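
/*
 * These fields index the per-vCPU segment cache (see segment_cache in
 * struct vcpu_vmx below), which avoids redundant VMREADs of segment state.
 * A sketch of the keying, assuming one validity bit per (segment, field)
 * pair in the cache's u32 bitmask:
 *
 *	u32 mask = 1 << (seg * SEG_FIELD_NR + field);
 */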

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
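
/*
 * Illustrative decoding of an exit reason (a sketch, not code from this
 * file): the basic reason and the modifier bits can be tested independently.
 *
 *	union vmx_exit_reason exit_reason;
 *
 *	exit_reason.full = vmcs_read32(VM_EXIT_REASON);
 *	if (exit_reason.failed_vmentry)
 *		...consult exit_reason.basic for why VM-Enter failed...
 *	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
 *		...handle the interrupt...
 */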

static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 */
	return pmu->version > 1;
}

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded vmcs12 state is not up to date
	 * with vmcs02 state.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;
	bool update_vmcs01_apicv_status;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct kvm_host_map apic_access_page_map;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/*
	 * Used to snapshot MSRs that are conditionally loaded on VM-Enter in
	 * order to propagate the guest's pre-VM-Enter value into vmcs02.  For
	 * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value.
	 * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_
	 * userspace restores MSRs before nested state.  If userspace restores
	 * MSRs after nested state, the snapshot holds garbage, but KVM can't
	 * detect that, and the garbage value in vmcs02 will be overwritten by
	 * MSR restoration in any case.
	 */
	u64 pre_vmenter_debugctl;
	u64 pre_vmenter_bndcfgs;

	/* to migrate it to L1 if L2 writes to L0's TPR_THRESHOLD */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware if those conditions aren't met.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM	512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;

	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	15
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
	/* Posted Interrupt Descriptor (PID) table for IPI virtualization */
	u64 *pid_table;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}
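
/*
 * Illustrative caller (a sketch; MSR_EXAMPLE and guest_has_feature are
 * placeholders, not KVM names): intercept writes unless the guest is allowed
 * to own the MSR, while leaving the read disposition to a separate call.
 *
 *	vmx_set_intercept_for_msr(vcpu, MSR_EXAMPLE, MSR_TYPE_W,
 *				  !guest_has_feature);
 */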

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Layout of the MSR bitmap page: each of the four 1 KiB quarters is a
 * 1-bit-per-MSR map, where a set bit intercepts the access.
 *
 *	0x000 - 0x3ff: reads of  MSRs 0x00000000 - 0x00001fff
 *	0x400 - 0x7ff: reads of  MSRs 0xc0000000 - 0xc0001fff
 *	0x800 - 0xbff: writes of MSRs 0x00000000 - 0x00001fff
 *	0xc00 - 0xfff: writes of MSRs 0xc0000000 - 0xc0001fff
 *
 * MSRs outside those two ranges are always intercepted, hence the helpers
 * report "true" for them.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		\
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)   \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
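
/*
 * The two macros above expand to six helpers:
 *
 *	bool vmx_test_msr_bitmap_read(unsigned long *bitmap, u32 msr);
 *	bool vmx_test_msr_bitmap_write(unsigned long *bitmap, u32 msr);
 *	void vmx_clear_msr_bitmap_read(unsigned long *bitmap, u32 msr);
 *	void vmx_clear_msr_bitmap_write(unsigned long *bitmap, u32 msr);
 *	void vmx_set_msr_bitmap_read(unsigned long *bitmap, u32 msr);
 *	void vmx_set_msr_bitmap_write(unsigned long *bitmap, u32 msr);
 *
 * A set bit intercepts the access, e.g. clearing the read bit for an MSR
 * lets guest RDMSR of that MSR execute without a VM-Exit.
 */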

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname, bits)				\
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val)	\
{										\
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {			\
		vmcs_write##bits(uname, val);					\
		vmx->loaded_vmcs->controls_shadow.lname = val;			\
	}									\
}										\
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs)	\
{										\
	return vmcs->controls_shadow.lname;					\
}										\
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)		\
{										\
	return __##lname##_controls_get(vmx->loaded_vmcs);			\
}										\
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val)	\
{										\
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);		\
}										\
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{										\
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);		\
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
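
/*
 * Each instantiation above generates lname##_controls_{set,get,setbit,
 * clearbit}().  The shadow spares a VMREAD/VMWRITE when the current value is
 * already known; a sketch of a typical caller:
 *
 *	exec_controls_setbit(vmx, CPU_BASED_MOV_DR_EXITING);
 *	...
 *	if (secondary_exec_controls_get(vmx) & SECONDARY_EXEC_ENABLE_EPT)
 *		...
 */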

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |		\
				 (1 << VCPU_REGS_RSP) |		\
				 (1 << VCPU_EXREG_RFLAGS) |	\
				 (1 << VCPU_EXREG_PDPTR) |	\
				 (1 << VCPU_EXREG_SEGMENTS) |	\
				 (1 << VCPU_EXREG_CR0) |	\
				 (1 << VCPU_EXREG_CR3) |	\
				 (1 << VCPU_EXREG_CR4) |	\
				 (1 << VCPU_EXREG_EXIT_INFO_1) | \
				 (1 << VCPU_EXREG_EXIT_INFO_2))
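
/*
 * Registers in this set are read from the VMCS on first use after a VM-Exit,
 * following the pattern of vmx_get_exit_qual() and vmx_get_intr_info() below.
 * Sketched generically with a hypothetical VCPU_EXREG_FOO and FOO_FIELD:
 *
 *	if (!kvm_register_is_available(vcpu, VCPU_EXREG_FOO)) {
 *		kvm_register_mark_available(vcpu, VCPU_EXREG_FOO);
 *		vmx->foo = vmcs_readl(FOO_FIELD);
 *	}
 */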

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
{
	return &to_vmx(vcpu)->lbr_desc;
}

static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
{
	return &vcpu_to_lbr_desc(vcpu)->records;
}

static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
	return !!vcpu_to_lbr_records(vcpu)->nr;
}

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) && enable_ipiv;
}

#endif /* __KVM_X86_VMX_H */