// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		vmcb->control.exit_code = SVM_EXIT_NPF;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = (1ULL << 32);
		vmcb->control.exit_info_2 = fault->address;
	}

	vmcb->control.exit_info_1 &= ~0xffffffffULL;
	vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}
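
/*
 * If L1 intercepts #PF, forward the fault to L1 as a #PF vmexit instead
 * of queueing the exception into L2.  (Comment added while editing, based
 * on the code below.)
 */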
static bool nested_svm_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
						    struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb12_is_intercept(&svm->nested.ctl,
				INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !WARN_ON_ONCE(svm->nested.nested_run_pending)) {
		vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = fault->error_code;
		vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
		return true;
	}

	return false;
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.
	 * Note, when called via KVM_SET_NESTED_STATE, that state may _not_
	 * match the current vCPU state.  CR0.WP is explicitly ignored, while
	 * CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
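
/*
 * Virtual VMLOAD/VMSAVE can be passed through to L2 only if the feature
 * is enabled for this vCPU, nested NPT is in use, and L1 itself enabled
 * it in vmcb12's virt_ext; otherwise the instructions must be intercepted
 * and emulated.  (Comment added while editing, based on the code below.)
 */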
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
	if (!svm->v_vmload_vmsave_enabled)
		return true;

	if (!nested_npt_enabled(svm))
		return true;

	if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
		return true;

	return false;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	if (nested_vmcb_needs_vls_intercept(svm)) {
		/*
		 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
		 * we must intercept these instructions to correctly
		 * emulate them in case L1 doesn't intercept them.
		 */
		vmcb_set_intercept(c, INTERCEPT_VMLOAD);
		vmcb_set_intercept(c, INTERCEPT_VMSAVE);
	} else {
		WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
	}
}

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where KVM MSR permission bitmap
 * may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	struct hv_enlightenments *hve =
		(struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
	int i;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
	 *   tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!svm->nested.force_msr_bitmap_recalc &&
	    kvm_hv_hypercall_enabled(&svm->vcpu) &&
	    hve->hv_enlightenments_control.msr_bitmap &&
	    (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
		goto set_msrpm_base_pa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];

		/* x2apic msrs are intercepted always for the nested guest */
		if (is_x2apic_msrpm_offset(p))
			continue;

		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	       kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
{
	/* Nested FLUSHBYASID is not supported yet.  */
	switch (tlb_ctl) {
	case TLB_CONTROL_DO_NOTHING:
	case TLB_CONTROL_FLUSH_ALL_ASID:
		return true;
	default:
		return false;
	}
}

static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	/* Note, SVM doesn't have any additional restrictions on CR4. */
	if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area_cached *save = &svm->nested.save;

	return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

	return __nested_vmcb_check_controls(vcpu, ctl);
}

static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa = from->iopm_base_pa;
	to->msrpm_base_pa = from->msrpm_base_pa;
	to->tsc_offset = from->tsc_offset;
	to->tlb_ctl = from->tlb_ctl;
	to->int_ctl = from->int_ctl;
	to->int_vector = from->int_vector;
	to->int_state = from->int_state;
	to->exit_code = from->exit_code;
	to->exit_code_hi = from->exit_code_hi;
	to->exit_info_1 = from->exit_info_1;
	to->exit_info_2 = from->exit_info_2;
	to->exit_int_info = from->exit_int_info;
	to->exit_int_info_err = from->exit_int_info_err;
	to->nested_ctl = from->nested_ctl;
	to->event_inj = from->event_inj;
	to->event_inj_err = from->event_inj_err;
	to->next_rip = from->next_rip;
	to->nested_cr3 = from->nested_cr3;
	to->virt_ext = from->virt_ext;
	to->pause_filter_count = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it.  */
	to->asid = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa &= ~0x0fffULL;

	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(to->reserved_sw, from->reserved_sw,
		       sizeof(struct hv_enlightenments));
	}
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{
	__nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them
	 * to avoid TOC/TOU races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{
	__nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}

	if (nested_vgif_enabled(svm))
		mask |= V_GIF_MASK;

	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		vmcb02->save.es = vmcb12->save.es;
		vmcb02->save.cs = vmcb12->save.cs;
		vmcb02->save.ss = vmcb12->save.ss;
		vmcb02->save.ds = vmcb12->save.ds;
		vmcb02->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(vmcb02, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		vmcb02->save.gdtr = vmcb12->save.gdtr;
		vmcb02->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(vmcb02, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(&svm->vcpu, svm->nested.save.efer);

	svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
	svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	vmcb02->save.rax = vmcb12->save.rax;
	vmcb02->save.rsp = vmcb12->save.rsp;
	vmcb02->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(vmcb02, VMCB_DR);
	}

	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		/*
		 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
		 * svm_set_msr's definition of reserved bits.
		 */
		svm_copy_lbrs(vmcb02, vmcb12);
		vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
		svm_update_lbrv(&svm->vcpu);

	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
}
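
/*
 * An injected event is "soft" if it is a software interrupt (INTn) or a
 * software exception (#BP from INT3, #OF from INTO), i.e. if next_rip is
 * needed as the return address pushed for the event.  (Comment added
 * while editing, based on the code below.)
 */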
static inline bool is_evtinj_soft(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
	u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	if (type == SVM_EVTINJ_TYPE_SOFT)
		return true;

	return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
}

static bool is_evtinj_nmi(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	return type == SVM_EVTINJ_TYPE_NMI;
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
					  unsigned long vmcb12_rip,
					  unsigned long vmcb12_csbase)
{
	u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
	u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	u32 pause_count12;
	u32 pause_thresh12;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
	else
		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

	if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		nested_svm_update_tsc_ratio_msr(vcpu);
	}

	vmcb02->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

	vmcb02->control.int_vector = svm->nested.ctl.int_vector;
	vmcb02->control.int_state = svm->nested.ctl.int_state;
	vmcb02->control.event_inj = svm->nested.ctl.event_inj;
	vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;

	/*
	 * next_rip is consumed on VMRUN as the return address pushed on the
	 * stack for injected soft exceptions/interrupts.  If nrips is exposed
	 * to L1, take it verbatim from vmcb12.  If nrips is supported in
	 * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
	 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
	 * prior to injecting the event).
	 */
	if (svm->nrips_enabled)
		vmcb02->control.next_rip = svm->nested.ctl.next_rip;
	else if (boot_cpu_has(X86_FEATURE_NRIPS))
		vmcb02->control.next_rip = vmcb12_rip;

	svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
	if (is_evtinj_soft(vmcb02->control.event_inj)) {
		svm->soft_int_injected = true;
		svm->soft_int_csbase = vmcb12_csbase;
		svm->soft_int_old_rip = vmcb12_rip;
		if (svm->nrips_enabled)
			svm->soft_int_next_rip = svm->nested.ctl.next_rip;
		else
			svm->soft_int_next_rip = vmcb12_rip;
	}

	vmcb02->control.virt_ext = vmcb01->control.virt_ext &
				   LBR_CTL_ENABLE_MASK;
	if (svm->lbrv_enabled)
		vmcb02->control.virt_ext |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
	pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
	if (kvm_pause_in_guest(svm->vcpu.kvm)) {
		/* use guest values since host doesn't intercept PAUSE */
		vmcb02->control.pause_filter_count = pause_count12;
		vmcb02->control.pause_filter_thresh = pause_thresh12;

	} else {
		/* start from host values otherwise */
		vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
		vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

		/* ... but ensure filtering is disabled if so requested.  */
		if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
			if (!pause_count12)
				vmcb02->control.pause_filter_count = 0;
			if (!pause_thresh12)
				vmcb02->control.pause_filter_thresh = 0;
		}
	}

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
				  nested_npt_enabled(svm), from_vmrun);
	if (ret)
		return ret;

	if (!from_vmrun)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	svm_set_gif(svm, true);

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	return 0;
}

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu)) {
		vmcb12->control.exit_code = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1 = 0;
		vmcb12->control.exit_info_2 = 0;
		goto out;
	}

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	vmcb01->save.efer = vcpu->arch.efer;
	vmcb01->save.cr0 = kvm_read_cr0(vcpu);
	vmcb01->save.cr4 = vcpu->arch.cr4;
	vmcb01->save.rflags = kvm_get_rflags(vcpu);
	vmcb01->save.rip = kvm_rip_read(vcpu);

	if (!npt_enabled)
		vmcb01->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;
	svm->nmi_l1_to_l2 = false;
	svm->soft_int_injected = false;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
	to_save->cpl = 0;
}

void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	int rc;

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es = vmcb02->save.es;
	vmcb12->save.cs = vmcb02->save.cs;
	vmcb12->save.ss = vmcb02->save.ss;
	vmcb12->save.ds = vmcb02->save.ds;
	vmcb12->save.gdtr = vmcb02->save.gdtr;
	vmcb12->save.idtr = vmcb02->save.idtr;
	vmcb12->save.efer = svm->vcpu.arch.efer;
	vmcb12->save.cr0 = kvm_read_cr0(vcpu);
	vmcb12->save.cr3 = kvm_read_cr3(vcpu);
	vmcb12->save.cr2 = vmcb02->save.cr2;
	vmcb12->save.cr4 = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip = kvm_rip_read(vcpu);
	vmcb12->save.rsp = kvm_rsp_read(vcpu);
	vmcb12->save.rax = kvm_rax_read(vcpu);
	vmcb12->save.dr7 = vmcb02->save.dr7;
	vmcb12->save.dr6 = svm->vcpu.arch.dr6;
	vmcb12->save.cpl = vmcb02->save.cpl;

	vmcb12->control.int_state = vmcb02->control.int_state;
	vmcb12->control.exit_code = vmcb02->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
	vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
	vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb02->control.next_rip;

	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	if (!kvm_pause_in_guest(vcpu->kvm)) {
		vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);

	}

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		svm_copy_lbrs(vmcb12, vmcb02);
		svm_update_lbrv(vcpu);
	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb01, vmcb02);
		svm_update_lbrv(vcpu);
	}

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	vmcb01->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
	}

	if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, vmcb01->save.rflags);
	svm_set_efer(vcpu, vmcb01->save.efer);
	svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, vmcb01->save.cr4);
	kvm_rax_write(vcpu, vmcb01->save.rax);
	kvm_rsp_write(vcpu, vmcb01->save.rsp);
	kvm_rip_write(vcpu, vmcb01->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	/*
	 * Un-inhibit the AVIC right away, so that other vCPUs can start
	 * to benefit from it right away.
	 */
	if (kvm_apicv_activated(vcpu->kvm))
		kvm_vcpu_update_apicv(vcpu);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}
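
/*
 * Lazily allocate the vmcb02 page and the nested MSR permission bitmap;
 * these are only needed once the guest actually enables SVM in EFER.
 * (Comment added while editing, based on the code below.)
 */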
int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	/*
	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
	 * some vmcb12 fields are not loaded if they are marked clean
	 * in the vmcb12, since in this case they are up to date already.
	 *
	 * When the vmcb02 is freed, this optimization becomes invalid.
	 */
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		svm->nested.vmcb12_gpa = INVALID_GPA;

		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->vmcb01);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}
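
/*
 * Check whether L1 wants to intercept the MSR access that caused this
 * exit.  Each MSR occupies two permission bits (read, then write), so a
 * 32-bit chunk of the MSRPM covers 16 MSRs; the mask below selects the
 * bit for this MSR and access type.  (Comment added while editing, based
 * on the code below.)
 */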
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
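
/*
 * The IOPM holds one bit per I/O port.  An access of 'size' bytes tests
 * 'size' consecutive bits starting at the accessed port, which requires
 * reading two bytes of the bitmap when the bit range straddles a byte
 * boundary.  (Comment added while editing, based on the code below.)
 */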
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}
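
/*
 * Common permission checks for the SVM instructions: EFER.SVME must be
 * set and the vCPU must be paging, otherwise #UD; and CPL must be 0,
 * otherwise #GP.  (Comment added while editing, based on the code below.)
 */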
int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (to_svm(vcpu)->vmcb->save.cpl) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;
	struct vmcb *vmcb = svm->vmcb;

	vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		/*
		 * Only a pending nested run blocks a pending exception.
		 * Otherwise an injected NMI/interrupt should either be
		 * lost or delivered to the nested hypervisor in the
		 * EXITINTINFO vmcb field, while delivering the pending
		 * exception.
		 */
		if (svm->nested.nested_run_pending)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
		return 0;
	}

	return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.tsc_scaling_ratio =
		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
					       svm->tsc_ratio_msr);
	__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
}

/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
					      struct vmcb_ctrl_area_cached *from)
{
	unsigned int i;

	memset(dst, 0, sizeof(struct vmcb_control_area));

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	dst->asid = from->asid;
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->next_rip = from->next_rip;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
	/* 'clean' and 'reserved_sw' are not changed by KVM */
}

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct vmcb_control_area *ctl;
	unsigned long r;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
	r = copy_to_user(&user_vmcb->control, ctl,
			 sizeof(user_vmcb->control));
	kfree(ctl);
	if (r)
		return -EFAULT;

	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	struct vmcb_save_area_cached save_cached;
	struct vmcb_ctrl_area_cached ctl_cached;
	unsigned long cr0;
	int ret;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(vcpu);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret = -ENOMEM;
	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_check_controls).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	__nested_copy_vmcb_save_to_cache(&save_cached, save);
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !__nested_vmcb_check_save(vcpu, &save_cached))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode. Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers if needed are moved from the current VMCB to VMCB02.
	 */
	if (is_guest_mode(vcpu))
		svm_leave_nested(vcpu);
	else
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
	nested_copy_vmcb_control_to_cache(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);

	/*
	 * While the nested guest CR3 is already checked and set by
	 * KVM_SET_SREGS, it was set when nested state was yet loaded,
	 * thus MMU might not be initialized correctly.
	 * Set it again to fix this.
	 */
	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				  nested_npt_enabled(svm), false);
	if (WARN_ON_ONCE(ret))
		goto out_free;

	svm->nested.force_msr_bitmap_recalc = true;

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state which can lead to a load of wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
			return false;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}
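
/*
 * SVM's implementation of the nested virtualization hooks shared with
 * common x86 code.  (Comment added while editing.)
 */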
struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.check_events = svm_check_nested_events,
	.handle_page_fault_workaround = nested_svm_handle_page_fault_workaround,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};