Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2022 Oracle and/or its affiliates.
0004  *
0005  * Based on:
0006  *   svm_int_ctl_test
0007  *
0008  *   Copyright (C) 2021, Red Hat, Inc.
0009  *
0010  */
0011 
0012 #include <stdatomic.h>
0013 #include <stdio.h>
0014 #include <unistd.h>
0015 #include "apic.h"
0016 #include "kvm_util.h"
0017 #include "processor.h"
0018 #include "svm_util.h"
0019 #include "test_util.h"
0020 
0021 #define INT_NR          0x20
0022 
0023 static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");
0024 
static unsigned int bp_fired;

/*
 * #BP handler: just count deliveries.  l2_guest_code_int() later verifies
 * that the breakpoint injected by L1 via event_inj was delivered exactly
 * once.
 */
static void guest_bp_handler(struct ex_regs *regs)
{
    bp_fired += 1;
}
0030 
static unsigned int int_fired;
static void l2_guest_code_int(void);

/*
 * Handler for the software interrupt (vector INT_NR) that L1 injects via
 * event_inj before entering L2.  L1 sets next_rip to the L2 entry point,
 * so the return address pushed on the stack — and therefore regs->rip —
 * must be the first instruction of l2_guest_code_int().
 */
static void guest_int_handler(struct ex_regs *regs)
{
    int_fired++;
    GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int,
               regs->rip, (unsigned long)l2_guest_code_int);
}
0040 
/*
 * L2 entry point for the soft-interrupt variant.
 *
 * The software interrupt injected by L1 is delivered before the first
 * instruction executes, so guest_int_handler() must have run exactly once
 * by the time we get here.  vmmcall() then exits back to L1, which skips
 * over it, injects #BP with next_rip pointing past the ud2, and re-enters:
 * the ud2 is never executed as an instruction — after the #BP handler
 * returns, execution resumes at the GUEST_ASSERT below it.
 */
static void l2_guest_code_int(void)
{
    GUEST_ASSERT_1(int_fired == 1, int_fired);
    vmmcall();
    ud2();

    GUEST_ASSERT_1(bp_fired == 1, bp_fired);
    hlt();  /* intercepted by L1 -> SVM_EXIT_HLT ends the run */
}
0050 
/*
 * NMI progress counter shared between L1 and L2 contexts; accessed with
 * acquire/release ordering (lock-freedom is checked by the static_assert
 * above).
 */
static atomic_int nmi_stage;
#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)

/*
 * NMI handler used by both L1 and L2 in the NMI variant:
 *  - stage 1: the NMI injected by L1 via event_inj fires in L2; vmmcall()
 *    exits back to L1 and this handler never resumes.
 *  - stage 2 is bumped by l1_guest_code() itself after sending the
 *    self-NMI IPI while GIF is clear (clgi).
 *  - stage 3: the latched self-NMI is delivered to L1 once stgi()
 *    re-enables it; the test is complete.
 */
static void guest_nmi_handler(struct ex_regs *regs)
{
    nmi_stage_inc();

    if (nmi_stage_get() == 1) {
        vmmcall();
        GUEST_ASSERT(false);  /* the VMMCALL exit must not return here */
    } else {
        GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get());
        GUEST_DONE();
    }
}
0066 
/*
 * L2 entry point for the NMI variant.  The NMI injected by L1 fires before
 * the first instruction, and its handler exits to L1 via vmmcall, so the
 * ud2 is only a landing pad that is never actually executed (#UD is also
 * intercepted as a backstop).
 */
static void l2_guest_code_nmi(void)
{
    ud2();
}
0071 
/*
 * L1 (nested hypervisor) entry point.
 *
 * @svm:     per-vCPU SVM state set up by the host.
 * @is_nmi:  nonzero -> NMI injection test; zero -> soft-interrupt + #BP test.
 * @idt_alt: guest VA of an alternate IDT copy (soft-int test only, else 0).
 *
 * Injects events into L2 via VMCB event_inj and verifies the resulting VM
 * exits.  The soft-int variant runs L2 twice (soft interrupt, then #BP
 * with an IDT switch in between); the NMI variant runs L2 once and then
 * exercises a self-NMI in L1 under clgi/stgi.
 */
static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
{
    #define L2_GUEST_STACK_SIZE 64
    unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
    struct vmcb *vmcb = svm->vmcb;

    if (is_nmi)
        x2apic_enable();

    /* Prepare for L2 execution. */
    generic_svm_setup(svm,
              is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
              &l2_guest_stack[L2_GUEST_STACK_SIZE]);

    /* Intercept #PF/#UD as backstops, plus NMI and HLT in L2. */
    vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
    vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);

    if (is_nmi) {
        vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
    } else {
        vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
        /* The return address pushed on stack */
        vmcb->control.next_rip = vmcb->save.rip;
    }

    /* The injected event's handler exits via vmmcall in both variants. */
    run_guest(vmcb, svm->vmcb_gpa);
    GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
               vmcb->control.exit_code,
               vmcb->control.exit_info_1, vmcb->control.exit_info_2);

    if (is_nmi) {
        /* Block NMI delivery (GIF=0) so the self-IPI stays latched. */
        clgi();
        x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);

        GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get());
        nmi_stage_inc();  /* -> stage 2; handler expects stage 3 next */

        stgi();
        /* self-NMI happens here */
        while (true)
            cpu_relax();
    }

    /* Skip over VMMCALL */
    vmcb->save.rip += 3;

    /* Switch to alternate IDT to cause intervening NPF again */
    vmcb->save.idtr.base = idt_alt;
    vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */

    vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
    /* The return address pushed on stack, skip over UD2 */
    vmcb->control.next_rip = vmcb->save.rip + 2;

    /* L2 resumes past the ud2, asserts bp_fired, then hlt -> exit. */
    run_guest(vmcb, svm->vmcb_gpa);
    GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT,
               vmcb->control.exit_code,
               vmcb->control.exit_info_1, vmcb->control.exit_info_2);

    GUEST_DONE();
}
0133 
/*
 * Host-side harness: builds a VM with one vCPU running l1_guest_code(),
 * installs the guest exception handlers, runs one variant of the test
 * (is_nmi selects NMI vs. soft interrupt + #BP), and checks the final
 * ucall.  A 2-second alarm guards against the guest hanging.
 */
static void run_test(bool is_nmi)
{
    struct kvm_vcpu *vcpu;
    struct kvm_vm *vm;
    vm_vaddr_t svm_gva;
    vm_vaddr_t idt_alt_vm;
    struct kvm_guest_debug debug;

    pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");

    vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

    vm_init_descriptor_tables(vm);
    vcpu_init_descriptor_tables(vcpu);

    vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
    vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
    vm_install_exception_handler(vm, INT_NR, guest_int_handler);

    vcpu_alloc_svm(vm, &svm_gva);

    if (!is_nmi) {
        void *idt, *idt_alt;

        /*
         * Duplicate the VM's IDT into a fresh page; l1_guest_code()
         * points L2's IDTR at this copy between the two runs.
         */
        idt_alt_vm = vm_vaddr_alloc_page(vm);
        idt_alt = addr_gva2hva(vm, idt_alt_vm);
        idt = addr_gva2hva(vm, vm->idt);
        memcpy(idt_alt, idt, getpagesize());
    } else {
        idt_alt_vm = 0;  /* unused by the NMI variant */
    }
    vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);

    /* Reset KVM's guest-debug state for the vCPU (all fields zero). */
    memset(&debug, 0, sizeof(debug));
    vcpu_guest_debug_set(vcpu, &debug);

    struct kvm_run *run = vcpu->run;
    struct ucall uc;

    /* Watchdog: a hung guest gets killed by SIGALRM after 2 seconds. */
    alarm(2);
    vcpu_run(vcpu);
    alarm(0);
    TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
            "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
            run->exit_reason,
            exit_reason_str(run->exit_reason));

    switch (get_ucall(vcpu, &uc)) {
    case UCALL_ABORT:
        REPORT_GUEST_ASSERT_3(uc, "vals = 0x%lx 0x%lx 0x%lx");
        break;
        /* NOT REACHED */
    case UCALL_DONE:
        goto done;
    default:
        TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
    }
done:
    kvm_vm_free(vm);
}
0194 
0195 int main(int argc, char *argv[])
0196 {
0197     /* Tell stdout not to buffer its content */
0198     setbuf(stdout, NULL);
0199 
0200     TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
0201 
0202     TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
0203             "KVM with nSVM is supposed to unconditionally advertise nRIP Save");
0204 
0205     atomic_init(&nmi_stage, 0);
0206 
0207     run_test(false);
0208     run_test(true);
0209 
0210     return 0;
0211 }