// SPDX-License-Identifier: GPL-2.0-only
/*
 * svm_int_ctl_test
 *
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Nested SVM testing: test simultaneous use of V_IRQ from L1 and L0.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "apic.h"

/* Set by the guest-mode IRQ handlers below; asserted on by l2_guest_code. */
bool vintr_irq_called;
bool intr_irq_called;

/* Vector delivered via V_IRQ (virtual) vs. the LAPIC self-IPI (real). */
#define VINTR_IRQ_NUMBER 0x20
#define INTR_IRQ_NUMBER 0x30
0022 static void vintr_irq_handler(struct ex_regs *regs)
0023 {
0024     vintr_irq_called = true;
0025 }
0026 
0027 static void intr_irq_handler(struct ex_regs *regs)
0028 {
0029     x2apic_write_reg(APIC_EOI, 0x00);
0030     intr_irq_called = true;
0031 }
0032 
/*
 * L2 guest body: provoke one real and one virtual interrupt, then verify
 * both handlers ran before exiting back to L1 via VMMCALL.
 */
static void l2_guest_code(struct svm_test_data *svm)
{
    /*
     * Raise interrupt INTR_IRQ_NUMBER in L1's LAPIC via a self-IPI.
     * Since L1 didn't enable virtual interrupt masking
     * (V_INTR_MASKING_MASK is clear), L2 should receive it and not L1.
     *
     * L2 also has virtual interrupt VINTR_IRQ_NUMBER pending in V_IRQ,
     * so it should also receive it after the following 'sti' (the 'nop'
     * gives the interrupts an instruction boundary to hit after the
     * STI shadow).
     */
    x2apic_write_reg(APIC_ICR,
        APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);

    __asm__ __volatile__(
        "sti\n"
        "nop\n"
    );

    /* Both handlers must have fired by now. */
    GUEST_ASSERT(vintr_irq_called);
    GUEST_ASSERT(intr_irq_called);

    /* VMMCALL: exit to L1, which checks the exit code. */
    __asm__ __volatile__(
        "vmcall\n"
    );
}
0057 
0058 static void l1_guest_code(struct svm_test_data *svm)
0059 {
0060     #define L2_GUEST_STACK_SIZE 64
0061     unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
0062     struct vmcb *vmcb = svm->vmcb;
0063 
0064     x2apic_enable();
0065 
0066     /* Prepare for L2 execution. */
0067     generic_svm_setup(svm, l2_guest_code,
0068               &l2_guest_stack[L2_GUEST_STACK_SIZE]);
0069 
0070     /* No virtual interrupt masking */
0071     vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
0072 
0073     /* No intercepts for real and virtual interrupts */
0074     vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
0075 
0076     /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
0077     vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
0078     vmcb->control.int_vector = VINTR_IRQ_NUMBER;
0079 
0080     run_guest(vmcb, svm->vmcb_gpa);
0081     GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
0082     GUEST_DONE();
0083 }
0084 
0085 int main(int argc, char *argv[])
0086 {
0087     struct kvm_vcpu *vcpu;
0088     struct kvm_run *run;
0089     vm_vaddr_t svm_gva;
0090     struct kvm_vm *vm;
0091     struct ucall uc;
0092 
0093     TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
0094 
0095     vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
0096 
0097     vm_init_descriptor_tables(vm);
0098     vcpu_init_descriptor_tables(vcpu);
0099 
0100     vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
0101     vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
0102 
0103     vcpu_alloc_svm(vm, &svm_gva);
0104     vcpu_args_set(vcpu, 1, svm_gva);
0105 
0106     run = vcpu->run;
0107 
0108     vcpu_run(vcpu);
0109     TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
0110             "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
0111             run->exit_reason,
0112             exit_reason_str(run->exit_reason));
0113 
0114     switch (get_ucall(vcpu, &uc)) {
0115     case UCALL_ABORT:
0116         REPORT_GUEST_ASSERT(uc);
0117         break;
0118         /* NOT REACHED */
0119     case UCALL_DONE:
0120         goto done;
0121     default:
0122         TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
0123     }
0124 done:
0125     kvm_vm_free(vm);
0126     return 0;
0127 }