// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff

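/*
 * The 64KiB SMRAM region lives at SMRAM_GPA in its own memslot and is
 * programmed as the vCPU's SMBASE in main(). SMRAM_STAGE and DONE sit
 * outside the normal 1..12 stage range so the host can tell an SMI handler
 * report (or test completion) apart from an ordinary stage.
 */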
/*
 * This is compiled as normal 64-bit code; the SMI handler, however, is
 * executed in real-address mode. To keep things simple we limit ourselves
 * to a mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
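/*
 * On SMI the CPU starts fetching at SMBASE + 0x8000 in real-address mode,
 * which is where main() copies these bytes. The `in` traps to the host,
 * which samples %al (== SMRAM_STAGE), and `rsm` then resumes the
 * interrupted context from the saved SMM state.
 */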
uint8_t smi_handler[] = {
    0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
    0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
    0x0f, 0xaa,           /* rsm */
};

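/*
 * Report a stage to the host: the `in` from SYNC_PORT triggers a
 * KVM_EXIT_IO, and the host reads the stage number out of %al (the "+a"
 * constraint places `phase` there) via KVM_GET_REGS before resuming.
 */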
static inline void sync_with_host(uint64_t phase)
{
    asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
             : "+a" (phase));
}

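/*
 * Send an SMI to the executing vCPU itself by writing the x2APIC ICR with
 * a self destination and SMI delivery mode. This relies on x2APIC mode,
 * which guest_code() enables before the first self_smi().
 */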
static void self_smi(void)
{
    x2apic_write_reg(APIC_ICR,
             APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

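/*
 * The nested (L2) guest only reports stages 8 and 10. The host injects
 * SMIs while L2 is running, so SMRAM_STAGE is reported instead for stages
 * 8-11 (see the note in guest_code()).
 */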
static void l2_guest_code(void)
{
    sync_with_host(8);

    sync_with_host(10);

    vmcall();
}

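/*
 * Main guest code: enable x2APIC (stages 1-2), take a self-SMI (the SMI
 * handler reports SMRAM_STAGE), then, if a nested-guest argument was
 * passed, set up SVM or VMX, take another self-SMI, enter L2 twice and
 * finally report DONE.
 */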
static void guest_code(void *arg)
{
    #define L2_GUEST_STACK_SIZE 64
    unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
    uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
    struct svm_test_data *svm = arg;
    struct vmx_pages *vmx_pages = arg;

    sync_with_host(1);

    wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

    sync_with_host(2);

    self_smi();

    sync_with_host(4);

    if (arg) {
        if (this_cpu_has(X86_FEATURE_SVM)) {
            generic_svm_setup(svm, l2_guest_code,
                      &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        } else {
            GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
            GUEST_ASSERT(load_vmcs(vmx_pages));
            prepare_vmcs(vmx_pages, l2_guest_code,
                     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        }

        sync_with_host(5);

        self_smi();

        sync_with_host(7);

        if (this_cpu_has(X86_FEATURE_SVM)) {
            run_guest(svm->vmcb, svm->vmcb_gpa);
            run_guest(svm->vmcb, svm->vmcb_gpa);
        } else {
            vmlaunch();
            vmresume();
        }

        /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
        sync_with_host(12);
    }

    sync_with_host(DONE);
}

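/*
 * Mark an SMI as pending from the host side. vcpu_events_get()/set() wrap
 * the KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS ioctls; KVM_VCPUEVENT_VALID_SMM
 * tells KVM that the SMM-related fields of the structure are valid.
 */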
void inject_smi(struct kvm_vcpu *vcpu)
{
    struct kvm_vcpu_events events;

    vcpu_events_get(vcpu, &events);

    events.smi.pending = 1;
    events.flags |= KVM_VCPUEVENT_VALID_SMM;

    vcpu_events_set(vcpu, &events);
}

int main(int argc, char *argv[])
{
    vm_vaddr_t nested_gva = 0;

    struct kvm_vcpu *vcpu;
    struct kvm_regs regs;
    struct kvm_vm *vm;
    struct kvm_run *run;
    struct kvm_x86_state *state;
    int stage, stage_reported;

    /* Create VM */
    vm = vm_create_with_one_vcpu(&vcpu, guest_code);

    run = vcpu->run;

    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
                    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
    TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
            == SMRAM_GPA, "could not allocate guest physical addresses?");

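    /*
     * Zero SMRAM and place the handler at SMRAM_GPA + 0x8000: with
     * MSR_IA32_SMBASE pointed at SMRAM_GPA below, that offset is where the
     * CPU starts fetching the SMI handler.
     */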
    memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
    memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
           sizeof(smi_handler));

    vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);

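    /*
     * The nested part of the test needs KVM_CAP_NESTED_STATE (for the
     * save/restore done on every stage) plus SVM or VMX. If neither is
     * available, nested_gva stays 0 and the guest skips its L2 section.
     */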
    if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
        if (kvm_cpu_has(X86_FEATURE_SVM))
            vcpu_alloc_svm(vm, &nested_gva);
        else if (kvm_cpu_has(X86_FEATURE_VMX))
            vcpu_alloc_vmx(vm, &nested_gva);
    }

    if (!nested_gva)
        pr_info("will skip the nested-guest part of the SMM test\n");

    vcpu_args_set(vcpu, 1, nested_gva);

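    /*
     * Stage loop: run the vCPU until it reports a stage via the sync port,
     * validate it, inject SMIs at stages 8 and 10 (while L2 is running)
     * and, except right after the stage-8 SMI, save and restore the vCPU
     * state to exercise migration in and out of SMM.
     */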
    for (stage = 1;; stage++) {
        vcpu_run(vcpu);
        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                "Stage %d: unexpected exit reason: %u (%s),\n",
                stage, run->exit_reason,
                exit_reason_str(run->exit_reason));

        memset(&regs, 0, sizeof(regs));
        vcpu_regs_get(vcpu, &regs);

        stage_reported = regs.rax & 0xff;

        if (stage_reported == DONE)
            goto done;

        TEST_ASSERT(stage_reported == stage ||
                stage_reported == SMRAM_STAGE,
                "Unexpected stage: #%x, got %x",
                stage, stage_reported);

        /*
         * Enter SMM during L2 execution and check that we correctly
         * return from it. Do not perform save/restore while in SMM yet.
         */
        if (stage == 8) {
            inject_smi(vcpu);
            continue;
        }

        /*
         * Perform save/restore while the guest is in SMM triggered
         * during L2 execution.
         */
        if (stage == 10)
            inject_smi(vcpu);

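        /*
         * Simulate migration: vcpu_save_state() snapshots the vCPU state
         * (registers, MSRs, events, nested state, ...); the vCPU is then
         * recreated via vm_recreate_with_one_vcpu() and the snapshot is
         * loaded back before the next stage.
         */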
        state = vcpu_save_state(vcpu);
        kvm_vm_release(vm);

        vcpu = vm_recreate_with_one_vcpu(vm);
        vcpu_load_state(vcpu, state);
        run = vcpu->run;
        kvm_x86_state_cleanup(state);
    }

done:
    kvm_vm_free(vm);
}