Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * vmx_tsc_adjust_test
0004  *
0005  * Copyright (C) 2018, Google LLC.
0006  *
0007  * IA32_TSC_ADJUST test
0008  *
0009  * According to the SDM, "if an execution of WRMSR to the
0010  * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
0011  * the logical processor also adds (or subtracts) value X from the
0012  * IA32_TSC_ADJUST MSR."
0013  *
0014  * Note that when L1 doesn't intercept writes to IA32_TSC, a
0015  * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
0016  * value.
0017  *
0018  * This test verifies that this unusual case is handled correctly.
0019  */
0020 
0021 #include "test_util.h"
0022 #include "kvm_util.h"
0023 #include "processor.h"
0024 #include "vmx.h"
0025 
0026 #include <string.h>
0027 #include <sys/ioctl.h>
0028 
0029 #include "kselftest.h"
0030 
0031 #ifndef MSR_IA32_TSC_ADJUST
0032 #define MSR_IA32_TSC_ADJUST 0x3b
0033 #endif
0034 
0035 #define TSC_ADJUST_VALUE (1ll << 32)
0036 #define TSC_OFFSET_VALUE -(1ll << 48)
0037 
/*
 * I/O port numbers for guest->host reporting.
 * NOTE(review): these constants appear unused in the visible code now
 * that the ucall protocol (GUEST_SYNC/GUEST_DONE/REPORT_GUEST_ASSERT)
 * is used — confirm against the rest of the harness before removing.
 */
enum {
    PORT_ABORT = 0x1000,
    PORT_REPORT,
    PORT_DONE,
};
0043 
/*
 * Indices naming the per-VM pages required for VMX operation, with
 * NUM_VMX_PAGES as the count.  NOTE(review): appears unused here —
 * vcpu_alloc_vmx() manages these pages internally; verify before
 * removing.
 */
enum {
    VMXON_PAGE = 0,
    VMCS_PAGE,
    MSR_BITMAP_PAGE,

    NUM_VMX_PAGES,
};
0051 
/*
 * A kvm_msrs header followed by exactly one MSR entry.  Packed so the
 * entry sits immediately after the header with no padding, matching
 * the variable-length array layout KVM's MSR ioctls expect.
 * NOTE(review): appears unused in the visible code — confirm.
 */
struct kvm_single_msr {
    struct kvm_msrs header;
    struct kvm_msr_entry entry;
} __attribute__((packed));
0056 
0057 /* The virtual machine object. */
0058 static struct kvm_vm *vm;
0059 
0060 static void check_ia32_tsc_adjust(int64_t max)
0061 {
0062     int64_t adjust;
0063 
0064     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
0065     GUEST_SYNC(adjust);
0066     GUEST_ASSERT(adjust <= max);
0067 }
0068 
0069 static void l2_guest_code(void)
0070 {
0071     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
0072 
0073     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
0074     check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
0075 
0076     /* Exit to L1 */
0077     __asm__ __volatile__("vmcall");
0078 }
0079 
/*
 * L1 guest body: adjust L1's own TSC, then launch L2 with a VMCS TSC
 * offset and verify IA32_TSC_ADJUST tracks the WRMSRs done both by L1
 * directly and by L2 (see l2_guest_code).
 */
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
    unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
    uint32_t control;
    uintptr_t save_cr3;

    /* The arithmetic below assumes TSC hasn't yet reached TSC_ADJUST_VALUE. */
    GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
    /* Per the SDM, subtracting X from TSC also subtracts X from IA32_TSC_ADJUST. */
    wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
    check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

    GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
    GUEST_ASSERT(load_vmcs(vmx_pages));

    /* Prepare the VMCS for L2 execution. */
    prepare_vmcs(vmx_pages, l2_guest_code,
             &l2_guest_stack[L2_GUEST_STACK_SIZE]);
    /* Enable MSR bitmaps and a TSC offset for L2. */
    control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
    control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
    vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
    vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

    /* Jump into L2.  First, test failure to load guest CR3.  */
    save_cr3 = vmreadz(GUEST_CR3);
    vmwrite(GUEST_CR3, -1ull);
    GUEST_ASSERT(!vmlaunch());
    GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
             (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
    /* A failed VM-entry must leave IA32_TSC_ADJUST untouched. */
    check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
    vmwrite(GUEST_CR3, save_cr3);

    /* Now launch L2 for real; it exits back to us via vmcall. */
    GUEST_ASSERT(!vmlaunch());
    GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

    /* L2's WRMSR(IA32_TSC) must be reflected in L1's IA32_TSC_ADJUST. */
    check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

    GUEST_DONE();
}
0118 
0119 static void report(int64_t val)
0120 {
0121     pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
0122         val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
0123 }
0124 
/*
 * Host-side driver: create a VM running l1_guest_code, hand it the VMX
 * page descriptors, then pump vcpu_run() and dispatch the guest's
 * ucalls until it signals completion.
 */
int main(int argc, char *argv[])
{
    vm_vaddr_t vmx_pages_gva;
    struct kvm_vcpu *vcpu;

    /* Nested VMX is required; skip the test if unavailable. */
    TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

    vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);

    /* Allocate VMX pages and shared descriptors (vmx_pages). */
    vcpu_alloc_vmx(vm, &vmx_pages_gva);
    vcpu_args_set(vcpu, 1, vmx_pages_gva);

    for (;;) {
        volatile struct kvm_run *run = vcpu->run;
        struct ucall uc;

        vcpu_run(vcpu);
        /* Ucalls surface as port I/O exits; anything else is a failure. */
        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
                run->exit_reason,
                exit_reason_str(run->exit_reason));

        switch (get_ucall(vcpu, &uc)) {
        case UCALL_ABORT:
            REPORT_GUEST_ASSERT(uc);
            /* NOT REACHED */
        case UCALL_SYNC:
            /* Guest reported an IA32_TSC_ADJUST value; print it. */
            report(uc.args[1]);
            break;
        case UCALL_DONE:
            goto done;
        default:
            TEST_FAIL("Unknown ucall %lu", uc.cmd);
        }
    }

done:
    kvm_vm_free(vm);
    return 0;
}