0001
0002
0003
0004
0005
0006
0007 #include <stdio.h>
0008 #include <string.h>
0009 #include "kvm_util.h"
0010 #include "processor.h"
0011
/* 1 GiB granularity: rounding unit for TSC reads and base step size. */
#define UNITY (1ull << 30)
/* Offset the host applies to the guest TSC via vcpu_set_msr() (stage 3). */
#define HOST_ADJUST (UNITY * 64)
/* Value the guest writes at each stage is a multiple of this step. */
#define GUEST_STEP (UNITY * 4)
/*
 * Round to the nearest multiple of UNITY so that TSC cycles elapsed
 * between a write and the subsequent read do not affect comparisons.
 * The argument is fully parenthesized so expressions containing
 * operators of lower precedence than '+' (shifts, ternary) expand
 * correctly.
 */
#define ROUND(x) (((x) + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x) ROUND(rdmsr(x))
/* NOTE: expands a reference to a local 'vcpu' at the use site. */
#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vcpu, x))
0018
/*
 * Guest half of the test.  Runs in lockstep with main() via GUEST_SYNC
 * stage numbers and checks how writes to MSR_IA32_TSC and
 * MSR_IA32_TSC_ADJUST are reflected in both MSRs.  All reads are
 * rounded to UNITY so cycles elapsed between write and read are ignored.
 */
static void guest_code(void)
{
	u64 val = 0;

	/* Both MSRs start out at zero. */
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: a write to MSR_IA32_TSC shows up in both MSRs. */
	val = 1ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: a write to MSR_IA32_TSC_ADJUST shows up in both MSRs. */
	GUEST_SYNC(2);
	val = 2ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host sets the TSC to HOST_ADJUST + val while we are stopped at
	 * this sync point: the offset appears in MSR_IA32_TSC but leaves
	 * MSR_IA32_TSC_ADJUST untouched.
	 */
	GUEST_SYNC(3);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: a write to MSR_IA32_TSC_ADJUST does not destroy the
	 * host-side TSC offset; it is still visible in MSR_IA32_TSC.
	 */
	GUEST_SYNC(4);
	val = 3ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: a write to MSR_IA32_TSC overwrites the combined value, so
	 * the host-side offset now shows up (negated) in
	 * MSR_IA32_TSC_ADJUST.
	 */
	GUEST_SYNC(5);
	val = 4ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	GUEST_DONE();
}
0066
/*
 * Run the vCPU until its next ucall exit and verify the guest synced at
 * the expected stage.
 *
 * @vcpu:  the vCPU to run
 * @stage: expected GUEST_SYNC stage number reported by the guest
 *         (asserted as stage + 1 because callers pass the prior stage's
 *         index; see the call sites in main())
 */
static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		/* GUEST_SYNC carries "hello" in args[0] and the stage in args[1]. */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		/*
		 * Reports the guest-side assertion failure; relies on the
		 * macro aborting the test, so the fallthrough into default
		 * is unreachable.
		 */
		REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}
0088
/*
 * Host half of the test.  Drives guest_code() stage by stage and checks
 * that the host's view of MSR_IA32_TSC / MSR_IA32_TSC_ADJUST (via
 * vcpu_get_msr, rounded to UNITY) matches what the guest wrote, and that
 * host-side writes behave as expected.
 */
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t val;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Both MSRs start out at zero. */
	val = 0;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: a write to MSR_IA32_TSC shows up in both MSRs. */
	run_vcpu(vcpu, 1);
	val = 1ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: a write to MSR_IA32_TSC_ADJUST shows up in both MSRs. */
	run_vcpu(vcpu, 2);
	val = 2ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host: a write to MSR_IA32_TSC sets the host-side TSC offset and
	 * therefore does not change MSR_IA32_TSC_ADJUST.
	 */
	vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
	run_vcpu(vcpu, 3);

	/* Host: a write to MSR_IA32_TSC_ADJUST does not modify the TSC. */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);

	/* Restore the previous MSR_IA32_TSC_ADJUST value. */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: a write to MSR_IA32_TSC_ADJUST does not destroy the
	 * host-side TSC offset; it remains visible in MSR_IA32_TSC.
	 */
	run_vcpu(vcpu, 4);
	val = 3ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: a write to MSR_IA32_TSC overwrites the combined value, so
	 * the host-side offset now shows up (negated) in
	 * MSR_IA32_TSC_ADJUST.
	 */
	run_vcpu(vcpu, 5);
	val = 4ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	kvm_vm_free(vm);

	return 0;
}