/*
 * KVM selftest for the Hyper-V clocksources exposed to guests: the
 * HV_X64_MSR_TIME_REF_COUNT partition reference counter and the
 * reference TSC page.
 */
0007 #include "test_util.h"
0008 #include "kvm_util.h"
0009 #include "processor.h"
0010 #include "hyperv.h"
0011
/*
 * Guest-visible layout of the Hyper-V reference TSC page.  The hypervisor
 * updates the page asynchronously, so the fields the test re-reads are
 * volatile; __packed pins the exact ABI layout.
 */
struct ms_hyperv_tsc_page {
	volatile u32 tsc_sequence;	/* 0 while the page is invalid/disabled */
	u32 reserved1;
	volatile u64 tsc_scale;		/* fixed-point multiplier for rdtsc() */
	volatile s64 tsc_offset;	/* added after scaling */
} __packed;
0018
0019
/*
 * Compute the high 64 bits of a * b, i.e. (a * b) >> 64, without 128-bit
 * arithmetic (a simplified mul_u64_u64_shr()).  The a_low * b_low partial
 * product is deliberately ignored: it can only carry 1 into bit 64, which
 * is below the precision this test needs.
 */
static inline u64 mul_u64_u64_shr64(u64 a, u64 b)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	/* Cross products and the high * high partial product. */
	rm.ll = (u64)a0.l.low * b0.l.high;
	rn.ll = (u64)a0.l.high * b0.l.low;
	rh.ll = (u64)a0.l.high * b0.l.high;

	/*
	 * Sum the contributions to bits 32..63 of the final result.  The
	 * sum of three u32 values can exceed 32 bits, so force 64-bit
	 * arithmetic: plain u32 addition would wrap and silently drop the
	 * carry that must propagate into the high word below.
	 */
	rh.l.low = c = (u64)rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	return rh.ll;
}
0042
/* Burn roughly 10^8 iterations of "nop" as a crude delay so that the
 * various clock readings taken around it visibly advance.  The asm
 * statement keeps the compiler from optimizing the loop away.
 */
static inline void nop_loop(void)
{
	int remaining = 100000000;

	while (remaining--)
		__asm__ volatile("nop");
}
0050
/*
 * Guest-side check that HV_X64_MSR_TIME_REF_COUNT (100ns units) advances
 * at the rate advertised by HV_X64_MSR_TSC_FREQUENCY.
 */
static inline void check_tsc_msr_rdtsc(void)
{
	u64 tsc_freq, r1, r2, t1, t2;
	s64 delta_ns;

	tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
	GUEST_ASSERT(tsc_freq > 0);

	/*
	 * Bracket each reference-counter read with rdtsc() and use the
	 * midpoint as the TSC value corresponding to that MSR read.
	 */
	r1 = rdtsc();
	t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	r1 = (r1 + rdtsc()) / 2;
	nop_loop();
	r2 = rdtsc();
	t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	r2 = (r2 + rdtsc()) / 2;

	GUEST_ASSERT(r2 > r1 && t2 > t1);

	/* Elapsed ns per the reference counter minus elapsed ns per the TSC. */
	delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
	if (delta_ns < 0)
		delta_ns = -delta_ns;

	/* 1% tolerance: (t2 - t1) * 100 is the elapsed time in ns. */
	GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
}
0078
/* Current reference time from the TSC page: (rdtsc() * scale) >> 64 + offset. */
static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
{
	return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
}
0083
0084 static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
0085 {
0086 u64 r1, r2, t1, t2;
0087
0088
0089 t1 = get_tscpage_ts(tsc_page);
0090 r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
0091
0092
0093 GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
0094 nop_loop();
0095
0096 t2 = get_tscpage_ts(tsc_page);
0097 r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
0098 GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000);
0099 }
0100
/*
 * Guest entry point, run stage-by-stage in lockstep with main() via
 * GUEST_SYNC().  tsc_page is the guest virtual mapping of the page whose
 * guest physical address is tsc_page_gpa.
 */
static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa)
{
	u64 tsc_scale, tsc_offset;

	/* Stages 1-2: setting the guest OS ID enables the Hyper-V MSRs. */
	GUEST_SYNC(1);
	wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
	GUEST_SYNC(2);

	check_tsc_msr_rdtsc();

	GUEST_SYNC(3);

	/*
	 * Stage 4: point HV_X64_MSR_REFERENCE_TSC at the page with the
	 * enable bit (bit 0) clear; the page must be left untouched
	 * (still all zeroes).
	 */
	wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa);
	GUEST_ASSERT(tsc_page->tsc_sequence == 0);
	GUEST_ASSERT(tsc_page->tsc_scale == 0);
	GUEST_ASSERT(tsc_page->tsc_offset == 0);

	GUEST_SYNC(4);

	/* Stage 5: enable the TSC page (bit 0 set); it must now be populated. */
	wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa | 0x1);
	GUEST_ASSERT(tsc_page->tsc_sequence != 0);

	GUEST_SYNC(5);

	check_tsc_msr_tsc_page(tsc_page);

	GUEST_SYNC(6);

	/* Remember the offset to detect the host-side clock change below. */
	tsc_offset = tsc_page->tsc_offset;

	/*
	 * Stage 7: while the guest is paused, the host resets the KVM clock
	 * to zero (KVM_SET_CLOCK in main()), so the reference time restarts
	 * near zero and tsc_offset must have been rewritten.
	 */
	GUEST_SYNC(7);
	/* Reference time close to zero again: 10ms (100000 x 100ns) tolerance. */
	GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);

	GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);

	nop_loop();

	/*
	 * Enable reenlightenment notifications and TSC emulation.  With
	 * these enabled, the host clock reset at stage 8 must NOT silently
	 * change the TSC page parameters.
	 */
	wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0x1 << 16 | 0xff);
	wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0x1);
	tsc_offset = tsc_page->tsc_offset;
	tsc_scale = tsc_page->tsc_scale;
	GUEST_SYNC(8);
	GUEST_ASSERT(tsc_page->tsc_offset == tsc_offset);
	GUEST_ASSERT(tsc_page->tsc_scale == tsc_scale);

	GUEST_SYNC(9);

	check_tsc_msr_tsc_page(tsc_page);

	/*
	 * Disable reenlightenment and the TSC page, then zero the page so
	 * the stage-10 check can verify the hypervisor no longer writes it.
	 */
	wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
	wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
	wrmsr(HV_X64_MSR_REFERENCE_TSC, 0);
	memset(tsc_page, 0, sizeof(*tsc_page));

	GUEST_SYNC(10);
	GUEST_ASSERT(tsc_page->tsc_sequence == 0);
	GUEST_ASSERT(tsc_page->tsc_offset == 0);
	GUEST_ASSERT(tsc_page->tsc_scale == 0);

	GUEST_DONE();
}
0175
0176 static void host_check_tsc_msr_rdtsc(struct kvm_vcpu *vcpu)
0177 {
0178 u64 tsc_freq, r1, r2, t1, t2;
0179 s64 delta_ns;
0180
0181 tsc_freq = vcpu_get_msr(vcpu, HV_X64_MSR_TSC_FREQUENCY);
0182 TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
0183
0184
0185 r1 = rdtsc();
0186 t1 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
0187 r1 = (r1 + rdtsc()) / 2;
0188 nop_loop();
0189 r2 = rdtsc();
0190 t2 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
0191 r2 = (r2 + rdtsc()) / 2;
0192
0193 TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
0194
0195
0196 delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
0197 if (delta_ns < 0)
0198 delta_ns = -delta_ns;
0199
0200
0201 TEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100,
0202 "Elapsed time does not match (MSR=%ld, TSC=%ld)",
0203 (t2 - t1) * 100, (r2 - r1) * 1000000000 / tsc_freq);
0204 }
0205
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;
	vm_vaddr_t tsc_page_gva;
	int stage;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	run = vcpu->run;

	vcpu_set_hv_cpuid(vcpu);

	/* Allocate and zero the guest page used as the Hyper-V TSC page. */
	tsc_page_gva = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
	TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
		    "TSC page has to be page aligned\n");
	/* Pass both the GVA and the GPA: the guest writes the GPA into the MSR. */
	vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));

	host_check_tsc_msr_rdtsc(vcpu);

	/* Run the guest stage by stage; each GUEST_SYNC() exits back here. */
	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* Presumably not reached: REPORT_GUEST_ASSERT() fails the test. */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			/* The guest syncs ten times, so GUEST_DONE() lands at stage 11. */
			TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d\n",
				    stage);
			goto out;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* A sync carries ("hello", stage); guest and host must agree. */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

		/*
		 * Reset the KVM clock while the guest is paused.  The guest
		 * checks the effect after stage 7 (offset rewritten, time
		 * near zero), at stage 8 (params stable with reenlightenment
		 * on) and after stage 10 (TSC page disabled, left zeroed).
		 */
		if (stage == 7 || stage == 8 || stage == 10) {
			struct kvm_clock_data clock = {0};

			vm_ioctl(vm, KVM_SET_CLOCK, &clock);
		}
	}

out:
	kvm_vm_free(vm);
}