// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMX-preemption timer test
 *
 * Test to ensure that VM-Entry after a save/restore (migration) cycle
 * does not incorrectly restart the VMX-preemption timer with the full
 * timer value instead of the partially decayed timer value.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

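/*
 * The timer is armed with PREEMPTION_TIMER_VALUE; L2 spins for at least
 * PREEMPTION_TIMER_VALUE_THRESHOLD1 timer ticks before triggering the
 * save/restore cycle, deliberately less than the full timer value so the
 * timer is still counting down when state is saved.
 */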
#define PREEMPTION_TIMER_VALUE			100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1	 80000000ull

u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;

union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;

void l2_guest_code(void)
{
	u64 vmx_pt_delta;

	vmcall();
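	/*
	 * Shifting right then left rounds the TSC down to the granularity
	 * of one preemption-timer tick (2^vmx_pt_rate TSC cycles).
	 */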
	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	/*
	 * Wait until the 1st threshold has passed
	 */
	do {
		l2_vmx_pt_finish = rdtsc();
		vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
				vmx_pt_rate;
	} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);

	/*
	 * Force L2 through a save/restore cycle
	 */
	GUEST_SYNC(1);

	l2_save_restore_done = 1;

	/*
	 * Now wait for the preemption timer to fire and
	 * exit to L1
	 */
	while ((l2_vmx_pt_finish = rdtsc()))
		;
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	u64 l1_vmx_pt_start;
	u64 l1_vmx_pt_finish;
	u64 l1_tsc_deadline, l2_tsc_deadline;

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/*
	 * Check for preemption timer support
	 */
	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
			: MSR_IA32_VMX_PINBASED_CTLS);
	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
			: MSR_IA32_VMX_EXIT_CTLS);

	if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
	    !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
		return;

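	/*
	 * Launch L2, which immediately executes VMCALL so that L1 can
	 * program the preemption timer before resuming it.
	 */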
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));

	/*
	 * Turn on the preemption timer pin-based control and resume the guest
	 */
	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
			      vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
			      PIN_BASED_VMX_PREEMPTION_TIMER));

	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
			      PREEMPTION_TIMER_VALUE));

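	/*
	 * Bits 4:0 of IA32_VMX_MISC encode the rate of the VMX-preemption
	 * timer relative to the TSC: the timer counts down by 1 every time
	 * bit vmx_pt_rate of the TSC changes.
	 */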
	vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	l2_save_restore_done = 0;

	l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

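	/*
	 * Resume L2; the next VM-exit back to L1 should be caused by the
	 * preemption timer expiring, after L2 has gone through the
	 * save/restore cycle.
	 */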
	GUEST_ASSERT(!vmresume());

	l1_vmx_pt_finish = rdtsc();

	/*
	 * Ensure the exit from L2 happened after L2 went through
	 * save and restore
	 */
	GUEST_ASSERT(l2_save_restore_done);

	/*
	 * Ensure the exit from L2 was due to preemption timer expiry
	 */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);

	l1_tsc_deadline = l1_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	l2_tsc_deadline = l2_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	/*
	 * Sync with the host and pass the L1/L2 timer expiry TSC values and
	 * TSC deadlines so the host can verify they are as expected.
	 */
	GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
			l2_vmx_pt_finish, l2_tsc_deadline);
}

void guest_code(struct vmx_pages *vmx_pages)
{
	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_vcpu *vcpu;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/*
	 * Nested VMX and KVM nested-state support are required for the
	 * save/restore of L2 below.
	 */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

	vcpu_regs_get(vcpu, &regs1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vcpu, 1, vmx_pages_gva);

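	/*
	 * Run the guest in stages, with a full save/restore of the vCPU
	 * (including its nested VMX state) after every stage.
	 */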
	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here. */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

		/*
		 * At stage 2, verify that the timer expired after L1's TSC
		 * deadline (the timer counted its full value from when L1
		 * armed it) but before L2's TSC deadline, i.e. the partially
		 * decayed timer value survived the save/restore cycle rather
		 * than being restarted at the full value.
		 */
		if (stage == 2) {

			pr_info("Stage %d: L1 PT expiry TSC (%lu), L1 TSC deadline (%lu)\n",
				stage, uc.args[2], uc.args[3]);

			pr_info("Stage %d: L2 PT expiry TSC (%lu), L2 TSC deadline (%lu)\n",
				stage, uc.args[4], uc.args[5]);

			TEST_ASSERT(uc.args[2] >= uc.args[3],
				    "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
				    stage, uc.args[2], uc.args[3]);

			TEST_ASSERT(uc.args[4] < uc.args[5],
				    "Stage %d: L2 PT expiry TSC (%lu) >= L2 TSC deadline (%lu)",
				    stage, uc.args[4], uc.args[5]);
		}

		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM. */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		run = vcpu->run;
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}