0001
0002
0003
0004
0005
0006
0007
0008
0009
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#include <stdint.h>
#include <sched.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>

#include <sys/eventfd.h>
0020
0021 #define SHINFO_REGION_GVA 0xc0000000ULL
0022 #define SHINFO_REGION_GPA 0xc0000000ULL
0023 #define SHINFO_REGION_SLOT 10
0024
0025 #define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (2 * PAGE_SIZE))
0026 #define DUMMY_REGION_SLOT 11
0027
0028 #define SHINFO_ADDR (SHINFO_REGION_GPA)
0029 #define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
0030 #define RUNSTATE_ADDR (SHINFO_REGION_GPA + PAGE_SIZE + 0x20)
0031 #define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40)
0032
0033 #define SHINFO_VADDR (SHINFO_REGION_GVA)
0034 #define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + 0x20)
0035 #define VCPU_INFO_VADDR (SHINFO_REGION_GVA + 0x40)
0036
0037 #define EVTCHN_VECTOR 0x10
0038
0039 #define EVTCHN_TEST1 15
0040 #define EVTCHN_TEST2 66
0041 #define EVTCHN_TIMER 13
0042
0043 #define XEN_HYPERCALL_MSR 0x40000000
0044
0045 #define MIN_STEAL_TIME 50000
0046
0047 #define __HYPERVISOR_set_timer_op 15
0048 #define __HYPERVISOR_sched_op 29
0049 #define __HYPERVISOR_event_channel_op 32
0050
0051 #define SCHEDOP_poll 3
0052
0053 #define EVTCHNOP_send 4
0054
0055 #define EVTCHNSTAT_interdomain 2
0056
/*
 * Argument for EVTCHNOP_send: the local event-channel port to signal.
 * Layout mirrors Xen's public ABI — TODO confirm against xen/interface/event_channel.h.
 */
struct evtchn_send {
	u32 port;
};
0060
/*
 * Argument for SCHEDOP_poll: block until one of @nr_ports ports in @ports
 * is pending, or until @timeout (absolute ns; 0 = no timeout).
 * Layout mirrors Xen's public ABI — TODO confirm against xen/interface/sched.h.
 */
struct sched_poll {
	u32 *ports;
	unsigned int nr_ports;
	u64 timeout;
};
0066
/*
 * Per-vCPU pvclock time info, written by the hypervisor. @version is bumped
 * around updates (odd = update in progress); the test checks it is non-zero
 * and even after the run. Packed: layout is ABI, shared with the guest.
 */
struct pvclock_vcpu_time_info {
	u32 version;
	u32 pad0;
	u64 tsc_timestamp;
	u64 system_time;
	u32 tsc_to_system_mul;
	s8 tsc_shift;
	u8 flags;
	u8 pad[2];
} __attribute__((__packed__)); /* 32 bytes */
0077
/*
 * Wall-clock time at VM boot, written once by the hypervisor into
 * shared_info. Same version protocol as pvclock_vcpu_time_info. Packed ABI.
 */
struct pvclock_wall_clock {
	u32 version;
	u32 sec;
	u32 nsec;
} __attribute__((__packed__));
0083
/*
 * Xen per-vCPU runstate area. @state is one of RUNSTATE_*, @time[] is the
 * cumulative ns spent in each state, and @state_entry_time is the total
 * (the test asserts it equals the sum of the four time[] entries).
 */
struct vcpu_runstate_info {
	uint32_t state;
	uint64_t state_entry_time;
	uint64_t time[4]; /* indexed by RUNSTATE_* */
};
0089
/* x86 portion of vcpu_info (unused by this test, present for ABI layout). */
struct arch_vcpu_info {
	unsigned long cr2;
	unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
};
0094
/*
 * Xen per-vCPU info. @evtchn_upcall_pending is set by KVM when an event
 * channel fires and cleared by the guest's upcall handler;
 * @evtchn_pending_sel selects which words of shared_info.evtchn_pending
 * need scanning. Layout mirrors Xen's public ABI.
 */
struct vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	unsigned long evtchn_pending_sel;
	struct arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
};
0102
/*
 * Xen shared_info page (2-level event-channel ABI): pending/mask bitmaps
 * plus the boot wall clock. The test maps this at SHINFO_REGION_GPA and
 * pokes the bitmaps directly from the host side.
 */
struct shared_info {
	struct vcpu_info vcpu_info[32];
	unsigned long evtchn_pending[64];
	unsigned long evtchn_mask[64];
	struct pvclock_wall_clock wc;
	uint32_t wc_sec_hi; /* high 32 bits of wc.sec on 64-bit ABI */

};
0111
0112 #define RUNSTATE_running 0
0113 #define RUNSTATE_runnable 1
0114 #define RUNSTATE_blocked 2
0115 #define RUNSTATE_offline 3
0116
/* Human-readable names, indexed by RUNSTATE_* (0..3), for verbose output. */
static const char *runstate_names[] = {
	"running",
	"runnable",
	"blocked",
	"offline"
};
0123
/*
 * GSI routing table handed to KVM_SET_GSI_ROUTING: two entries mapping
 * GSIs 32/33 to Xen event channels EVTCHN_TEST1/EVTCHN_TEST2. The entries[]
 * array must directly follow the header, as KVM reads them as one buffer.
 */
struct {
	struct kvm_irq_routing info;
	struct kvm_irq_routing_entry entries[2];
} irq_routes;

/* Set by the guest's upcall handler; polled by guest_wait_for_irq(). */
bool guest_saw_irq;
0130
/*
 * Guest-side upcall handler for EVTCHN_VECTOR: acknowledge the event by
 * clearing the per-vCPU pending flags, record the delivery for
 * guest_wait_for_irq(), and let the host-side loop count the IRQ via
 * GUEST_SYNC(0x20).
 */
static void evtchn_handler(struct ex_regs *regs)
{
	struct vcpu_info *vi = (void *)VCPU_INFO_VADDR;
	vi->evtchn_upcall_pending = 0;
	vi->evtchn_pending_sel = 0;
	guest_saw_irq = true;

	GUEST_SYNC(0x20);
}
0140
0141 static void guest_wait_for_irq(void)
0142 {
0143 while (!guest_saw_irq)
0144 __asm__ __volatile__ ("rep nop" : : : "memory");
0145 guest_saw_irq = false;
0146 }
0147
/*
 * Guest entry point. Runs in lock-step with the host's ucall loop in main():
 * each GUEST_SYNC(n) hands control to "case n" on the host side, which sets
 * up the next stimulus (runstate writes, eventfd pokes, timer arming, ...)
 * before resuming the guest.
 */
static void guest_code(void)
{
	struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;

	/* Enable interrupts so the EVTCHN_VECTOR upcall can be delivered. */
	__asm__ __volatile__(
		"sti\n"
		"nop\n"
	);

	/* Trigger an interrupt injection */
	GUEST_SYNC(0);

	guest_wait_for_irq();

	/*
	 * Host forces each runstate in turn via RUNSTATE_CURRENT; once we are
	 * back executing, state has reverted to RUNSTATE_running (0) but the
	 * forced state must have accumulated time.
	 */
	GUEST_SYNC(RUNSTATE_runnable);
	GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
	GUEST_ASSERT(rs->state == 0);

	GUEST_SYNC(RUNSTATE_blocked);
	GUEST_ASSERT(rs->time[RUNSTATE_blocked] != 0);
	GUEST_ASSERT(rs->state == 0);

	GUEST_SYNC(RUNSTATE_offline);
	GUEST_ASSERT(rs->time[RUNSTATE_offline] != 0);
	GUEST_ASSERT(rs->state == 0);

	/* RUNSTATE_ADJUST: host adds deltas to make blocked/offline exact. */
	GUEST_SYNC(4);
	GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x5a);
	GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x6b6b);

	/* RUNSTATE_DATA: host overwrites the whole runstate area. */
	GUEST_SYNC(5);
	GUEST_ASSERT(rs->state_entry_time >= 0x8000);
	GUEST_ASSERT(rs->time[RUNSTATE_runnable] == 0);
	GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x6b6b);
	GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x5a);

	/* Steal time: host spins in sched_yield() until we accrue runnable time. */
	GUEST_SYNC(6);
	GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME);

	/* Attempt to deliver a *masked* event channel, which should not arrive. */
	GUEST_SYNC(7);

	/* Wait until we see the bit set in shared_info (of the *masked* channel). */
	struct shared_info *si = (void *)SHINFO_VADDR;
	while (!si->evtchn_pending[0])
		__asm__ __volatile__ ("rep nop" : : : "memory");

	/* Now deliver an *unmasked* event channel. */
	GUEST_SYNC(8);

	guest_wait_for_irq();

	/* Change memslots and deliver an interrupt. */
	GUEST_SYNC(9);

	guest_wait_for_irq();

	/* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND. */
	GUEST_SYNC(10);

	guest_wait_for_irq();

	GUEST_SYNC(11);

	/*
	 * Our turn. Deliver event channel (to ourselves) with
	 * EVTCHNOP_send hypercall on port 127, bound to EVTCHN_TEST1.
	 */
	unsigned long rax;
	struct evtchn_send s = { .port = 127 };
	__asm__ __volatile__ ("vmcall" :
			      "=a" (rax) :
			      "a" (__HYPERVISOR_event_channel_op),
			      "D" (EVTCHNOP_send),
			      "S" (&s));

	GUEST_ASSERT(rax == 0);

	guest_wait_for_irq();

	GUEST_SYNC(12);

	/*
	 * Deliver "outbound" event channel to an eventfd which
	 * happens to be one of our own irqfds.
	 */
	s.port = 197;
	__asm__ __volatile__ ("vmcall" :
			      "=a" (rax) :
			      "a" (__HYPERVISOR_event_channel_op),
			      "D" (EVTCHNOP_send),
			      "S" (&s));

	GUEST_ASSERT(rax == 0);

	guest_wait_for_irq();

	GUEST_SYNC(13);

	/* Set a timer 100ms in the future. */
	__asm__ __volatile__ ("vmcall" :
			      "=a" (rax) :
			      "a" (__HYPERVISOR_set_timer_op),
			      "D" (rs->state_entry_time + 100000000));
	GUEST_ASSERT(rax == 0);

	GUEST_SYNC(14);

	/* Now wait for the timer. */
	guest_wait_for_irq();

	GUEST_SYNC(15);

	/* The host has 'restored' the timer. Just wait for it. */
	guest_wait_for_irq();

	GUEST_SYNC(16);

	/*
	 * Poll for an event channel port which is already set
	 * (host set EVTCHN_TIMER pending before this sync).
	 */
	u32 ports[1] = { EVTCHN_TIMER };
	struct sched_poll p = {
		.ports = ports,
		.nr_ports = 1,
		.timeout = 0,
	};

	__asm__ __volatile__ ("vmcall" :
			      "=a" (rax) :
			      "a" (__HYPERVISOR_sched_op),
			      "D" (SCHEDOP_poll),
			      "S" (&p));

	GUEST_ASSERT(rax == 0);

	GUEST_SYNC(17);

	/* Poll for an unset port and wait for the timeout. */
	p.timeout = 100000000;
	__asm__ __volatile__ ("vmcall" :
			      "=a" (rax) :
			      "a" (__HYPERVISOR_sched_op),
			      "D" (SCHEDOP_poll),
			      "S" (&p));

	GUEST_ASSERT(rax == 0);

	GUEST_SYNC(18);

	/* A timer will wake the masked port we're waiting on, while we poll. */
	p.timeout = 0;
	__asm__ __volatile__ ("vmcall" :
			      "=a" (rax) :
			      "a" (__HYPERVISOR_sched_op),
			      "D" (SCHEDOP_poll),
			      "S" (&p));

	GUEST_ASSERT(rax == 0);

	GUEST_SYNC(19);

	/*
	 * A timer wake an *unmasked* port which should wake us with an
	 * actual interrupt, while we're polling on a different port.
	 */
	ports[0]++;
	p.timeout = 0;
	__asm__ __volatile__ ("vmcall" :
			      "=a" (rax) :
			      "a" (__HYPERVISOR_sched_op),
			      "D" (SCHEDOP_poll),
			      "S" (&p));

	GUEST_ASSERT(rax == 0);

	guest_wait_for_irq();

	GUEST_SYNC(20);

	/* Timer should have fired already (host armed one in the past). */
	guest_wait_for_irq();

	GUEST_SYNC(21);
}
0329
/*
 * Three-way comparison of two timespecs: 1 if *a > *b, -1 if *a < *b,
 * 0 if equal. Seconds dominate; nanoseconds break ties.
 */
static int cmp_timespec(struct timespec *a, struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec > b->tv_sec ? 1 : -1;
	if (a->tv_nsec != b->tv_nsec)
		return a->tv_nsec > b->tv_nsec ? 1 : -1;
	return 0;
}
0343
/* Host-side view of the guest's vcpu_info, for the SIGALRM diagnostic. */
static struct vcpu_info *vinfo;
/* The single test vCPU, global so handle_alrm() can dump it. */
static struct kvm_vcpu *vcpu;
0346
/*
 * SIGALRM handler: the test arms alarm(1) whenever it expects an event
 * channel IRQ; if the alarm fires the delivery never happened, so dump
 * state and fail.
 */
static void handle_alrm(int sig)
{
	if (vinfo)
		printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending);
	vcpu_dump(stdout, vcpu, 0);
	TEST_FAIL("IRQ delivery timed out");
}
0354
0355 int main(int argc, char *argv[])
0356 {
0357 struct timespec min_ts, max_ts, vm_ts;
0358 struct kvm_vm *vm;
0359 bool verbose;
0360
0361 verbose = argc > 1 && (!strncmp(argv[1], "-v", 3) ||
0362 !strncmp(argv[1], "--verbose", 10));
0363
0364 int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
0365 TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO);
0366
0367 bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE);
0368 bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
0369 bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
0370
0371 clock_gettime(CLOCK_REALTIME, &min_ts);
0372
0373 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
0374
0375
0376 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
0377 SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0);
0378 virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2);
0379
0380 struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
0381
0382 int zero_fd = open("/dev/zero", O_RDONLY);
0383 TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");
0384
0385 struct kvm_xen_hvm_config hvmc = {
0386 .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
0387 .msr = XEN_HYPERCALL_MSR,
0388 };
0389
0390
0391
0392 if (do_evtchn_tests)
0393 hvmc.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
0394
0395 vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
0396
0397 struct kvm_xen_hvm_attr lm = {
0398 .type = KVM_XEN_ATTR_TYPE_LONG_MODE,
0399 .u.long_mode = 1,
0400 };
0401 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);
0402
0403 struct kvm_xen_hvm_attr ha = {
0404 .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
0405 .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE,
0406 };
0407 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
0408
0409
0410
0411
0412
0413
0414 struct pvclock_wall_clock wc_copy = shinfo->wc;
0415 void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0);
0416 TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info");
0417 shinfo->wc = wc_copy;
0418
0419 struct kvm_xen_vcpu_attr vi = {
0420 .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
0421 .u.gpa = VCPU_INFO_ADDR,
0422 };
0423 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vi);
0424
0425 struct kvm_xen_vcpu_attr pvclock = {
0426 .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
0427 .u.gpa = PVTIME_ADDR,
0428 };
0429 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &pvclock);
0430
0431 struct kvm_xen_hvm_attr vec = {
0432 .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
0433 .u.vector = EVTCHN_VECTOR,
0434 };
0435 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
0436
0437 vm_init_descriptor_tables(vm);
0438 vcpu_init_descriptor_tables(vcpu);
0439 vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
0440
0441 if (do_runstate_tests) {
0442 struct kvm_xen_vcpu_attr st = {
0443 .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
0444 .u.gpa = RUNSTATE_ADDR,
0445 };
0446 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);
0447 }
0448
0449 int irq_fd[2] = { -1, -1 };
0450
0451 if (do_eventfd_tests) {
0452 irq_fd[0] = eventfd(0, 0);
0453 irq_fd[1] = eventfd(0, 0);
0454
0455
0456 if (irq_fd[0] == -1 || irq_fd[1] == -1)
0457 do_evtchn_tests = do_eventfd_tests = false;
0458 }
0459
0460 if (do_eventfd_tests) {
0461 irq_routes.info.nr = 2;
0462
0463 irq_routes.entries[0].gsi = 32;
0464 irq_routes.entries[0].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
0465 irq_routes.entries[0].u.xen_evtchn.port = EVTCHN_TEST1;
0466 irq_routes.entries[0].u.xen_evtchn.vcpu = vcpu->id;
0467 irq_routes.entries[0].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
0468
0469 irq_routes.entries[1].gsi = 33;
0470 irq_routes.entries[1].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
0471 irq_routes.entries[1].u.xen_evtchn.port = EVTCHN_TEST2;
0472 irq_routes.entries[1].u.xen_evtchn.vcpu = vcpu->id;
0473 irq_routes.entries[1].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
0474
0475 vm_ioctl(vm, KVM_SET_GSI_ROUTING, &irq_routes.info);
0476
0477 struct kvm_irqfd ifd = { };
0478
0479 ifd.fd = irq_fd[0];
0480 ifd.gsi = 32;
0481 vm_ioctl(vm, KVM_IRQFD, &ifd);
0482
0483 ifd.fd = irq_fd[1];
0484 ifd.gsi = 33;
0485 vm_ioctl(vm, KVM_IRQFD, &ifd);
0486
0487 struct sigaction sa = { };
0488 sa.sa_handler = handle_alrm;
0489 sigaction(SIGALRM, &sa, NULL);
0490 }
0491
0492 struct kvm_xen_vcpu_attr tmr = {
0493 .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
0494 .u.timer.port = EVTCHN_TIMER,
0495 .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
0496 .u.timer.expires_ns = 0
0497 };
0498
0499 if (do_evtchn_tests) {
0500 struct kvm_xen_hvm_attr inj = {
0501 .type = KVM_XEN_ATTR_TYPE_EVTCHN,
0502 .u.evtchn.send_port = 127,
0503 .u.evtchn.type = EVTCHNSTAT_interdomain,
0504 .u.evtchn.flags = 0,
0505 .u.evtchn.deliver.port.port = EVTCHN_TEST1,
0506 .u.evtchn.deliver.port.vcpu = vcpu->id + 1,
0507 .u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
0508 };
0509 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
0510
0511
0512 inj.u.evtchn.flags = KVM_XEN_EVTCHN_UPDATE;
0513 inj.u.evtchn.deliver.port.vcpu = vcpu->id;
0514 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
0515
0516 inj.u.evtchn.send_port = 197;
0517 inj.u.evtchn.deliver.eventfd.port = 0;
0518 inj.u.evtchn.deliver.eventfd.fd = irq_fd[1];
0519 inj.u.evtchn.flags = 0;
0520 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
0521
0522 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
0523 }
0524 vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
0525 vinfo->evtchn_upcall_pending = 0;
0526
0527 struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
0528 rs->state = 0x5a;
0529
0530 bool evtchn_irq_expected = false;
0531
0532 for (;;) {
0533 volatile struct kvm_run *run = vcpu->run;
0534 struct ucall uc;
0535
0536 vcpu_run(vcpu);
0537
0538 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
0539 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
0540 run->exit_reason,
0541 exit_reason_str(run->exit_reason));
0542
0543 switch (get_ucall(vcpu, &uc)) {
0544 case UCALL_ABORT:
0545 REPORT_GUEST_ASSERT(uc);
0546
0547 case UCALL_SYNC: {
0548 struct kvm_xen_vcpu_attr rst;
0549 long rundelay;
0550
0551 if (do_runstate_tests)
0552 TEST_ASSERT(rs->state_entry_time == rs->time[0] +
0553 rs->time[1] + rs->time[2] + rs->time[3],
0554 "runstate times don't add up");
0555
0556 switch (uc.args[1]) {
0557 case 0:
0558 if (verbose)
0559 printf("Delivering evtchn upcall\n");
0560 evtchn_irq_expected = true;
0561 vinfo->evtchn_upcall_pending = 1;
0562 break;
0563
0564 case RUNSTATE_runnable...RUNSTATE_offline:
0565 TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
0566 if (!do_runstate_tests)
0567 goto done;
0568 if (verbose)
0569 printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
0570 rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
0571 rst.u.runstate.state = uc.args[1];
0572 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
0573 break;
0574
0575 case 4:
0576 if (verbose)
0577 printf("Testing RUNSTATE_ADJUST\n");
0578 rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST;
0579 memset(&rst.u, 0, sizeof(rst.u));
0580 rst.u.runstate.state = (uint64_t)-1;
0581 rst.u.runstate.time_blocked =
0582 0x5a - rs->time[RUNSTATE_blocked];
0583 rst.u.runstate.time_offline =
0584 0x6b6b - rs->time[RUNSTATE_offline];
0585 rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked -
0586 rst.u.runstate.time_offline;
0587 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
0588 break;
0589
0590 case 5:
0591 if (verbose)
0592 printf("Testing RUNSTATE_DATA\n");
0593 rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA;
0594 memset(&rst.u, 0, sizeof(rst.u));
0595 rst.u.runstate.state = RUNSTATE_running;
0596 rst.u.runstate.state_entry_time = 0x6b6b + 0x5a;
0597 rst.u.runstate.time_blocked = 0x6b6b;
0598 rst.u.runstate.time_offline = 0x5a;
0599 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
0600 break;
0601
0602 case 6:
0603 if (verbose)
0604 printf("Testing steal time\n");
0605
0606 rundelay = get_run_delay() + MIN_STEAL_TIME;
0607 do {
0608 sched_yield();
0609 } while (get_run_delay() < rundelay);
0610 break;
0611
0612 case 7:
0613 if (!do_eventfd_tests)
0614 goto done;
0615 if (verbose)
0616 printf("Testing masked event channel\n");
0617 shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1;
0618 eventfd_write(irq_fd[0], 1UL);
0619 alarm(1);
0620 break;
0621
0622 case 8:
0623 if (verbose)
0624 printf("Testing unmasked event channel\n");
0625
0626 shinfo->evtchn_pending[0] = 0;
0627 shinfo->evtchn_mask[0] = 0;
0628 eventfd_write(irq_fd[1], 1UL);
0629 evtchn_irq_expected = true;
0630 alarm(1);
0631 break;
0632
0633 case 9:
0634 TEST_ASSERT(!evtchn_irq_expected,
0635 "Expected event channel IRQ but it didn't happen");
0636 shinfo->evtchn_pending[1] = 0;
0637 if (verbose)
0638 printf("Testing event channel after memslot change\n");
0639 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
0640 DUMMY_REGION_GPA, DUMMY_REGION_SLOT, 1, 0);
0641 eventfd_write(irq_fd[0], 1UL);
0642 evtchn_irq_expected = true;
0643 alarm(1);
0644 break;
0645
0646 case 10:
0647 TEST_ASSERT(!evtchn_irq_expected,
0648 "Expected event channel IRQ but it didn't happen");
0649 if (!do_evtchn_tests)
0650 goto done;
0651
0652 shinfo->evtchn_pending[0] = 0;
0653 if (verbose)
0654 printf("Testing injection with KVM_XEN_HVM_EVTCHN_SEND\n");
0655
0656 struct kvm_irq_routing_xen_evtchn e;
0657 e.port = EVTCHN_TEST2;
0658 e.vcpu = vcpu->id;
0659 e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
0660
0661 vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &e);
0662 evtchn_irq_expected = true;
0663 alarm(1);
0664 break;
0665
0666 case 11:
0667 TEST_ASSERT(!evtchn_irq_expected,
0668 "Expected event channel IRQ but it didn't happen");
0669 shinfo->evtchn_pending[1] = 0;
0670
0671 if (verbose)
0672 printf("Testing guest EVTCHNOP_send direct to evtchn\n");
0673 evtchn_irq_expected = true;
0674 alarm(1);
0675 break;
0676
0677 case 12:
0678 TEST_ASSERT(!evtchn_irq_expected,
0679 "Expected event channel IRQ but it didn't happen");
0680 shinfo->evtchn_pending[0] = 0;
0681
0682 if (verbose)
0683 printf("Testing guest EVTCHNOP_send to eventfd\n");
0684 evtchn_irq_expected = true;
0685 alarm(1);
0686 break;
0687
0688 case 13:
0689 TEST_ASSERT(!evtchn_irq_expected,
0690 "Expected event channel IRQ but it didn't happen");
0691 shinfo->evtchn_pending[1] = 0;
0692
0693 if (verbose)
0694 printf("Testing guest oneshot timer\n");
0695 break;
0696
0697 case 14:
0698 memset(&tmr, 0, sizeof(tmr));
0699 tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
0700 vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
0701 TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER,
0702 "Timer port not returned");
0703 TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
0704 "Timer priority not returned");
0705 TEST_ASSERT(tmr.u.timer.expires_ns > rs->state_entry_time,
0706 "Timer expiry not returned");
0707 evtchn_irq_expected = true;
0708 alarm(1);
0709 break;
0710
0711 case 15:
0712 TEST_ASSERT(!evtchn_irq_expected,
0713 "Expected event channel IRQ but it didn't happen");
0714 shinfo->evtchn_pending[0] = 0;
0715
0716 if (verbose)
0717 printf("Testing restored oneshot timer\n");
0718
0719 tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
0720 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
0721 evtchn_irq_expected = true;
0722 alarm(1);
0723 break;
0724
0725 case 16:
0726 TEST_ASSERT(!evtchn_irq_expected,
0727 "Expected event channel IRQ but it didn't happen");
0728
0729 if (verbose)
0730 printf("Testing SCHEDOP_poll with already pending event\n");
0731 shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 1UL << EVTCHN_TIMER;
0732 alarm(1);
0733 break;
0734
0735 case 17:
0736 if (verbose)
0737 printf("Testing SCHEDOP_poll timeout\n");
0738 shinfo->evtchn_pending[0] = 0;
0739 alarm(1);
0740 break;
0741
0742 case 18:
0743 if (verbose)
0744 printf("Testing SCHEDOP_poll wake on masked event\n");
0745
0746 tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
0747 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
0748 alarm(1);
0749 break;
0750
0751 case 19:
0752 shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 0;
0753 if (verbose)
0754 printf("Testing SCHEDOP_poll wake on unmasked event\n");
0755
0756 evtchn_irq_expected = true;
0757 tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
0758 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
0759
0760
0761 tmr.u.timer.expires_ns = 0;
0762 vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
0763 TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000,
0764 "Timer not reported pending");
0765 alarm(1);
0766 break;
0767
0768 case 20:
0769 TEST_ASSERT(!evtchn_irq_expected,
0770 "Expected event channel IRQ but it didn't happen");
0771
0772 vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
0773 TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending");
0774
0775 shinfo->evtchn_pending[0] = 0;
0776 if (verbose)
0777 printf("Testing timer in the past\n");
0778
0779 evtchn_irq_expected = true;
0780 tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL;
0781 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
0782 alarm(1);
0783 break;
0784
0785 case 21:
0786 TEST_ASSERT(!evtchn_irq_expected,
0787 "Expected event channel IRQ but it didn't happen");
0788 goto done;
0789
0790 case 0x20:
0791 TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
0792 evtchn_irq_expected = false;
0793 break;
0794 }
0795 break;
0796 }
0797 case UCALL_DONE:
0798 goto done;
0799 default:
0800 TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
0801 }
0802 }
0803
0804 done:
0805 alarm(0);
0806 clock_gettime(CLOCK_REALTIME, &max_ts);
0807
0808
0809
0810
0811
0812
0813 struct pvclock_wall_clock *wc;
0814 struct pvclock_vcpu_time_info *ti, *ti2;
0815
0816 wc = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0xc00);
0817 ti = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20);
0818 ti2 = addr_gpa2hva(vm, PVTIME_ADDR);
0819
0820 if (verbose) {
0821 printf("Wall clock (v %d) %d.%09d\n", wc->version, wc->sec, wc->nsec);
0822 printf("Time info 1: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n",
0823 ti->version, ti->tsc_timestamp, ti->system_time, ti->tsc_to_system_mul,
0824 ti->tsc_shift, ti->flags);
0825 printf("Time info 2: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n",
0826 ti2->version, ti2->tsc_timestamp, ti2->system_time, ti2->tsc_to_system_mul,
0827 ti2->tsc_shift, ti2->flags);
0828 }
0829
0830 vm_ts.tv_sec = wc->sec;
0831 vm_ts.tv_nsec = wc->nsec;
0832 TEST_ASSERT(wc->version && !(wc->version & 1),
0833 "Bad wallclock version %x", wc->version);
0834 TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old");
0835 TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new");
0836
0837 TEST_ASSERT(ti->version && !(ti->version & 1),
0838 "Bad time_info version %x", ti->version);
0839 TEST_ASSERT(ti2->version && !(ti2->version & 1),
0840 "Bad time_info version %x", ti->version);
0841
0842 if (do_runstate_tests) {
0843
0844
0845
0846
0847
0848 struct kvm_xen_vcpu_attr rst = {
0849 .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA,
0850 };
0851 vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst);
0852
0853 if (verbose) {
0854 printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
0855 rs->state <= RUNSTATE_offline ? runstate_names[rs->state] : "unknown",
0856 rs->state, rs->state_entry_time);
0857 for (int i = RUNSTATE_running; i <= RUNSTATE_offline; i++) {
0858 printf("State %s: %" PRIu64 " ns\n",
0859 runstate_names[i], rs->time[i]);
0860 }
0861 }
0862 TEST_ASSERT(rs->state == rst.u.runstate.state, "Runstate mismatch");
0863 TEST_ASSERT(rs->state_entry_time == rst.u.runstate.state_entry_time,
0864 "State entry time mismatch");
0865 TEST_ASSERT(rs->time[RUNSTATE_running] == rst.u.runstate.time_running,
0866 "Running time mismatch");
0867 TEST_ASSERT(rs->time[RUNSTATE_runnable] == rst.u.runstate.time_runnable,
0868 "Runnable time mismatch");
0869 TEST_ASSERT(rs->time[RUNSTATE_blocked] == rst.u.runstate.time_blocked,
0870 "Blocked time mismatch");
0871 TEST_ASSERT(rs->time[RUNSTATE_offline] == rst.u.runstate.time_offline,
0872 "Offline time mismatch");
0873
0874 TEST_ASSERT(rs->state_entry_time == rs->time[0] +
0875 rs->time[1] + rs->time[2] + rs->time[3],
0876 "runstate times don't add up");
0877 }
0878 kvm_vm_free(vm);
0879 return 0;
0880 }