0001
0002
0003
0004
0005
0006
0007
0008 #define _GNU_SOURCE
0009
0010 #include <stdio.h>
0011 #include <stdlib.h>
0012 #include <pthread.h>
0013 #include <semaphore.h>
0014 #include <sys/types.h>
0015 #include <signal.h>
0016 #include <errno.h>
0017 #include <linux/bitmap.h>
0018 #include <linux/bitops.h>
0019 #include <linux/atomic.h>
0020
0021 #include "kvm_util.h"
0022 #include "test_util.h"
0023 #include "guest_modes.h"
0024 #include "processor.h"
0025
0026
/* The memory slot index used for the dirty-logged test region */
#define TEST_MEM_SLOT_INDEX 1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000

/* How many pages the guest dirties per GUEST_SYNC loop */
#define TEST_PAGES_PER_LOOP 1024

/* How many host verification iterations to run by default (32) */
#define TEST_HOST_LOOP_N 32UL

/* Default interval (in ms) between host iterations */
#define TEST_HOST_LOOP_INTERVAL 10UL

/*
 * s390x is big-endian, while the test's dirty bitmaps use little-endian
 * bit numbering; XOR-swizzling the bit number within each byte converts
 * between the two.  On little-endian targets the plain bitops suffice.
 */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
	set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
	clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le test_bit
# define set_bit_le set_bit
# define clear_bit_le clear_bit
# define test_and_set_bit_le test_and_set_bit
# define test_and_clear_bit_le test_and_clear_bit
#endif

/* Default number of entries in the per-vcpu dirty ring (overridable via -c) */
#define TEST_DIRTY_RING_COUNT 65536

/* Signal used to kick the vcpu thread out of KVM_RUN */
#define SIG_IPI SIGUSR1
0065
0066
0067
0068
0069
0070
0071
/*
 * Variables shared with the guest via sync_global_to_guest() in run_test().
 * The guest reads them; the host writes them before/during the test.
 */
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
/* Random page indices generated by the host for the guest to dirty */
static uint64_t random_array[TEST_PAGES_PER_LOOP];
/* Current host iteration number; the guest stamps pages with this value */
static uint64_t iteration;

/*
 * Guest physical start of the test memory slot.  When no -p offset is
 * given, run_test() places it at the top of guest physical memory
 * (max_gfn - guest_num_pages, aligned down to the host page size).
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual address at which the test region is mapped.  Fixed so it
 * presumably stays clear of the loaded test code — see the -p warning in
 * help().
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
0090
0091
0092
0093
0094
/*
 * Guest-side main loop: stamp the current iteration number into the first
 * 8 bytes of randomly chosen pages inside the test region, forever.
 * After each batch of TEST_PAGES_PER_LOOP writes, GUEST_SYNC(1) returns
 * control to the host so it can refill random_array and collect the log.
 */
static void guest_code(void)
{
	uint64_t addr;
	int i;

	/*
	 * Priming pass: touch every page once up front.  NOTE(review):
	 * upstream does this because on s390x a first write can mark a
	 * whole 1M segment dirty at once — touching everything first
	 * keeps later iterations' dirty sets predictable; confirm if
	 * porting.
	 */
	for (i = 0; i < guest_num_pages; i++) {
		addr = guest_test_virt_mem + i * guest_page_size;
		*(uint64_t *)addr = READ_ONCE(iteration);
	}

	while (true) {
		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
			addr = guest_test_virt_mem;
			/* Pick a host-generated random page in the region */
			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
				* guest_page_size;
			/* Write at a host-page-aligned spot so the host can find it */
			addr = align_down(addr, host_page_size);
			*(uint64_t *)addr = READ_ONCE(iteration);
		}

		/* Tell the host we need a fresh batch of random numbers */
		GUEST_SYNC(1);
	}
}
0124
0125
/* Set by run_test() to ask the vcpu worker thread to exit its loop */
static bool host_quit;

/* Host mapping of the test memory slot (set via addr_gpa2hva()) */
static void *host_test_mem;
static uint64_t host_num_pages;

/* Counters for the final statistics printout only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;

/* Semaphore pair synchronizing the main thread and the vcpu thread */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;

/*
 * Set by the main thread to request the vcpu thread to pause at its next
 * sync point (vcpu_handle_sync_stop()).  The vcpu thread clears it,
 * posts sem_vcpu_stop, then blocks on sem_vcpu_cont.
 */
static atomic_t vcpu_sync_stop_requested;

/*
 * Written by the vcpu thread (dirty-ring mode) before it posts
 * sem_vcpu_stop: true when the stop was caused by a
 * KVM_EXIT_DIRTY_RING_FULL exit rather than a SIG_IPI kick.
 */
static bool dirty_ring_vcpu_ring_full;

/*
 * Offset of the last GFN harvested from the dirty ring.  Used by
 * vm_dirty_log_verify() to forgive one special page: when the ring
 * fills, the final recorded GFN may be reported dirty before the
 * guest's write actually landed, so its value can lag — presumably
 * matching kvm_vm_reset_dirty_ring()'s semantics; see the tolerance
 * checks in vm_dirty_log_verify().
 */
static uint64_t dirty_ring_last_page;

enum log_mode_t {
	/* Only use KVM_GET_DIRTY_LOG for logging */
	LOG_MODE_DIRTY_LOG = 0,

	/* Use KVM_GET_DIRTY_LOG plus KVM_CLEAR_DIRTY_LOG (manual protect) */
	LOG_MODE_CLEAR_LOG = 1,

	/* Use the per-vcpu dirty ring for logging */
	LOG_MODE_DIRTY_RING = 2,

	LOG_MODE_NUM,

	/* Sentinel meaning "run every mode in sequence" */
	LOG_MODE_ALL = LOG_MODE_NUM,
};

/* Mode selected on the command line (-M); defaults to running all modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for the current run_test() invocation */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
/* Dirty ring size in entries, overridable with -c */
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
0192
0193 static void vcpu_kick(void)
0194 {
0195 pthread_kill(vcpu_thread, SIG_IPI);
0196 }
0197
0198
0199
0200
0201
/*
 * sem_wait() that retries on EINTR, so a stray SIG_IPI aimed at the
 * thread cannot make the wait return early.
 */
static void sem_wait_until(sem_t *sem)
{
	int rc;

	do {
		rc = sem_wait(sem);
	} while (rc == -1 && errno == EINTR);
}
0210
0211 static bool clear_log_supported(void)
0212 {
0213 return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
0214 }
0215
0216 static void clear_log_create_vm_done(struct kvm_vm *vm)
0217 {
0218 u64 manual_caps;
0219
0220 manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
0221 TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
0222 manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
0223 KVM_DIRTY_LOG_INITIALLY_SET);
0224 vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
0225 }
0226
0227 static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
0228 void *bitmap, uint32_t num_pages)
0229 {
0230 kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
0231 }
0232
0233 static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
0234 void *bitmap, uint32_t num_pages)
0235 {
0236 kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
0237 kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
0238 }
0239
0240
/*
 * vcpu-thread side of the sync-stop handshake: if the main thread has
 * requested a pause, acknowledge it and block until released.
 */
static void vcpu_handle_sync_stop(void)
{
	if (atomic_read(&vcpu_sync_stop_requested)) {
		/*
		 * Clear the request *before* posting sem_vcpu_stop, so
		 * that once the main thread's sem_wait returns it can
		 * assert the flag is already false (see run_test()).
		 */
		atomic_set(&vcpu_sync_stop_requested, false);
		sem_post(&sem_vcpu_stop);
		sem_wait_until(&sem_vcpu_cont);
	}
}
0250
0251 static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
0252 {
0253 struct kvm_run *run = vcpu->run;
0254
0255 TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
0256 "vcpu run failed: errno=%d", err);
0257
0258 TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
0259 "Invalid guest sync status: exit_reason=%s\n",
0260 exit_reason_str(run->exit_reason));
0261
0262 vcpu_handle_sync_stop();
0263 }
0264
0265 static bool dirty_ring_supported(void)
0266 {
0267 return kvm_has_cap(KVM_CAP_DIRTY_LOG_RING);
0268 }
0269
0270 static void dirty_ring_create_vm_done(struct kvm_vm *vm)
0271 {
0272
0273
0274
0275
0276 vm_enable_dirty_ring(vm, test_dirty_ring_count *
0277 sizeof(struct kvm_dirty_gfn));
0278 }
0279
0280 static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
0281 {
0282 return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
0283 }
0284
0285 static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
0286 {
0287 gfn->flags = KVM_DIRTY_GFN_F_RESET;
0288 }
0289
/*
 * Drain every currently-dirty entry from one vcpu's dirty ring into
 * @bitmap.  @fetch_index is the caller-owned consumer cursor; it is
 * advanced past each harvested entry.  Returns the number of entries
 * collected.
 */
static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
				       int slot, void *bitmap,
				       uint32_t num_pages, uint32_t *fetch_index)
{
	struct kvm_dirty_gfn *cur;
	uint32_t count = 0;

	while (true) {
		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
		/* A non-dirty entry means we caught up with the producer */
		if (!dirty_gfn_is_dirtied(cur))
			break;
		TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
			    "%u != %u", cur->slot, slot);
		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
			    "0x%llx >= 0x%x", cur->offset, num_pages);

		set_bit_le(cur->offset, bitmap);
		/* Remember the last harvested GFN for vm_dirty_log_verify() */
		dirty_ring_last_page = cur->offset;
		/*
		 * Flag the entry as collected only after its payload has
		 * been consumed — the kernel may recycle it afterwards.
		 */
		dirty_gfn_set_collected(cur);
		(*fetch_index)++;
		count++;
	}

	return count;
}
0315
0316 static void dirty_ring_wait_vcpu(void)
0317 {
0318
0319 vcpu_kick();
0320 sem_wait_until(&sem_vcpu_stop);
0321 }
0322
0323 static void dirty_ring_continue_vcpu(void)
0324 {
0325 pr_info("Notifying vcpu to continue\n");
0326 sem_post(&sem_vcpu_cont);
0327 }
0328
/*
 * Dirty-ring collection: stop the vcpu, harvest its ring into @bitmap,
 * reset the harvested entries in the kernel, and let the vcpu resume.
 * The continue point differs depending on whether the vcpu stopped due
 * to a full ring (must drain before resuming) or a mere kick.
 */
static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					   void *bitmap, uint32_t num_pages)
{
	/* Persistent consumer cursor — valid because we only have one vcpu */
	static uint32_t fetch_index = 0;
	uint32_t count = 0, cleared;
	bool continued_vcpu = false;

	dirty_ring_wait_vcpu();

	if (!dirty_ring_vcpu_ring_full) {
		/*
		 * Not a ring-full event: the ring has room, so it is
		 * safe to let the vcpu run concurrently with harvesting.
		 */
		dirty_ring_continue_vcpu();
		continued_vcpu = true;
	}

	/* Only one vcpu, so only one ring to drain */
	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
				       slot, bitmap, num_pages, &fetch_index);

	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);

	/* The kernel must reset exactly the entries we collected */
	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
		    "with collected (%u)", cleared, count);

	if (!continued_vcpu) {
		TEST_ASSERT(dirty_ring_vcpu_ring_full,
			    "Didn't continue vcpu even without ring full");
		/* Ring was full; resume only now that it has been drained */
		dirty_ring_continue_vcpu();
	}

	pr_info("Iteration %ld collected %u pages\n", iteration, count);
}
0366
/*
 * Post-KVM_RUN hook for dirty-ring mode.  Three outcomes are legal:
 * a normal UCALL_SYNC, a KVM_EXIT_DIRTY_RING_FULL exit, or an EINTR
 * caused by a SIG_IPI kick.  The latter two pause the vcpu thread
 * until the collector releases it.
 */
static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	struct kvm_run *run = vcpu->run;

	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
		/* Normal sync point — nothing to do, keep running */
		;
	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
		   (ret == -1 && err == EINTR)) {
		/*
		 * Publish whether this stop is a ring-full event before
		 * posting sem_vcpu_stop — the collector reads the flag
		 * right after its sem_wait returns.
		 */
		WRITE_ONCE(dirty_ring_vcpu_ring_full,
			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
		sem_post(&sem_vcpu_stop);
		pr_info("vcpu stops because %s...\n",
			dirty_ring_vcpu_ring_full ?
			"dirty ring is full" : "vcpu is kicked out");
		sem_wait_until(&sem_vcpu_cont);
		pr_info("vcpu continues now.\n");
	} else {
		TEST_ASSERT(false, "Invalid guest sync status: "
			    "exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));
	}
}
0392
0393 static void dirty_ring_before_vcpu_join(void)
0394 {
0395
0396 sem_post(&sem_vcpu_cont);
0397 }
0398
/* Per-mode hook table; any hook except collect_dirty_pages may be NULL. */
struct log_mode {
	const char *name;
	/* Return true if this mode is supported by the host kernel */
	bool (*supported)(void);
	/* Hook when the vm creation is done (before vcpu creation) */
	void (*create_vm_done)(struct kvm_vm *vm);
	/* Hook to collect the dirty pages into the bitmap provided */
	void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
				     void *bitmap, uint32_t num_pages);
	/* Hook to call when after each vcpu run */
	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
	/* Hook to call right before the vcpu thread is joined */
	void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
	{
		.name = "dirty-log",
		.collect_dirty_pages = dirty_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "clear-log",
		.supported = clear_log_supported,
		.create_vm_done = clear_log_create_vm_done,
		.collect_dirty_pages = clear_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "dirty-ring",
		.supported = dirty_ring_supported,
		.create_vm_done = dirty_ring_create_vm_done,
		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
		.before_vcpu_join = dirty_ring_before_vcpu_join,
		.after_vcpu_run = dirty_ring_after_vcpu_run,
	},
};
0433
0434
0435
0436
0437
0438
0439
0440
0441 static unsigned long *host_bmap_track;
0442
0443 static void log_modes_dump(void)
0444 {
0445 int i;
0446
0447 printf("all");
0448 for (i = 0; i < LOG_MODE_NUM; i++)
0449 printf(", %s", log_modes[i].name);
0450 printf("\n");
0451 }
0452
0453 static bool log_mode_supported(void)
0454 {
0455 struct log_mode *mode = &log_modes[host_log_mode];
0456
0457 if (mode->supported)
0458 return mode->supported();
0459
0460 return true;
0461 }
0462
0463 static void log_mode_create_vm_done(struct kvm_vm *vm)
0464 {
0465 struct log_mode *mode = &log_modes[host_log_mode];
0466
0467 if (mode->create_vm_done)
0468 mode->create_vm_done(vm);
0469 }
0470
0471 static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
0472 void *bitmap, uint32_t num_pages)
0473 {
0474 struct log_mode *mode = &log_modes[host_log_mode];
0475
0476 TEST_ASSERT(mode->collect_dirty_pages != NULL,
0477 "collect_dirty_pages() is required for any log mode!");
0478 mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
0479 }
0480
0481 static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
0482 {
0483 struct log_mode *mode = &log_modes[host_log_mode];
0484
0485 if (mode->after_vcpu_run)
0486 mode->after_vcpu_run(vcpu, ret, err);
0487 }
0488
0489 static void log_mode_before_vcpu_join(void)
0490 {
0491 struct log_mode *mode = &log_modes[host_log_mode];
0492
0493 if (mode->before_vcpu_join)
0494 mode->before_vcpu_join();
0495 }
0496
0497 static void generate_random_array(uint64_t *guest_array, uint64_t size)
0498 {
0499 uint64_t i;
0500
0501 for (i = 0; i < size; i++)
0502 guest_array[i] = random();
0503 }
0504
/*
 * vCPU thread: repeatedly refill the guest's random_array and run the
 * vcpu until run_test() sets host_quit.  The vcpu runs in its own
 * thread so the main thread can unblock it (even from a dirty-ring
 * full stall) with SIG_IPI.
 */
static void *vcpu_worker(void *data)
{
	int ret;
	struct kvm_vcpu *vcpu = data;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t *guest_array;
	uint64_t pages_count = 0;
	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
						 + sizeof(sigset_t));
	sigset_t *sigset = (sigset_t *) &sigmask->sigset;

	/*
	 * Install a KVM_RUN signal mask with SIG_IPI unblocked, so the
	 * (process-wide blocked) SIG_IPI interrupts the ioctl with
	 * -EINTR but is otherwise left pending for sigwait() below.
	 * NOTE(review): len = 8 is presumably the kernel's expected
	 * sigset size (8 bytes on Linux) — confirm if porting.
	 */
	sigmask->len = 8;
	pthread_sigmask(0, NULL, sigset);
	sigdelset(sigset, SIG_IPI);
	vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);

	/* Reuse the buffer as the set of signals to sigwait() on */
	sigemptyset(sigset);
	sigaddset(sigset, SIG_IPI);

	/* Host view of the guest's random_array */
	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);

	while (!READ_ONCE(host_quit)) {
		/* New batch of random pages for the guest to dirty */
		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
		pages_count += TEST_PAGES_PER_LOOP;
		ret = __vcpu_run(vcpu);
		if (ret == -1 && errno == EINTR) {
			/* Kicked out by SIG_IPI — consume the pending signal */
			int sig = -1;
			sigwait(sigset, &sig);
			assert(sig == SIG_IPI);
		}
		log_mode_after_vcpu_run(vcpu, ret, errno);
	}

	pr_info("Dirtied %"PRIu64" pages\n", pages_count);

	return NULL;
}
0549
/*
 * Verify the collected dirty bitmap @bmap against the page contents:
 * a page reported dirty must hold the current or previous iteration
 * number; a page reported clean must hold a value <= the current
 * iteration.  Dirty-ring mode gets two documented exemptions.
 */
static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
	/* Host pages per guest page (guest pages can be larger) */
	uint64_t step = vm_num_host_pages(mode, 1);
	uint64_t page;
	uint64_t *value_ptr;
	uint64_t min_iter = 0;

	for (page = 0; page < host_num_pages; page += step) {
		value_ptr = host_test_mem + page * host_page_size;

		/*
		 * Pages flagged in the previous iteration (write raced
		 * with collection) must show up dirty this time.
		 */
		if (test_and_clear_bit_le(page, host_bmap_track)) {
			host_track_next_count++;
			TEST_ASSERT(test_bit_le(page, bmap),
				    "Page %"PRIu64" should have its dirty bit "
				    "set in this iteration but it is missing",
				    page);
		}

		if (test_and_clear_bit_le(page, bmap)) {
			bool matched;

			host_dirty_count++;

			/*
			 * A dirty page should carry either the current
			 * iteration number or the previous one (the guest
			 * may not have re-stamped it yet this iteration).
			 */
			matched = (*value_ptr == iteration ||
				   *value_ptr == iteration - 1);

			if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
				if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
					/*
					 * Dirty-ring exemption 1: the vcpu
					 * can be stopped mid-iteration by a
					 * ring-full event, so a page written
					 * just before the stop may still
					 * carry a value two iterations old.
					 * Allow it once, then tighten the
					 * floor so the same stale value
					 * cannot be accepted again later.
					 */
					min_iter = iteration - 1;
					continue;
				} else if (page == dirty_ring_last_page) {
					/*
					 * Dirty-ring exemption 2: the last
					 * harvested GFN may be reported
					 * dirty before the guest write
					 * actually landed — see the comment
					 * on dirty_ring_last_page.
					 */
					continue;
				}
			}

			TEST_ASSERT(matched,
				    "Set page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
		} else {
			host_clear_count++;

			/*
			 * A clean page can hold any value written in a
			 * past or current iteration, but never one from
			 * the future.
			 */
			TEST_ASSERT(*value_ptr <= iteration,
				    "Clear page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
			if (*value_ptr == iteration) {
				/*
				 * The page was just written but reported
				 * clean: the write raced with collection,
				 * so the next iteration's log must report
				 * it dirty — remember it.
				 */
				set_bit_le(page, host_bmap_track);
			}
		}
	}
}
0667
0668 static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
0669 uint64_t extra_mem_pages, void *guest_code)
0670 {
0671 struct kvm_vm *vm;
0672
0673 pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
0674
0675 vm = __vm_create(mode, 1, extra_mem_pages);
0676
0677 log_mode_create_vm_done(vm);
0678 *vcpu = vm_vcpu_add(vm, 0, guest_code);
0679 return vm;
0680 }
0681
/* Size of the dirty-tracked region: 2^30 = 1G */
#define DIRTY_MEM_BITS 30
/* Smallest possible page shift, used for worst-case page-table sizing */
#define PAGE_SHIFT_4K  12

/* Per-run parameters collected from the command line in main() */
struct test_params {
	unsigned long iterations;
	unsigned long interval;
	uint64_t phys_offset;
};
0690
/*
 * Run one full dirty-log test for the given guest mode and the globally
 * selected host_log_mode: build the VM and test memory slot, spawn the
 * vcpu worker, then iterate collect-stop-verify-continue p->iterations
 * times.
 */
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	unsigned long *bmap;

	if (!log_mode_supported()) {
		print_skip("Log mode '%s' not supported",
			   log_modes[host_log_mode].name);
		return;
	}

	/*
	 * Reserve twice the test memory (computed with the smallest, 4K,
	 * page size) as extra pages so page tables for the 1G+ region
	 * always fit, whatever the guest page size ends up being.
	 */
	vm = create_vm(mode, &vcpu,
		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);

	guest_page_size = vm->page_size;
	/*
	 * +3 pages on top of the 1G region so the test also covers a
	 * region that is not a neat power-of-two / host-page multiple.
	 */
	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

	host_page_size = getpagesize();
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

	if (!p->phys_offset) {
		/* Default: place the slot at the top of guest physical memory */
		guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
				      guest_page_size;
		guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
	} else {
		guest_test_phys_mem = p->phys_offset;
	}

#ifdef __s390x__
	/* Align to 1M segment size on s390x */
	guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif

	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

	bmap = bitmap_zalloc(host_num_pages);
	host_bmap_track = bitmap_zalloc(host_num_pages);

	/* Add the dirty-logged memory slot under test */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX,
				    guest_num_pages,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/* Map the slot into the guest's virtual address space */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

	/* Cache the host virtual address of the region for verification */
	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

	ucall_init(vm, NULL);

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, host_page_size);
	sync_global_to_guest(vm, guest_page_size);
	sync_global_to_guest(vm, guest_test_virt_mem);
	sync_global_to_guest(vm, guest_num_pages);

	/* Start the iterations */
	iteration = 1;
	sync_global_to_guest(vm, iteration);
	host_quit = false;
	host_dirty_count = 0;
	host_clear_count = 0;
	host_track_next_count = 0;

	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);

	while (iteration < p->iterations) {
		/* Give the vcpu thread some time to dirty some pages */
		usleep(p->interval * 1000);
		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
					     bmap, host_num_pages);

		/*
		 * Pause the vcpu at its next sync point before verifying,
		 * so page values cannot change under the checks below.
		 */
		atomic_set(&vcpu_sync_stop_requested, true);
		sem_wait_until(&sem_vcpu_stop);
		/*
		 * In the bitmap modes the flag must already be cleared by
		 * vcpu_handle_sync_stop() (it clears before posting); the
		 * dirty-ring path can post sem_vcpu_stop from its own
		 * stop points, so the flag state is not guaranteed there.
		 */
		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
		       atomic_read(&vcpu_sync_stop_requested) == false);
		vm_dirty_log_verify(mode, bmap);
		sem_post(&sem_vcpu_cont);

		iteration++;
		sync_global_to_guest(vm, iteration);
	}

	/* Tell the vcpu thread to quit, wake it if needed, and reap it */
	host_quit = true;
	log_mode_before_vcpu_join();
	pthread_join(vcpu_thread, NULL);

	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
		"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
		host_track_next_count);

	free(bmap);
	free(host_bmap_track);
	ucall_uninit(vm);
	kvm_vm_free(vm);
}
0817
0818 static void help(char *name)
0819 {
0820 puts("");
0821 printf("usage: %s [-h] [-i iterations] [-I interval] "
0822 "[-p offset] [-m mode]\n", name);
0823 puts("");
0824 printf(" -c: specify dirty ring size, in number of entries\n");
0825 printf(" (only useful for dirty-ring test; default: %"PRIu32")\n",
0826 TEST_DIRTY_RING_COUNT);
0827 printf(" -i: specify iteration counts (default: %"PRIu64")\n",
0828 TEST_HOST_LOOP_N);
0829 printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
0830 TEST_HOST_LOOP_INTERVAL);
0831 printf(" -p: specify guest physical test memory offset\n"
0832 " Warning: a low offset can conflict with the loaded test code.\n");
0833 printf(" -M: specify the host logging mode "
0834 "(default: run all log modes). Supported modes: \n\t");
0835 log_modes_dump();
0836 guest_modes_help();
0837 puts("");
0838 exit(0);
0839 }
0840
int main(int argc, char *argv[])
{
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.interval = TEST_HOST_LOOP_INTERVAL,
	};
	int opt, i;
	sigset_t sigset;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
		switch (opt) {
		case 'c':
			/* Dirty ring size, in number of entries */
			test_dirty_ring_count = strtol(optarg, NULL, 10);
			break;
		case 'i':
			p.iterations = strtol(optarg, NULL, 10);
			break;
		case 'I':
			p.interval = strtol(optarg, NULL, 10);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			/* Guest mode selection (e.g. page size / VA bits) */
			guest_modes_cmdline(optarg);
			break;
		case 'M':
			/* Host logging mode: "all" or one of log_modes[] */
			if (!strcmp(optarg, "all")) {
				host_log_mode_option = LOG_MODE_ALL;
				break;
			}
			for (i = 0; i < LOG_MODE_NUM; i++) {
				if (!strcmp(optarg, log_modes[i].name)) {
					pr_info("Setting log mode to: '%s'\n",
						optarg);
					host_log_mode_option = i;
					break;
				}
			}
			/* Loop ran to completion without a match */
			if (i == LOG_MODE_NUM) {
				printf("Log mode '%s' invalid. Please choose "
				       "from: ", optarg);
				log_modes_dump();
				exit(1);
			}
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	/* >2 iterations: the dirty-ring -2 tolerance needs at least three */
	TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
	TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");

	pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
		p.iterations, p.interval);

	srandom(time(0));

	/*
	 * Block SIG_IPI process-wide; the vcpu thread selectively
	 * unblocks it around KVM_RUN via KVM_SET_SIGNAL_MASK and
	 * consumes it with sigwait() (see vcpu_worker()).
	 */
	sigemptyset(&sigset);
	sigaddset(&sigset, SIG_IPI);
	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	if (host_log_mode_option == LOG_MODE_ALL) {
		/* Run each log mode in turn */
		for (i = 0; i < LOG_MODE_NUM; i++) {
			pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
			host_log_mode = i;
			for_each_guest_mode(run_test, &p);
		}
	} else {
		host_log_mode = host_log_mode_option;
		for_each_guest_mode(run_test, &p);
	}

	return 0;
}