Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * KVM page table test
0004  *
0005  * Copyright (C) 2021, Huawei, Inc.
0006  *
0007  * Make sure that THP has been enabled or enough HUGETLB pages with specific
0008  * page size have been pre-allocated on your system, if you are planning to
0009  * use hugepages to back the guest memory for testing.
0010  */
0011 
0012 #define _GNU_SOURCE /* for program_invocation_name */
0013 
0014 #include <stdio.h>
0015 #include <stdlib.h>
0016 #include <time.h>
0017 #include <pthread.h>
0018 #include <semaphore.h>
0019 
0020 #include "test_util.h"
0021 #include "kvm_util.h"
0022 #include "processor.h"
0023 #include "guest_modes.h"
0024 
/* Index of the extra memslot added to back the test memory region */
#define TEST_MEM_SLOT_INDEX             1

/* Default size(1GB) of the memory for testing */
#define DEFAULT_TEST_MEM_SIZE       (1 << 30)

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM      0xc0000000
0032 
/* Different guest memory accessing stages */
enum test_stage {
	KVM_BEFORE_MAPPINGS,	/* vCPU threads started; guest does nothing */
	KVM_CREATE_MAPPINGS,	/* touch memory so KVM creates the mappings */
	KVM_UPDATE_MAPPINGS,	/* dirty logging on; mappings updated/split */
	KVM_ADJUST_MAPPINGS,	/* dirty logging off; mappings coalesced back */
	NUM_TEST_STAGES,	/* sentinel: count of real stages */
};
0041 
/* Printable stage names, indexed by enum test_stage */
static const char * const test_stage_string[] = {
	"KVM_BEFORE_MAPPINGS",
	"KVM_CREATE_MAPPINGS",
	"KVM_UPDATE_MAPPINGS",
	"KVM_ADJUST_MAPPINGS",
};
0048 
/*
 * Parameters shared between host and guest; filled in by
 * pre_init_before_test() and exported with sync_global_to_guest().
 */
struct test_args {
	struct kvm_vm *vm;		/* the VM under test */
	uint64_t guest_test_virt_mem;	/* GVA base of the test region */
	uint64_t host_page_size;	/* host page size (getpagesize()) */
	uint64_t host_num_pages;	/* test mem size / host page size */
	uint64_t large_page_size;	/* backing src granularity */
	uint64_t large_num_pages;	/* test mem size / large page size */
	uint64_t host_pages_per_lpage;	/* host pages per large page */
	enum vm_mem_backing_src_type src_type;	/* memory backing source */
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];	/* vCPUs created for the test */
};
0060 
/*
 * Guest variables. Use addr_gva2hva() if these variables need
 * to be changed in host.
 */
static enum test_stage guest_test_stage;

/* Host variables */
static uint32_t nr_vcpus = 1;		/* number of vCPUs to run (-v) */
static struct test_args test_args;	/* shared setup, synced into guest */
static enum test_stage *current_stage;	/* HVA alias of guest_test_stage */
static bool host_quit;			/* tells vcpu_worker threads to exit */

/* Whether the test stage is updated, or completed */
static sem_t test_stage_updated;	/* main -> workers: new stage posted */
static sem_t test_stage_completed;	/* workers -> main: stage finished */

/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
0089 
/*
 * Guest-side worker, run by every vCPU. Loops forever: reads the current
 * stage the host published in guest_test_stage, performs that stage's
 * memory accesses over the test region, then reports completion to the
 * host via GUEST_SYNC().
 *
 * @do_write: access type for KVM_CREATE_MAPPINGS — even-id vCPUs write,
 *            odd-id vCPUs read (set up in vcpu_worker()).
 */
static void guest_code(bool do_write)
{
	struct test_args *p = &test_args;
	enum test_stage *current_stage = &guest_test_stage;
	uint64_t addr;
	int i, j;

	while (true) {
		/* Restart from the base of the test region every pass */
		addr = p->guest_test_virt_mem;

		switch (READ_ONCE(*current_stage)) {
		/*
		 * All vCPU threads will be started in this stage,
		 * where guest code of each vCPU will do nothing.
		 */
		case KVM_BEFORE_MAPPINGS:
			break;

		/*
		 * Before dirty logging, vCPUs concurrently access the first
		 * 8 bytes of each page (host page/large page) within the same
		 * memory region with different accessing types (read/write).
		 * Then KVM will create normal page mappings or huge block
		 * mappings for them.
		 */
		case KVM_CREATE_MAPPINGS:
			for (i = 0; i < p->large_num_pages; i++) {
				if (do_write)
					*(uint64_t *)addr = 0x0123456789ABCDEF;
				else
					READ_ONCE(*(uint64_t *)addr);

				addr += p->large_page_size;
			}
			break;

		/*
		 * During dirty logging, KVM will only update attributes of the
		 * normal page mappings from RO to RW if memory backing src type
		 * is anonymous. In other cases, KVM will split the huge block
		 * mappings into normal page mappings if memory backing src type
		 * is THP or HUGETLB.
		 */
		case KVM_UPDATE_MAPPINGS:
			if (p->src_type == VM_MEM_SRC_ANONYMOUS) {
				for (i = 0; i < p->host_num_pages; i++) {
					*(uint64_t *)addr = 0x0123456789ABCDEF;
					addr += p->host_page_size;
				}
				break;
			}

			for (i = 0; i < p->large_num_pages; i++) {
				/*
				 * Write to the first host page in each large
				 * page region, and trigger break of large pages.
				 */
				*(uint64_t *)addr = 0x0123456789ABCDEF;

				/*
				 * Access the middle host pages in each large
				 * page region. Since dirty logging is enabled,
				 * this will create new mappings at the smallest
				 * granularity.
				 */
				addr += p->large_page_size / 2;
				for (j = 0; j < p->host_pages_per_lpage / 2; j++) {
					READ_ONCE(*(uint64_t *)addr);
					addr += p->host_page_size;
				}
			}
			break;

		/*
		 * After dirty logging is stopped, vCPUs concurrently read
		 * from every single host page. Then KVM will coalesce the
		 * split page mappings back to block mappings. And a TLB
		 * conflict abort could occur here if TLB entries of the
		 * page mappings are not fully invalidated.
		 */
		case KVM_ADJUST_MAPPINGS:
			for (i = 0; i < p->host_num_pages; i++) {
				READ_ONCE(*(uint64_t *)addr);
				addr += p->host_page_size;
			}
			break;

		default:
			/* Host published an out-of-range stage value */
			GUEST_ASSERT(0);
		}

		/* Tell the host this vCPU finished the current stage */
		GUEST_SYNC(1);
	}
}
0184 
/*
 * Host-side per-vCPU thread. Waits for the main thread to post a new test
 * stage on test_stage_updated, runs the vCPU until the guest reports
 * GUEST_SYNC, prints the per-vCPU execution time of the stage, then posts
 * test_stage_completed back to the main thread.
 *
 * @data: the struct kvm_vcpu this thread drives.
 * Returns NULL.
 */
static void *vcpu_worker(void *data)
{
	struct kvm_vcpu *vcpu = data;
	/* Even-id vCPUs write, odd-id vCPUs read (consumed by guest_code) */
	bool do_write = !(vcpu->id % 2);
	struct timespec start;
	struct timespec ts_diff;
	enum test_stage stage;
	int ret;

	vcpu_args_set(vcpu, 1, do_write);

	while (!READ_ONCE(host_quit)) {
		ret = sem_wait(&test_stage_updated);
		TEST_ASSERT(ret == 0, "Error in sem_wait");

		/*
		 * Re-check host_quit after waking: run_test() posts the
		 * semaphore one last time per vCPU solely to release the
		 * threads for exit, with no stage left to run.
		 */
		if (READ_ONCE(host_quit))
			return NULL;

		clock_gettime(CLOCK_MONOTONIC_RAW, &start);
		ret = _vcpu_run(vcpu);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(vcpu->run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu->id);
		stage = READ_ONCE(*current_stage);

		/*
		 * Here we can know the execution time of every
		 * single vcpu running in different test stages.
		 */
		pr_debug("vCPU %d has completed stage %s\n"
			 "execution time is: %ld.%.9lds\n\n",
			 vcpu->id, test_stage_string[stage],
			 ts_diff.tv_sec, ts_diff.tv_nsec);

		ret = sem_post(&test_stage_completed);
		TEST_ASSERT(ret == 0, "Error in sem_post");
	}

	return NULL;
}
0230 
/* Command-line configurable parameters for a test run */
struct test_params {
	uint64_t phys_offset;		/* -p: GPA of the test memslot; 0 = place at top of memory */
	uint64_t test_mem_size;		/* -b: requested size of the test region */
	enum vm_mem_backing_src_type src_type;	/* -s: memory backing source */
};
0236 
/*
 * Create and initialize the VM for one guest mode: align the test memory
 * size, create the VM with its vCPUs, place and map the extra test
 * memslot, export test_args to the guest, and set up the stage
 * semaphores.
 *
 * @mode: guest mode to test.
 * @arg:  struct test_params from the command line.
 * Returns the created VM.
 */
static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
{
	int ret;
	struct test_params *p = arg;
	enum vm_mem_backing_src_type src_type = p->src_type;
	uint64_t large_page_size = get_backing_src_pagesz(src_type);
	uint64_t guest_page_size = vm_guest_mode_params[mode].page_size;
	uint64_t host_page_size = getpagesize();
	uint64_t test_mem_size = p->test_mem_size;
	uint64_t guest_num_pages;
	uint64_t alignment;
	void *host_test_mem;
	struct kvm_vm *vm;

	/* Align up the test memory size to the coarser page granularity */
	alignment = max(large_page_size, guest_page_size);
	test_mem_size = (test_mem_size + alignment - 1) & ~(alignment - 1);

	/* Create a VM with enough guest pages */
	guest_num_pages = test_mem_size / guest_page_size;
	vm = __vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages,
				    guest_code, test_args.vcpus);

	/* Align down GPA of the testing memslot */
	if (!p->phys_offset)
		/* Default: put the slot at the top of physical memory */
		guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
				       guest_page_size;
	else
		guest_test_phys_mem = p->phys_offset;
#ifdef __s390x__
	/* Enforce at least 1M alignment on s390x */
	alignment = max(0x100000UL, alignment);
#endif
	guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);

	/* Set up the shared data structure test_args */
	test_args.vm = vm;
	test_args.guest_test_virt_mem = guest_test_virt_mem;
	test_args.host_page_size = host_page_size;
	test_args.host_num_pages = test_mem_size / host_page_size;
	test_args.large_page_size = large_page_size;
	test_args.large_num_pages = test_mem_size / large_page_size;
	test_args.host_pages_per_lpage = large_page_size / host_page_size;
	test_args.src_type = src_type;

	/* Add an extra memory slot with specified backing src type */
	vm_userspace_mem_region_add(vm, src_type, guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX, guest_num_pages, 0);

	/* Do mapping(GVA->GPA) for the testing memory slot */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

	/* Cache the HVA pointer of the region */
	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

	/* Export shared structure test_args to guest */
	ucall_init(vm, NULL);
	sync_global_to_guest(vm, test_args);

	ret = sem_init(&test_stage_updated, 0, 0);
	TEST_ASSERT(ret == 0, "Error in sem_init");

	ret = sem_init(&test_stage_completed, 0, 0);
	TEST_ASSERT(ret == 0, "Error in sem_init");

	/* Host-side alias of the guest's stage variable */
	current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage));
	/* Out-of-range sentinel; run_test() publishes the first real stage */
	*current_stage = NUM_TEST_STAGES;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
	pr_info("Testing memory backing src type: %s\n",
		vm_mem_backing_src_alias(src_type)->name);
	pr_info("Testing memory backing src granularity: 0x%lx\n",
		large_page_size);
	pr_info("Testing memory size(aligned): 0x%lx\n", test_mem_size);
	pr_info("Guest physical test memory offset: 0x%lx\n",
		guest_test_phys_mem);
	pr_info("Host  virtual  test memory offset: 0x%lx\n",
		(uint64_t)host_test_mem);
	pr_info("Number of testing vCPUs: %d\n", nr_vcpus);

	return vm;
}
0318 
0319 static void vcpus_complete_new_stage(enum test_stage stage)
0320 {
0321     int ret;
0322     int vcpus;
0323 
0324     /* Wake up all the vcpus to run new test stage */
0325     for (vcpus = 0; vcpus < nr_vcpus; vcpus++) {
0326         ret = sem_post(&test_stage_updated);
0327         TEST_ASSERT(ret == 0, "Error in sem_post");
0328     }
0329     pr_debug("All vcpus have been notified to continue\n");
0330 
0331     /* Wait for all the vcpus to complete new test stage */
0332     for (vcpus = 0; vcpus < nr_vcpus; vcpus++) {
0333         ret = sem_wait(&test_stage_completed);
0334         TEST_ASSERT(ret == 0, "Error in sem_wait");
0335 
0336         pr_debug("%d vcpus have completed stage %s\n",
0337              vcpus + 1, test_stage_string[stage]);
0338     }
0339 
0340     pr_debug("All vcpus have completed stage %s\n",
0341          test_stage_string[stage]);
0342 }
0343 
0344 static void run_test(enum vm_guest_mode mode, void *arg)
0345 {
0346     pthread_t *vcpu_threads;
0347     struct kvm_vm *vm;
0348     struct timespec start;
0349     struct timespec ts_diff;
0350     int ret, i;
0351 
0352     /* Create VM with vCPUs and make some pre-initialization */
0353     vm = pre_init_before_test(mode, arg);
0354 
0355     vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
0356     TEST_ASSERT(vcpu_threads, "Memory allocation failed");
0357 
0358     host_quit = false;
0359     *current_stage = KVM_BEFORE_MAPPINGS;
0360 
0361     for (i = 0; i < nr_vcpus; i++)
0362         pthread_create(&vcpu_threads[i], NULL, vcpu_worker,
0363                    test_args.vcpus[i]);
0364 
0365     vcpus_complete_new_stage(*current_stage);
0366     pr_info("Started all vCPUs successfully\n");
0367 
0368     /* Test the stage of KVM creating mappings */
0369     *current_stage = KVM_CREATE_MAPPINGS;
0370 
0371     clock_gettime(CLOCK_MONOTONIC_RAW, &start);
0372     vcpus_complete_new_stage(*current_stage);
0373     ts_diff = timespec_elapsed(start);
0374 
0375     pr_info("KVM_CREATE_MAPPINGS: total execution time: %ld.%.9lds\n\n",
0376         ts_diff.tv_sec, ts_diff.tv_nsec);
0377 
0378     /* Test the stage of KVM updating mappings */
0379     vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
0380                 KVM_MEM_LOG_DIRTY_PAGES);
0381 
0382     *current_stage = KVM_UPDATE_MAPPINGS;
0383 
0384     clock_gettime(CLOCK_MONOTONIC_RAW, &start);
0385     vcpus_complete_new_stage(*current_stage);
0386     ts_diff = timespec_elapsed(start);
0387 
0388     pr_info("KVM_UPDATE_MAPPINGS: total execution time: %ld.%.9lds\n\n",
0389         ts_diff.tv_sec, ts_diff.tv_nsec);
0390 
0391     /* Test the stage of KVM adjusting mappings */
0392     vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
0393 
0394     *current_stage = KVM_ADJUST_MAPPINGS;
0395 
0396     clock_gettime(CLOCK_MONOTONIC_RAW, &start);
0397     vcpus_complete_new_stage(*current_stage);
0398     ts_diff = timespec_elapsed(start);
0399 
0400     pr_info("KVM_ADJUST_MAPPINGS: total execution time: %ld.%.9lds\n\n",
0401         ts_diff.tv_sec, ts_diff.tv_nsec);
0402 
0403     /* Tell the vcpu thread to quit */
0404     host_quit = true;
0405     for (i = 0; i < nr_vcpus; i++) {
0406         ret = sem_post(&test_stage_updated);
0407         TEST_ASSERT(ret == 0, "Error in sem_post");
0408     }
0409 
0410     for (i = 0; i < nr_vcpus; i++)
0411         pthread_join(vcpu_threads[i], NULL);
0412 
0413     ret = sem_destroy(&test_stage_updated);
0414     TEST_ASSERT(ret == 0, "Error in sem_destroy");
0415 
0416     ret = sem_destroy(&test_stage_completed);
0417     TEST_ASSERT(ret == 0, "Error in sem_destroy");
0418 
0419     free(vcpu_threads);
0420     ucall_uninit(vm);
0421     kvm_vm_free(vm);
0422 }
0423 
/* Print the usage/help text for the test to stdout. */
static void help(char *name)
{
	printf("\nusage: %s [-h] [-p offset] [-m mode] "
	       "[-b mem-size] [-v vcpus] [-s mem-type]\n\n", name);
	printf(" -p: specify guest physical test memory offset\n");
	printf("     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -b: specify size of the memory region for testing. e.g. 10M or 3G.\n");
	printf("     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run\n");
	printf("     (default: 1)\n");
	backing_src_help("-s");
	printf("\n");
}
0440 
0441 int main(int argc, char *argv[])
0442 {
0443     int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
0444     struct test_params p = {
0445         .test_mem_size = DEFAULT_TEST_MEM_SIZE,
0446         .src_type = DEFAULT_VM_MEM_SRC,
0447     };
0448     int opt;
0449 
0450     guest_modes_append_default();
0451 
0452     while ((opt = getopt(argc, argv, "hp:m:b:v:s:")) != -1) {
0453         switch (opt) {
0454         case 'p':
0455             p.phys_offset = strtoull(optarg, NULL, 0);
0456             break;
0457         case 'm':
0458             guest_modes_cmdline(optarg);
0459             break;
0460         case 'b':
0461             p.test_mem_size = parse_size(optarg);
0462             break;
0463         case 'v':
0464             nr_vcpus = atoi(optarg);
0465             TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
0466                     "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
0467             break;
0468         case 's':
0469             p.src_type = parse_backing_src_type(optarg);
0470             break;
0471         case 'h':
0472         default:
0473             help(argv[0]);
0474             exit(0);
0475         }
0476     }
0477 
0478     for_each_guest_mode(run_test, &p);
0479 
0480     return 0;
0481 }