/*
 * arch_timer - Stress test for KVM's handling of the arm64 architectural
 * timers.
 *
 * The test exercises the virtual and the physical timer, each via both the
 * CVAL and the TVAL interface, giving four test stages. For every stage,
 * each vCPU repeatedly programs a timer interrupt, busy-waits slightly
 * longer than the programmed period, and checks that exactly one interrupt
 * was delivered. The guest IRQ handler validates that the interrupt is the
 * expected one and that the timer state (counter, compare value, control
 * register) is consistent.
 *
 * The number of vCPUs, iterations per stage, timer period, and an optional
 * vCPU-migration interval are configurable on the command line.
 */
#define _GNU_SOURCE

#include <stdlib.h>
#include <pthread.h>
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <sys/sysinfo.h>

#include "kvm_util.h"
#include "processor.h"
#include "delay.h"
#include "arch_timer.h"
#include "gic.h"
#include "vgic.h"

#define NR_VCPUS_DEF 4
#define NR_TEST_ITERS_DEF 5
#define TIMER_TEST_PERIOD_MS_DEF 10
#define TIMER_TEST_ERR_MARGIN_US 100
#define TIMER_TEST_MIGRATION_FREQ_MS 2

struct test_args {
	int nr_vcpus;
	int nr_iter;
	int timer_period_ms;
	int migration_freq_ms;
};

static struct test_args test_args = {
	.nr_vcpus = NR_VCPUS_DEF,
	.nr_iter = NR_TEST_ITERS_DEF,
	.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
	.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
};

#define msecs_to_usecs(msec) ((msec) * 1000LL)

#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL

enum guest_stage {
	GUEST_STAGE_VTIMER_CVAL = 1,
	GUEST_STAGE_VTIMER_TVAL,
	GUEST_STAGE_PTIMER_CVAL,
	GUEST_STAGE_PTIMER_TVAL,
	GUEST_STAGE_MAX,
};

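/* Per-vCPU data shared between the host and the guest */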
struct test_vcpu_shared_data {
	int nr_iter;
	enum guest_stage guest_stage;
	uint64_t xcnt;
};

static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];

static int vtimer_irq, ptimer_irq;

static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;

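/*
 * Program the timer for the current test stage: set the deadline one
 * timer period into the future (via CVAL or TVAL), record the current
 * counter value for later validation, and enable the timer with the
 * interrupt unmasked.
 */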
static void
guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
{
	switch (shared_data->guest_stage) {
	case GUEST_STAGE_VTIMER_CVAL:
		timer_set_next_cval_ms(VIRTUAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(VIRTUAL);
		timer_set_ctl(VIRTUAL, CTL_ENABLE);
		break;
	case GUEST_STAGE_VTIMER_TVAL:
		timer_set_next_tval_ms(VIRTUAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(VIRTUAL);
		timer_set_ctl(VIRTUAL, CTL_ENABLE);
		break;
	case GUEST_STAGE_PTIMER_CVAL:
		timer_set_next_cval_ms(PHYSICAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(PHYSICAL);
		timer_set_ctl(PHYSICAL, CTL_ENABLE);
		break;
	case GUEST_STAGE_PTIMER_TVAL:
		timer_set_next_tval_ms(PHYSICAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(PHYSICAL);
		timer_set_ctl(PHYSICAL, CTL_ENABLE);
		break;
	default:
		GUEST_ASSERT(0);
	}
}

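/*
 * Called from the guest IRQ handler. Masks the timer that fired, then
 * checks that the INTID matches the timer under test, that the counter
 * has passed the programmed compare value, and that the ISTATUS bit is
 * set in the timer control register.
 */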
static void guest_validate_irq(unsigned int intid,
			       struct test_vcpu_shared_data *shared_data)
{
	enum guest_stage stage = shared_data->guest_stage;
	uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
	unsigned long xctl = 0;
	unsigned int timer_irq = 0;

	if (stage == GUEST_STAGE_VTIMER_CVAL ||
	    stage == GUEST_STAGE_VTIMER_TVAL) {
		xctl = timer_get_ctl(VIRTUAL);
		timer_set_ctl(VIRTUAL, CTL_IMASK);
		xcnt = timer_get_cntct(VIRTUAL);
		cval = timer_get_cval(VIRTUAL);
		timer_irq = vtimer_irq;
	} else if (stage == GUEST_STAGE_PTIMER_CVAL ||
		   stage == GUEST_STAGE_PTIMER_TVAL) {
		xctl = timer_get_ctl(PHYSICAL);
		timer_set_ctl(PHYSICAL, CTL_IMASK);
		xcnt = timer_get_cntct(PHYSICAL);
		cval = timer_get_cval(PHYSICAL);
		timer_irq = ptimer_irq;
	} else {
		GUEST_ASSERT(0);
	}

	xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);

	/* Make sure we are dealing with the correct timer IRQ */
	GUEST_ASSERT_2(intid == timer_irq, intid, timer_irq);

	/* Basic 'timer condition met' check */
	GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us);
	GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl);
}

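/*
 * Guest IRQ handler: acknowledge the interrupt, validate it against the
 * current stage, bump the per-vCPU iteration count, and signal EOI.
 */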
static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int intid = gic_get_and_ack_irq();
	uint32_t cpu = guest_get_vcpuid();
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	guest_validate_irq(intid, shared_data);

	WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);

	gic_set_eoi(intid);
}

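/*
 * Run one test stage: for each iteration, program the timer, wait for
 * one timer period plus an error margin, and assert that exactly one
 * more interrupt has been handled in that window.
 */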
static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
			    enum guest_stage stage)
{
	uint32_t irq_iter, config_iter;

	shared_data->guest_stage = stage;
	shared_data->nr_iter = 0;

	for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
		/* Setup the next interrupt */
		guest_configure_timer_action(shared_data);

		/* Setup a timeout for the interrupt to arrive */
		udelay(msecs_to_usecs(test_args.timer_period_ms) +
		       TIMER_TEST_ERR_MARGIN_US);

		irq_iter = READ_ONCE(shared_data->nr_iter);
		GUEST_ASSERT_2(config_iter + 1 == irq_iter,
			       config_iter + 1, irq_iter);
	}
}

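/*
 * Guest entry point: initialize the GIC, mask both timers, enable their
 * IRQ lines, and then run all four test stages back to back.
 */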
static void guest_code(void)
{
	uint32_t cpu = guest_get_vcpuid();
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	local_irq_disable();

	gic_init(GIC_V3, test_args.nr_vcpus,
		 (void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA);

	timer_set_ctl(VIRTUAL, CTL_IMASK);
	timer_set_ctl(PHYSICAL, CTL_IMASK);

	gic_irq_enable(vtimer_irq);
	gic_irq_enable(ptimer_irq);
	local_irq_enable();

	guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL);
	guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL);
	guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL);
	guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL);

	GUEST_DONE();
}

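/*
 * Per-vCPU runner thread: run the vCPU until the guest exits, mark the
 * vCPU as done so the migration thread stops touching it, and report
 * any guest assertion failure.
 */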
static void *test_vcpu_run(void *arg)
{
	unsigned int vcpu_idx = (unsigned long)arg;
	struct ucall uc;
	struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
	struct kvm_vm *vm = vcpu->vm;
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];

	vcpu_run(vcpu);

	/* Currently, any exit from the guest is an indication of completion */
	pthread_mutex_lock(&vcpu_done_map_lock);
	set_bit(vcpu_idx, vcpu_done_map);
	pthread_mutex_unlock(&vcpu_done_map_lock);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		sync_global_from_guest(vm, *shared_data);
		REPORT_GUEST_ASSERT_N(uc, "values: %lu, %lu; %lu, vcpu %u; stage: %u; iter: %u",
				      GUEST_ASSERT_ARG(uc, 0),
				      GUEST_ASSERT_ARG(uc, 1),
				      GUEST_ASSERT_ARG(uc, 2),
				      vcpu_idx,
				      shared_data->guest_stage,
				      shared_data->nr_iter);
		break;
	default:
		TEST_FAIL("Unexpected guest exit\n");
	}

	return NULL;
}

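/*
 * Pick a random physical CPU from the set this process is currently
 * allowed to run on.
 */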
static uint32_t test_get_pcpu(void)
{
	uint32_t pcpu;
	unsigned int nproc_conf;
	cpu_set_t online_cpuset;

	nproc_conf = get_nprocs_conf();
	sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);

	/* Randomly find an available pCPU to place a vCPU on */
	do {
		pcpu = rand() % nproc_conf;
	} while (!CPU_ISSET(pcpu, &online_cpuset));

	return pcpu;
}

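/*
 * Pin the given vCPU thread to a randomly chosen physical CPU. Returns
 * the pthread_setaffinity_np() result (0 or ESRCH).
 */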
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
	int ret;
	cpu_set_t cpuset;
	uint32_t new_pcpu = test_get_pcpu();

	CPU_ZERO(&cpuset);
	CPU_SET(new_pcpu, &cpuset);

	pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);

	ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
				     sizeof(cpuset), &cpuset);

	/* Allow the error where the vCPU thread is already finished (ESRCH) */
	TEST_ASSERT(ret == 0 || ret == ESRCH,
		    "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
		    vcpu_idx, new_pcpu, ret);

	return ret;
}

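/*
 * Migration thread: every migration_freq_ms, move each still-running
 * vCPU thread to a new physical CPU, until every vCPU has finished.
 */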
static void *test_vcpu_migration(void *arg)
{
	unsigned int i, n_done;
	bool vcpu_done;

	do {
		usleep(msecs_to_usecs(test_args.migration_freq_ms));

		for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
			pthread_mutex_lock(&vcpu_done_map_lock);
			vcpu_done = test_bit(i, vcpu_done_map);
			pthread_mutex_unlock(&vcpu_done_map_lock);

			if (vcpu_done) {
				n_done++;
				continue;
			}

			test_migrate_vcpu(i);
		}
	} while (test_args.nr_vcpus != n_done);

	return NULL;
}

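/*
 * Spawn one runner thread per vCPU (plus the migration thread, if
 * enabled) and wait for all of them to complete.
 */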
static void test_run(struct kvm_vm *vm)
{
	pthread_t pt_vcpu_migration;
	unsigned int i;
	int ret;

	pthread_mutex_init(&vcpu_done_map_lock, NULL);
	vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
	TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");

	for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
		ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
				     (void *)(unsigned long)i);
		TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
	}

	/* Spawn a thread to control the vCPU migrations */
	if (test_args.migration_freq_ms) {
		srand(time(NULL));

		ret = pthread_create(&pt_vcpu_migration, NULL,
				     test_vcpu_migration, NULL);
		TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
	}

	for (i = 0; i < test_args.nr_vcpus; i++)
		pthread_join(pt_vcpu_run[i], NULL);

	if (test_args.migration_freq_ms)
		pthread_join(pt_vcpu_migration, NULL);

	bitmap_free(vcpu_done_map);
}

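/*
 * The timer IRQ numbers (INTIDs) are identical on every vCPU, so query
 * them once from vCPU-0 and propagate them to the guest.
 */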
static void test_init_timer_irq(struct kvm_vm *vm)
{
	vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
	vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);

	sync_global_to_guest(vm, ptimer_irq);
	sync_global_to_guest(vm, vtimer_irq);

	pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}

static int gic_fd;

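/*
 * Create the VM with the requested number of vCPUs, install the guest
 * IRQ handler, initialize ucall and the timer IRQ numbers, and set up a
 * GICv3 for the guest. Skips the test if a vGICv3 cannot be created.
 */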
static struct kvm_vm *test_vm_create(void)
{
	struct kvm_vm *vm;
	unsigned int i;
	int nr_vcpus = test_args.nr_vcpus;

	vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);

	vm_init_descriptor_tables(vm);
	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);

	for (i = 0; i < nr_vcpus; i++)
		vcpu_init_descriptor_tables(vcpus[i]);

	ucall_init(vm, NULL);
	test_init_timer_irq(vm);
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");

	/* Make all the test's cmdline args visible to the guest */
	sync_global_to_guest(vm, test_args);

	return vm;
}

static void test_vm_cleanup(struct kvm_vm *vm)
{
	close(gic_fd);
	kvm_vm_free(vm);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms] [-m migration_freq_ms]\n",
		name);
	pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
		NR_VCPUS_DEF, KVM_MAX_VCPUS);
	pr_info("\t-i: Number of iterations per stage (default: %u)\n",
		NR_TEST_ITERS_DEF);
	pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
		TIMER_TEST_PERIOD_MS_DEF);
	pr_info("\t-m: Frequency (in ms) of vCPU migration to a different pCPU. 0 to turn off (default: %u)\n",
		TIMER_TEST_MIGRATION_FREQ_MS);
	pr_info("\t-h: Print this help message\n");
}

static bool parse_args(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "hn:i:p:m:")) != -1) {
		switch (opt) {
		case 'n':
			test_args.nr_vcpus = atoi(optarg);
			if (test_args.nr_vcpus <= 0) {
				pr_info("Positive value needed for -n\n");
				goto err;
			} else if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
				pr_info("Max allowed vCPUs: %u\n",
					KVM_MAX_VCPUS);
				goto err;
			}
			break;
		case 'i':
			test_args.nr_iter = atoi(optarg);
			if (test_args.nr_iter <= 0) {
				pr_info("Positive value needed for -i\n");
				goto err;
			}
			break;
		case 'p':
			test_args.timer_period_ms = atoi(optarg);
			if (test_args.timer_period_ms <= 0) {
				pr_info("Positive value needed for -p\n");
				goto err;
			}
			break;
		case 'm':
			test_args.migration_freq_ms = atoi(optarg);
			if (test_args.migration_freq_ms < 0) {
				pr_info("0 or positive value needed for -m\n");
				goto err;
			}
			break;
		case 'h':
		default:
			goto err;
		}
	}

	return true;

err:
	test_print_help(argv[0]);
	return false;
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
		       "At least two physical CPUs needed for vCPU migration");

	vm = test_vm_create();
	test_run(vm);
	test_vm_cleanup(vm);

	return 0;
}