// SPDX-License-Identifier: GPL-2.0
/*
 * steal/stolen time test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
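/*
 * Note: this file is part of the KVM selftests. It is usually built from
 * tools/testing/selftests/kvm/ (for example "make -C tools/testing/selftests/kvm")
 * and run as ./steal_time, optionally with -v/--verbose. The build path and
 * binary name are assumptions based on the standard selftest layout, not
 * something stated in this file.
 */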
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define NR_VCPUS        4
#define ST_GPA_BASE     (1 << 30)
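/*
 * Each of the NR_VCPUS vCPUs gets its own steal-time record in a dedicated
 * memslot starting at ST_GPA_BASE (1 GiB). The region is identity mapped in
 * the guest, so the same value serves as guest virtual and guest physical
 * address. st_gva[] holds those per-vCPU addresses, and guest_stolen_time[]
 * is written by the guest and read back by the host via
 * sync_global_from_guest().
 */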

static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];

#if defined(__x86_64__)

/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE     ((sizeof(struct kvm_steal_time) + 63) & ~63)
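/*
 * Rounding the record size up to the next multiple of 64 bytes keeps every
 * per-vCPU record at ST_GPA_BASE + i * STEAL_TIME_SIZE on a 64-byte boundary,
 * which the MSR_KVM_STEAL_TIME ABI requires.
 */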

static void check_status(struct kvm_steal_time *st)
{
    GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
    GUEST_ASSERT(READ_ONCE(st->flags) == 0);
    GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}

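/*
 * The guest verifies that MSR_KVM_STEAL_TIME reads back as the enabled GPA,
 * clears its record, and then rendezvous with the host at GUEST_SYNC points.
 * st->version acts like a seqcount: KVM bumps it before and after every
 * update, so an odd value means an update is in flight (check_status()), and
 * the value observed before GUEST_SYNC(1) must have advanced by the time the
 * vCPU runs again.
 */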
static void guest_code(int cpu)
{
    struct kvm_steal_time *st = st_gva[cpu];
    uint32_t version;

    GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));

    memset(st, 0, sizeof(*st));
    GUEST_SYNC(0);

    check_status(st);
    WRITE_ONCE(guest_stolen_time[cpu], st->steal);
    version = READ_ONCE(st->version);
    check_status(st);
    GUEST_SYNC(1);

    check_status(st);
    GUEST_ASSERT(version < READ_ONCE(st->version));
    WRITE_ONCE(guest_stolen_time[cpu], st->steal);
    check_status(st);
    GUEST_DONE();
}

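/*
 * On x86, steal-time support is advertised through KVM's paravirt CPUID leaf;
 * kvm_cpu_has() checks whether KVM reports X86_FEATURE_KVM_STEAL_TIME as
 * supported for guests.
 */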
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
    return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}

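/*
 * Enabling steal time on x86 only takes one MSR write from the host side:
 * MSR_KVM_STEAL_TIME is set to the GPA of the record ORed with
 * KVM_MSR_ENABLED. Before that, the test writes the GPA with a reserved bit
 * set (KVM_STEAL_RESERVED_MASK) and expects KVM to reject it; _vcpu_set_msr()
 * returns the number of MSRs actually set, so 0 means the bad write failed
 * as intended.
 */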
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
    int ret;

    /* ST_GPA_BASE is identity mapped */
    st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
    sync_global_to_guest(vcpu->vm, st_gva[i]);

    ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
                (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
    TEST_ASSERT(ret == 0, "Bad GPA didn't fail");

    vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
    struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
    int i;

    pr_info("VCPU%d:\n", vcpu_idx);
    pr_info("    steal:     %lld\n", st->steal);
    pr_info("    version:   %d\n", st->version);
    pr_info("    flags:     %d\n", st->flags);
    pr_info("    preempted: %d\n", st->preempted);
    pr_info("    u8_pad:    ");
    for (i = 0; i < 3; ++i)
        pr_info("%d", st->u8_pad[i]);
    pr_info("\n    pad:       ");
    for (i = 0; i < 11; ++i)
        pr_info("%d", st->pad[i]);
    pr_info("\n");
}

#elif defined(__aarch64__)

/* PV_TIME_ST must have 64-byte alignment */
#define STEAL_TIME_SIZE     ((sizeof(struct st_time) + 63) & ~63)

#define SMCCC_ARCH_FEATURES 0x80000001
#define PV_TIME_FEATURES    0xc5000020
#define PV_TIME_ST          0xc5000021

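/*
 * These are SMCCC function IDs: SMCCC_ARCH_FEATURES is the generic Arm SMCCC
 * feature query, while PV_TIME_FEATURES and PV_TIME_ST are the
 * paravirtualized time calls defined by Arm DEN0057A. PV_TIME_ST returns the
 * IPA of the caller's stolen-time record, whose layout (revision, attributes,
 * stolen time in nanoseconds) struct st_time mirrors below.
 */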
struct st_time {
    uint32_t rev;
    uint32_t attr;
    uint64_t st_time;
};

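/*
 * Thin wrapper around an SMCCC call using the HVC conduit; the result comes
 * back in res.a0, where negative values are SMCCC error codes (for example
 * SMCCC_RET_NOT_SUPPORTED is -1).
 */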
static int64_t smccc(uint32_t func, uint64_t arg)
{
    struct arm_smccc_res res;

    smccc_hvc(func, arg, 0, 0, 0, 0, 0, 0, &res);
    return res.a0;
}

static void check_status(struct st_time *st)
{
    GUEST_ASSERT(READ_ONCE(st->rev) == 0);
    GUEST_ASSERT(READ_ONCE(st->attr) == 0);
}

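/*
 * The guest walks the standard discovery chain: SMCCC_ARCH_FEATURES confirms
 * that PV_TIME_FEATURES exists, PV_TIME_FEATURES confirms both itself and
 * PV_TIME_ST, and PV_TIME_ST then returns the IPA of this vCPU's stolen-time
 * record, which must match the identity-mapped address the host configured.
 */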
static void guest_code(int cpu)
{
    struct st_time *st;
    int64_t status;

    status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
    GUEST_ASSERT(status == 0);
    status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
    GUEST_ASSERT(status == 0);
    status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
    GUEST_ASSERT(status == 0);

    status = smccc(PV_TIME_ST, 0);
    GUEST_ASSERT(status != -1);
    GUEST_ASSERT(status == (ulong)st_gva[cpu]);

    st = (struct st_time *)status;
    GUEST_SYNC(0);

    check_status(st);
    WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
    GUEST_SYNC(1);

    check_status(st);
    WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
    GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
    struct kvm_device_attr dev = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
    };

    return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}

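/*
 * On arm64, pvtime is configured through the vCPU device-attribute API:
 * KVM_HAS_DEVICE_ATTR probes for KVM_ARM_VCPU_PVTIME_CTRL support, and
 * KVM_SET_DEVICE_ATTR installs the IPA of the stolen-time record. Setting a
 * misaligned IPA (low bit set) must fail with EINVAL, and setting the
 * attribute a second time must fail with EEXIST.
 */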
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
    struct kvm_vm *vm = vcpu->vm;
    uint64_t st_ipa;
    int ret;

    struct kvm_device_attr dev = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&st_ipa,
    };

    vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);

    /* ST_GPA_BASE is identity mapped */
    st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
    sync_global_to_guest(vm, st_gva[i]);

    st_ipa = (ulong)st_gva[i] | 1;
    ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
    TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

    st_ipa = (ulong)st_gva[i];
    vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);

    ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
    TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
    struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

    pr_info("VCPU%d:\n", vcpu_idx);
    pr_info("    rev:     %d\n", st->rev);
    pr_info("    attr:    %d\n", st->attr);
    pr_info("    st_time: %ld\n", st->st_time);
}

#endif

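/*
 * do_steal_time() busy-waits for MIN_RUN_DELAY_NS of wall-clock time. The
 * thread is created with the same single-CPU affinity as the vCPU threads,
 * so while it spins the vCPU cannot run and its run_delay (and hence its
 * steal time) grows.
 */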
static void *do_steal_time(void *arg)
{
    struct timespec ts, stop;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);

    while (1) {
        clock_gettime(CLOCK_MONOTONIC, &ts);
        if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
            break;
    }

    return NULL;
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
    struct ucall uc;

    vcpu_run(vcpu);

    switch (get_ucall(vcpu, &uc)) {
    case UCALL_SYNC:
    case UCALL_DONE:
        break;
    case UCALL_ABORT:
        REPORT_GUEST_ASSERT(uc);
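        /* REPORT_GUEST_ASSERT() does not return, so falling through is intentional */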
    default:
        TEST_ASSERT(false, "Unexpected exit: %s",
                exit_reason_str(vcpu->run->exit_reason));
    }
}

int main(int ac, char **av)
{
    struct kvm_vcpu *vcpus[NR_VCPUS];
    struct kvm_vm *vm;
    pthread_attr_t attr;
    pthread_t thread;
    cpu_set_t cpuset;
    unsigned int gpages;
    long stolen_time;
    long run_delay;
    bool verbose;
    int i;

    verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

    /* Set CPU affinity so we can force preemption of the VCPU */
    CPU_ZERO(&cpuset);
    CPU_SET(0, &cpuset);
    pthread_attr_init(&attr);
    pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

    /* Create a VM and an identity mapped memslot for the steal time structure */
    vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
    gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
    virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
    ucall_init(vm, NULL);

    TEST_REQUIRE(is_steal_time_supported(vcpus[0]));

    /* Run test on each VCPU */
    for (i = 0; i < NR_VCPUS; ++i) {
        steal_time_init(vcpus[i], i);

        vcpu_args_set(vcpus[i], 1, i);

        /* First VCPU run initializes steal-time */
        run_vcpu(vcpus[i]);

        /* Second VCPU run, expect guest stolen time to be <= run_delay */
        run_vcpu(vcpus[i]);
        sync_global_from_guest(vm, guest_stolen_time[i]);
        stolen_time = guest_stolen_time[i];
        run_delay = get_run_delay();
        TEST_ASSERT(stolen_time <= run_delay,
                "Expected stolen time <= %ld, got %ld",
                run_delay, stolen_time);
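        /*
         * get_run_delay() reports this task's accumulated scheduler run
         * delay; KVM derives guest steal time from the same accounting, so
         * the value the guest sees can never exceed what the host measured.
         */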

        /*
         * Steal time from the VCPU. The steal time thread has the same
         * CPU affinity as the VCPUs.
         */
        run_delay = get_run_delay();
        pthread_create(&thread, &attr, do_steal_time, NULL);
        do
            sched_yield();
        while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
        pthread_join(thread, NULL);
        run_delay = get_run_delay() - run_delay;
        TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
                "Expected run_delay >= %ld, got %ld",
                MIN_RUN_DELAY_NS, run_delay);

        /* Run VCPU again to confirm stolen time is consistent with run_delay */
        run_vcpu(vcpus[i]);
        sync_global_from_guest(vm, guest_stolen_time[i]);
        stolen_time = guest_stolen_time[i] - stolen_time;
        TEST_ASSERT(stolen_time >= run_delay,
                "Expected stolen time >= %ld, got %ld",
                run_delay, stolen_time);

        if (verbose) {
            pr_info("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld", i,
                guest_stolen_time[i], stolen_time);
            if (stolen_time == run_delay)
                pr_info(" (BONUS: guest test-stolen-time even exactly matches test-run_delay)");
            pr_info("\n");
            steal_time_dump(vm, i);
        }
    }

    return 0;
}