// SPDX-License-Identifier: GPL-2.0-only
/*
 * xen_vmcall_test
 *
 * Userspace hypercall testing: verifies that KVM intercepts Xen and
 * Hyper-V guest hypercalls and forwards them to userspace.
 *
 * NOTE(review): this header was reconstructed — the original license
 * comment lines were lost in extraction; confirm against upstream.
 */
0010 #include "test_util.h"
0011 #include "kvm_util.h"
0012 #include "processor.h"
0013
0014 #define HCALL_REGION_GPA 0xc0000000ULL
0015 #define HCALL_REGION_SLOT 10
0016
0017 #define INPUTVALUE 17
0018 #define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x)
0019 #define RETVALUE 0xcafef00dfbfbffffUL
0020
0021 #define XEN_HYPERCALL_MSR 0x40000200
0022 #define HV_GUEST_OS_ID_MSR 0x40000000
0023 #define HV_HYPERCALL_MSR 0x40000001
0024
0025 #define HVCALL_SIGNAL_EVENT 0x005d
0026 #define HV_STATUS_INVALID_ALIGNMENT 4
0027
/*
 * Guest entry point: exercises Xen hypercalls three ways — a direct
 * 'vmcall', a call through the Xen MSR-provided hypercall page, and a
 * Hyper-V hypercall through the Hyper-V hypercall page.  Each result is
 * checked against the value the userspace VMM is expected to inject.
 */
static void guest_code(void)
{
	/*
	 * Xen hypercall ABI: number in rax, up to six args in
	 * rdi/rsi/rdx/r10/r8/r9.  r10/r8/r9 must be pinned to their
	 * registers explicitly since no constraint letter exists for them.
	 */
	unsigned long rax = INPUTVALUE;
	unsigned long rdi = ARGVALUE(1);
	unsigned long rsi = ARGVALUE(2);
	unsigned long rdx = ARGVALUE(3);
	unsigned long rcx;
	register unsigned long r10 __asm__("r10") = ARGVALUE(4);
	register unsigned long r8 __asm__("r8") = ARGVALUE(5);
	register unsigned long r9 __asm__("r9") = ARGVALUE(6);

	/* First, a direct 'vmcall' invocation of the hypercall. */
	__asm__ __volatile__("vmcall" :
			     "=a"(rax) :
			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
			     "r"(r10), "r"(r8), "r"(r9));
	/* Userspace writes RETVALUE into the exit's hcall.result. */
	GUEST_ASSERT(rax == RETVALUE);

	/* Ask the hypervisor to populate the Xen hypercall page at HCALL_REGION_GPA. */
	__asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR),
			     "a" (HCALL_REGION_GPA & 0xffffffff),
			     "d" (HCALL_REGION_GPA >> 32));

	/* Hyper-V requires the guest OS ID MSR to be set before other MSRs work. */
	__asm__ __volatile__("wrmsr" : : "c" (HV_GUEST_OS_ID_MSR),
			     "a" (0x5a), "d" (0));

	/*
	 * Enable the Hyper-V hypercall page one page above the Xen one
	 * (low bit of the MSR value is the enable flag).
	 */
	u64 msrval = HCALL_REGION_GPA + PAGE_SIZE + 1;
	__asm__ __volatile__("wrmsr" : : "c" (HV_HYPERCALL_MSR),
			     "a" (msrval & 0xffffffff),
			     "d" (msrval >> 32));

	/*
	 * Invoke the same Xen hypercall via its 32-byte slot in the
	 * hypercall page; rax need not be reloaded because the page
	 * entry itself supplies the hypercall number — TODO confirm
	 * against the Xen hypercall-page ABI.
	 */
	__asm__ __volatile__("call *%1" : "=a"(rax) :
			     "r"(HCALL_REGION_GPA + INPUTVALUE * 32),
			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
			     "r"(r10), "r"(r8), "r"(r9));
	GUEST_ASSERT(rax == RETVALUE);

	/*
	 * Hyper-V hypercall ABI: call code in rcx, input param GPA in rdx.
	 * 0x5a5a5a5a is deliberately misaligned, so the hypervisor must
	 * fail the call with HV_STATUS_INVALID_ALIGNMENT.
	 */
	rax = 0;
	rcx = HVCALL_SIGNAL_EVENT;
	rdx = 0x5a5a5a5a;
	__asm__ __volatile__("call *%1" : "=a"(rax) :
			     "r"(HCALL_REGION_GPA + PAGE_SIZE),
			     "a"(rax), "c"(rcx), "d"(rdx),
			     "r"(r8));
	GUEST_ASSERT(rax == HV_STATUS_INVALID_ALIGNMENT);

	GUEST_DONE();
}
0080
0081 int main(int argc, char *argv[])
0082 {
0083 unsigned int xen_caps;
0084 struct kvm_vcpu *vcpu;
0085 struct kvm_vm *vm;
0086
0087 xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
0088 TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
0089
0090 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
0091 vcpu_set_hv_cpuid(vcpu);
0092
0093 struct kvm_xen_hvm_config hvmc = {
0094 .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
0095 .msr = XEN_HYPERCALL_MSR,
0096 };
0097 vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
0098
0099
0100 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
0101 HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
0102 virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);
0103
0104 for (;;) {
0105 volatile struct kvm_run *run = vcpu->run;
0106 struct ucall uc;
0107
0108 vcpu_run(vcpu);
0109
0110 if (run->exit_reason == KVM_EXIT_XEN) {
0111 ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
0112 ASSERT_EQ(run->xen.u.hcall.cpl, 0);
0113 ASSERT_EQ(run->xen.u.hcall.longmode, 1);
0114 ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
0115 ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
0116 ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
0117 ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
0118 ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
0119 ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
0120 ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
0121 run->xen.u.hcall.result = RETVALUE;
0122 continue;
0123 }
0124
0125 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
0126 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
0127 run->exit_reason,
0128 exit_reason_str(run->exit_reason));
0129
0130 switch (get_ucall(vcpu, &uc)) {
0131 case UCALL_ABORT:
0132 REPORT_GUEST_ASSERT(uc);
0133
0134 case UCALL_SYNC:
0135 break;
0136 case UCALL_DONE:
0137 goto done;
0138 default:
0139 TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
0140 }
0141 }
0142 done:
0143 kvm_vm_free(vm);
0144 return 0;
0145 }