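// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test userspace MSR exits: KVM_CAP_X86_USER_SPACE_MSR deflects rdmsr/wrmsr
 * to userspace, and KVM_X86_SET_MSR_FILTER selects which MSRs are filtered.
 */
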
#define _GNU_SOURCE
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

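/* Forced emulation prefix, used to invoke the emulator unconditionally. */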
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
#define KVM_FEP_LENGTH 5
static int fep_available = 1;

#define MSR_NON_EXISTENT 0x474f4f00

static u64 deny_bits = 0;
struct kvm_msr_filter filter_allow = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
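			/* Test an MSR the kernel knows about. */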
			.base = MSR_IA32_XSS,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
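			/* Test an MSR the kernel doesn't know about. */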
			.base = MSR_IA32_FLUSH_CMD,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
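			/* Test a fabricated MSR that no one knows about. */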
			.base = MSR_NON_EXISTENT,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};

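/* Filters that deny only reads of MSR_FS_BASE or MSR_GS_BASE, respectively. */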
struct kvm_msr_filter filter_fs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_FS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};

struct kvm_msr_filter filter_gs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_GS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};

static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;

static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };

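/* Clear the MSR's bit in a filter bitmap; a zero bit denies the access. */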
static void deny_msr(uint8_t *bitmap, u32 msr)
{
	u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

	bitmap[idx / 8] &= ~(1 << (idx % 8));
}

static void prepare_bitmaps(void)
{
	memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
	memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
	memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
	memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
	memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

	deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
	deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
	deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}

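/*
 * Default-deny filter: the all-ones bitmaps re-allow each range, minus the
 * MSRs cleared by prepare_bitmaps().
 */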
struct kvm_msr_filter filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_DENY,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000_write,
		}, {
			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
			.base = 0x40000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_40000000,
		}, {
			.flags = KVM_MSR_FILTER_READ,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000_read,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
			.base = 0xdeadbeef,
			.nmsrs = 1,
			.bitmap = bitmap_deadbeef,
		},
	},
};

struct kvm_msr_filter no_filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};

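/*
 * Note: Force test_rdmsr() to not be inlined to prevent the labels,
 * rdmsr_start and rdmsr_end, from being defined multiple times.
 */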
static noinline uint64_t test_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

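/*
 * Note: Force test_wrmsr() to not be inlined to prevent the labels,
 * wrmsr_start and wrmsr_end, from being defined multiple times.
 */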
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;

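/*
 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
 * em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
 */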
static noinline uint64_t test_em_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

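/*
 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
 * em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
 */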
static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;

static void guest_code_filter_allow(void)
{
	uint64_t data;

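	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
	 *
	 * A GP is thrown if anything other than 0 is written to
	 * MSR_IA32_XSS.
	 */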
	data = test_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);

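	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
	 *
	 * A GP is thrown if MSR_IA32_FLUSH_CMD is read
	 * from or if a value other than 1 is written to it.
	 */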
	test_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);

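	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
	 *
	 * Test that a fabricated MSR can pass through the kernel
	 * and be handled in userspace.
	 */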
	test_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);

	data = test_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);

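	/*
	 * Test to see if the instruction emulator is available (ie: the module
	 * parameter 'kvm.force_emulation_prefix=1' is set).  This instruction
	 * will #UD if it isn't available.
	 */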
	__asm__ __volatile__(KVM_FEP "nop");

	if (fep_available) {
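		/* Let userspace know we aren't done. */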
		GUEST_SYNC(0);

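		/*
		 * Now run the same tests with the instruction
		 * emulator.
		 */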
		data = test_em_rdmsr(MSR_IA32_XSS);
		GUEST_ASSERT(data == 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 1);
		GUEST_ASSERT(guest_exception_count == 1);

		test_em_rdmsr(MSR_IA32_FLUSH_CMD);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
		GUEST_ASSERT(guest_exception_count == 0);

		test_em_wrmsr(MSR_NON_EXISTENT, 2);
		GUEST_ASSERT(guest_exception_count == 0);
		data = test_em_rdmsr(MSR_NON_EXISTENT);
		GUEST_ASSERT(data == 2);
		GUEST_ASSERT(guest_exception_count == 0);
	}

	GUEST_DONE();
}

static void guest_msr_calls(bool trapped)
{
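	/* This goes into the in-kernel emulation */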
	wrmsr(MSR_SYSCALL_MASK, 0);

	if (trapped) {
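		/* Expect an exit to userspace */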
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
	} else {
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
	}

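	/* If trapped == true, this goes into user space emulation */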
	wrmsr(MSR_IA32_POWER_CTL, 0x1234);

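	/* This goes into the in-kernel emulation */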
	rdmsr(MSR_IA32_POWER_CTL);

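	/* Invalid MSR, should always be handled by user space exit */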
	GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
	wrmsr(0xdeadbeef, 0x1234);
}

static void guest_code_filter_deny(void)
{
	guest_msr_calls(true);

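	/*
	 * Disable msr filtering, so that the kernel
	 * handles everything in the next round
	 */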
	GUEST_SYNC(0);

	guest_msr_calls(false);

	GUEST_DONE();
}

static void guest_code_permission_bitmap(void)
{
	uint64_t data;

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data != MSR_GS_BASE);

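	/* Let userspace know to switch the filter */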
	GUEST_SYNC(0);

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data != MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == MSR_GS_BASE);

	GUEST_DONE();
}

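/*
 * Guest #GP handler: skip past the faulting rdmsr/wrmsr (identified by its
 * start label) and count the exception; reads complete with a value of 0.
 */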
static void __guest_gp_handler(struct ex_regs *regs,
			       char *r_start, char *r_end,
			       char *w_start, char *w_end)
{
	if (regs->rip == (uintptr_t)r_start) {
		regs->rip = (uintptr_t)r_end;
		regs->rax = 0;
		regs->rdx = 0;
	} else if (regs->rip == (uintptr_t)w_start) {
		regs->rip = (uintptr_t)w_end;
	} else {
		GUEST_ASSERT(!"RIP is at an unknown location!");
	}

	++guest_exception_count;
}

static void guest_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
			   &wrmsr_start, &wrmsr_end);
}

static void guest_fep_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
			   &em_wrmsr_start, &em_wrmsr_end);
}

static void guest_ud_handler(struct ex_regs *regs)
{
	fep_available = 0;
	regs->rip += KVM_FEP_LENGTH;
}

static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (vcpu->run->exit_reason == KVM_EXIT_IO &&
	    get_ucall(vcpu, &uc) == UCALL_ABORT) {
		REPORT_GUEST_ASSERT(uc);
	}
}

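/* Complete a deflected rdmsr in userspace: supply data or inject an error. */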
static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		run->msr.data = 0;
		break;
	case MSR_IA32_FLUSH_CMD:
		run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		run->msr.data = msr_non_existent_data;
		break;
	case MSR_FS_BASE:
		run->msr.data = MSR_FS_BASE;
		break;
	case MSR_GS_BASE:
		run->msr.data = MSR_GS_BASE;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}

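/* Complete a deflected wrmsr in userspace: accept or reject the written value. */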
static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		if (run->msr.data != 0)
			run->msr.error = 1;
		break;
	case MSR_IA32_FLUSH_CMD:
		if (run->msr.data != 1)
			run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		msr_non_existent_data = run->msr.data;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}

static void process_ucall_done(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	check_for_guest_assert(vcpu);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}

static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc = {};

	check_for_guest_assert(vcpu);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		check_for_guest_assert(vcpu);
		break;
	case UCALL_DONE:
		process_ucall_done(vcpu);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}

static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_rdmsr(vcpu, msr_index);
}

static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_wrmsr(vcpu, msr_index);
}

static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	return process_ucall(vcpu);
}

static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	process_ucall_done(vcpu);
}

static void test_msr_filter_allow(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int rc;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_allow);

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

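	/* Process guest code userspace exits. */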
	run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

	run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

	run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
	run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

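	/* The guest probes the forced emulation prefix next; catch any #UD. */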
	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
	vcpu_run(vcpu);
	vm_install_exception_handler(vm, UD_VECTOR, NULL);

	if (process_ucall(vcpu) != UCALL_DONE) {
		vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);

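		/* Process emulated rdmsr and wrmsr instructions. */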
		run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

		run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

		run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
		run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

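		/* Confirm the guest completed without issues. */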
		run_guest_then_process_ucall_done(vcpu);
	} else {
		printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
	}

	kvm_vm_free(vm);
}

static int handle_ucall(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_SYNC:
		vm_ioctl(vcpu->vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
		break;
	case UCALL_DONE:
		return 1;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
	}

	return 0;
}

static void handle_rdmsr(struct kvm_run *run)
{
	run->msr.data = run->msr.index;
	msr_reads++;

	if (run->msr.index == MSR_SYSCALL_MASK ||
	    run->msr.index == MSR_GS_BASE) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR read trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "MSR deadbeef read trap w/o inval fault");
	}
}

static void handle_wrmsr(struct kvm_run *run)
{
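	/* Count the write; the data is validated per-MSR below. */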
	msr_writes++;

	if (run->msr.index == MSR_IA32_POWER_CTL) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for MSR_IA32_POWER_CTL incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR_IA32_POWER_CTL trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for deadbeef incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "deadbeef trap w/o inval fault");
	}
}

static void test_msr_filter_deny(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_run *run;
	int rc;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_deny);
	run = vcpu->run;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
						      KVM_MSR_EXIT_REASON_UNKNOWN |
						      KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	prepare_bitmaps();
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);

	while (1) {
		vcpu_run(vcpu);

		switch (run->exit_reason) {
		case KVM_EXIT_X86_RDMSR:
			handle_rdmsr(run);
			break;
		case KVM_EXIT_X86_WRMSR:
			handle_wrmsr(run);
			break;
		case KVM_EXIT_IO:
			if (handle_ucall(vcpu))
				goto done;
			break;
		}
	}

done:
	TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
	TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");

	kvm_vm_free(vm);
}

static void test_msr_permission_bitmap(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int rc;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code_permission_bitmap);

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
	run_guest_then_process_rdmsr(vcpu, MSR_FS_BASE);
	TEST_ASSERT(run_guest_then_process_ucall(vcpu) == UCALL_SYNC,
		    "Expected ucall state to be UCALL_SYNC.");
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
	run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
	run_guest_then_process_ucall_done(vcpu);

	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
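	/* Tell stdout not to buffer its content */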
	setbuf(stdout, NULL);

	test_msr_filter_allow();

	test_msr_filter_deny();

	test_msr_permission_bitmap();

	return 0;
}