0001
0002
0003
0004
0005
0006
0007
0008 #include <sys/mman.h>
0009 #include "test_util.h"
0010 #include "kvm_util.h"
0011 #include "kselftest.h"
0012
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
/* s390 control register 0 bits that enable the protection-override facilities */
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))

/*
 * Two page-aligned guest pages used as probe targets.  The guest assigns
 * them storage keys in guest_code(): page_store_prot gets a plain access
 * key, page_fetch_prot additionally gets the fetch-protection bit.
 */
static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];
0021
0022
/*
 * Set the storage key of the page containing @addr to @key.
 *
 * Returns 0 on success, -1 if the page is not mapped (address
 * translation via LRA failed).
 */
static int set_storage_key(void *addr, uint8_t key)
{
	int not_mapped = 0;

	asm volatile (
		/* Translate @addr to a real address; cc != 0 -> not mapped */
		"lra %[addr], 0(0,%[addr])\n"
		"	jz	0f\n"
		/* translation failed: record it and skip the SSKE */
		"	llill	%[not_mapped],1\n"
		"	j	1f\n"
		/* set the storage key on the translated (real) address */
		"0:	sske	%[key], %[addr]\n"
		"1:"
		: [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
		: [key] "r" (key)
		: "cc"
	);
	return -not_mapped;
}
0040
/*
 * Result of a TPROT probe, as returned by test_protection().
 * The values mirror the TPROT condition codes (see test_protection()).
 */
enum permission {
	READ_WRITE = 0,		/* fetch and store permitted */
	READ = 1,		/* fetch permitted, store protected */
	RW_PROTECTED = 2,	/* both fetch and store protected */
	TRANSL_UNAVAIL = 3,	/* address translation not available */
};
0047
/*
 * Probe the protection of the page containing @addr when accessed with
 * access key @key, using the TPROT instruction.
 *
 * Returns the TPROT condition code as an enum permission (the IPM result
 * is shifted so the condition code ends up in the low bits).
 */
static enum permission test_protection(void *addr, uint8_t key)
{
	uint64_t mask;

	asm volatile (
		"tprot %[addr], 0(%[key])\n"
		/* capture the condition code via Insert Program Mask */
		"	ipm	%[mask]\n"
		: [mask] "=r" (mask)
		: [addr] "Q" (*(char *)addr),
		  [key] "a" (key)
		: "cc"
	);

	return (enum permission)(mask >> 28);
}
0063
/*
 * Test stages, used as the sync protocol between guest and host:
 * the guest announces each stage via GUEST_SYNC and the host matches
 * it with HOST_SYNC, performing host-side setup in between.
 */
enum stage {
	STAGE_INIT_SIMPLE,		/* guest has set storage keys */
	TEST_SIMPLE,			/* plain key-controlled protection */
	STAGE_INIT_FETCH_PROT_OVERRIDE,	/* guest tried to key page 0 */
	TEST_FETCH_PROT_OVERRIDE,	/* CR0 fetch-protection override set */
	TEST_STORAGE_PROT_OVERRIDE,	/* CR0 storage-protection override set */
	STAGE_END
};
0072
/*
 * Table of TPROT probes, grouped by the stage they run in.
 * Each entry names the guest address to probe, the access key to probe
 * with, and the expected TPROT result.
 *
 * Background (set up in guest_code before TEST_SIMPLE):
 *   page_store_prot has storage key 0x10 (access-control key 1),
 *   page_fetch_prot has storage key 0x98 (fetch-protection bit + key 9).
 */
struct test {
	enum stage stage;
	void *addr;
	uint8_t key;
	enum permission expected;
} tests[] = {
	/* access key 0 matches any storage key */
	{ TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
	/* matching access key (1) */
	{ TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
	/* mismatched key: store protected, fetch still allowed (no fetch protection) */
	{ TEST_SIMPLE, page_store_prot, 0x20, READ },
	/* access key 0 matches any storage key */
	{ TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
	/* matching access key (9) */
	{ TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
	/* mismatched key on a fetch-protected page: neither fetch nor store */
	{ TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
	/* page 0 is not mapped at this point */
	{ TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
	/*
	 * Fetch-protection override (CR0 bit 38) lifts fetch protection for
	 * low addresses; expected to apply at address 0 but not at 2049
	 * (NOTE(review): boundary presumed to be 2048 — confirm against the
	 * Principles of Operation).
	 */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
	{ TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
	/*
	 * Storage-protection override (CR0 bit 39) grants access where the
	 * storage key's access-control value is 9 (page_fetch_prot and the
	 * re-keyed page 0), but not for other mismatched keys.
	 */
	{ TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
	{ TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
	{ TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
	/* sentinel terminating the table */
	{ STAGE_END, 0, 0, 0 },
};
0139
0140 static enum stage perform_next_stage(int *i, bool mapped_0)
0141 {
0142 enum stage stage = tests[*i].stage;
0143 enum permission result;
0144 bool skip;
0145
0146 for (; tests[*i].stage == stage; (*i)++) {
0147
0148
0149
0150
0151
0152
0153
0154 skip = tests[*i].addr < (void *)4096 &&
0155 tests[*i].expected != TRANSL_UNAVAIL &&
0156 !mapped_0;
0157 if (!skip) {
0158 result = test_protection(tests[*i].addr, tests[*i].key);
0159 GUEST_ASSERT_2(result == tests[*i].expected, *i, result);
0160 }
0161 }
0162 return stage;
0163 }
0164
/*
 * Guest entry point: assign storage keys to the probe pages, then walk
 * the test table stage by stage, syncing with the host before and after
 * each stage so the host can adjust mappings and control registers.
 */
static void guest_code(void)
{
	bool mapped_0;
	int i = 0;

	/* key 1 on the store-protected page; key 9 + fetch protection (0x98) */
	GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
	GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
	GUEST_SYNC(STAGE_INIT_SIMPLE);
	GUEST_SYNC(perform_next_stage(&i, false));

	/* page 0 is only present if the host managed to allocate it there */
	mapped_0 = !set_storage_key((void *)0, 0x98);
	GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
	GUEST_SYNC(perform_next_stage(&i, mapped_0));

	/* host has set CR0 storage-protection override before this stage */
	GUEST_SYNC(perform_next_stage(&i, mapped_0));
}
0183
/*
 * Run the vcpu until the next ucall and verify the guest synced the
 * expected @stage.  Reports guest assertion failures.  Does not emit a
 * TAP result line — use HOST_SYNC for that.
 */
#define HOST_SYNC_NO_TAP(vcpup, stage)					\
({									\
	struct kvm_vcpu *__vcpu = (vcpup);				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu);						\
	get_ucall(__vcpu, &uc);						\
	if (uc.cmd == UCALL_ABORT)					\
		REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu");		\
	ASSERT_EQ(uc.cmd, UCALL_SYNC);					\
	ASSERT_EQ(uc.args[1], __stage);					\
})
0197
/* Like HOST_SYNC_NO_TAP, but also report the stage as a passed TAP test. */
#define HOST_SYNC(vcpu, stage)						\
({									\
	HOST_SYNC_NO_TAP(vcpu, stage);					\
	ksft_test_result_pass("" #stage "\n");				\
})
0203
/*
 * Host side: create the VM, then drive the guest through the stages,
 * performing between stages the host-side setup each stage needs
 * (read-only host mappings, allocation of guest page 0, CR0 override
 * bits via the sync-regs interface).
 */
int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_run *run;
	vm_vaddr_t guest_0_page;

	ksft_print_header();
	ksft_set_plan(STAGE_END);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

	HOST_SYNC(vcpu, STAGE_INIT_SIMPLE);
	/*
	 * NOTE(review): host mapping of the probe pages is made read-only
	 * before TEST_SIMPLE — presumably to exercise TPROT against
	 * write-protected host memory; confirm intent.
	 */
	mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
	HOST_SYNC(vcpu, TEST_SIMPLE);

	/* Try to place a guest page at address 0; skip the stage if we can't */
	guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
	if (guest_0_page != 0) {
		/* sync anyway to keep the guest's stage sequence in step */
		HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
		ksft_test_result_skip("STAGE_INIT_FETCH_PROT_OVERRIDE - "
				      "Did not allocate page at 0\n");
	} else {
		HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
	}
	if (guest_0_page == 0)
		mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
	/* enable fetch-protection override in the guest's CR0 via sync regs */
	run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);

	/* additionally enable storage-protection override */
	run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vcpu, TEST_STORAGE_PROT_OVERRIDE);

	kvm_vm_free(vm);

	ksft_finished();
}