// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

enum mop_target {
    LOGICAL,
    SIDA,
    ABSOLUTE,
    INVALID,
};

enum mop_access_mode {
    READ,
    WRITE,
};

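/*
 * Descriptor for a single memop request. The underscore-prefixed bit fields
 * record whether the corresponding optional value was explicitly supplied
 * via the initializer macros below (GADDR_V, SET_FLAGS, SIDA_OFFSET, AR).
 */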
struct mop_desc {
    uintptr_t gaddr;
    uintptr_t gaddr_v;
    uint64_t set_flags;
    unsigned int f_check : 1;
    unsigned int f_inject : 1;
    unsigned int f_key : 1;
    unsigned int _gaddr_v : 1;
    unsigned int _set_flags : 1;
    unsigned int _sida_offset : 1;
    unsigned int _ar : 1;
    uint32_t size;
    enum mop_target target;
    enum mop_access_mode mode;
    void *buf;
    uint32_t sida_offset;
    uint8_t ar;
    uint8_t key;
};

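/*
 * Translate the test-local descriptor into the kvm_s390_mem_op structure
 * that the KVM_S390_MEM_OP ioctl expects. The reserved field is deliberately
 * filled with a non-zero pattern, presumably to check that the kernel
 * ignores those bytes.
 */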
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
{
    struct kvm_s390_mem_op ksmo = {
        .gaddr = (uintptr_t)desc.gaddr,
        .size = desc.size,
        .buf = ((uintptr_t)desc.buf),
        .reserved = "ignored_ignored_ignored_ignored"
    };

    switch (desc.target) {
    case LOGICAL:
        if (desc.mode == READ)
            ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
        if (desc.mode == WRITE)
            ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
        break;
    case SIDA:
        if (desc.mode == READ)
            ksmo.op = KVM_S390_MEMOP_SIDA_READ;
        if (desc.mode == WRITE)
            ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
        break;
    case ABSOLUTE:
        if (desc.mode == READ)
            ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
        if (desc.mode == WRITE)
            ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
        break;
    case INVALID:
        ksmo.op = -1;
    }
    if (desc.f_check)
        ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    if (desc.f_inject)
        ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
    if (desc._set_flags)
        ksmo.flags = desc.set_flags;
    if (desc.f_key) {
        ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
        ksmo.key = desc.key;
    }
    if (desc._ar)
        ksmo.ar = desc.ar;
    else
        ksmo.ar = 0;
    if (desc._sida_offset)
        ksmo.sida_offset = desc.sida_offset;

    return ksmo;
}

struct test_info {
    struct kvm_vm *vm;
    struct kvm_vcpu *vcpu;
};

#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
    if (!PRINT_MEMOP)
        return;

    if (!vcpu)
        printf("vm memop(");
    else
        printf("vcpu memop(");
    switch (ksmo->op) {
    case KVM_S390_MEMOP_LOGICAL_READ:
        printf("LOGICAL, READ, ");
        break;
    case KVM_S390_MEMOP_LOGICAL_WRITE:
        printf("LOGICAL, WRITE, ");
        break;
    case KVM_S390_MEMOP_SIDA_READ:
        printf("SIDA, READ, ");
        break;
    case KVM_S390_MEMOP_SIDA_WRITE:
        printf("SIDA, WRITE, ");
        break;
    case KVM_S390_MEMOP_ABSOLUTE_READ:
        printf("ABSOLUTE, READ, ");
        break;
    case KVM_S390_MEMOP_ABSOLUTE_WRITE:
        printf("ABSOLUTE, WRITE, ");
        break;
    }
    printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u",
           ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key);
    if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
        printf(", CHECK_ONLY");
    if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
        printf(", INJECT_EXCEPTION");
    if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
        printf(", SKEY_PROTECTION");
    puts(")");
}

static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
{
    struct kvm_vcpu *vcpu = info.vcpu;

    if (!vcpu)
        vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
    else
        vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
{
    struct kvm_vcpu *vcpu = info.vcpu;

    if (!vcpu)
        return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
    else
        return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

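/*
 * Core wrapper around the ioctl: build a mop_desc from the variadic
 * designated initializers, resolve a guest virtual address to a guest
 * absolute address where the target requires it, and issue the memop.
 * MOP() asserts success, ERR_MOP() returns the raw ioctl result.
 */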
#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
({                                      \
    struct test_info __info = (info_p);                 \
    struct mop_desc __desc = {                      \
        .target = (mop_target_p),                   \
        .mode = (access_mode_p),                    \
        .buf = (buf_p),                         \
        .size = (size_p),                       \
        __VA_ARGS__                         \
    };                                  \
    struct kvm_s390_mem_op __ksmo;                      \
                                        \
    if (__desc._gaddr_v) {                          \
        if (__desc.target == ABSOLUTE)                  \
            __desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v); \
        else                                \
            __desc.gaddr = __desc.gaddr_v;              \
    }                                   \
    __ksmo = ksmo_from_desc(__desc);                    \
    print_memop(__info.vcpu, &__ksmo);                  \
    err##memop_ioctl(__info, &__ksmo);                  \
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1

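/* Execute the memop twice: first as a CHECK_ONLY dry run, then for real. */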
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE   (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))

static uint8_t mem1[65536];
static uint8_t mem2[65536];

struct test_default {
    struct kvm_vm *kvm_vm;
    struct test_info vm;
    struct test_info vcpu;
    struct kvm_run *run;
    int size;
};

static struct test_default test_default_init(void *guest_code)
{
    struct kvm_vcpu *vcpu;
    struct test_default t;

    t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
    t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
    t.vm = (struct test_info) { t.kvm_vm, NULL };
    t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
    t.run = vcpu->run;
    return t;
}

enum stage {
    /* Synced state set by host, e.g. DAT */
    STAGE_INITED,
    /* Guest did nothing */
    STAGE_IDLED,
    /* Guest set storage keys (specifics up to test case) */
    STAGE_SKEYS_SET,
    /* Guest copied memory (locations up to test case) */
    STAGE_COPIED,
};

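/*
 * Run the vcpu until the guest announces the next stage via GUEST_SYNC
 * and assert that host and guest agree on where they are.
 */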
#define HOST_SYNC(info_p, stage)                    \
({                                  \
    struct test_info __info = (info_p);             \
    struct kvm_vcpu *__vcpu = __info.vcpu;              \
    struct ucall uc;                        \
    int __stage = (stage);                      \
                                    \
    vcpu_run(__vcpu);                       \
    get_ucall(__vcpu, &uc);                     \
    ASSERT_EQ(uc.cmd, UCALL_SYNC);                  \
    ASSERT_EQ(uc.args[1], __stage);                 \
})

static void prepare_mem12(void)
{
    int i;

    for (i = 0; i < sizeof(mem1); i++)
        mem1[i] = rand();
    memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
    TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

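/*
 * Round trip: write random data into mem1 through the memop under test,
 * let the guest copy mem1 to mem2, read mem2 back through the memop, and
 * verify the data survived. DEFAULT_READ does the same, but applies the
 * extra arguments (keys etc.) to the read side only.
 */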
#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...)      \
({                                      \
    struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu);    \
    enum mop_target __target = (mop_target_p);              \
    uint32_t __size = (size);                       \
                                        \
    prepare_mem12();                            \
    CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size,       \
            GADDR_V(mem1), ##__VA_ARGS__);              \
    HOST_SYNC(__copy_cpu, STAGE_COPIED);                    \
    CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size,        \
            GADDR_V(mem2), ##__VA_ARGS__);              \
    ASSERT_MEM_EQ(mem1, mem2, __size);                  \
})

#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...)        \
({                                      \
    struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu);    \
    enum mop_target __target = (mop_target_p);              \
    uint32_t __size = (size);                       \
                                        \
    prepare_mem12();                            \
    CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size,       \
            GADDR_V(mem1));                     \
    HOST_SYNC(__copy_cpu, STAGE_COPIED);                    \
    CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
    ASSERT_MEM_EQ(mem1, mem2, __size);                  \
})

static void guest_copy(void)
{
    GUEST_SYNC(STAGE_INITED);
    memcpy(&mem2, &mem1, sizeof(mem2));
    GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
    struct test_default t = test_default_init(guest_copy);

    HOST_SYNC(t.vcpu, STAGE_INITED);

    DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);

    kvm_vm_free(t.kvm_vm);
}

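/*
 * Guest-side helper: for each page in the range, translate the logical
 * address to an absolute address with LRA (flagging unmapped pages via
 * the condition code) and set the page's storage key with SSKE.
 */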
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
    uintptr_t _addr, abs, i;
    int not_mapped = 0;

    _addr = (uintptr_t)addr;
    for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
        abs = i;
        asm volatile (
                   "lra %[abs], 0(0,%[abs])\n"
            "   jz  0f\n"
            "   llill   %[not_mapped],1\n"
            "   j   1f\n"
            "0: sske    %[key], %[abs]\n"
            "1:"
            : [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
            : [key] "r" (key)
            : "cc"
        );
        GUEST_ASSERT_EQ(not_mapped, 0);
    }
}

static void guest_copy_key(void)
{
    set_storage_key_range(mem1, sizeof(mem1), 0x90);
    set_storage_key_range(mem2, sizeof(mem2), 0x90);
    GUEST_SYNC(STAGE_SKEYS_SET);

    for (;;) {
        memcpy(&mem2, &mem1, sizeof(mem2));
        GUEST_SYNC(STAGE_COPIED);
    }
}

static void test_copy_key(void)
{
    struct test_default t = test_default_init(guest_copy_key);

    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vm, no key */
    DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);

    /* vm/vcpu, matching key or key 0 */
    DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
    DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
    DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
    DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
    /*
     * There used to be different code paths for key handling depending on
     * whether the region crossed a page boundary.
     * There currently are not, but the more tests the merrier.
     */
    DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
    DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
    DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
    DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));

    /* vm/vcpu, mismatching keys on read, but no fetch protection */
    DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
    DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));

    kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
    /*
     * For some reason combining the first sync with override enablement
     * results in an exception when calling HOST_SYNC.
     */
    GUEST_SYNC(STAGE_INITED);
    /* Storage protection override applies to both store and fetch. */
    set_storage_key_range(mem1, sizeof(mem1), 0x98);
    set_storage_key_range(mem2, sizeof(mem2), 0x98);
    GUEST_SYNC(STAGE_SKEYS_SET);

    for (;;) {
        memcpy(&mem2, &mem1, sizeof(mem2));
        GUEST_SYNC(STAGE_COPIED);
    }
}

static void test_copy_key_storage_prot_override(void)
{
    struct test_default t = test_default_init(guest_copy_key_fetch_prot);

    HOST_SYNC(t.vcpu, STAGE_INITED);
    t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
    t.run->kvm_dirty_regs = KVM_SYNC_CRS;
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vcpu, mismatching keys, storage protection override in effect */
    DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));

    kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
    struct test_default t = test_default_init(guest_copy_key_fetch_prot);

    HOST_SYNC(t.vcpu, STAGE_INITED);
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vm/vcpu, matching key, fetch protection in effect */
    DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
    DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));

    kvm_vm_free(t.kvm_vm);
}

#define ERR_PROT_MOP(...)                           \
({                                      \
    int rv;                                 \
                                        \
    rv = ERR_MOP(__VA_ARGS__);                      \
    TEST_ASSERT(rv == 4, "Should result in protection exception");      \
})

static void guest_error_key(void)
{
    GUEST_SYNC(STAGE_INITED);
    set_storage_key_range(mem1, PAGE_SIZE, 0x18);
    set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
    GUEST_SYNC(STAGE_SKEYS_SET);
    GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
    struct test_default t = test_default_init(guest_error_key);

    HOST_SYNC(t.vcpu, STAGE_INITED);
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vm/vcpu, mismatching keys, fetch protection in effect */
    CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
    CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
    CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
    CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

    kvm_vm_free(t.kvm_vm);
}

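/*
 * Inject a key-controlled protection exception through the memop and
 * verify, by reading the TEID out of the guest's lowcore, that the
 * faulting access was terminated rather than suppressed.
 */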
static void test_termination(void)
{
    struct test_default t = test_default_init(guest_error_key);
    uint64_t prefix;
    uint64_t teid;
    uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
    uint64_t psw[2];

    HOST_SYNC(t.vcpu, STAGE_INITED);
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vcpu, mismatching keys after first page */
    ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
    /*
     * The memop injected a program exception and the test needs to check the
     * Translation-Exception Identification (TEID). It is necessary to run
     * the guest in order to be able to read the TEID from guest memory.
     * Set the guest program new PSW, so the guest state is not clobbered.
     */
    prefix = t.run->s.regs.prefix;
    psw[0] = t.run->psw_mask;
    psw[1] = t.run->psw_addr;
    MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
    HOST_SYNC(t.vcpu, STAGE_IDLED);
    MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
    /* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
    ASSERT_EQ(teid & teid_mask, 0);

    kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
    struct test_default t = test_default_init(guest_copy_key_fetch_prot);

    HOST_SYNC(t.vcpu, STAGE_INITED);
    t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
    t.run->kvm_dirty_regs = KVM_SYNC_CRS;
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vm, mismatching keys, storage protection override not applicable to vm */
    CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
    CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

    kvm_vm_free(t.kvm_vm);
}

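/*
 * Fetch-protection override, when enabled via CR0, exempts the first
 * 2048 bytes at guest address 0 from key-controlled fetch protection.
 * The tests below map a page at address 0 and at the very last page, so
 * the wraparound across the address-space boundary is exercised as well.
 */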
const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
    int i;
    char *page_0 = 0;

    GUEST_SYNC(STAGE_INITED);
    set_storage_key_range(0, PAGE_SIZE, 0x18);
    set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
    asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc");
    GUEST_SYNC(STAGE_SKEYS_SET);

    for (;;) {
        for (i = 0; i < PAGE_SIZE; i++)
            page_0[i] = mem1[i];
        GUEST_SYNC(STAGE_COPIED);
    }
}

static void test_copy_key_fetch_prot_override(void)
{
    struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
    vm_vaddr_t guest_0_page, guest_last_page;

    guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
    guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
    if (guest_0_page != 0 || guest_last_page != last_page_addr) {
        print_skip("did not allocate guest pages at required positions");
        goto out;
    }

    HOST_SYNC(t.vcpu, STAGE_INITED);
    t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
    t.run->kvm_dirty_regs = KVM_SYNC_CRS;
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vcpu, mismatching keys on fetch, fetch protection override applies */
    prepare_mem12();
    MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
    HOST_SYNC(t.vcpu, STAGE_COPIED);
    CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
    ASSERT_MEM_EQ(mem1, mem2, 2048);

    /*
     * vcpu, mismatching keys on fetch, fetch protection override applies,
     * wraparound
     */
    prepare_mem12();
    MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
    HOST_SYNC(t.vcpu, STAGE_COPIED);
    CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
           GADDR_V(guest_last_page), KEY(2));
    ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
    kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
    struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
    vm_vaddr_t guest_0_page, guest_last_page;

    guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
    guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
    if (guest_0_page != 0 || guest_last_page != last_page_addr) {
        print_skip("did not allocate guest pages at required positions");
        goto out;
    }
    HOST_SYNC(t.vcpu, STAGE_INITED);
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /* vcpu, mismatching keys on fetch, fetch protection override not enabled */
    CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
    kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
    struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
    vm_vaddr_t guest_0_page, guest_last_page;

    guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
    guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
    if (guest_0_page != 0 || guest_last_page != last_page_addr) {
        print_skip("did not allocate guest pages at required positions");
        goto out;
    }
    HOST_SYNC(t.vcpu, STAGE_INITED);
    t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
    t.run->kvm_dirty_regs = KVM_SYNC_CRS;
    HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

    /*
     * vcpu, mismatching keys on fetch, fetch protection override does
     * not apply because the memory range is exceeded
     */
    CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
    CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
           GADDR_V(guest_last_page), KEY(2));
    /* vm, fetch protection override does not apply */
    CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
    CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
    kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
    GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
    for (;;)
        GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
    int rv;

    /* Bad size: */
    rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
    TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

    /* Zero size: */
    rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
    TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
            "ioctl allows 0 as size");

    /* Bad flags: */
    rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
    TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

    /* Bad guest address: */
    rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
    TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

    /* Bad host address: */
    rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
    TEST_ASSERT(rv == -1 && errno == EFAULT,
            "ioctl does not report bad host memory address");

    /* Bad key: */
    rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
    TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
    struct test_default t = test_default_init(guest_idle);
    int rv;

    HOST_SYNC(t.vcpu, STAGE_INITED);

    _test_errors_common(t.vcpu, LOGICAL, t.size);
    _test_errors_common(t.vm, ABSOLUTE, t.size);

    /* Bad operation: */
    rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
    TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
    /* virtual addresses are not translated when passing INVALID */
    rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
    TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

    /* Bad access register: */
    t.run->psw_mask &= ~(3UL << (63 - 17));
    t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */
    HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
    rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
    TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
    t.run->psw_mask &= ~(3UL << (63 - 17));   /* Disable AR mode */
    HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

    /* Check that the SIDA calls are rejected for non-protected guests */
    rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
    TEST_ASSERT(rv == -1 && errno == EINVAL,
            "ioctl does not reject SIDA_READ in non-protected mode");
    rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
    TEST_ASSERT(rv == -1 && errno == EINVAL,
            "ioctl does not reject SIDA_WRITE in non-protected mode");

    kvm_vm_free(t.kvm_vm);
}

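/*
 * Each test is gated on the KVM_CAP_S390_MEM_OP_EXTENSION level it
 * requires; tests with .extension left at 0 always run.
 */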
struct testdef {
    const char *name;
    void (*test)(void);
    int extension;
} testlist[] = {
    {
        .name = "simple copy",
        .test = test_copy,
    },
    {
        .name = "generic error checks",
        .test = test_errors,
    },
    {
        .name = "copy with storage keys",
        .test = test_copy_key,
        .extension = 1,
    },
    {
        .name = "copy with key storage protection override",
        .test = test_copy_key_storage_prot_override,
        .extension = 1,
    },
    {
        .name = "copy with key fetch protection",
        .test = test_copy_key_fetch_prot,
        .extension = 1,
    },
    {
        .name = "copy with key fetch protection override",
        .test = test_copy_key_fetch_prot_override,
        .extension = 1,
    },
    {
        .name = "error checks with key",
        .test = test_errors_key,
        .extension = 1,
    },
    {
        .name = "termination",
        .test = test_termination,
        .extension = 1,
    },
    {
        .name = "error checks with key storage protection override",
        .test = test_errors_key_storage_prot_override,
        .extension = 1,
    },
    {
        .name = "error checks without key fetch prot override",
        .test = test_errors_key_fetch_prot_override_not_enabled,
        .extension = 1,
    },
    {
        .name = "error checks with key fetch prot override",
        .test = test_errors_key_fetch_prot_override_enabled,
        .extension = 1,
    },
};

int main(int argc, char *argv[])
{
    int extension_cap, idx;

    TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));

    setbuf(stdout, NULL);   /* Tell stdout not to buffer its content */

    ksft_print_header();

    ksft_set_plan(ARRAY_SIZE(testlist));

    extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
    for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
        if (extension_cap >= testlist[idx].extension) {
            testlist[idx].test();
            ksft_test_result_pass("%s\n", testlist[idx].name);
        } else {
            ksft_test_result_skip("%s - extension level %d not supported\n",
                          testlist[idx].name,
                          testlist[idx].extension);
        }
    }

    ksft_finished();    /* Print results and exit() accordingly */
}