// SPDX-License-Identifier: GPL-2.0
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>
#include <asm/uv.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

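/*
 * Return the length in bytes (2, 4 or 6) of the last intercepted
 * instruction: usually derived from the stored opcode, but taken from
 * the intercept status when the instruction was EXECUTEd, and from the
 * interruption length code for program interruptions.
 */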
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
    struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
    u8 ilen = 0;

    switch (vcpu->arch.sie_block->icptcode) {
    case ICPT_INST:
    case ICPT_INSTPROGI:
    case ICPT_OPEREXC:
    case ICPT_PARTEXEC:
    case ICPT_IOINST:
        /* instruction only stored for these icptcodes */
        ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
        /* Use the length of the EXECUTE instruction if necessary */
        if (sie_block->icptstatus & 1) {
            ilen = (sie_block->icptstatus >> 4) & 0x6;
            if (!ilen)
                ilen = 4;
        }
        break;
    case ICPT_PROGI:
        /* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
        ilen = vcpu->arch.sie_block->pgmilc & 0x6;
        break;
    }
    return ilen;
}

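/*
 * Handle the intercept for a pending SIGP STOP request. The stop is
 * delayed while other interrupts are pending; once it has been carried
 * out, -EOPNOTSUPP is returned so that the intercept is also passed on
 * to userspace, which tracks the cpu state.
 */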
static int handle_stop(struct kvm_vcpu *vcpu)
{
    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
    int rc = 0;
    uint8_t flags, stop_pending;

    vcpu->stat.exit_stop_request++;

    /* delay the stop if any non-stop irq is pending */
    if (kvm_s390_vcpu_has_irq(vcpu, 1))
        return 0;

    /* avoid races with the injection/SIGP STOP code */
    spin_lock(&li->lock);
    flags = li->irq.stop.flags;
    stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
    spin_unlock(&li->lock);

    trace_kvm_s390_stop_request(stop_pending, flags);
    if (!stop_pending)
        return 0;

    if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
        rc = kvm_s390_vcpu_store_status(vcpu,
                        KVM_S390_STORE_STATUS_NOADDR);
        if (rc)
            return rc;
    }

    /*
     * no need to check the return value of vcpu_stop as it can only have
     * an error for protvirt, but protvirt means user cpu state
     */
    if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
        kvm_s390_vcpu_stop(vcpu);
    return -EOPNOTSUPP;
}

static int handle_validity(struct kvm_vcpu *vcpu)
{
    int viwhy = vcpu->arch.sie_block->ipb >> 16;

    vcpu->stat.exit_validity++;
    trace_kvm_s390_intercept_validity(vcpu, viwhy);
    KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
          current->pid, vcpu->kvm);

    /* do not warn on invalid runtime instrumentation mode */
    WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
          viwhy);
    return -EINVAL;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
    vcpu->stat.exit_instruction++;
    trace_kvm_s390_intercept_instruction(vcpu,
                         vcpu->arch.sie_block->ipa,
                         vcpu->arch.sie_block->ipb);

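    /*
     * ipa contains bytes 0-1 of the intercepted instruction; dispatch
     * on the first opcode byte.
     */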
    switch (vcpu->arch.sie_block->ipa >> 8) {
    case 0x01:
        return kvm_s390_handle_01(vcpu);
    case 0x82:
        return kvm_s390_handle_lpsw(vcpu);
    case 0x83:
        return kvm_s390_handle_diag(vcpu);
    case 0xaa:
        return kvm_s390_handle_aa(vcpu);
    case 0xae:
        return kvm_s390_handle_sigp(vcpu);
    case 0xb2:
        return kvm_s390_handle_b2(vcpu);
    case 0xb6:
        return kvm_s390_handle_stctl(vcpu);
    case 0xb7:
        return kvm_s390_handle_lctl(vcpu);
    case 0xb9:
        return kvm_s390_handle_b9(vcpu);
    case 0xe3:
        return kvm_s390_handle_e3(vcpu);
    case 0xe5:
        return kvm_s390_handle_e5(vcpu);
    case 0xeb:
        return kvm_s390_handle_eb(vcpu);
    default:
        return -EOPNOTSUPP;
    }
}

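/*
 * Re-inject a program interruption that was recognized while running the
 * guest: collect the additional interruption data that is valid for the
 * given interruption code from the SIE block and forward the event.
 */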
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
    struct kvm_s390_pgm_info pgm_info = {
        .code = vcpu->arch.sie_block->iprcc,
        /* the PSW has already been rewound */
        .flags = KVM_S390_PGM_FLAGS_NO_REWIND,
    };

    switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
    case PGM_AFX_TRANSLATION:
    case PGM_ASX_TRANSLATION:
    case PGM_EX_TRANSLATION:
    case PGM_LFX_TRANSLATION:
    case PGM_LSTE_SEQUENCE:
    case PGM_LSX_TRANSLATION:
    case PGM_LX_TRANSLATION:
    case PGM_PRIMARY_AUTHORITY:
    case PGM_SECONDARY_AUTHORITY:
    case PGM_SPACE_SWITCH:
        pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
        break;
    case PGM_ALEN_TRANSLATION:
    case PGM_ALE_SEQUENCE:
    case PGM_ASTE_INSTANCE:
    case PGM_ASTE_SEQUENCE:
    case PGM_ASTE_VALIDITY:
    case PGM_EXTENDED_AUTHORITY:
        pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
        break;
    case PGM_ASCE_TYPE:
    case PGM_PAGE_TRANSLATION:
    case PGM_REGION_FIRST_TRANS:
    case PGM_REGION_SECOND_TRANS:
    case PGM_REGION_THIRD_TRANS:
    case PGM_SEGMENT_TRANSLATION:
        pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
        pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
        pgm_info.op_access_id  = vcpu->arch.sie_block->oai;
        break;
    case PGM_MONITOR:
        pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
        pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
        break;
    case PGM_VECTOR_PROCESSING:
    case PGM_DATA:
        pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
        break;
    case PGM_PROTECTION:
        pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
        pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
        break;
    default:
        break;
    }

    if (vcpu->arch.sie_block->iprcc & PGM_PER) {
        pgm_info.per_code = vcpu->arch.sie_block->perc;
        pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
        pgm_info.per_address = vcpu->arch.sie_block->peraddr;
        pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
    }
    return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
    struct kvm_s390_itdb *itdb;
    int rc;

    if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
        return 0;
    if (current->thread.per_flags & PER_FLAG_NO_TE)
        return 0;
    itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
    rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
    if (rc)
        return rc;
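    /* clear the ITDB to avoid delivering stale data on a later intercept */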
    memset(itdb, 0, sizeof(*itdb));

    return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

static int handle_prog(struct kvm_vcpu *vcpu)
{
    psw_t psw;
    int rc;

    vcpu->stat.exit_program_interruption++;

    /*
     * Intercept 8 indicates a loop of specification exceptions
     * for protected guests.
     */
    if (kvm_s390_pv_cpu_is_protected(vcpu))
        return -EOPNOTSUPP;

    if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
        rc = kvm_s390_handle_per_event(vcpu);
        if (rc)
            return rc;
        /* the interrupt might have been filtered out completely */
        if (vcpu->arch.sie_block->iprcc == 0)
            return 0;
    }

    trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
    if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
        rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
        if (rc)
            return rc;
        /* Avoid endless loops of specification exceptions */
        if (!is_valid_psw(&psw))
            return -EOPNOTSUPP;
    }
    rc = handle_itdb(vcpu);
    if (rc)
        return rc;

    return inject_prog_on_prog_intercept(vcpu);
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 * @vcpu: virtual cpu
 *
 * This interception occurs only if the CPUSTAT_EXT_INT bit was set, or if
 * the external new PSW has external interrupts enabled. In the first case,
 * we have to deliver the interrupt manually; in the second case, delivering
 * it would cause an interruption loop, so we drop to userspace to handle
 * the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
    u16 eic = vcpu->arch.sie_block->eic;
    struct kvm_s390_irq irq;
    psw_t newpsw;
    int rc;

    vcpu->stat.exit_external_interrupt++;

    rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
    if (rc)
        return rc;
    /* We cannot handle clock comparator or timer interrupts with a bad PSW */
    if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
        (newpsw.mask & PSW_MASK_EXT))
        return -EOPNOTSUPP;

    switch (eic) {
    case EXT_IRQ_CLK_COMP:
        irq.type = KVM_S390_INT_CLOCK_COMP;
        break;
    case EXT_IRQ_CPU_TIMER:
        irq.type = KVM_S390_INT_CPU_TIMER;
        break;
    case EXT_IRQ_EXTERNAL_CALL:
        irq.type = KVM_S390_INT_EXTERNAL_CALL;
        irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
        rc = kvm_s390_inject_vcpu(vcpu, &irq);
        /* ignore if another external call is already pending */
        if (rc == -EBUSY)
            return 0;
        return rc;
    default:
        return -EOPNOTSUPP;
    }

    return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
 * @vcpu: virtual cpu
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
    unsigned long srcaddr, dstaddr;
    int reg1, reg2, rc;

    kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

    /* Ensure that the source is paged-in, no actual access -> no key checking */
    rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
                          reg2, &srcaddr, GACC_FETCH, 0);
    if (rc)
        return kvm_s390_inject_prog_cond(vcpu, rc);
    rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
    if (rc != 0)
        return rc;

    /* Ensure that the destination is paged-in, no actual access -> no key checking */
    rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
                          reg1, &dstaddr, GACC_STORE, 0);
    if (rc)
        return kvm_s390_inject_prog_cond(vcpu, rc);
    rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
    if (rc != 0)
        return rc;

    kvm_s390_retry_instr(vcpu);

    return 0;
}

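/*
 * A partial-execution intercept occurs when the hardware could not complete
 * an instruction by itself; only MVPG and SIGP orders are expected here.
 */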
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
    vcpu->stat.exit_pei++;

    if (vcpu->arch.sie_block->ipa == 0xb254)    /* MVPG */
        return handle_mvpg_pei(vcpu);
    if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
        return kvm_s390_handle_sigp_pei(vcpu);

    return -EOPNOTSUPP;
}

/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
    int reg1, reg2, r = 0;
    u64 code, addr, cc = 0, rc = 0;
    struct sthyi_sctns *sctns = NULL;

    if (!test_kvm_facility(vcpu->kvm, 74))
        return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

    kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
    code = vcpu->run->s.regs.gprs[reg1];
    addr = vcpu->run->s.regs.gprs[reg2];

    vcpu->stat.instruction_sthyi++;
    VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
    trace_kvm_s390_handle_sthyi(vcpu, code, addr);

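    /* the operands must be in two distinct even registers */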
    if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

    if (code & 0xffff) {
        cc = 3;
        rc = 4;
        goto out;
    }

    if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

    sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
    if (!sctns)
        return -ENOMEM;

    cc = sthyi_fill(sctns, &rc);

out:
    if (!cc) {
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
            memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
                   sctns, PAGE_SIZE);
        } else {
            r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
            if (r) {
                free_page((unsigned long)sctns);
                return kvm_s390_inject_prog_cond(vcpu, r);
            }
        }
    }

    free_page((unsigned long)sctns);
    vcpu->run->s.regs.gprs[reg2 + 1] = rc;
    kvm_s390_set_psw_cc(vcpu, cc);
    return r;
}

static int handle_operexc(struct kvm_vcpu *vcpu)
{
    psw_t oldpsw, newpsw;
    int rc;

    vcpu->stat.exit_operation_exception++;
    trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
                      vcpu->arch.sie_block->ipb);

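    /* opcode 0xb256 is STHYI, which is emulated in the kernel */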
    if (vcpu->arch.sie_block->ipa == 0xb256)
        return handle_sthyi(vcpu);

    if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
        return -EOPNOTSUPP;
    rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
    if (rc)
        return rc;
    /*
     * Avoid endless loops of operation exceptions, if the pgm new
     * PSW will cause a new operation exception.
     * The heuristic checks if the pgm new psw is within 6 bytes before
     * the faulting psw address (with same DAT, AS settings) and the
     * new psw is not a wait psw and the fault was not triggered by
     * problem state.
     */
    oldpsw = vcpu->arch.sie_block->gpsw;
    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
        return -EOPNOTSUPP;

    return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

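/*
 * Notification that a protected guest has changed its prefix: the new
 * value is provided in the secure instruction data area (sidad) and only
 * needs to be mirrored into KVM's own prefix tracking.
 */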
static int handle_pv_spx(struct kvm_vcpu *vcpu)
{
    u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;

    kvm_s390_set_prefix(vcpu, pref);
    trace_kvm_s390_handle_prefix(vcpu, 1, pref);
    return 0;
}

static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
    struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

    spin_lock(&fi->lock);
    /*
     * 2 cases:
     * a: an sccb answering interrupt was already pending or in flight.
     *    As the sccb value is not known we can simply set some value to
     *    trigger delivery of a saved SCCB. UV will then use its saved
     *    copy of the SCCB value.
     * b: an error SCCB interrupt needs to be injected so we also inject
     *    a fake SCCB address. Firmware will use the proper one.
     * This makes sure that both errors and real sccb returns will only
     * be delivered after a notification intercept (instruction has
     * finished) but not after others.
     */
    fi->srv_signal.ext_params |= 0x43000;
    set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
    clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
    spin_unlock(&fi->lock);
    return 0;
}

static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
    struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
    struct uv_cb_cts uvcb = {
        .header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
        .header.len = sizeof(uvcb),
        .guest_handle   = kvm_s390_pv_get_handle(vcpu->kvm),
        .gaddr      = guest_uvcb->paddr,
    };
    int rc;

    if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
        WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
              guest_uvcb->header.cmd);
        return 0;
    }
    rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
    /*
     * If the unpin did not succeed, the guest will exit again for the UVC
     * and we will retry the unpin.
     */
    if (rc == -EINVAL)
        return 0;
    /*
     * If we got -EAGAIN here, we simply return it. It will eventually
     * get propagated all the way to userspace, which should then try
     * again.
     */
    return rc;
}

static int handle_pv_notification(struct kvm_vcpu *vcpu)
{
    int ret;

    if (vcpu->arch.sie_block->ipa == 0xb210)
        return handle_pv_spx(vcpu);
    if (vcpu->arch.sie_block->ipa == 0xb220)
        return handle_pv_sclp(vcpu);
    if (vcpu->arch.sie_block->ipa == 0xb9a4)
        return handle_pv_uvc(vcpu);
    if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
        /*
         * Besides external call, other SIGP orders also cause a
         * 108 (pv notify) intercept. In contrast to external call,
         * these orders need to be emulated and hence the appropriate
         * place to handle them is in handle_instruction().
         * So first try kvm_s390_handle_sigp_pei() and if that isn't
         * successful, go on with handle_instruction().
         */
        ret = kvm_s390_handle_sigp_pei(vcpu);
        if (!ret)
            return ret;
    }

    return handle_instruction(vcpu);
}

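/*
 * Top-level dispatcher for SIE intercepts. Returns 0 if the intercept was
 * handled completely in the kernel, a negative error code otherwise;
 * -EOPNOTSUPP indicates that the intercept has to be handled in userspace.
 */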
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
    int rc, per_rc = 0;

    if (kvm_is_ucontrol(vcpu->kvm))
        return -EOPNOTSUPP;

    switch (vcpu->arch.sie_block->icptcode) {
    case ICPT_EXTREQ:
        vcpu->stat.exit_external_request++;
        return 0;
    case ICPT_IOREQ:
        vcpu->stat.exit_io_request++;
        return 0;
    case ICPT_INST:
        rc = handle_instruction(vcpu);
        break;
    case ICPT_PROGI:
        return handle_prog(vcpu);
    case ICPT_EXTINT:
        return handle_external_interrupt(vcpu);
    case ICPT_WAIT:
        return kvm_s390_handle_wait(vcpu);
    case ICPT_VALIDITY:
        return handle_validity(vcpu);
    case ICPT_STOP:
        return handle_stop(vcpu);
    case ICPT_OPEREXC:
        rc = handle_operexc(vcpu);
        break;
    case ICPT_PARTEXEC:
        rc = handle_partial_execution(vcpu);
        break;
    case ICPT_KSS:
        rc = kvm_s390_skey_check_enable(vcpu);
        break;
    case ICPT_MCHKREQ:
    case ICPT_INT_ENABLE:
        /*
         * PSW bit 13 or a CR (0, 6, 14) changed and we might
         * now be able to deliver interrupts. The pre-run code
         * will take care of this.
         */
        rc = 0;
        break;
    case ICPT_PV_INSTR:
        rc = handle_instruction(vcpu);
        break;
    case ICPT_PV_NOTIFY:
        rc = handle_pv_notification(vcpu);
        break;
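    /*
     * The guest prefix pages must be secure for the protected guest to
     * run; convert both of them back before re-entering SIE.
     */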
    case ICPT_PV_PREF:
        rc = 0;
        gmap_convert_to_secure(vcpu->arch.gmap,
                       kvm_s390_get_prefix(vcpu));
        gmap_convert_to_secure(vcpu->arch.gmap,
                       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
        break;
    default:
        return -EOPNOTSUPP;
    }

    /* process PER, also if the instruction is processed in user space */
    if (vcpu->arch.sie_block->icptstatus & 0x02 &&
        (!rc || rc == -EOPNOTSUPP))
        per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
    return per_rc ? per_rc : rc;
}