// SPDX-License-Identifier: GPL-2.0
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

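/*
 * SIGP SENSE: report the destination VCPU as accepted when it is neither
 * stopped nor has an external call pending; otherwise store the matching
 * status bits in the caller's status register.
 */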
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
            u64 *reg)
{
    const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED);
    int rc;
    int ext_call_pending;

    ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
    if (!stopped && !ext_call_pending)
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
    else {
        *reg &= 0xffffffff00000000UL;
        if (ext_call_pending)
            *reg |= SIGP_STATUS_EXT_CALL_PENDING;
        if (stopped)
            *reg |= SIGP_STATUS_STOPPED;
        rc = SIGP_CC_STATUS_STORED;
    }

    VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
           rc);
    return rc;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                    struct kvm_vcpu *dst_vcpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_EMERGENCY,
        .u.emerg.code = vcpu->vcpu_id,
    };
    int rc = 0;

    rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
    if (!rc)
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
               dst_vcpu->vcpu_id);

    return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
    return __inject_sigp_emergency(vcpu, dst_vcpu);
}

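/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: only inject the emergency signal when
 * one of the conditions checked below holds; otherwise store
 * SIGP_STATUS_INCORRECT_STATE in the caller's status register.
 */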
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
                    struct kvm_vcpu *dst_vcpu,
                    u16 asn, u64 *reg)
{
    const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
    u16 p_asn, s_asn;
    psw_t *psw;
    bool idle;

    idle = is_vcpu_idle(vcpu);
    psw = &dst_vcpu->arch.sie_block->gpsw;
    p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
    s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

    /* Inject the emergency signal? */
    if (!is_vcpu_stopped(vcpu)
        || (psw->mask & psw_int_mask) != psw_int_mask
        || (idle && psw->addr != 0)
        || (!idle && (asn == p_asn || asn == s_asn))) {
        return __inject_sigp_emergency(vcpu, dst_vcpu);
    } else {
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_INCORRECT_STATE;
        return SIGP_CC_STATUS_STORED;
    }
}

static int __sigp_external_call(struct kvm_vcpu *vcpu,
                struct kvm_vcpu *dst_vcpu, u64 *reg)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_EXTERNAL_CALL,
        .u.extcall.code = vcpu->vcpu_id,
    };
    int rc;

    rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
    if (rc == -EBUSY) {
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_EXT_CALL_PENDING;
        return SIGP_CC_STATUS_STORED;
    } else if (rc == 0) {
        VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
               dst_vcpu->vcpu_id);
    }

    return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };
    int rc;

    rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
    if (rc == -EBUSY)
        rc = SIGP_CC_BUSY;
    else if (rc == 0)
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
               dst_vcpu->vcpu_id);

    return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
                    struct kvm_vcpu *dst_vcpu, u64 *reg)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
        .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
    };
    int rc;

    rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
    if (rc == -EBUSY)
        rc = SIGP_CC_BUSY;
    else if (rc == 0)
        VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
               dst_vcpu->vcpu_id);

    return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
               u64 *status_reg)
{
    *status_reg &= 0xffffffff00000000UL;

    /* Reject set arch order, with czam we're always in z/Arch mode. */
    *status_reg |= SIGP_STATUS_INVALID_PARAMETER;
    return SIGP_CC_STATUS_STORED;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
                 u32 address, u64 *reg)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_SET_PREFIX,
        .u.prefix.address = address & 0x7fffe000u,
    };
    int rc;

    /*
     * Make sure the new value is valid memory. We only need to check the
     * first page, since address is 8k aligned and memory pieces are always
     * at least 1MB aligned and have at least a size of 1MB.
     */
    if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
    if (rc == -EBUSY) {
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_INCORRECT_STATE;
        return SIGP_CC_STATUS_STORED;
    }

    return rc;
}

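/*
 * SIGP STORE STATUS AT ADDRESS: the destination VCPU must already be
 * stopped; the address is truncated to a 512-byte-aligned 31-bit value
 * before the status is stored.
 */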
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
                       struct kvm_vcpu *dst_vcpu,
                       u32 addr, u64 *reg)
{
    int rc;

    if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) {
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_INCORRECT_STATE;
        return SIGP_CC_STATUS_STORED;
    }

    addr &= 0x7ffffe00;
    rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
    if (rc == -EFAULT) {
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_INVALID_PARAMETER;
        rc = SIGP_CC_STATUS_STORED;
    }
    return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu,
                struct kvm_vcpu *dst_vcpu, u64 *reg)
{
    int rc;

    if (!test_kvm_facility(vcpu->kvm, 9)) {
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_INVALID_ORDER;
        return SIGP_CC_STATUS_STORED;
    }

    if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) {
        /* running */
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
    } else {
        /* not running */
        *reg &= 0xffffffff00000000UL;
        *reg |= SIGP_STATUS_NOT_RUNNING;
        rc = SIGP_CC_STATUS_STORED;
    }

    VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
           dst_vcpu->vcpu_id, rc);

    return rc;
}

static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
                   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
    struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
    /* handle (RE)START in user space */
    int rc = -EOPNOTSUPP;

    /* make sure we don't race with STOP irq injection */
    spin_lock(&li->lock);
    if (kvm_s390_is_stop_irq_pending(dst_vcpu))
        rc = SIGP_CC_BUSY;
    spin_unlock(&li->lock);

    return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
                    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
    /* handle (INITIAL) CPU RESET in user space */
    return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
                  struct kvm_vcpu *dst_vcpu)
{
    /* handle unknown orders in user space */
    return -EOPNOTSUPP;
}

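/*
 * Dispatch a SIGP order that addresses a specific destination CPU. Returns
 * a SIGP condition code, a negative error code, or -EOPNOTSUPP when the
 * order has to be completed in user space.
 */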
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
               u16 cpu_addr, u32 parameter, u64 *status_reg)
{
    int rc;
    struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

    if (!dst_vcpu)
        return SIGP_CC_NOT_OPERATIONAL;

    /*
     * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
     * are processed asynchronously. Until the affected VCPU finishes
     * its work and calls back into KVM to clear the (RESTART or STOP)
     * interrupt, we need to return any new non-reset orders "busy".
     *
     * This is important because a single VCPU could issue:
     *  1) SIGP STOP $DESTINATION
     *  2) SIGP SENSE $DESTINATION
     *
     * If the SIGP SENSE would not be rejected as "busy", it could
     * return an incorrect answer as to whether the VCPU is STOPPED
     * or OPERATING.
     */
    if (order_code != SIGP_INITIAL_CPU_RESET &&
        order_code != SIGP_CPU_RESET) {
        /*
         * Lockless check. Both SIGP STOP and SIGP (RE)START
         * properly synchronize everything while processing
         * their orders, while the guest cannot observe a
         * difference when issuing other orders from two
         * different VCPUs.
         */
        if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
            kvm_s390_is_restart_irq_pending(dst_vcpu))
            return SIGP_CC_BUSY;
    }

    switch (order_code) {
    case SIGP_SENSE:
        vcpu->stat.instruction_sigp_sense++;
        rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
        break;
    case SIGP_EXTERNAL_CALL:
        vcpu->stat.instruction_sigp_external_call++;
        rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
        break;
    case SIGP_EMERGENCY_SIGNAL:
        vcpu->stat.instruction_sigp_emergency++;
        rc = __sigp_emergency(vcpu, dst_vcpu);
        break;
    case SIGP_STOP:
        vcpu->stat.instruction_sigp_stop++;
        rc = __sigp_stop(vcpu, dst_vcpu);
        break;
    case SIGP_STOP_AND_STORE_STATUS:
        vcpu->stat.instruction_sigp_stop_store_status++;
        rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
        break;
    case SIGP_STORE_STATUS_AT_ADDRESS:
        vcpu->stat.instruction_sigp_store_status++;
        rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
                         status_reg);
        break;
    case SIGP_SET_PREFIX:
        vcpu->stat.instruction_sigp_prefix++;
        rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
        break;
    case SIGP_COND_EMERGENCY_SIGNAL:
        vcpu->stat.instruction_sigp_cond_emergency++;
        rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
                          status_reg);
        break;
    case SIGP_SENSE_RUNNING:
        vcpu->stat.instruction_sigp_sense_running++;
        rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
        break;
    case SIGP_START:
        vcpu->stat.instruction_sigp_start++;
        rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
        break;
    case SIGP_RESTART:
        vcpu->stat.instruction_sigp_restart++;
        rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
        break;
    case SIGP_INITIAL_CPU_RESET:
        vcpu->stat.instruction_sigp_init_cpu_reset++;
        rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
        break;
    case SIGP_CPU_RESET:
        vcpu->stat.instruction_sigp_cpu_reset++;
        rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
        break;
    default:
        vcpu->stat.instruction_sigp_unknown++;
        rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
    }

    if (rc == -EOPNOTSUPP)
        VCPU_EVENT(vcpu, 4,
               "sigp order %u -> cpu %x: handled in user space",
               order_code, dst_vcpu->vcpu_id);

    return rc;
}

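/*
 * Check whether the order needs to be handled in user space. Returns 1 if
 * user space SIGP handling is enabled (kvm->arch.user_sigp) and the order
 * is not one of the orders the kernel always handles itself, 0 otherwise.
 */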
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
                       u16 cpu_addr)
{
    if (!vcpu->kvm->arch.user_sigp)
        return 0;

    switch (order_code) {
    case SIGP_SENSE:
    case SIGP_EXTERNAL_CALL:
    case SIGP_EMERGENCY_SIGNAL:
    case SIGP_COND_EMERGENCY_SIGNAL:
    case SIGP_SENSE_RUNNING:
        return 0;
    /* update counters as we're directly dropping to user space */
    case SIGP_STOP:
        vcpu->stat.instruction_sigp_stop++;
        break;
    case SIGP_STOP_AND_STORE_STATUS:
        vcpu->stat.instruction_sigp_stop_store_status++;
        break;
    case SIGP_STORE_STATUS_AT_ADDRESS:
        vcpu->stat.instruction_sigp_store_status++;
        break;
    case SIGP_STORE_ADDITIONAL_STATUS:
        vcpu->stat.instruction_sigp_store_adtl_status++;
        break;
    case SIGP_SET_PREFIX:
        vcpu->stat.instruction_sigp_prefix++;
        break;
    case SIGP_START:
        vcpu->stat.instruction_sigp_start++;
        break;
    case SIGP_RESTART:
        vcpu->stat.instruction_sigp_restart++;
        break;
    case SIGP_INITIAL_CPU_RESET:
        vcpu->stat.instruction_sigp_init_cpu_reset++;
        break;
    case SIGP_CPU_RESET:
        vcpu->stat.instruction_sigp_cpu_reset++;
        break;
    default:
        vcpu->stat.instruction_sigp_unknown++;
    }
    VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
           order_code, cpu_addr);

    return 1;
}

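/*
 * Intercept handler for the SIGP instruction: decode the order code, CPU
 * address and parameter from the guest registers, dispatch the order and
 * set the guest's condition code, or return -EOPNOTSUPP to let user space
 * complete the order.
 */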
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
    int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
    int r3 = vcpu->arch.sie_block->ipa & 0x000f;
    u32 parameter;
    u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
    u8 order_code;
    int rc;

    /* sigp in userspace can exit */
    if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

    order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
    if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
        return -EOPNOTSUPP;

    if (r1 % 2)
        parameter = vcpu->run->s.regs.gprs[r1];
    else
        parameter = vcpu->run->s.regs.gprs[r1 + 1];

    trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
    switch (order_code) {
    case SIGP_SET_ARCHITECTURE:
        vcpu->stat.instruction_sigp_arch++;
        rc = __sigp_set_arch(vcpu, parameter,
                     &vcpu->run->s.regs.gprs[r1]);
        break;
    default:
        rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
                     parameter,
                     &vcpu->run->s.regs.gprs[r1]);
    }

    if (rc < 0)
        return rc;

    kvm_s390_set_psw_cc(vcpu, rc);
    return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
    int r3 = vcpu->arch.sie_block->ipa & 0x000f;
    u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
    struct kvm_vcpu *dest_vcpu;
    u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

    if (order_code == SIGP_EXTERNAL_CALL) {
        trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

        dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
        BUG_ON(dest_vcpu == NULL);

        kvm_s390_vcpu_wakeup(dest_vcpu);
        kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
        return 0;
    }

    return -EOPNOTSUPP;
}