0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/kvm.h>
0012 #include <linux/kvm_host.h>
0013 #include <asm/gmap.h>
0014 #include <asm/virtio-ccw.h>
0015 #include "kvm-s390.h"
0016 #include "trace.h"
0017 #include "trace-s390.h"
0018 #include "gaccess.h"
0019
/*
 * Handle DIAGNOSE 0x10: release (discard) a range of guest pages.
 *
 * The operand registers are taken from the instruction's ipa field:
 * the high nibble names the register holding the start address, the
 * low nibble the register holding the last page's address (hence the
 * "+ PAGE_SIZE" to form an exclusive end).
 *
 * Returns 0 on success or the result of injecting a specification
 * exception for malformed operands.
 */
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix = kvm_s390_get_prefix(vcpu);

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
	/* count the attempt even if the operands turn out to be invalid */
	vcpu->stat.instruction_diagnose_10++;

	/*
	 * Both addresses must be page aligned, the range must be non-empty,
	 * and pages 0/1 (the prefix area at absolute 0) may not be released.
	 */
	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);

	/*
	 * Fast path: the range does not touch the two-page prefix window
	 * at [prefix, prefix + 2 * PAGE_SIZE), so it can be discarded as is.
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * Slow path: the range overlaps the prefix window.  The
		 * prefix pages live at absolute addresses 0 and PAGE_SIZE,
		 * so discard the pieces before and after the window plus,
		 * where covered, the absolute pages 0 and 1.
		 * NOTE(review): this relies on gmap_discard tolerating
		 * empty/inverted sub-ranges (e.g. start > prefix) — confirm
		 * against the gmap implementation.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
		if (end > prefix + PAGE_SIZE)
			gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}
0057
/*
 * Handle DIAGNOSE 0x258: page-reference services (pfault handshake).
 *
 * Reads a parameter block from guest memory (address in the rx register)
 * and either establishes (subcode 0) or cancels (subcode 1) asynchronous
 * page-fault token handling for this vcpu.  The result code expected by
 * the guest is stored in the ry register.
 *
 * Returns 0 on handled subcodes, -EOPNOTSUPP for unknown subcodes, or
 * the result of injecting a program interrupt on invalid operands.
 */
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	/* Layout of the guest-supplied parameter block. */
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	/* rx/ry register numbers are encoded in the instruction's ipa field */
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
		   vcpu->run->s.regs.gprs[rx]);
	vcpu->stat.instruction_diagnose_258++;
	/* the parameter block must be doubleword aligned */
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN: establish the pfault handshake */
		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
			   "select mask 0x%llx compare mask 0x%llx",
			   parm.token_addr, parm.select_mask, parm.compare_mask);
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * The handshake is already active; the token must
			 * not be changed.  Report result code 8 to the
			 * guest instead (per the DIAG 0x258 specification —
			 * TODO confirm against SC24-6084).
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		/*
		 * The compare mask must be a subset of the select mask,
		 * the token address doubleword aligned, and only the
		 * z/Architecture-mode bit set in the zarch field.
		 */
		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1:
		/*
		 * CANCEL: tear down the pfault handshake.  All operand
		 * fields must be zero for this subcode.
		 */
		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If the handshake was never established (or was already
		 * cancelled) report result code 4; otherwise invalidate
		 * the token to deactivate it.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}
0144
/*
 * Handle DIAGNOSE 0x44: voluntary time-slice end.
 * The guest gives up its time slice; let another vcpu run by spinning
 * the directed-yield machinery (yield-to-heuristics enabled).
 */
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.instruction_diagnose_44++;
	kvm_vcpu_on_spin(vcpu, true);
	return 0;
}
0152
/*
 * Rate limiting state for diag 0x9c yield forwarding.
 * NOTE(review): these are shared by all vcpus without synchronization;
 * races only make the rate limit slightly imprecise, which appears to
 * be tolerated by design — confirm.
 */
static int forward_cnt;
static unsigned long cur_slice;

/*
 * Returns 1 when the per-jiffy budget of forwarded yields has been
 * used up, 0 when another forward is still allowed in this slice.
 */
static int diag9c_forwarding_overrun(void)
{
	/* Reset the count on a new jiffies slice */
	if (time_after(jiffies, cur_slice)) {
		cur_slice = jiffies;
		forward_cnt = diag9c_forwarding_hz / HZ;
	}
	return forward_cnt-- <= 0 ? 1 : 0;
}
0165
/*
 * Handle DIAGNOSE 0x9c: directed time-slice end.
 *
 * The guest asks to yield its time slice to the vcpu whose id is in the
 * register named by the instruction's rx nibble.  If the target vcpu is
 * currently running on a host CPU that has itself been preempted, the
 * yield is forwarded to the host scheduler (rate limited); otherwise a
 * regular yield-to is attempted.  Always returns 0 — a failed yield is
 * only counted, never reported to the guest.
 */
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tcpu;
	int tid;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.instruction_diagnose_9c++;

	/* yielding to ourselves is pointless */
	if (tid == vcpu->vcpu_id)
		goto no_yield;

	/* nothing to do if the target vcpu does not exist */
	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
	if (!tcpu)
		goto no_yield;

	/* target vcpu is already loaded on a host CPU */
	if (READ_ONCE(tcpu->cpu) >= 0) {
		/* forwarding must be enabled and within the rate limit */
		if (!diag9c_forwarding_hz || diag9c_forwarding_overrun())
			goto no_yield;

		/* only forward if the target's host CPU was preempted */
		if (!vcpu_is_preempted(tcpu->cpu))
			goto no_yield;
		smp_yield_cpu(tcpu->cpu);
		VCPU_EVENT(vcpu, 5,
			   "diag time slice end directed to %d: yield forwarded",
			   tid);
		vcpu->stat.diag_9c_forward++;
		return 0;
	}

	if (kvm_vcpu_yield_to(tcpu) <= 0)
		goto no_yield;

	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
	return 0;
no_yield:
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
	vcpu->stat.diag_9c_ignored++;
	return 0;
}
0209
/*
 * Handle DIAGNOSE 0x308: IPL (re-ipl / reset) functions.
 *
 * Only subcodes 3 (clear reset) and 4 (reset without clear) are handled;
 * anything else returns -EOPNOTSUPP.  The actual reset is delegated to
 * userspace: the requested reset flags are stored in the run structure
 * and -EREMOTE tells the caller to exit to userspace with
 * KVM_EXIT_S390_RESET.
 */
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
	vcpu->stat.instruction_diagnose_308++;
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/*
	 * Stop the vcpu ourselves only when userspace does not manage the
	 * cpu state; otherwise the stop is left to userspace as part of
	 * handling the reset exit.
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}
0243
/*
 * Handle DIAGNOSE 0x500: virtio-ccw notification hypercall.
 *
 * Register ABI as read below:
 *  - gpr 1: function code, must be KVM_S390_VIRTIO_CCW_NOTIFY
 *  - gpr 2: subchannel id (used as the bus address)
 *  - gpr 3: virtqueue index (used as datamatch)
 *  - gpr 4: cookie from a previous notification
 *
 * Returns 0 on success (result code delivered to the guest in gpr 2),
 * -EOPNOTSUPP when css support is off or the function code is unknown,
 * or a negative error from the io bus write.
 */
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.instruction_diagnose_500++;
	/* only supported when userspace uses the in-kernel css model */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
		   (u32) vcpu->run->s.regs.gprs[2],
		   (u32) vcpu->run->s.regs.gprs[3],
		   vcpu->run->s.regs.gprs[4]);

	/*
	 * The cookie in gpr 4 lets the notify backend short-cut the lookup
	 * of the target eventfd on the KVM_VIRTIO_CCW_NOTIFY_BUS.
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Report the outcome (new cookie or negative error) to the guest
	 * in gpr 2, but leave gpr 2 untouched for -EOPNOTSUPP so the
	 * guest does not see an error for an unhandled notification.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;

	return ret < 0 ? ret : 0;
}
0279
0280 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
0281 {
0282 int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
0283
0284 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
0285 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
0286
0287 trace_kvm_s390_handle_diag(vcpu, code);
0288 switch (code) {
0289 case 0x10:
0290 return diag_release_pages(vcpu);
0291 case 0x44:
0292 return __diag_time_slice_end(vcpu);
0293 case 0x9c:
0294 return __diag_time_slice_end_directed(vcpu);
0295 case 0x258:
0296 return __diag_page_ref_service(vcpu);
0297 case 0x308:
0298 return __diag_ipl_functions(vcpu);
0299 case 0x500:
0300 return __diag_virtio_hypercall(vcpu);
0301 default:
0302 vcpu->stat.instruction_diagnose_other++;
0303 return -EOPNOTSUPP;
0304 }
0305 }