0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/vfio.h>
0013
0014 #include <asm/isc.h>
0015
0016 #include "ioasm.h"
0017 #include "vfio_ccw_private.h"
0018
/*
 * fsm_io_helper - issue a "Start Subchannel" (ssch) for the translated
 * channel program on behalf of the guest.
 *
 * Returns 0 on success (and advances the FSM to VFIO_CCW_STATE_CP_PENDING),
 * or a negative errno describing why the start failed.
 */
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	/* Build the ORB for the translated channel program. */
	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
	if (!orb) {
		ret = -EIO;
		goto out;
	}

	VFIO_CCW_TRACE_EVENT(5, "stIO");
	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));

	/* Issue "Start Subchannel". */
	ccode = ssch(sch->schid, orb);

	VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information and mark the start
		 * function as pending until the I/O interrupt arrives.
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		private->state = VFIO_CCW_STATE_CP_PENDING;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
		/*
		 * Invalidate the path(s) the start was attempted on; if no
		 * specific path mask was in the ORB, all paths are gone.
		 */
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			/* Paths remain -> access problem; none left -> gone. */
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
		/* Unexpected condition code; pass it through. */
		ret = ccode;
	}
out:
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
0080
/*
 * fsm_do_halt - issue a "Halt Subchannel" (hsch) on behalf of the guest.
 *
 * Returns 0 if the halt function was started, or a negative errno.
 */
static int fsm_do_halt(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	unsigned long flags;
	int ccode;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "haltIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Halt Subchannel". */
	ccode = hsch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information: the halt function
		 * is now pending on the subchannel.
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		ret = 0;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		/* Unexpected condition code; pass it through. */
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
0121
/*
 * fsm_do_clear - issue a "Clear Subchannel" (csch) on behalf of the guest.
 *
 * Returns 0 if the clear function was started, or a negative errno.
 * Note that csch only yields condition codes 0 and 3.
 */
static int fsm_do_clear(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	unsigned long flags;
	int ccode;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "clearIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Clear Subchannel". */
	ccode = csch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information: clear resets the
		 * activity control, leaving only clear pending.
		 */
		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
		/* TODO: check what else we might need to clear */
		ret = 0;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		/* Unexpected condition code; pass it through. */
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
0159
/*
 * fsm_notoper - handle the "device not operational" event: schedule the
 * subchannel for unregistration and drop into NOT_OPER state.
 */
static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
			   sch->schid.cssid,
			   sch->schid.ssid,
			   sch->schid.sch_no,
			   event,
			   private->state);

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;

	/* This is usually handled during CLOSE event */
	cp_free(&private->cp);
}
0182
0183
0184
0185
0186 static void fsm_nop(struct vfio_ccw_private *private,
0187 enum vfio_ccw_event event)
0188 {
0189 }
0190
0191 static void fsm_io_error(struct vfio_ccw_private *private,
0192 enum vfio_ccw_event event)
0193 {
0194 pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
0195 private->io_region->ret_code = -EIO;
0196 }
0197
0198 static void fsm_io_busy(struct vfio_ccw_private *private,
0199 enum vfio_ccw_event event)
0200 {
0201 private->io_region->ret_code = -EBUSY;
0202 }
0203
0204 static void fsm_io_retry(struct vfio_ccw_private *private,
0205 enum vfio_ccw_event event)
0206 {
0207 private->io_region->ret_code = -EAGAIN;
0208 }
0209
0210 static void fsm_async_error(struct vfio_ccw_private *private,
0211 enum vfio_ccw_event event)
0212 {
0213 struct ccw_cmd_region *cmd_region = private->cmd_region;
0214
0215 pr_err("vfio-ccw: FSM: %s request from state:%d\n",
0216 cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
0217 cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
0218 "<unknown>", private->state);
0219 cmd_region->ret_code = -EIO;
0220 }
0221
0222 static void fsm_async_retry(struct vfio_ccw_private *private,
0223 enum vfio_ccw_event event)
0224 {
0225 private->cmd_region->ret_code = -EAGAIN;
0226 }
0227
0228 static void fsm_disabled_irq(struct vfio_ccw_private *private,
0229 enum vfio_ccw_event event)
0230 {
0231 struct subchannel *sch = private->sch;
0232
0233
0234
0235
0236
0237 cio_disable_subchannel(sch);
0238 }
0239 inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
0240 {
0241 return p->sch->schid;
0242 }
0243
0244
0245
0246
/*
 * fsm_io_request - deal with a ccw command request from userspace.
 *
 * Copies the scsw from the I/O region, and for a start function builds,
 * prefetches and starts the channel program.  On success the FSM stays in
 * CP_PROCESSING (advanced to CP_PENDING by fsm_io_helper); on any failure
 * it returns to IDLE with the reason in io_region->ret_code.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	char *errstr = "request";
	struct subchannel_id schid = get_schid(private);

	private->state = VFIO_CCW_STATE_CP_PROCESSING;
	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: transport mode\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no);
			errstr = "transport mode";
			goto err_out;
		}
		/* Translate the guest channel program into host memory. */
		io_region->ret_code = cp_init(&private->cp, orb);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: cp_init=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: cp_prefetch=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp prefetch";
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "sch %x.%x.%04x: fsm_io_helper=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		/* Halt must come in via the async command region instead. */
		VFIO_CCW_MSG_EVENT(2,
				   "sch %x.%x.%04x: halt on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* halt is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		/* Clear must come in via the async command region instead. */
		VFIO_CCW_MSG_EVENT(2,
				   "sch %x.%x.%04x: clear on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* clear is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}

	/*
	 * NOTE(review): if no function control bit is set we fall through to
	 * err_out without writing ret_code, so the value userspace placed in
	 * the region is traced and returned as-is — confirm this is intended.
	 */
err_out:
	private->state = VFIO_CCW_STATE_IDLE;
	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
				      io_region->ret_code, errstr);
}
0331
0332
0333
0334
0335 static void fsm_async_request(struct vfio_ccw_private *private,
0336 enum vfio_ccw_event event)
0337 {
0338 struct ccw_cmd_region *cmd_region = private->cmd_region;
0339
0340 switch (cmd_region->command) {
0341 case VFIO_CCW_ASYNC_CMD_HSCH:
0342 cmd_region->ret_code = fsm_do_halt(private);
0343 break;
0344 case VFIO_CCW_ASYNC_CMD_CSCH:
0345 cmd_region->ret_code = fsm_do_clear(private);
0346 break;
0347 default:
0348
0349 cmd_region->ret_code = -EINVAL;
0350 }
0351
0352 trace_vfio_ccw_fsm_async_request(get_schid(private),
0353 cmd_region->command,
0354 cmd_region->ret_code);
0355 }
0356
0357
0358
0359
/*
 * fsm_irq - got an interrupt for a normal I/O (state busy).
 *
 * Runs in interrupt context: snapshot the per-cpu irb before queueing the
 * bottom-half work that forwards it to userspace.
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	VFIO_CCW_TRACE_EVENT(6, "IRQ");
	VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));

	/* Copy the irb before the per-cpu buffer can be reused. */
	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	/* Wake up anyone waiting for this interrupt (e.g. quiesce). */
	if (private->completion)
		complete(private->completion);
}
0375
0376 static void fsm_open(struct vfio_ccw_private *private,
0377 enum vfio_ccw_event event)
0378 {
0379 struct subchannel *sch = private->sch;
0380 int ret;
0381
0382 spin_lock_irq(sch->lock);
0383 sch->isc = VFIO_CCW_ISC;
0384 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
0385 if (ret)
0386 goto err_unlock;
0387
0388 private->state = VFIO_CCW_STATE_IDLE;
0389 spin_unlock_irq(sch->lock);
0390 return;
0391
0392 err_unlock:
0393 spin_unlock_irq(sch->lock);
0394 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
0395 }
0396
/*
 * fsm_close - disable the subchannel (quiescing it first if it is still
 * busy), free the channel program and move the FSM back to STANDBY; on
 * failure the NOT_OPER event is raised instead.
 */
static void fsm_close(struct vfio_ccw_private *private,
		      enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;
	int ret;

	spin_lock_irq(sch->lock);

	/* Already disabled - nothing to do, but that is an error here. */
	if (!sch->schib.pmcw.ena)
		goto err_unlock;

	ret = cio_disable_subchannel(sch);
	if (ret == -EBUSY)
		/* Still busy: quiesce outstanding I/O before disabling. */
		ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		goto err_unlock;

	private->state = VFIO_CCW_STATE_STANDBY;
	spin_unlock_irq(sch->lock);
	/* Release the translated channel program outside the lock. */
	cp_free(&private->cp);
	return;

err_unlock:
	spin_unlock_irq(sch->lock);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
0423
0424
0425
0426
/*
 * Device statemachine: one handler per (state, event) pair.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_nop,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_nop,
	},
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_open,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_notoper,
	},
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	[VFIO_CCW_STATE_CP_PROCESSING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	[VFIO_CCW_STATE_CP_PENDING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
};