0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/module.h>
0011 #include <linux/init.h>
0012 #include <linux/jiffies.h>
0013 #include <linux/string.h>
0014
0015 #include <asm/ccwdev.h>
0016 #include <asm/cio.h>
0017 #include <asm/chpid.h>
0018
0019 #include "cio.h"
0020 #include "cio_debug.h"
0021 #include "css.h"
0022 #include "device.h"
0023 #include "chsc.h"
0024 #include "ioasm.h"
0025 #include "chp.h"
0026
/*
 * Set via the "ccw_timeout_log" kernel parameter; when non-zero,
 * ccw_timeout_log() dumps diagnostics on each ccw device timeout.
 */
static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
0036
/*
 * Dump diagnostic state for @cdev after an I/O timeout: the orb, the last
 * channel program (command or transport mode), the stored schib and the
 * private device flags.  Only called when "ccw_timeout_log" was given on
 * the kernel command line.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	/* Store subchannel information; cc and schib are dumped below. */
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		/* Transport mode: dump the last tcw. */
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		/*
		 * Command mode: mark the dump "(intern)" when the channel
		 * program was built by the cio layer itself (sense ccw or
		 * internal ccws) rather than by the device driver.
		 */
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa ==
		    &private->dma_area->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
0093
0094
0095
0096
/*
 * Timer expiry function: deliver DEV_EVENT_TIMEOUT to the device state
 * machine under the ccw device lock, optionally dumping diagnostics first.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
0109
0110
0111
0112
0113 void
0114 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
0115 {
0116 if (expires == 0)
0117 del_timer(&cdev->private->timer);
0118 else
0119 mod_timer(&cdev->private->timer, jiffies + expires);
0120 }
0121
0122 int
0123 ccw_device_cancel_halt_clear(struct ccw_device *cdev)
0124 {
0125 struct subchannel *sch;
0126 int ret;
0127
0128 sch = to_subchannel(cdev->dev.parent);
0129 ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);
0130
0131 if (ret == -EIO)
0132 CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
0133 cdev->private->dev_id.ssid,
0134 cdev->private->dev_id.devno);
0135
0136 return ret;
0137 }
0138
0139 void ccw_device_update_sense_data(struct ccw_device *cdev)
0140 {
0141 memset(&cdev->id, 0, sizeof(cdev->id));
0142 cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
0143 cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
0144 cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
0145 cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
0146 }
0147
0148 int ccw_device_test_sense_data(struct ccw_device *cdev)
0149 {
0150 return cdev->id.cu_type ==
0151 cdev->private->dma_area->senseid.cu_type &&
0152 cdev->id.cu_model ==
0153 cdev->private->dma_area->senseid.cu_model &&
0154 cdev->id.dev_type ==
0155 cdev->private->dma_area->senseid.dev_type &&
0156 cdev->id.dev_model ==
0157 cdev->private->dma_area->senseid.dev_model;
0158 }
0159
0160
0161
0162
0163
0164
0165 static void
0166 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
0167 {
0168 int mask, i;
0169 struct chp_id chpid;
0170
0171 chp_id_init(&chpid);
0172 for (i = 0; i<8; i++) {
0173 mask = 0x80 >> i;
0174 if (!(sch->lpm & mask))
0175 continue;
0176 if (old_lpm & mask)
0177 continue;
0178 chpid.id = sch->schib.pmcw.chpid[i];
0179 if (!chp_is_registered(chpid))
0180 css_schedule_eval_all();
0181 }
0182 }
0183
0184
0185
0186
/*
 * Finish device recognition: disable the subchannel, refresh the path
 * masks, and transition the device into @state (downgraded to
 * DEV_STATE_NOT_OPER if the subchannel operations fail).
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;

	/* Remember the old path mask so changes can be detected below. */
	old_lpm = sch->lpm;

	/* Re-read subchannel information and recompute the usable paths. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force the path-recovery check for disconnected devices. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		/*
		 * Recognition of a disconnected device did not succeed:
		 * stay disconnected and wake up any waiters.
		 */
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			/* Same device identity as before: put it back online. */
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			/* Device identity changed: rebind the driver. */
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}
0253
0254
0255
0256
0257 void
0258 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
0259 {
0260 switch (err) {
0261 case 0:
0262 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
0263 break;
0264 case -ETIME:
0265 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
0266 break;
0267 default:
0268 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
0269 break;
0270 }
0271 }
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284 int ccw_device_notify(struct ccw_device *cdev, int event)
0285 {
0286 int ret = -EINVAL;
0287
0288 if (!cdev->drv)
0289 goto out;
0290 if (!cdev->online)
0291 goto out;
0292 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
0293 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
0294 event);
0295 if (!cdev->drv->notify) {
0296 ret = -EOPNOTSUPP;
0297 goto out;
0298 }
0299 if (cdev->drv->notify(cdev, event))
0300 ret = NOTIFY_OK;
0301 else
0302 ret = NOTIFY_BAD;
0303 out:
0304 return ret;
0305 }
0306
0307 static void ccw_device_oper_notify(struct ccw_device *cdev)
0308 {
0309 struct subchannel *sch = to_subchannel(cdev->dev.parent);
0310
0311 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
0312
0313 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
0314
0315 cdev->private->path_new_mask = sch->vpm;
0316 return;
0317 }
0318
0319 ccw_device_set_notoper(cdev);
0320 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
0321 }
0322
0323
0324
0325
/*
 * Stop device recognition/operation and move the device into its final
 * @state, notifying the driver and scheduling follow-up work as needed.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	/* Deliver a deferred "operational again" notification, if any.
	 * The cases above clear donotify so it only fires for states that
	 * did not already notify the driver. */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}
0382
0383
0384
0385
0386 void ccw_device_recognition(struct ccw_device *cdev)
0387 {
0388 struct subchannel *sch = to_subchannel(cdev->dev.parent);
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398 cdev->private->flags.recog_done = 0;
0399 cdev->private->state = DEV_STATE_SENSE_ID;
0400 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
0401 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
0402 return;
0403 }
0404 ccw_device_sense_id_start(cdev);
0405 }
0406
0407
0408
0409
0410 static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
0411 {
0412 switch (e) {
0413 case DEV_EVENT_NOTOPER:
0414 ccw_request_notoper(cdev);
0415 break;
0416 case DEV_EVENT_INTERRUPT:
0417 ccw_request_handler(cdev);
0418 break;
0419 case DEV_EVENT_TIMEOUT:
0420 ccw_request_timeout(cdev);
0421 break;
0422 default:
0423 break;
0424 }
0425 }
0426
0427 static void ccw_device_report_path_events(struct ccw_device *cdev)
0428 {
0429 struct subchannel *sch = to_subchannel(cdev->dev.parent);
0430 int path_event[8];
0431 int chp, mask;
0432
0433 for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
0434 path_event[chp] = PE_NONE;
0435 if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
0436 path_event[chp] |= PE_PATH_GONE;
0437 if (mask & cdev->private->path_new_mask & sch->vpm)
0438 path_event[chp] |= PE_PATH_AVAILABLE;
0439 if (mask & cdev->private->pgid_reset_mask & sch->vpm)
0440 path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
0441 }
0442 if (cdev->online && cdev->drv->path_event)
0443 cdev->drv->path_event(cdev, path_event);
0444 }
0445
0446 static void ccw_device_reset_path_events(struct ccw_device *cdev)
0447 {
0448 cdev->private->path_gone_mask = 0;
0449 cdev->private->path_new_mask = 0;
0450 cdev->private->pgid_reset_mask = 0;
0451 }
0452
0453 static void create_fake_irb(struct irb *irb, int type)
0454 {
0455 memset(irb, 0, sizeof(*irb));
0456 if (type == FAKE_CMD_IRB) {
0457 struct cmd_scsw *scsw = &irb->scsw.cmd;
0458 scsw->cc = 1;
0459 scsw->fctl = SCSW_FCTL_START_FUNC;
0460 scsw->actl = SCSW_ACTL_START_PEND;
0461 scsw->stctl = SCSW_STCTL_STATUS_PEND;
0462 } else if (type == FAKE_TM_IRB) {
0463 struct tm_scsw *scsw = &irb->scsw.tm;
0464 scsw->x = 1;
0465 scsw->cc = 1;
0466 scsw->fctl = SCSW_FCTL_START_FUNC;
0467 scsw->actl = SCSW_ACTL_START_PEND;
0468 scsw->stctl = SCSW_STCTL_STATUS_PEND;
0469 }
0470 }
0471
0472 static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
0473 {
0474 struct subchannel *sch = to_subchannel(cdev->dev.parent);
0475 u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
0476
0477 if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
0478 ccw_device_schedule_recovery();
0479
0480 cdev->private->path_broken_mask = broken_paths;
0481 }
0482
/*
 * Completion callback for path verification: deliver a pending fake irb,
 * report path events and finalize the device state according to @err.
 */
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - path masks may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->dma_area->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->dma_area->irb);
			memset(&cdev->private->dma_area->irb, 0,
			       sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}
0537
0538
0539
0540
0541 int
0542 ccw_device_online(struct ccw_device *cdev)
0543 {
0544 struct subchannel *sch;
0545 int ret;
0546
0547 if ((cdev->private->state != DEV_STATE_OFFLINE) &&
0548 (cdev->private->state != DEV_STATE_BOXED))
0549 return -EINVAL;
0550 sch = to_subchannel(cdev->dev.parent);
0551 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
0552 if (ret != 0) {
0553
0554 if (ret == -ENODEV)
0555 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
0556 return ret;
0557 }
0558
0559 cdev->private->state = DEV_STATE_VERIFY;
0560 ccw_device_verify_start(cdev);
0561 return 0;
0562 }
0563
0564 void
0565 ccw_device_disband_done(struct ccw_device *cdev, int err)
0566 {
0567 switch (err) {
0568 case 0:
0569 ccw_device_done(cdev, DEV_STATE_OFFLINE);
0570 break;
0571 case -ETIME:
0572 ccw_device_done(cdev, DEV_STATE_BOXED);
0573 break;
0574 default:
0575 cdev->private->flags.donotify = 0;
0576 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
0577 break;
0578 }
0579 }
0580
0581
0582
0583
/*
 * Take the device offline: handle the trivial states directly, otherwise
 * start disbanding the path group (if one is in use) before completing.
 * Returns 0 on success, -ENODEV/-EBUSY/-EINVAL when the subchannel is
 * gone, busy or in an unexpected state.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
0622
0623
0624
0625
0626 static void ccw_device_generic_notoper(struct ccw_device *cdev,
0627 enum dev_event dev_event)
0628 {
0629 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
0630 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
0631 else
0632 ccw_device_set_disconnected(cdev);
0633 }
0634
0635
0636
0637
0638 static void ccw_device_offline_verify(struct ccw_device *cdev,
0639 enum dev_event dev_event)
0640 {
0641 struct subchannel *sch = to_subchannel(cdev->dev.parent);
0642
0643 css_schedule_eval(sch->schid);
0644 }
0645
0646
0647
0648
/*
 * Handle path verification events while the device is online.
 * Verification is deferred (doverify flag) while sense or other I/O
 * activity is still in progress.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
	     SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle; we can start path verification right away. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
0684
0685
0686
0687
0688 static void ccw_device_boxed_verify(struct ccw_device *cdev,
0689 enum dev_event dev_event)
0690 {
0691 struct subchannel *sch = to_subchannel(cdev->dev.parent);
0692
0693 if (cdev->online) {
0694 if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
0695 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
0696 else
0697 ccw_device_online_verify(cdev, dev_event);
0698 } else
0699 css_schedule_eval(sch->schid);
0700 }
0701
0702
0703
0704
/*
 * Deliver the accumulated irb to the device driver's interrupt handler
 * if its status is of interest.  Returns 1 if the irb was consumed
 * (handler called or no handler registered), 0 if it should keep
 * accumulating.
 */
static int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * The handler is called if:
	 *  .. we received ending status (secondary, alert or plain
	 *     status pending), or
	 *  .. the driver requested to see all interrupts (repall), or
	 *  .. we received an intermediate status, or
	 *  .. fast notification was requested and primary status arrived.
	 */
	stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/* The I/O is finished; no further timeout needed. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);

	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->dma_area->irb);

	/* Status was delivered; start accumulating a fresh irb. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
	return 1;
}
0739
0740
0741
0742
/*
 * Interrupt handler for the online state: filter unsolicited interrupts,
 * start basic sense after a unit check without concurrent sense data and
 * pass accumulated status to the device driver.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data: do basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->dma_area->irb, irb,
			       sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
0784
0785
0786
0787
/*
 * Got a timeout in the online state: try to cancel/halt/clear the running
 * I/O.  While the subchannel is busy, retry later from TIMEOUT_KILL;
 * otherwise signal not-operational or report -ETIMEDOUT to the driver.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Subchannel still busy: retry in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
0808
0809
0810
0811
/*
 * Got an interrupt while waiting for basic sense: collect the sense data,
 * then go back to the online state and deliver the result to the driver.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online. */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
0862
/*
 * Got an interrupt while a kill (cancel/halt/clear) was pending: the I/O
 * is dead now.  Start delayed path verification, then report the stored
 * termination code to the driver.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}
0874
/*
 * Timeout while killing I/O: retry cancel/halt/clear until the subchannel
 * is no longer busy, then report the stored termination code to the
 * driver.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still busy: try again in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}
0891
/*
 * Forcibly terminate the current I/O on @cdev.  While the subchannel is
 * busy, defer to the TIMEOUT_KILL state; otherwise start delayed path
 * verification and report -EIO to the driver.
 */
void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -EIO;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Subchannel still busy: retry in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}
0911
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after the current task has finished. */
	cdev->private->flags.doverify = 1;
}
0918
0919 static void
0920 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
0921 {
0922 struct subchannel *sch;
0923
0924 sch = to_subchannel(cdev->dev.parent);
0925 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
0926
0927 return;
0928 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
0929 ccw_device_sense_id_start(cdev);
0930 }
0931
/*
 * Re-probe a disconnected device: refresh the subchannel configuration
 * and restart sense ID, or hand the subchannel to the evaluation worker
 * when a different device appeared on the same subchannel.
 */
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the
	 * best we have before performing device selection.
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the
	 * old paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}
0963
static void ccw_device_disabled_irq(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in a disabled state means a previous disable was
	 * not successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}
0976
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Retry the pending set-schib operation, then return to the
	 * online state and re-deliver the event to the state machine. */
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
0984
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	/* Retry the pending measurement-block copy, then return to the
	 * online state and re-deliver the event to the state machine. */
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
0992
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Quiescing finished: stop the timer, mark the device not
	 * operational and wake up any waiters. */
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}
1000
1001 static void
1002 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1003 {
1004 int ret;
1005
1006 ret = ccw_device_cancel_halt_clear(cdev);
1007 if (ret == -EBUSY) {
1008 ccw_device_set_timeout(cdev, HZ/10);
1009 } else {
1010 cdev->private->state = DEV_STATE_NOT_OPER;
1011 wake_up(&cdev->private->wait_q);
1012 }
1013 }
1014
1015
1016
1017
1018
/* No operation action: used for state/event combinations to be ignored. */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
1023
1024
1025
1026
/*
 * Device state machine: one handler function per (state, event) pair.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};
1121
1122 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);