// SPDX-License-Identifier: GPL-2.0
/*
 * CCW device PGID and path verification I/O handling.
 *
 * Copyright IBM Corp. 2002, 2009
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
			 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
			 sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->dma_area->pgid[i];

	pgid->inf.fc = fn;
	cp->cmd_code = CCW_CMD_SET_PGID;
	cp->cda = (u32) (addr_t) pgid;
	cp->count = sizeof(*pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path group state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leave unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam;
	req->callback = pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could not be queried. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}

static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	req->callback = spid_callback;
	spid_do(cdev);
}
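
/*
 * Check whether a PGID is "reset", i.e. all bytes after the leading
 * function-control byte are zero.
 */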
static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}
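
/*
 * Compare two PGIDs, ignoring the leading function-control byte.
 */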
static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->dma_area->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}
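
/*
 * Return the mask of paths whose sensed PGID state already matches the
 * requested pathgrouping and multipathing settings.
 */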
static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->dma_area->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}
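
/*
 * Copy the given PGID into the PGID slots of all eight paths.
 */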
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->dma_area->pgid[i], pgid,
		       sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			/* At least one SNID failed. */
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code = CCW_CMD_SENSE_PGID;
	cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
	cp->count = sizeof(struct pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for a single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->dma_area->pgid, 0,
	       sizeof(cdev->private->dma_area->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback = snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter = nop_filter;
		req->callback = nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to determine which ones are
 * operational. The resulting path mask and PGID state are reported via the
 * ccw_device_verify_done callback.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, the result is reported via the
 * ccw_device_disband_done callback.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->singlepath = 1;
	req->callback = disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}
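
/* Completion and result of a steal-lock (unconditional reserve) request. */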
struct stlck_data {
	struct completion done;
	int rc;
};
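
/*
 * Create channel program to perform an unconditional reserve (STLCK)
 * followed by a RELEASE, using two caller-provided 32-byte buffers.
 */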
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}
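
/* Report completion and result of an STLCK request. */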
static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to the request callback
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->data = data;
	req->callback = stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}