/*
 * Driver core for the S/390 channel subsystem (css): subchannel bus
 * handling, subchannel evaluation and the CIO DMA pool.
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

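/*
 * Call @fn for each possible subchannel id in each subchannel set, until
 * @fn returns a non-zero value or all ids have been visited.
 */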
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

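/*
 * Iterate over subchannels in two stages: first over the subchannels that
 * are already registered on the css bus (@fn_known), then over the
 * remaining, unknown subchannel ids (@fn_unknown).
 */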
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Call known subchannels only. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

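/*
 * Allocate and minimally initialize a struct subchannel for @schid after
 * validating @schib. Returns the new subchannel or an ERR_PTR() value.
 */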
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);

	/*
	 * The physical addresses of some of the dma structures that can
	 * belong to a subchannel (e.g. ccws) need to be 31 bit addressable.
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * The streaming DMA API does not have this restriction, so allow
	 * the full 64 bit mask there.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * A condition code of 3 (not operational) indicates that there are
	 * no more devices in this subchannel set.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow
 * path workqueue. Do nothing if an operation with at least the same
 * priority is already scheduled. Must be called with the subchannel lock
 * held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time on platforms with many
		 * known devices, so allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct ccw_device *cdev;

	if (sch->st == SUBCHANNEL_TYPE_IO) {
		cdev = sch_get_cdev(sch);
		if (cdev && cdev->online)
			idset_sch_del(set, sch->schid);
	}

	return 0;
}

void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Find subchannels matching the given condition. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_UNREG:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * The subchannel the CRW refers to may have appeared or disappeared;
	 * (re-)evaluate it to find out.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * The css device is used as the DMA device for channel subsystem
	 * wide allocations; 64 bit DMA addresses are fine for that.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

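/*
 * Create a gen_pool of DMA-coherent memory: allocate @nr_pages pages from
 * @dma_dev and add them to a new pool with a minimum allocation order of
 * 8 bytes. Returns NULL only if the pool itself cannot be created.
 */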
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* Free each chunk's DMA memory, then destroy the pool itself. */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* Start with a single page; the pool grows on demand. */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

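/*
 * Allocate zeroed memory from @gp_dma. If the pool is exhausted, grow it by
 * a page-aligned DMA-coherent chunk from @dma_dev and retry.
 */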
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
				chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

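/*
 * Return a buffer obtained from cio_gp_dma_zalloc() to the pool. The buffer
 * is cleared so that the next allocation starts out zeroed again.
 */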
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the global cio DMA pool. Only intended for small
 * buffers (up to about a page); the pool is grown page-wise on demand.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

/*
 * Now that the driver core is running, we can setup the channel subsystem.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish. */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open = nonseekable_open,
	.proc_write = cio_settle_write,
	.proc_lseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static void css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->remove)
		sch->driver->remove(sch);
	sch->driver = NULL;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name = "css",
	.match = css_bus_match,
	.probe = css_probe,
	.remove = css_remove,
	.shutdown = css_shutdown,
	.uevent = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * Register @cdrv with the css bus so that it can be bound to matching
 * subchannel types.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);