Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-1.0+
0002 /*
0003  *  bus driver for ccw devices
0004  *
0005  *    Copyright IBM Corp. 2002, 2008
0006  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
0007  *       Cornelia Huck (cornelia.huck@de.ibm.com)
0008  *       Martin Schwidefsky (schwidefsky@de.ibm.com)
0009  */
0010 
0011 #define KMSG_COMPONENT "cio"
0012 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
0013 
0014 #include <linux/export.h>
0015 #include <linux/init.h>
0016 #include <linux/spinlock.h>
0017 #include <linux/errno.h>
0018 #include <linux/err.h>
0019 #include <linux/slab.h>
0020 #include <linux/list.h>
0021 #include <linux/device.h>
0022 #include <linux/workqueue.h>
0023 #include <linux/delay.h>
0024 #include <linux/timer.h>
0025 #include <linux/kernel_stat.h>
0026 #include <linux/sched/signal.h>
0027 #include <linux/dma-mapping.h>
0028 
0029 #include <asm/ccwdev.h>
0030 #include <asm/cio.h>
0031 #include <asm/param.h>      /* HZ */
0032 #include <asm/cmb.h>
0033 #include <asm/isc.h>
0034 
0035 #include "chp.h"
0036 #include "cio.h"
0037 #include "cio_debug.h"
0038 #include "css.h"
0039 #include "device.h"
0040 #include "ioasm.h"
0041 #include "io_sch.h"
0042 #include "blacklist.h"
0043 #include "chsc.h"
0044 
/* Timer and state for staged device recovery retries; delays are in
 * seconds (multiplied by HZ at arm time — see the HZ include above;
 * NOTE(review): confirm against recovery_func, which is outside this view). */
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

/* Number of devices currently in recognition and a waitqueue that is
 * woken when the count drains to zero (used by io_subchannel_settle()). */
static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;
0053 
0054 /******************* bus type handling ***********************/
0055 
0056 /* The Linux driver model distinguishes between a bus type and
0057  * the bus itself. Of course we only have one channel
0058  * subsystem driver and one channel system per machine, but
0059  * we still use the abstraction. T.R. says it's a good idea. */
0060 static int
0061 ccw_bus_match (struct device * dev, struct device_driver * drv)
0062 {
0063     struct ccw_device *cdev = to_ccwdev(dev);
0064     struct ccw_driver *cdrv = to_ccwdrv(drv);
0065     const struct ccw_device_id *ids = cdrv->ids, *found;
0066 
0067     if (!ids)
0068         return 0;
0069 
0070     found = ccw_device_id_match(ids, &cdev->id);
0071     if (!found)
0072         return 0;
0073 
0074     cdev->id.driver_info = found->driver_info;
0075 
0076     return 1;
0077 }
0078 
0079 /* Store modalias string delimited by prefix/suffix string into buffer with
0080  * specified size. Return length of resulting string (excluding trailing '\0')
0081  * even if string doesn't fit buffer (snprintf semantics). */
0082 static int snprint_alias(char *buf, size_t size,
0083              struct ccw_device_id *id, const char *suffix)
0084 {
0085     int len;
0086 
0087     len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
0088     if (len > size)
0089         return len;
0090     buf += len;
0091     size -= len;
0092 
0093     if (id->dev_type != 0)
0094         len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
0095                 id->dev_model, suffix);
0096     else
0097         len += snprintf(buf, size, "dtdm%s", suffix);
0098 
0099     return len;
0100 }
0101 
0102 /* Set up environment variables for ccw device uevent. Return 0 on success,
0103  * non-zero otherwise. */
0104 static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
0105 {
0106     struct ccw_device *cdev = to_ccwdev(dev);
0107     struct ccw_device_id *id = &(cdev->id);
0108     int ret;
0109     char modalias_buf[30];
0110 
0111     /* CU_TYPE= */
0112     ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
0113     if (ret)
0114         return ret;
0115 
0116     /* CU_MODEL= */
0117     ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
0118     if (ret)
0119         return ret;
0120 
0121     /* The next two can be zero, that's ok for us */
0122     /* DEV_TYPE= */
0123     ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
0124     if (ret)
0125         return ret;
0126 
0127     /* DEV_MODEL= */
0128     ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
0129     if (ret)
0130         return ret;
0131 
0132     /* MODALIAS=  */
0133     snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
0134     ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
0135     return ret;
0136 }
0137 
0138 static void io_subchannel_irq(struct subchannel *);
0139 static int io_subchannel_probe(struct subchannel *);
0140 static void io_subchannel_remove(struct subchannel *);
0141 static void io_subchannel_shutdown(struct subchannel *);
0142 static int io_subchannel_sch_event(struct subchannel *, int);
0143 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
0144                    int);
0145 static void recovery_func(struct timer_list *unused);
0146 
/* This driver handles subchannels of the I/O type only. */
static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
0151 
0152 static int io_subchannel_settle(void)
0153 {
0154     int ret;
0155 
0156     ret = wait_event_interruptible(ccw_device_init_wq,
0157                 atomic_read(&ccw_device_init_count) == 0);
0158     if (ret)
0159         return -EINTR;
0160     flush_workqueue(cio_work_q);
0161     return 0;
0162 }
0163 
/* css_driver glue: routes events for I/O subchannels into this driver. */
static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.settle = io_subchannel_settle,
};
0178 
0179 int __init io_subchannel_init(void)
0180 {
0181     int ret;
0182 
0183     timer_setup(&recovery_timer, recovery_func, 0);
0184     ret = bus_register(&ccw_bus_type);
0185     if (ret)
0186         return ret;
0187     ret = css_driver_register(&io_subchannel_driver);
0188     if (ret)
0189         bus_unregister(&ccw_bus_type);
0190 
0191     return ret;
0192 }
0193 
0194 
0195 /************************ device handling **************************/
0196 
0197 static ssize_t
0198 devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
0199 {
0200     struct ccw_device *cdev = to_ccwdev(dev);
0201     struct ccw_device_id *id = &(cdev->id);
0202 
0203     if (id->dev_type != 0)
0204         return sprintf(buf, "%04x/%02x\n",
0205                 id->dev_type, id->dev_model);
0206     else
0207         return sprintf(buf, "n/a\n");
0208 }
0209 
0210 static ssize_t
0211 cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
0212 {
0213     struct ccw_device *cdev = to_ccwdev(dev);
0214     struct ccw_device_id *id = &(cdev->id);
0215 
0216     return sprintf(buf, "%04x/%02x\n",
0217                id->cu_type, id->cu_model);
0218 }
0219 
0220 static ssize_t
0221 modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
0222 {
0223     struct ccw_device *cdev = to_ccwdev(dev);
0224     struct ccw_device_id *id = &(cdev->id);
0225     int len;
0226 
0227     len = snprint_alias(buf, PAGE_SIZE, id, "\n");
0228 
0229     return len > PAGE_SIZE ? PAGE_SIZE : len;
0230 }
0231 
0232 static ssize_t
0233 online_show (struct device *dev, struct device_attribute *attr, char *buf)
0234 {
0235     struct ccw_device *cdev = to_ccwdev(dev);
0236 
0237     return sprintf(buf, cdev->online ? "1\n" : "0\n");
0238 }
0239 
0240 int ccw_device_is_orphan(struct ccw_device *cdev)
0241 {
0242     return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
0243 }
0244 
/*
 * Tear down the driver-core registration of @cdev: undo device_add()
 * if it happened, and drop the reference held since device_initialize()
 * exactly once, using the initialized flag as guard so repeated calls
 * are safe.
 */
static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}
0257 
0258 static void io_subchannel_quiesce(struct subchannel *);
0259 
/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	/* Give the driver a chance to veto or prepare for going offline. */
	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		/*
		 * -EBUSY: quiesce the subchannel (the lock must be dropped
		 * around the quiesce call) and retry; restore the saved FSM
		 * state afterwards since the quiesce path may change it.
		 */
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
0335 
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		/* NOTE(review): state is re-read here after dropping the
		 * lock, so the message decision can race with FSM updates;
		 * presumably benign for diagnostics — confirm. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	/* ret is 0 here; it stays 0 if the driver has no set_online(). */
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	/* Driver refused the device: disable it again and report the
	 * driver's error code, not the rollback's. */
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
0434 
0435 static int online_store_handle_offline(struct ccw_device *cdev)
0436 {
0437     if (cdev->private->state == DEV_STATE_DISCONNECTED) {
0438         spin_lock_irq(cdev->ccwlock);
0439         ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
0440         spin_unlock_irq(cdev->ccwlock);
0441         return 0;
0442     }
0443     if (cdev->drv && cdev->drv->set_offline)
0444         return ccw_device_set_offline(cdev);
0445     return -EINVAL;
0446 }
0447 
0448 static int online_store_recog_and_online(struct ccw_device *cdev)
0449 {
0450     /* Do device recognition, if needed. */
0451     if (cdev->private->state == DEV_STATE_BOXED) {
0452         spin_lock_irq(cdev->ccwlock);
0453         ccw_device_recognition(cdev);
0454         spin_unlock_irq(cdev->ccwlock);
0455         wait_event(cdev->private->wait_q,
0456                cdev->private->flags.recog_done);
0457         if (cdev->private->state != DEV_STATE_OFFLINE)
0458             /* recognition failed */
0459             return -EAGAIN;
0460     }
0461     if (cdev->drv && cdev->drv->set_online)
0462         return ccw_device_set_online(cdev);
0463     return -EINVAL;
0464 }
0465 
/*
 * Handle a sysfs request to set the device online.  With @force set
 * and the device boxed, try ccw_device_stlck() to free the device and
 * then retry recognition/onlining.
 */
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		/* NOTE(review): cu_type == 0 presumably means no sense-id
		 * data is known yet, so force a fresh recognition — confirm. */
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}
0485 
/*
 * sysfs store for the "online" attribute.  Accepts a hex number
 * ("0" = offline, "1" = online) or the literal "force\n" to force a
 * boxed device online.  Uses the onoff atomic as a single-owner gate
 * so only one on-/offline request runs at a time.
 */
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing.*/
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		/* Base 16: any value other than 0/1 falls to -EINVAL below. */
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	/* Serialize against driver bind/unbind on this device. */
	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	/* Release the single-owner gate taken by the cmpxchg above. */
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
0535 
0536 static ssize_t
0537 available_show (struct device *dev, struct device_attribute *attr, char *buf)
0538 {
0539     struct ccw_device *cdev = to_ccwdev(dev);
0540     struct subchannel *sch;
0541 
0542     if (ccw_device_is_orphan(cdev))
0543         return sprintf(buf, "no device\n");
0544     switch (cdev->private->state) {
0545     case DEV_STATE_BOXED:
0546         return sprintf(buf, "boxed\n");
0547     case DEV_STATE_DISCONNECTED:
0548     case DEV_STATE_DISCONNECTED_SENSE_ID:
0549     case DEV_STATE_NOT_OPER:
0550         sch = to_subchannel(dev->parent);
0551         if (!sch->lpm)
0552             return sprintf(buf, "no path\n");
0553         else
0554             return sprintf(buf, "no device\n");
0555     default:
0556         /* All other states considered fine. */
0557         return sprintf(buf, "good\n");
0558     }
0559 }
0560 
0561 static ssize_t
0562 initiate_logging(struct device *dev, struct device_attribute *attr,
0563          const char *buf, size_t count)
0564 {
0565     struct subchannel *sch = to_subchannel(dev);
0566     int rc;
0567 
0568     rc = chsc_siosl(sch->schid);
0569     if (rc < 0) {
0570         pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
0571             sch->schid.ssid, sch->schid.sch_no, rc);
0572         return rc;
0573     }
0574     pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
0575           sch->schid.ssid, sch->schid.sch_no);
0576     return count;
0577 }
0578 
/* sysfs: show the subchannel's verified path mask (vpm) in hex. */
static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}
0586 
static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
/* availability is read-only (0444); logging is a write-only trigger (0200). */
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

/* Attributes exposed on the I/O subchannel (parent) device. */
static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

/* Attributes exposed on each ccw device. */
static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

/* NULL-terminated group list, installed via cdev->dev.groups. */
static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};
0623 
0624 static int match_dev_id(struct device *dev, const void *data)
0625 {
0626     struct ccw_device *cdev = to_ccwdev(dev);
0627     struct ccw_dev_id *dev_id = (void *)data;
0628 
0629     return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
0630 }
0631 
0632 /**
0633  * get_ccwdev_by_dev_id() - obtain device from a ccw device id
0634  * @dev_id: id of the device to be searched
0635  *
0636  * This function searches all devices attached to the ccw bus for a device
0637  * matching @dev_id.
0638  * Returns:
0639  *  If a device is found its reference count is increased and returned;
0640  *  else %NULL is returned.
0641  */
0642 struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
0643 {
0644     struct device *dev;
0645 
0646     dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
0647 
0648     return dev ? to_ccwdev(dev) : NULL;
0649 }
0650 EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
0651 
0652 static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
0653 {
0654     int ret;
0655 
0656     if (device_is_registered(&cdev->dev)) {
0657         device_release_driver(&cdev->dev);
0658         ret = device_attach(&cdev->dev);
0659         WARN_ON(ret == -ENODEV);
0660     }
0661 }
0662 
/*
 * Final release callback for a ccw device (invoked by the driver core
 * when the last reference is dropped): free the DMA area, destroy the
 * DMA pool, drop the parent subchannel reference taken at
 * initialization, and free the private data and the device itself.
 */
static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Free the DMA area before destroying the pool it came from. */
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}
0677 
/*
 * Allocate a ccw device and its private data for @sch and set up the
 * device's DMA masks, a per-device DMA pool and the DMA area.
 * Returns the new device or an ERR_PTR(); on error every partially
 * completed step is undone via the goto cleanup chain.
 */
static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;
	int ret;

	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto err_cdev;
	}
	/* GFP_DMA: private data must be in DMA-capable memory —
	 * NOTE(review): presumably because CCWs live in it; confirm. */
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private) {
		ret = -ENOMEM;
		goto err_priv;
	}

	/* Inherit the DMA constraints of the parent subchannel. */
	cdev->dev.dma_mask = sch->dev.dma_mask;
	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
	if (ret)
		goto err_coherent_mask;

	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool) {
		ret = -ENOMEM;
		goto err_dma_pool;
	}
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area) {
		ret = -ENOMEM;
		goto err_dma_area;
	}
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
err_coherent_mask:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(ret);
}
0724 
0725 static void ccw_device_todo(struct work_struct *work);
0726 
/*
 * Second stage of device creation: initialize @cdev's private data
 * and its embedded struct device, name it after the device id, take a
 * reference on the parent subchannel and attach the device to @sch.
 * On failure the reference from device_initialize() is dropped, which
 * frees @cdev through ccw_device_release().
 */
static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	/* The device id comes from the subchannel's view of the device. */
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	/* The device shares its lock with the owning subchannel. */
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.bus = &ccw_bus_type;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	/* Hold a reference on the parent subchannel while attached. */
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}
0771 
/*
 * Allocate and initialize a new ccw device for @sch.  Returns the
 * device or an ERR_PTR() if either stage fails.
 */
static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_allocate_dev(sch);
	int rc;

	if (IS_ERR(cdev))
		return cdev;

	rc = io_subchannel_initialize_dev(sch, cdev);
	if (rc)
		return ERR_PTR(rc);

	return cdev;
}
0785 
0786 static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
0787 
/*
 * Create a ccw device for a newly found I/O subchannel and start
 * recognition on it.  If creation fails, the subchannel itself is
 * unregistered again.
 */
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_create_ccwdev(sch);

	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}

	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}
0802 
/*
 * Register recognized device.
 *
 * Runs from process context.  Handles both first-time registration
 * and renewed recognition of an already registered (e.g. formerly
 * boxed) device, and balances the ccw_device_init_count taken in
 * io_subchannel_recog() where applicable.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		/* Re-recognition (see online_store_recog_and_online) did not
		 * bump ccw_device_init_count, so don't decrement it. */
		adjust_init_count = 0;
		goto out;
	}
	/* make it known to the system */
	ret = device_add(&cdev->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		/* Detach the device from its subchannel again. */
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}
0861 
/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		/* Still booting; just record that recognition finished. */
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
		/* fallthrough */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}
0891 
/*
 * Kick off asynchronous device recognition for @cdev on @sch.  The
 * init count is balanced again in io_subchannel_register() or
 * io_subchannel_recog_done().
 */
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}
0902 
/*
 * Move @cdev from its current parent subchannel to @sch.  Disables the
 * old subchannel if needed, performs the device_move() in the driver
 * core, keeps the parent reference counts balanced, and rolls back
 * (re-enabling the old subchannel) if the move fails.
 * Returns 0 on success, a negative error code otherwise.
 */
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		/* Let the css layer re-evaluate the now device-less sch. */
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}
0965 
0966 static int ccw_device_move_to_orph(struct ccw_device *cdev)
0967 {
0968     struct subchannel *sch = to_subchannel(cdev->dev.parent);
0969     struct channel_subsystem *css = to_css(sch->dev.parent);
0970 
0971     return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
0972 }
0973 
0974 static void io_subchannel_irq(struct subchannel *sch)
0975 {
0976     struct ccw_device *cdev;
0977 
0978     cdev = sch_get_cdev(sch);
0979 
0980     CIO_TRACE_EVENT(6, "IRQ");
0981     CIO_TRACE_EVENT(6, dev_name(&sch->dev));
0982     if (cdev)
0983         dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
0984     else
0985         inc_irq_stat(IRQIO_CIO);
0986 }
0987 
/*
 * Reset the desired subchannel configuration to defaults.
 * csense = 1 requests concurrent-sense reporting (NOTE(review): inferred
 * from the field name — confirm against the PMCW definition).
 */
void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}
0993 
0994 static void io_subchannel_init_fields(struct subchannel *sch)
0995 {
0996     if (cio_is_console(sch->schid))
0997         sch->opm = 0xff;
0998     else
0999         sch->opm = chp_get_sch_opm(sch);
1000     sch->lpm = sch->schib.pmcw.pam & sch->opm;
1001     sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1002 
1003     CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1004               " - PIM = %02X, PAM = %02X, POM = %02X\n",
1005               sch->schib.pmcw.dev, sch->schid.ssid,
1006               sch->schid.sch_no, sch->schib.pmcw.pim,
1007               sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1008 
1009     io_subchannel_init_config(sch);
1010 }
1011 
/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		/* Attribute-group failure is logged but not fatal here. */
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		* The console subchannel already has an associated ccw_device.
		* Register it and exit.
		*/
		cdev = sch_get_cdev(sch);
		rc = device_add(&cdev->dev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		/* Recognition of the console device is already done. */
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	/* Defer device recognition to the slow-path evaluation. */
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	/* On error, schedule unregistration instead of failing the probe
	 * (see the comment above the function). */
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}
1076 
1077 static void io_subchannel_remove(struct subchannel *sch)
1078 {
1079     struct io_subchannel_private *io_priv = to_io_private(sch);
1080     struct ccw_device *cdev;
1081 
1082     cdev = sch_get_cdev(sch);
1083     if (!cdev)
1084         goto out_free;
1085 
1086     ccw_device_unregister(cdev);
1087     spin_lock_irq(sch->lock);
1088     sch_set_cdev(sch, NULL);
1089     set_io_private(sch, NULL);
1090     spin_unlock_irq(sch->lock);
1091 out_free:
1092     dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1093               io_priv->dma_area, io_priv->dma_area_dma);
1094     kfree(io_priv);
1095     sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1096 }
1097 
1098 static void io_subchannel_verify(struct subchannel *sch)
1099 {
1100     struct ccw_device *cdev;
1101 
1102     cdev = sch_get_cdev(sch);
1103     if (cdev)
1104         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1105 }
1106 
/*
 * A channel path (@mask) to @sch went away. If I/O is currently running
 * over exactly that path, terminate it (kill_io for online devices,
 * cio_clear otherwise) and trigger path verification. If the subchannel
 * state cannot be read, flag the device not operational.
 */
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
1133 
/*
 * Handle a channel-path event for @sch: adjust the operational (opm) and
 * logical (lpm) path masks, record gone/new paths on an attached device
 * and trigger path termination or verification as appropriate. FCES
 * (Fibre Channel Endpoint Security) events are forwarded to the device
 * driver's path_event callback.
 * Returns 0 on success, -ENODEV if the subchannel became inoperative.
 */
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask, chpid, valid_bit;
	int path_event[8];

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;	/* Event does not concern this subchannel. */
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_FCES_EVENT:
		/* Forward Endpoint Security event */
		for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
				valid_bit >>= 1) {
			if (mask & valid_bit)
				path_event[chpid] = PE_PATH_FCES_EVENT;
			else
				path_event[chpid] = PE_NONE;
		}
		if (cdev && cdev->drv && cdev->drv->path_event)
			cdev->drv->path_event(cdev, path_event);
		break;
	}
	return 0;
}
1189 
/*
 * Stop all activity on @sch and disable the subchannel. While the
 * subchannel is busy (-EBUSY), repeatedly issue cancel/halt/clear and
 * wait for the device to leave the QUIESCE state, dropping the lock for
 * the wait. The console subchannel and already-disabled subchannels are
 * left untouched.
 */
static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	/* Tell the driver its outstanding I/O is gone. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			/* Retry via timeout; wait outside the lock. */
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}
1222 
/* Driver-model shutdown callback: quiesce any activity on @sch. */
static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}
1227 
1228 static int device_is_disconnected(struct ccw_device *cdev)
1229 {
1230     if (!cdev)
1231         return 0;
1232     return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1233         cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1234 }
1235 
/*
 * bus_for_each_dev() callback for the recovery pass: trigger path
 * verification for devices that are disconnected, or online with some
 * operational paths not yet in use (pam & opm != vpm). Sets *@data
 * (redo flag) when another recovery pass is needed.
 * Always returns 0 so the bus walk visits every device.
 */
static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		/* All usable paths already verified? Then we're done. */
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		/* Sense ID still in progress; check again next pass. */
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}
1264 
1265 static void recovery_work_func(struct work_struct *unused)
1266 {
1267     int redo = 0;
1268 
1269     bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1270     if (redo) {
1271         spin_lock_irq(&recovery_lock);
1272         if (!timer_pending(&recovery_timer)) {
1273             if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1274                 recovery_phase++;
1275             mod_timer(&recovery_timer, jiffies +
1276                   recovery_delay[recovery_phase] * HZ);
1277         }
1278         spin_unlock_irq(&recovery_lock);
1279     } else
1280         CIO_MSG_EVENT(3, "recovery: end\n");
1281 }
1282 
/* Deferred recovery pass, scheduled from timer context by recovery_func(). */
static DECLARE_WORK(recovery_work, recovery_work_func);
1284 
/* Recovery timer callback: defer the actual work to process context. */
static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}
1293 
/*
 * (Re-)start device recovery: reset the back-off phase to 0 and arm the
 * recovery timer with the shortest delay, unless a phase-0 run is
 * already pending.
 */
void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
1306 
/*
 * bus_for_each_dev() callback: schedule unregistration of @dev if it is
 * offline and on the cio blacklist. The onoff atomic is used as a
 * try-lock to avoid racing with concurrent online/offline processing.
 * Returns -EINTR to abort the bus walk on a pending signal, 0 otherwise.
 */
static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}
1330 
/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 * The walk is aborted early if the calling task has a signal pending
 * (see purge_fn()).
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}
1342 
/*
 * Mark @cdev disconnected and, if it was online, schedule the recovery
 * machinery to try to reconnect it. A NULL @cdev is tolerated.
 */
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	/* Stop any pending device timeout before changing state. */
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}
1353 
/* Mark @cdev not operational and disable its subchannel. */
void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}
1364 
/*
 * Actions decided on by sch_get_action() and carried out by
 * io_subchannel_sch_event().
 */
enum io_sch_action {
	IO_SCH_UNREG,		/* Unregister the subchannel. */
	IO_SCH_ORPH_UNREG,	/* Move device to orphanage, unregister sch. */
	IO_SCH_ATTACH,		/* Attach (create or move) a ccw device. */
	IO_SCH_UNREG_ATTACH,	/* Unregister old device, then attach. */
	IO_SCH_ORPH_ATTACH,	/* Orphan old device, then attach. */
	IO_SCH_REPROBE,		/* Trigger device recognition. */
	IO_SCH_VERIFY,		/* Trigger path verification. */
	IO_SCH_DISC,		/* Mark the device disconnected. */
	IO_SCH_NOP,		/* Nothing to do. */
};
1376 
/*
 * Decide what io_subchannel_sch_event() should do for @sch, based on the
 * current subchannel state and attached device. The checks are ordered
 * by priority: inoperative subchannel first, then missing device, device
 * number mismatch, no usable paths, and finally device-state checks.
 */
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		/* Device number changed underneath us. */
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		/* No usable path left. */
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}
1411 
/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	/* Skip unregistered subchannels and those with pending todo work;
	 * the todo handlers will pick up the new state themselves. */
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		/* Look for a matching device in the orphanage first. */
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}
1543 
1544 static void ccw_device_set_int_class(struct ccw_device *cdev)
1545 {
1546     struct ccw_driver *cdrv = cdev->drv;
1547 
1548     /* Note: we interpret class 0 in this context as an uninitialized
1549      * field since it translates to a non-I/O interrupt class. */
1550     if (cdrv->int_class != 0)
1551         cdev->private->int_class = cdrv->int_class;
1552     else
1553         cdev->private->int_class = IRQIO_CIO;
1554 }
1555 
1556 #ifdef CONFIG_CCW_CONSOLE
/*
 * Bring the console ccw device online during early boot: commit the
 * subchannel configuration, run device recognition and onlining while
 * busy-waiting for completion via ccw_device_wait_idle().
 * Returns 0 on success, a negative errno otherwise.
 */
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	/* Busy-wait for the onlining to finish as well. */
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}
1595 
/*
 * Create the ccw device for the console subchannel: probe the console
 * subchannel, allocate its I/O private data (including the DMA area)
 * and create the ccw device on top of it.
 * Returns the device, or an ERR_PTR on failure.
 */
struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* Undo everything; propagate the ERR_PTR from create. */
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}
1634 
/*
 * Tear down a console ccw device created by ccw_device_create_console():
 * free the DMA area and I/O private data and drop the subchannel and
 * device references.
 */
void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}
1647 
1648 /**
1649  * ccw_device_wait_idle() - busy wait for device to become idle
1650  * @cdev: ccw device
1651  *
1652  * Poll until activity control is zero, that is, no function or data
1653  * transfer is pending/active.
1654  * Called with device lock being held.
1655  */
1656 void ccw_device_wait_idle(struct ccw_device *cdev)
1657 {
1658     struct subchannel *sch = to_subchannel(cdev->dev.parent);
1659 
1660     while (1) {
1661         cio_tsch(sch);
1662         if (sch->schib.scsw.cmd.actl == 0)
1663             break;
1664         udelay(100);
1665     }
1666 }
1667 #endif
1668 
1669 /**
1670  * get_ccwdev_by_busid() - obtain device from a bus id
1671  * @cdrv: driver the device is owned by
1672  * @bus_id: bus id of the device to be searched
1673  *
1674  * This function searches all devices owned by @cdrv for a device with a bus
1675  * id matching @bus_id.
1676  * Returns:
1677  *  If a match is found, its reference count of the found device is increased
1678  *  and it is returned; else %NULL is returned.
1679  */
1680 struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1681                        const char *bus_id)
1682 {
1683     struct device *dev;
1684 
1685     dev = driver_find_device_by_name(&cdrv->driver, bus_id);
1686 
1687     return dev ? to_ccwdev(dev) : NULL;
1688 }
1689 
1690 /************************** device driver handling ************************/
1691 
1692 /* This is the implementation of the ccw_driver class. The probe, remove
1693  * and release methods are initially very similar to the device_driver
1694  * implementations, with the difference that they have ccw_device
1695  * arguments.
1696  *
1697  * A ccw driver also contains the information that is needed for
1698  * device matching.
1699  */
1700 static int
1701 ccw_device_probe (struct device *dev)
1702 {
1703     struct ccw_device *cdev = to_ccwdev(dev);
1704     struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1705     int ret;
1706 
1707     cdev->drv = cdrv; /* to let the driver call _set_online */
1708     ccw_device_set_int_class(cdev);
1709     ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1710     if (ret) {
1711         cdev->drv = NULL;
1712         cdev->private->int_class = IRQIO_CIO;
1713         return ret;
1714     }
1715 
1716     return 0;
1717 }
1718 
/*
 * Bus remove callback: let the driver detach, take the device offline if
 * it was online (waiting for the state machine to settle), unbind the
 * driver, quiesce the subchannel and disable channel measurement.
 */
static void ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		/* Wait for the offlining outside the lock. */
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);
}
1754 
1755 static void ccw_device_shutdown(struct device *dev)
1756 {
1757     struct ccw_device *cdev;
1758 
1759     cdev = to_ccwdev(dev);
1760     if (cdev->drv && cdev->drv->shutdown)
1761         cdev->drv->shutdown(cdev);
1762     __disable_cmf(cdev);
1763 }
1764 
/* The ccw bus type: matching, uevents and device lifecycle callbacks. */
static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};
1773 
1774 /**
1775  * ccw_driver_register() - register a ccw driver
1776  * @cdriver: driver to be registered
1777  *
1778  * This function is mainly a wrapper around driver_register().
1779  * Returns:
1780  *   %0 on success and a negative error value on failure.
1781  */
1782 int ccw_driver_register(struct ccw_driver *cdriver)
1783 {
1784     struct device_driver *drv = &cdriver->driver;
1785 
1786     drv->bus = &ccw_bus_type;
1787 
1788     return driver_register(drv);
1789 }
1790 
/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
1801 
/*
 * Work callback carrying out the operation recorded for a ccw device by
 * ccw_device_sched_todo(). Runs on cio_work_q in process context; the
 * todo is read and cleared under the ccwlock so a new todo can be
 * scheduled while this one executes.
 */
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		/* Like UNREG, but also re-evaluate the subchannel. */
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(sch->lock);
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}
1846 
/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	/* Higher enum values take priority over lower ones. */
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
1872 
/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem. It delegates to chsc_siosl() for the device's subchannel.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
1887 
1888 EXPORT_SYMBOL(ccw_device_set_online);
1889 EXPORT_SYMBOL(ccw_device_set_offline);
1890 EXPORT_SYMBOL(ccw_driver_register);
1891 EXPORT_SYMBOL(ccw_driver_unregister);
1892 EXPORT_SYMBOL(get_ccwdev_by_busid);