Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *    Copyright IBM Corp. 1999, 2010
0004  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
0005  *       Arnd Bergmann (arndb@de.ibm.com)
0006  *       Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
0007  */
0008 
0009 #include <linux/bug.h>
0010 #include <linux/workqueue.h>
0011 #include <linux/spinlock.h>
0012 #include <linux/export.h>
0013 #include <linux/sched.h>
0014 #include <linux/init.h>
0015 #include <linux/jiffies.h>
0016 #include <linux/wait.h>
0017 #include <linux/mutex.h>
0018 #include <linux/errno.h>
0019 #include <linux/slab.h>
0020 #include <asm/chpid.h>
0021 #include <asm/sclp.h>
0022 #include <asm/crw.h>
0023 
0024 #include "cio.h"
0025 #include "css.h"
0026 #include "ioasm.h"
0027 #include "cio_debug.h"
0028 #include "chp.h"
0029 
0030 #define to_channelpath(device) container_of(device, struct channel_path, dev)
0031 #define CHP_INFO_UPDATE_INTERVAL    1*HZ
0032 
0033 enum cfg_task_t {
0034     cfg_none,
0035     cfg_configure,
0036     cfg_deconfigure
0037 };
0038 
0039 /* Map for pending configure tasks. */
0040 static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
0041 static DEFINE_SPINLOCK(cfg_lock);
0042 
0043 /* Map for channel-path status. */
0044 static struct sclp_chp_info chp_info;
0045 static DEFINE_MUTEX(info_lock);
0046 
0047 /* Time after which channel-path status may be outdated. */
0048 static unsigned long chp_info_expires;
0049 
0050 static struct work_struct cfg_work;
0051 
0052 /* Wait queue for configure completion events. */
0053 static DECLARE_WAIT_QUEUE_HEAD(cfg_wait_queue);
0054 
0055 /* Set vary state for given chpid. */
0056 static void set_chp_logically_online(struct chp_id chpid, int onoff)
0057 {
0058     chpid_to_chp(chpid)->state = onoff;
0059 }
0060 
0061 /* On success return 0 if channel-path is varied offline, 1 if it is varied
0062  * online. Return -ENODEV if channel-path is not registered. */
0063 int chp_get_status(struct chp_id chpid)
0064 {
0065     return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
0066 }
0067 
0068 /**
0069  * chp_get_sch_opm - return opm for subchannel
0070  * @sch: subchannel
0071  *
0072  * Calculate and return the operational path mask (opm) based on the chpids
0073  * used by the subchannel and the status of the associated channel-paths.
0074  */
0075 u8 chp_get_sch_opm(struct subchannel *sch)
0076 {
0077     struct chp_id chpid;
0078     int opm;
0079     int i;
0080 
0081     opm = 0;
0082     chp_id_init(&chpid);
0083     for (i = 0; i < 8; i++) {
0084         opm <<= 1;
0085         chpid.id = sch->schib.pmcw.chpid[i];
0086         if (chp_get_status(chpid) != 0)
0087             opm |= 1;
0088     }
0089     return opm;
0090 }
0091 EXPORT_SYMBOL_GPL(chp_get_sch_opm);
0092 
0093 /**
0094  * chp_is_registered - check if a channel-path is registered
0095  * @chpid: channel-path ID
0096  *
0097  * Return non-zero if a channel-path with the given chpid is registered,
0098  * zero otherwise.
0099  */
0100 int chp_is_registered(struct chp_id chpid)
0101 {
0102     return chpid_to_chp(chpid) != NULL;
0103 }
0104 
0105 /*
0106  * Function: s390_vary_chpid
0107  * Varies the specified chpid online or offline
0108  */
0109 static int s390_vary_chpid(struct chp_id chpid, int on)
0110 {
0111     char dbf_text[15];
0112     int status;
0113 
0114     sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
0115         chpid.id);
0116     CIO_TRACE_EVENT(2, dbf_text);
0117 
0118     status = chp_get_status(chpid);
0119     if (!on && !status)
0120         return 0;
0121 
0122     set_chp_logically_online(chpid, on);
0123     chsc_chp_vary(chpid, on);
0124     return 0;
0125 }
0126 
0127 /*
0128  * Channel measurement related functions
0129  */
0130 static ssize_t chp_measurement_chars_read(struct file *filp,
0131                       struct kobject *kobj,
0132                       struct bin_attribute *bin_attr,
0133                       char *buf, loff_t off, size_t count)
0134 {
0135     struct channel_path *chp;
0136     struct device *device;
0137 
0138     device = kobj_to_dev(kobj);
0139     chp = to_channelpath(device);
0140     if (chp->cmg == -1)
0141         return 0;
0142 
0143     return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
0144                        sizeof(chp->cmg_chars));
0145 }
0146 
/* Owner-readable binary sysfs attribute exposing the raw channel
 * measurement characteristics (struct cmg_chars) of a channel path. */
static const struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};
0155 
/*
 * Copy the measurement block for @chpid from the channel-subsystem
 * measurement area into @buf. Blocks for chpids 0-127 live in cub_addr1,
 * 128-255 in cub_addr2. The block may be updated concurrently, so keep
 * re-reading until two consecutive copies agree on the first word, which
 * serves as a consistency check.
 */
static void chp_measurement_copy_block(struct cmg_entry *buf,
				       struct channel_subsystem *css,
				       struct chp_id chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid.id < 128) {
		area = css->cub_addr1;
		idx = chpid.id;
	} else {
		area = css->cub_addr2;
		idx = chpid.id - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}
0177 
0178 static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
0179                     struct bin_attribute *bin_attr,
0180                     char *buf, loff_t off, size_t count)
0181 {
0182     struct channel_path *chp;
0183     struct channel_subsystem *css;
0184     struct device *device;
0185     unsigned int size;
0186 
0187     device = kobj_to_dev(kobj);
0188     chp = to_channelpath(device);
0189     css = to_css(chp->dev.parent);
0190 
0191     size = sizeof(struct cmg_entry);
0192 
0193     /* Only allow single reads. */
0194     if (off || count < size)
0195         return 0;
0196     chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
0197     count = size;
0198     return count;
0199 }
0200 
/* Owner-readable binary sysfs attribute exposing one measurement block
 * (struct cmg_entry) of a channel path. */
static const struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};
0209 
/* Remove both channel-measurement sysfs attributes from @chp's device. */
void chp_remove_cmg_attr(struct channel_path *chp)
{
	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}
0215 
0216 int chp_add_cmg_attr(struct channel_path *chp)
0217 {
0218     int ret;
0219 
0220     ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
0221     if (ret)
0222         return ret;
0223     ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
0224     if (ret)
0225         device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
0226     return ret;
0227 }
0228 
0229 /*
0230  * Files for the channel path entries.
0231  */
0232 static ssize_t chp_status_show(struct device *dev,
0233                    struct device_attribute *attr, char *buf)
0234 {
0235     struct channel_path *chp = to_channelpath(dev);
0236     int status;
0237 
0238     mutex_lock(&chp->lock);
0239     status = chp->state;
0240     mutex_unlock(&chp->lock);
0241 
0242     return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
0243 }
0244 
/* Vary a channel path online ("on"/"1") or offline ("off"/"0") via sysfs. */
static ssize_t chp_status_write(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct channel_path *cp = to_channelpath(dev);
	char cmd[10];
	int num_args;
	int error;

	/* Only the first whitespace-delimited word (at most 5 chars) is
	 * parsed; empty input is silently accepted. */
	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();

	/* The vary operation runs under the per-chp lock. */
	if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 1);
		mutex_unlock(&cp->lock);
	} else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 0);
		mutex_unlock(&cp->lock);
	} else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
0276 
0277 static ssize_t chp_configure_show(struct device *dev,
0278                   struct device_attribute *attr, char *buf)
0279 {
0280     struct channel_path *cp;
0281     int status;
0282 
0283     cp = to_channelpath(dev);
0284     status = chp_info_get_status(cp->chpid);
0285     if (status < 0)
0286         return status;
0287 
0288     return sysfs_emit(buf, "%d\n", status);
0289 }
0290 
static int cfg_wait_idle(void);

/*
 * Parse "0" (deconfigure) or "1" (configure), schedule the request and
 * wait interruptibly until the configure machinery is idle again.
 */
static ssize_t chp_configure_write(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct channel_path *cp;
	int val;
	char delim;

	/* Reject trailing garbage: a second conversion must not match. */
	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cp = to_channelpath(dev);
	chp_cfg_schedule(cp->chpid, val);
	/* Best effort: an interrupted wait still leaves the request
	 * scheduled, so the return value is deliberately ignored. */
	cfg_wait_idle();

	return count;
}

static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
0313 
0314 static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
0315                  char *buf)
0316 {
0317     struct channel_path *chp = to_channelpath(dev);
0318     u8 type;
0319 
0320     mutex_lock(&chp->lock);
0321     type = chp->desc.desc;
0322     mutex_unlock(&chp->lock);
0323     return sprintf(buf, "%x\n", type);
0324 }
0325 
0326 static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
0327 
0328 static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
0329                 char *buf)
0330 {
0331     struct channel_path *chp = to_channelpath(dev);
0332 
0333     if (!chp)
0334         return 0;
0335     if (chp->cmg == -1) /* channel measurements not available */
0336         return sprintf(buf, "unknown\n");
0337     return sprintf(buf, "%x\n", chp->cmg);
0338 }
0339 
0340 static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
0341 
0342 static ssize_t chp_shared_show(struct device *dev,
0343                    struct device_attribute *attr, char *buf)
0344 {
0345     struct channel_path *chp = to_channelpath(dev);
0346 
0347     if (!chp)
0348         return 0;
0349     if (chp->shared == -1) /* channel measurements not available */
0350         return sprintf(buf, "unknown\n");
0351     return sprintf(buf, "%x\n", chp->shared);
0352 }
0353 
0354 static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
0355 
0356 static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
0357                  char *buf)
0358 {
0359     struct channel_path *chp = to_channelpath(dev);
0360     ssize_t rc;
0361 
0362     mutex_lock(&chp->lock);
0363     if (chp->desc_fmt1.flags & 0x10)
0364         rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
0365     else
0366         rc = 0;
0367     mutex_unlock(&chp->lock);
0368 
0369     return rc;
0370 }
0371 static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
0372 
0373 static ssize_t chp_chid_external_show(struct device *dev,
0374                       struct device_attribute *attr, char *buf)
0375 {
0376     struct channel_path *chp = to_channelpath(dev);
0377     ssize_t rc;
0378 
0379     mutex_lock(&chp->lock);
0380     if (chp->desc_fmt1.flags & 0x10)
0381         rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
0382     else
0383         rc = 0;
0384     mutex_unlock(&chp->lock);
0385 
0386     return rc;
0387 }
0388 static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
0389 
0390 static ssize_t chp_esc_show(struct device *dev,
0391                 struct device_attribute *attr, char *buf)
0392 {
0393     struct channel_path *chp = to_channelpath(dev);
0394     ssize_t rc;
0395 
0396     mutex_lock(&chp->lock);
0397     rc = sprintf(buf, "%x\n", chp->desc_fmt1.esc);
0398     mutex_unlock(&chp->lock);
0399 
0400     return rc;
0401 }
0402 static DEVICE_ATTR(esc, 0444, chp_esc_show, NULL);
0403 
/* sysfs read for "util_string": copy the utility string from the fmt3
 * channel-path description, guarded by the per-chp lock. */
static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
	ssize_t rc;

	mutex_lock(&chp->lock);
	rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
				     sizeof(chp->desc_fmt3.util_str));
	mutex_unlock(&chp->lock);

	return rc;
}
/* Read-only binary attribute, sized to match the util_str field. */
static BIN_ATTR_RO(util_string,
		   sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
0420 
/* Binary sysfs attributes attached to every channel-path device. */
static struct bin_attribute *chp_bin_attrs[] = {
	&bin_attr_util_string,
	NULL,
};

/* Plain-text sysfs attributes attached to every channel-path device. */
static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_configure.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	&dev_attr_chid.attr,
	&dev_attr_chid_external.attr,
	&dev_attr_esc.attr,
	NULL,
};
static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
	.bin_attrs = chp_bin_attrs,
};
/* Default attribute groups, hooked up via chp->dev.groups in chp_new(). */
static const struct attribute_group *chp_attr_groups[] = {
	&chp_attr_group,
	NULL,
};
0445 
/* Device release callback: free the channel_path when the last reference
 * to its embedded struct device is dropped. */
static void chp_release(struct device *dev)
{
	kfree(to_channelpath(dev));
}
0453 
0454 /**
0455  * chp_update_desc - update channel-path description
0456  * @chp: channel-path
0457  *
0458  * Update the channel-path description of the specified channel-path
0459  * including channel measurement related information.
0460  * Return zero on success, non-zero otherwise.
0461  */
0462 int chp_update_desc(struct channel_path *chp)
0463 {
0464     int rc;
0465 
0466     rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
0467     if (rc)
0468         return rc;
0469 
0470     /*
0471      * Fetching the following data is optional. Not all machines or
0472      * hypervisors implement the required chsc commands.
0473      */
0474     chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
0475     chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
0476     chsc_get_channel_measurement_chars(chp);
0477 
0478     return 0;
0479 }
0480 
/**
 * chp_new - register a new channel-path
 * @chpid: channel-path ID
 *
 * Create and register data structure representing new channel-path. Return
 * zero on success, non-zero otherwise. Registration of an already
 * registered chpid is treated as success. Runs under the css mutex.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_subsystem *css = css_by_id(chpid.cssid);
	struct channel_path *chp;
	int ret = 0;

	mutex_lock(&css->mutex);
	/* Already registered: nothing to do, report success. */
	if (chp_is_registered(chpid))
		goto out;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp) {
		ret = -ENOMEM;
		goto out;
	}
	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &css->device;
	chp->dev.groups = chp_attr_groups;
	chp->dev.release = chp_release;
	mutex_init(&chp->lock);

	/* Obtain channel path description and fill it in. */
	ret = chp_update_desc(chp);
	if (ret)
		goto out_free;
	/* Bit 0x80 of the fmt0 flags must be set for a usable path. */
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		/* After device_register() failure the device must be
		 * released via put_device(), not kfree(). */
		put_device(&chp->dev);
		goto out;
	}

	if (css->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			device_unregister(&chp->dev);
			goto out;
		}
	}
	css->chps[chpid.id] = chp;
	goto out;
out_free:
	kfree(chp);
out:
	mutex_unlock(&css->mutex);
	return ret;
}
0545 
0546 /**
0547  * chp_get_chp_desc - return newly allocated channel-path description
0548  * @chpid: channel-path ID
0549  *
0550  * On success return a newly allocated copy of the channel-path description
0551  * data associated with the given channel-path ID. Return %NULL on error.
0552  */
0553 struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
0554 {
0555     struct channel_path *chp;
0556     struct channel_path_desc_fmt0 *desc;
0557 
0558     chp = chpid_to_chp(chpid);
0559     if (!chp)
0560         return NULL;
0561     desc = kmalloc(sizeof(*desc), GFP_KERNEL);
0562     if (!desc)
0563         return NULL;
0564 
0565     mutex_lock(&chp->lock);
0566     memcpy(desc, &chp->desc, sizeof(*desc));
0567     mutex_unlock(&chp->lock);
0568     return desc;
0569 }
0570 
0571 /**
0572  * chp_process_crw - process channel-path status change
0573  * @crw0: channel report-word to handler
0574  * @crw1: second channel-report word (always NULL)
0575  * @overflow: crw overflow indication
0576  *
0577  * Handle channel-report-words indicating that the status of a channel-path
0578  * has changed.
0579  */
0580 static void chp_process_crw(struct crw *crw0, struct crw *crw1,
0581                 int overflow)
0582 {
0583     struct chp_id chpid;
0584 
0585     if (overflow) {
0586         css_schedule_eval_all();
0587         return;
0588     }
0589     CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
0590               "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
0591               crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
0592               crw0->erc, crw0->rsid);
0593     /*
0594      * Check for solicited machine checks. These are
0595      * created by reset channel path and need not be
0596      * handled here.
0597      */
0598     if (crw0->slct) {
0599         CIO_CRW_EVENT(2, "solicited machine check for "
0600                   "channel path %02X\n", crw0->rsid);
0601         return;
0602     }
0603     chp_id_init(&chpid);
0604     chpid.id = crw0->rsid;
0605     switch (crw0->erc) {
0606     case CRW_ERC_IPARM: /* Path has come. */
0607     case CRW_ERC_INIT:
0608         chp_new(chpid);
0609         chsc_chp_online(chpid);
0610         break;
0611     case CRW_ERC_PERRI: /* Path has gone. */
0612     case CRW_ERC_PERRN:
0613         chsc_chp_offline(chpid);
0614         break;
0615     default:
0616         CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
0617                   crw0->erc);
0618     }
0619 }
0620 
0621 int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
0622 {
0623     int i;
0624     int mask;
0625 
0626     for (i = 0; i < 8; i++) {
0627         mask = 0x80 >> i;
0628         if (!(ssd->path_mask & mask))
0629             continue;
0630         if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
0631             continue;
0632         if ((ssd->fla_valid_mask & mask) &&
0633             ((ssd->fla[i] & link->fla_mask) != link->fla))
0634             continue;
0635         return mask;
0636     }
0637     return 0;
0638 }
0639 EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
0640 
0641 static inline int info_bit_num(struct chp_id id)
0642 {
0643     return id.id + id.cssid * (__MAX_CHPID + 1);
0644 }
0645 
/* Force a chp_info refresh on the next call to info_update(). */
static void info_expire(void)
{
	mutex_lock(&info_lock);
	/* Move the expiry time into the past. */
	chp_info_expires = jiffies - 1;
	mutex_unlock(&info_lock);
}
0653 
0654 /* Ensure that chp_info is up-to-date. */
0655 static int info_update(void)
0656 {
0657     int rc;
0658 
0659     mutex_lock(&info_lock);
0660     rc = 0;
0661     if (time_after(jiffies, chp_info_expires)) {
0662         /* Data is too old, update. */
0663         rc = sclp_chp_read_info(&chp_info);
0664         chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ;
0665     }
0666     mutex_unlock(&info_lock);
0667 
0668     return rc;
0669 }
0670 
/**
 * chp_info_get_status - retrieve configure status of a channel-path
 * @chpid: channel-path ID
 *
 * On success, return 0 for standby, 1 for configured, 2 for reserved,
 * 3 for not recognized. Return negative error code on error.
 */
int chp_info_get_status(struct chp_id chpid)
{
	int rc;
	int bit;

	/* Refresh the cached SCLP info if it has expired. */
	rc = info_update();
	if (rc)
		return rc;

	bit = info_bit_num(chpid);
	mutex_lock(&info_lock);
	/* Order matters: configured takes precedence over standby,
	 * anything recognized but neither is reserved. */
	if (!chp_test_bit(chp_info.recognized, bit))
		rc = CHP_STATUS_NOT_RECOGNIZED;
	else if (chp_test_bit(chp_info.configured, bit))
		rc = CHP_STATUS_CONFIGURED;
	else if (chp_test_bit(chp_info.standby, bit))
		rc = CHP_STATUS_STANDBY;
	else
		rc = CHP_STATUS_RESERVED;
	mutex_unlock(&info_lock);

	return rc;
}
0701 
/* Return pending configure task for chpid. All callers hold cfg_lock. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
	return chp_cfg_task[chpid.cssid][chpid.id];
}
0707 
/* Set configure task for chpid. All callers hold cfg_lock. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
0713 
0714 /* Fetch the first configure task. Set chpid accordingly. */
0715 static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
0716 {
0717     enum cfg_task_t t = cfg_none;
0718 
0719     chp_id_for_each(chpid) {
0720         t = cfg_get_task(*chpid);
0721         if (t != cfg_none)
0722             break;
0723     }
0724 
0725     return t;
0726 }
0727 
/* Perform one configure/deconfigure request. Reschedule work function until
 * last request. */
static void cfg_func(struct work_struct *work)
{
	struct chp_id chpid;
	enum cfg_task_t t;
	int rc;

	/* Fetch (but do not yet clear) the next pending task. */
	spin_lock(&cfg_lock);
	t = chp_cfg_fetch_task(&chpid);
	spin_unlock(&cfg_lock);

	switch (t) {
	case cfg_configure:
		rc = sclp_chp_configure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			/* Configure state changed: invalidate cached info. */
			info_expire();
			chsc_chp_online(chpid);
		}
		break;
	case cfg_deconfigure:
		rc = sclp_chp_deconfigure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			/* Configure state changed: invalidate cached info. */
			info_expire();
			chsc_chp_offline(chpid);
		}
		break;
	case cfg_none:
		/* Get updated information after last change. */
		info_update();
		/* All tasks drained: release cfg_wait_idle() waiters. */
		wake_up_interruptible(&cfg_wait_queue);
		return;
	}
	/* Only clear the task if it was not replaced meanwhile. */
	spin_lock(&cfg_lock);
	if (t == cfg_get_task(chpid))
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
	schedule_work(&cfg_work);
}
0773 
/**
 * chp_cfg_schedule - schedule chpid configuration request
 * @chpid: channel-path ID
 * @configure: Non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration/deconfiguration request. A newer
 * request for the same chpid replaces any pending one; the actual work is
 * performed asynchronously by cfg_func().
 */
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
		      configure);
	spin_lock(&cfg_lock);
	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
	spin_unlock(&cfg_lock);
	schedule_work(&cfg_work);
}
0790 
/**
 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
 * @chpid: channel-path ID
 *
 * Cancel an active channel-path deconfiguration request if it has not yet
 * been performed. Pending configure requests are left untouched.
 */
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
	spin_lock(&cfg_lock);
	if (cfg_get_task(chpid) == cfg_deconfigure)
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
}
0806 
0807 static bool cfg_idle(void)
0808 {
0809     struct chp_id chpid;
0810     enum cfg_task_t t;
0811 
0812     spin_lock(&cfg_lock);
0813     t = chp_cfg_fetch_task(&chpid);
0814     spin_unlock(&cfg_lock);
0815 
0816     return t == cfg_none;
0817 }
0818 
0819 static int cfg_wait_idle(void)
0820 {
0821     if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
0822         return -ERESTARTSYS;
0823     return 0;
0824 }
0825 
/* Initialize the channel-path layer: register the CRW handler, set up the
 * configure work item and register devices for all channel-paths that are
 * already configured or in standby. */
static int __init chp_init(void)
{
	struct chp_id chpid;
	int state, ret;

	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
	if (ret)
		return ret;
	INIT_WORK(&cfg_work, cfg_func);
	/* No channel-path info available: nothing to register yet. */
	if (info_update())
		return 0;
	/* Register available channel-paths. */
	chp_id_for_each(&chpid) {
		state = chp_info_get_status(chpid);
		if (state == CHP_STATUS_CONFIGURED ||
		    state == CHP_STATUS_STANDBY)
			chp_new(chpid);
	}

	return 0;
}

subsys_initcall(chp_init);