// SPDX-License-Identifier: GPL-2.0
/*
 *  Handling of internal CCW device requests.
 *
 *    Copyright IBM Corp. 2009, 2011
 *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
    while (lpm && ((lpm & mask) == 0))
        lpm >>= 1;
    return lpm;
}
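
/*
 * Illustrative example (not part of the driver): with lpm = 0x80 and
 * mask = 0x08, the set bit is shifted right four times until it lines up
 * with the available path, so lpm_adjust(0x80, 0x08) returns 0x08; with
 * mask = 0x00 the loop runs lpm all the way down to zero.
 */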

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
    struct ccw_request *req = &cdev->private->req;

    if (!req->singlepath) {
        req->mask = 0;
        goto out;
    }
    req->retries    = req->maxretries;
    req->mask   = lpm_adjust(req->mask >> 1, req->lpm);
out:
    return req->mask;
}
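
/*
 * Example (illustrative): for a single-path request with req->lpm = 0xC0
 * and req->mask = 0x80, the next call yields lpm_adjust(0x40, 0xC0) = 0x40;
 * one more call exhausts the mask and returns 0. Multi-path requests are
 * started on all paths at once, so there is no "next" path to advance to.
 */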

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
    struct ccw_request *req = &cdev->private->req;

    if (req->done)
        return;
    req->done = 1;
    ccw_device_set_timeout(cdev, 0);
    memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
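    /*
     * Prefer the deferred return code stored in req->drc (set to -ETIME
     * by the timeout handler once all paths are exhausted) over other
     * transient errors; -ENODEV is kept as-is since it marks a permanent
     * device error.
     */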
    if (rc && rc != -ENODEV && req->drc)
        rc = req->drc;
    req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
    struct ccw_request *req = &cdev->private->req;
    struct subchannel *sch = to_subchannel(cdev->dev.parent);
    struct ccw1 *cp = req->cp;
    int rc = -EACCES;

    while (req->mask) {
        if (req->retries-- == 0) {
            /* Retries exhausted, try next path. */
            ccwreq_next_path(cdev);
            continue;
        }
        /* Perform start function. */
        memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
        rc = cio_start(sch, cp, (u8) req->mask);
        if (rc == 0) {
            /* I/O started successfully. */
            ccw_device_set_timeout(cdev, req->timeout);
            return;
        }
        if (rc == -ENODEV) {
            /* Permanent device error. */
            break;
        }
        if (rc == -EACCES) {
            /* Permanent path error. */
            ccwreq_next_path(cdev);
            continue;
        }
        /* Temporary improper status. */
        rc = cio_clear(sch);
        if (rc)
            break;
        return;
    }
    ccwreq_stop(cdev, rc);
}
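
/*
 * Note on retry accounting (derived from the loop above): req->retries is
 * reset to req->maxretries each time ccwreq_next_path() advances the mask,
 * so every path gets up to req->maxretries start attempts before the next
 * path is tried.
 */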

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
    struct ccw_request *req = &cdev->private->req;

    if (req->singlepath) {
        /* Try all paths twice to counter link flapping. */
        req->mask = 0x8080;
    } else
        req->mask = req->lpm;

    req->retries    = req->maxretries;
    req->mask   = lpm_adjust(req->mask, req->lpm);
    req->drc    = 0;
    req->done   = 0;
    req->cancel = 0;
    if (!req->mask)
        goto out_nopath;
    ccwreq_do(cdev);
    return;

out_nopath:
    ccwreq_stop(cdev, -EACCES);
}
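
/*
 * Why 0x8080 works (illustrative walk-through): only the low byte of
 * req->mask is handed to cio_start(), while ccwreq_next_path() shifts the
 * whole 16-bit mask right. The duplicate bit in the high byte therefore
 * drifts down into the low byte and revisits each path a second time.
 * E.g. with req->lpm = 0xC0 the successive (u8) start masks are 0x80,
 * 0x40, 0x80, 0x40, then 0; every available path is tried twice.
 */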

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
    struct subchannel *sch = to_subchannel(cdev->dev.parent);
    struct ccw_request *req = &cdev->private->req;
    int rc;

    if (req->done)
        return 1;
    req->cancel = 1;
    rc = cio_clear(sch);
    if (rc)
        ccwreq_stop(cdev, rc);
    return 0;
}
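
/*
 * Note (derived from ccw_request_handler() below): a successful cio_clear()
 * does not complete the request by itself; the resulting clear interrupt is
 * classified as IO_KILLED and, since req->cancel is set, the request is
 * then stopped with -EIO.
 */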

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
    struct irb *irb = &cdev->private->dma_area->irb;
    struct cmd_scsw *scsw = &irb->scsw.cmd;
    enum uc_todo todo;

    /* Perform BASIC SENSE if needed. */
    if (ccw_device_accumulate_and_sense(cdev, lcirb))
        return IO_RUNNING;
    /* Check for halt/clear interrupt. */
    if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
        return IO_KILLED;
    /* Check for path error. */
    if (scsw->cc == 3 || scsw->pno)
        return IO_PATH_ERROR;
    /* Handle BASIC SENSE data. */
    if (irb->esw.esw0.erw.cons) {
        CIO_TRACE_EVENT(2, "sensedata");
        CIO_HEX_EVENT(2, &cdev->private->dev_id,
                  sizeof(struct ccw_dev_id));
        CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
                  SENSE_MAX_COUNT);
        /* Check for command reject. */
        if (irb->ecw[0] & SNS0_CMD_REJECT)
            return IO_REJECTED;
        /* Ask the driver what to do */
        if (cdev->drv && cdev->drv->uc_handler) {
            todo = cdev->drv->uc_handler(cdev, lcirb);
            CIO_TRACE_EVENT(2, "uc_response");
            CIO_HEX_EVENT(2, &todo, sizeof(todo));
            switch (todo) {
            case UC_TODO_RETRY:
                return IO_STATUS_ERROR;
            case UC_TODO_RETRY_ON_NEW_PATH:
                return IO_PATH_ERROR;
            case UC_TODO_STOP:
                return IO_REJECTED;
            default:
                return IO_STATUS_ERROR;
            }
        }
        /* Assume that unexpected SENSE data implies an error. */
        return IO_STATUS_ERROR;
    }
    /* Check for channel errors. */
    if (scsw->cstat != 0)
        return IO_STATUS_ERROR;
    /* Check for device errors. */
    if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
        return IO_STATUS_ERROR;
    /* Check for final state. */
    if (!(scsw->dstat & DEV_STAT_DEV_END))
        return IO_RUNNING;
    /* Check for other improper status. */
    if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
        return IO_STATUS_ERROR;
    return IO_DONE;
}
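
/*
 * Illustrative mapping of the uc_handler contract above: a device driver
 * that returns UC_TODO_RETRY_ON_NEW_PATH from its uc_handler makes the
 * unit check count as IO_PATH_ERROR, i.e. the request is restarted on the
 * next available path, while UC_TODO_STOP maps the unit check to
 * IO_REJECTED and terminates the request.
 */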

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
    struct ccw_request *req = &cdev->private->req;
    struct {
        struct ccw_dev_id dev_id;
        u16 retries;
        u8 lpm;
        u8 status;
    }  __attribute__ ((packed)) data;
    data.dev_id = cdev->private->dev_id;
    data.retries    = req->retries;
    data.lpm    = (u8) req->mask;
    data.status = (u8) status;
    CIO_TRACE_EVENT(2, "reqstat");
    CIO_HEX_EVENT(2, &data, sizeof(data));
}

/**
 * ccw_request_handler - interrupt handler for I/O request procedure.
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
    struct irb *irb = this_cpu_ptr(&cio_irb);
    struct ccw_request *req = &cdev->private->req;
    enum io_status status;
    int rc = -EOPNOTSUPP;

    /* Check status of I/O request. */
    status = ccwreq_status(cdev, irb);
    if (req->filter)
        status = req->filter(cdev, req->data, irb, status);
    if (status != IO_RUNNING)
        ccw_device_set_timeout(cdev, 0);
    if (status != IO_DONE && status != IO_RUNNING)
        ccwreq_log_status(cdev, status);
    switch (status) {
    case IO_DONE:
        break;
    case IO_RUNNING:
        return;
    case IO_REJECTED:
        goto err;
    case IO_PATH_ERROR:
        goto out_next_path;
    case IO_STATUS_ERROR:
        goto out_restart;
    case IO_KILLED:
        /* Check if request was cancelled on purpose. */
        if (req->cancel) {
            rc = -EIO;
            goto err;
        }
        goto out_restart;
    }
    /* Check back with request initiator. */
    if (!req->check)
        goto out;
    switch (req->check(cdev, req->data)) {
    case 0:
        break;
    case -EAGAIN:
        goto out_restart;
    case -EACCES:
        goto out_next_path;
    default:
        goto err;
    }
out:
    ccwreq_stop(cdev, 0);
    return;

out_next_path:
    /* Try next path and restart I/O. */
    if (!ccwreq_next_path(cdev)) {
        rc = -EACCES;
        goto err;
    }
out_restart:
    /* Restart. */
    ccwreq_do(cdev);
    return;
err:
    ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
    struct subchannel *sch = to_subchannel(cdev->dev.parent);
    struct ccw_request *req = &cdev->private->req;
    int rc = -ENODEV, chp;

    if (cio_update_schib(sch))
        goto err;

    for (chp = 0; chp < 8; chp++) {
        if ((0x80 >> chp) & sch->schib.pmcw.lpum)
            pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
                dev_name(&cdev->dev), req->timeout / HZ,
                scsw_cstat(&sch->schib.scsw),
                scsw_dstat(&sch->schib.scsw),
                sch->schid.cssid,
                sch->schib.pmcw.chpid[chp]);
    }

    if (!ccwreq_next_path(cdev)) {
        /* set the final return code for this request */
        req->drc = -ETIME;
    }
    rc = cio_clear(sch);
    if (rc)
        goto err;
    return;

err:
    ccwreq_stop(cdev, rc);
}
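
/*
 * Example (illustrative): if the last-path-used mask (lpum) is 0x40, only
 * chp = 1 passes the (0x80 >> chp) test, so the warning names the channel
 * path in sch->schib.pmcw.chpid[1]. The cio_clear() here only initiates
 * the clear; final completion is again reported through
 * ccw_request_handler().
 */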

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
    ccwreq_stop(cdev, -ENODEV);
}