// SPDX-License-Identifier: GPL-2.0
/*
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *       Michael Holzheu <holzheu@de.ibm.com>
 *       Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *       Martin Schwidefsky <schwidefsky@de.ibm.com>
 *       Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>      // for kernel parameters
#include <linux/kmod.h>      // for requesting modules
#include <linux/spinlock.h>  // for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>       // for variable types

#define TAPE_DBF_AREA   tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(struct timer_list *t);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock below.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
    [TS_UNUSED]   = "UNUSED",
    [TS_IN_USE]   = "IN_USE",
    [TS_BLKUSE]   = "BLKUSE",
    [TS_INIT]     = "INIT  ",
    [TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
    [TO_BLOCK] = "BLK", [TO_BSB] = "BSB",
    [TO_BSF] = "BSF",   [TO_DSE] = "DSE",
    [TO_FSB] = "FSB",   [TO_FSF] = "FSF",
    [TO_LBL] = "LBL",   [TO_NOP] = "NOP",
    [TO_RBA] = "RBA",   [TO_RBI] = "RBI",
    [TO_RFO] = "RFO",   [TO_REW] = "REW",
    [TO_RUN] = "RUN",   [TO_WRI] = "WRI",
    [TO_WTM] = "WTM",   [TO_MSEN] = "MSN",
    [TO_LOAD] = "LOA",  [TO_READ_CONFIG] = "RCF",
    [TO_READ_ATTMSG] = "RAT",
    [TO_DIS] = "DIS",   [TO_ASSIGN] = "ASS",
    [TO_UNASSIGN] = "UAS",  [TO_CRYPT_ON] = "CON",
    [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
    [TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
    return dev_id->devno + (dev_id->ssid << 16);
}
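
/*
 * devid_to_int() above packs the subchannel set id and device number
 * into one int, devno in the low 16 bits and ssid above them: e.g.
 * ssid 1, devno 0x0181 yields 0x00010181.
 */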

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attributes should
 *        be replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tape_device *tdev;

    tdev = dev_get_drvdata(dev);
    return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tape_device *tdev;

    tdev = dev_get_drvdata(dev);
    return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tape_device *tdev;

    tdev = dev_get_drvdata(dev);
    return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
        "OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tape_device *tdev;
    ssize_t rc;

    tdev = dev_get_drvdata(dev);
    if (tdev->first_minor < 0)
        return scnprintf(buf, PAGE_SIZE, "N/A\n");

    spin_lock_irq(get_ccwdev_lock(tdev->cdev));
    if (list_empty(&tdev->req_queue))
        rc = scnprintf(buf, PAGE_SIZE, "---\n");
    else {
        struct tape_request *req;

        req = list_entry(tdev->req_queue.next, struct tape_request,
            list);
        rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
    }
    spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
    return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tape_device *tdev;

    tdev = dev_get_drvdata(dev);

    return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
    &dev_attr_medium_state.attr,
    &dev_attr_first_minor.attr,
    &dev_attr_state.attr,
    &dev_attr_operation.attr,
    &dev_attr_blocksize.attr,
    NULL
};

static const struct attribute_group tape_attr_group = {
    .attrs = tape_attrs,
};
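
/*
 * The group above is created on the ccw device's sysfs directory by
 * tape_generic_probe(), so the attributes can be read for example as
 * follows (the bus id 0.0.0181 is just a hypothetical device):
 *
 *    $ cat /sys/bus/ccw/devices/0.0.0181/operation
 *    ---
 *
 * medium_state prints the raw enum tape_medium_state value.
 */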

/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
    const char *str;

    if (device->tape_state == TS_NOT_OPER) {
        DBF_EVENT(3, "ts_set err: not oper\n");
        return;
    }
    DBF_EVENT(4, "ts. dev:  %x\n", device->first_minor);
    DBF_EVENT(4, "old ts:\t\n");
    if (device->tape_state < TS_SIZE && device->tape_state >= 0)
        str = tape_state_verbose[device->tape_state];
    else
        str = "UNKNOWN TS";
    DBF_EVENT(4, "%s\n", str);
    DBF_EVENT(4, "new ts:\t\n");
    if (newstate < TS_SIZE && newstate >= 0)
        str = tape_state_verbose[newstate];
    else
        str = "UNKNOWN TS";
    DBF_EVENT(4, "%s\n", str);
    device->tape_state = newstate;
    wake_up(&device->state_change_wq);
}

struct tape_med_state_work_data {
    struct tape_device *device;
    enum tape_medium_state state;
    struct work_struct  work;
};

static void
tape_med_state_work_handler(struct work_struct *work)
{
    static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
    static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
    struct tape_med_state_work_data *p =
        container_of(work, struct tape_med_state_work_data, work);
    struct tape_device *device = p->device;
    char *envp[] = { NULL, NULL };

    switch (p->state) {
    case MS_UNLOADED:
        pr_info("%s: The tape cartridge has been successfully "
            "unloaded\n", dev_name(&device->cdev->dev));
        envp[0] = env_state_unloaded;
        kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
        break;
    case MS_LOADED:
        pr_info("%s: A tape cartridge has been mounted\n",
            dev_name(&device->cdev->dev));
        envp[0] = env_state_loaded;
        kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
        break;
    default:
        break;
    }
    tape_put_device(device);
    kfree(p);
}
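
/*
 * The KOBJ_CHANGE uevents emitted above carry MEDIUM_STATE=LOADED or
 * MEDIUM_STATE=UNLOADED in their environment, so userspace can react
 * to medium changes without polling, e.g. with a udev rule along these
 * lines (the script name is hypothetical):
 *
 *    ACTION=="change", SUBSYSTEM=="ccw", ENV{MEDIUM_STATE}=="LOADED", \
 *        RUN+="/usr/local/sbin/tape-loaded %k"
 *
 * The work is queued from tape_med_state_set(), which may run in
 * interrupt context under the ccw device lock; kobject_uevent_env()
 * can sleep, hence the detour through a work item and the GFP_ATOMIC
 * allocation below.
 */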

static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
    struct tape_med_state_work_data *p;

    p = kzalloc(sizeof(*p), GFP_ATOMIC);
    if (p) {
        INIT_WORK(&p->work, tape_med_state_work_handler);
        p->device = tape_get_device(device);
        p->state = state;
        schedule_work(&p->work);
    }
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
    enum tape_medium_state oldstate;

    oldstate = device->medium_state;
    if (oldstate == newstate)
        return;
    device->medium_state = newstate;
    switch (newstate) {
    case MS_UNLOADED:
        device->tape_generic_status |= GMT_DR_OPEN(~0);
        if (oldstate == MS_LOADED)
            tape_med_state_work(device, MS_UNLOADED);
        break;
    case MS_LOADED:
        device->tape_generic_status &= ~GMT_DR_OPEN(~0);
        if (oldstate == MS_UNLOADED)
            tape_med_state_work(device, MS_LOADED);
        break;
    default:
        break;
    }
    wake_up(&device->state_change_wq);
}

/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
    int retries;
    int rc;

    /* Check if interrupt has already been processed */
    if (request->callback == NULL)
        return 0;

    rc = 0;
    for (retries = 0; retries < 5; retries++) {
        rc = ccw_device_clear(device->cdev, (long) request);

        switch (rc) {
            case 0:
                request->status = TAPE_REQUEST_DONE;
                return 0;
            case -EBUSY:
                request->status = TAPE_REQUEST_CANCEL;
                schedule_delayed_work(&device->tape_dnr, 0);
                return 0;
            case -ENODEV:
                DBF_EXCEPTION(2, "device gone, retry\n");
                break;
            case -EIO:
                DBF_EXCEPTION(2, "I/O error, retry\n");
                break;
            default:
                BUG();
        }
    }

    return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
    struct tape_device *tmp;
    int minor;

    minor = 0;
    write_lock(&tape_device_lock);
    list_for_each_entry(tmp, &tape_device_list, node) {
        if (minor < tmp->first_minor)
            break;
        minor += TAPE_MINORS_PER_DEV;
    }
    if (minor >= 256) {
        write_unlock(&tape_device_lock);
        return -ENODEV;
    }
    device->first_minor = minor;
    list_add_tail(&device->node, &tmp->node);
    write_unlock(&tape_device_lock);
    return 0;
}
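
/*
 * Example, assuming TAPE_MINORS_PER_DEV is 2 (one rewinding and one
 * non-rewinding node per drive): with devices already holding
 * first_minor 0 and 4, a new device is slotted into the gap and gets
 * first_minor 2. Since the minor space of the shared major ends at
 * 256, at most 256 / TAPE_MINORS_PER_DEV drives can be online at once.
 */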

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
    write_lock(&tape_device_lock);
    list_del_init(&device->node);
    device->first_minor = -1;
    write_unlock(&tape_device_lock);
}

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from
 * the detected-but-offline state into the online state.
 * If we return an error (RC < 0) the device remains in the offline state.
 * This can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
           struct tape_discipline *discipline)
{
    int rc;

    DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

    if (device->tape_state != TS_INIT) {
        DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
        return -EINVAL;
    }

    timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);

    /* Let the discipline have a go at the device. */
    device->discipline = discipline;
    if (!try_module_get(discipline->owner)) {
        return -EINVAL;
    }

    rc = discipline->setup_device(device);
    if (rc)
        goto out;
    rc = tape_assign_minor(device);
    if (rc)
        goto out_discipline;

    rc = tapechar_setup_device(device);
    if (rc)
        goto out_minor;

    tape_state_set(device, TS_UNUSED);

    DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

    return 0;

out_minor:
    tape_remove_minor(device);
out_discipline:
    device->discipline->cleanup_device(device);
    device->discipline = NULL;
out:
    module_put(discipline->owner);
    return rc;
}

static void
tape_cleanup_device(struct tape_device *device)
{
    tapechar_cleanup_device(device);
    device->discipline->cleanup_device(device);
    module_put(device->discipline->owner);
    tape_remove_minor(device);
    tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on
 * user request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
    struct tape_device *device;

    device = dev_get_drvdata(&cdev->dev);
    if (!device) {
        return -ENODEV;
    }

    DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
        device->cdev_id, device);

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    switch (device->tape_state) {
        case TS_INIT:
        case TS_NOT_OPER:
            spin_unlock_irq(get_ccwdev_lock(device->cdev));
            break;
        case TS_UNUSED:
            tape_state_set(device, TS_INIT);
            spin_unlock_irq(get_ccwdev_lock(device->cdev));
            tape_cleanup_device(device);
            break;
        default:
            DBF_EVENT(3, "(%08x): Set offline failed "
                "- drive in use.\n",
                device->cdev_id);
            spin_unlock_irq(get_ccwdev_lock(device->cdev));
            return -EBUSY;
    }

    DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
    return 0;
}

/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
    struct tape_device *device;

    device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
    if (device == NULL) {
        DBF_EXCEPTION(2, "ti:no mem\n");
        return ERR_PTR(-ENOMEM);
    }
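    /*
     * The mode-set byte is transferred to the device by a channel
     * program, so it is allocated with GFP_DMA to keep it 31-bit
     * addressable for the channel subsystem.
     */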
    device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
    if (device->modeset_byte == NULL) {
        DBF_EXCEPTION(2, "ti:no mem\n");
        kfree(device);
        return ERR_PTR(-ENOMEM);
    }
    mutex_init(&device->mutex);
    INIT_LIST_HEAD(&device->req_queue);
    INIT_LIST_HEAD(&device->node);
    init_waitqueue_head(&device->state_change_wq);
    init_waitqueue_head(&device->wait_queue);
    device->tape_state = TS_INIT;
    device->medium_state = MS_UNKNOWN;
    *device->modeset_byte = 0;
    device->first_minor = -1;
    atomic_set(&device->ref_count, 1);
    INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

    return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
    int count;

    count = atomic_inc_return(&device->ref_count);
    DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
    return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure.
 */
void
tape_put_device(struct tape_device *device)
{
    int count;

    count = atomic_dec_return(&device->ref_count);
    DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
    BUG_ON(count < 0);
    if (count == 0) {
        kfree(device->modeset_byte);
        kfree(device);
    }
}

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
    struct tape_device *device, *tmp;

    device = ERR_PTR(-ENODEV);
    read_lock(&tape_device_lock);
    list_for_each_entry(tmp, &tape_device_list, node) {
        if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
            device = tape_get_device(tmp);
            break;
        }
    }
    read_unlock(&tape_device_lock);
    return device;
}

/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
    struct tape_device *device;
    int ret;
    struct ccw_dev_id dev_id;

    device = tape_alloc_device();
    if (IS_ERR(device))
        return -ENODEV;
    ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
                     CCWDEV_DO_MULTIPATH);
    ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
    if (ret) {
        tape_put_device(device);
        return ret;
    }
    dev_set_drvdata(&cdev->dev, device);
    cdev->handler = __tape_do_irq;
    device->cdev = cdev;
    ccw_device_get_id(cdev, &dev_id);
    device->cdev_id = devid_to_int(&dev_id);
    return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
    struct tape_request *request;
    struct list_head *l, *n;

    list_for_each_safe(l, n, &device->req_queue) {
        request = list_entry(l, struct tape_request, list);
        if (request->status == TAPE_REQUEST_IN_IO)
            request->status = TAPE_REQUEST_DONE;
        list_del(&request->list);

        /* Decrease ref_count for removed request. */
        request->device = NULL;
        tape_put_device(device);
        request->rc = -EIO;
        if (request->callback != NULL)
            request->callback(request, request->callback_data);
    }
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects that
 * the device is gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
    struct tape_device *device;

    device = dev_get_drvdata(&cdev->dev);
    if (!device) {
        return;
    }
    DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    switch (device->tape_state) {
        case TS_INIT:
            tape_state_set(device, TS_NOT_OPER);
            fallthrough;
        case TS_NOT_OPER:
            /*
             * Nothing to do.
             */
            spin_unlock_irq(get_ccwdev_lock(device->cdev));
            break;
        case TS_UNUSED:
            /*
             * Need only to release the device.
             */
            tape_state_set(device, TS_NOT_OPER);
            spin_unlock_irq(get_ccwdev_lock(device->cdev));
            tape_cleanup_device(device);
            break;
        default:
            /*
             * There may be requests on the queue. We will not get
             * an interrupt for a request that was running. So we
             * just post them all as I/O errors.
             */
            DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
                device->cdev_id);
            pr_warn("%s: A tape unit was detached while in use\n",
                dev_name(&device->cdev->dev));
            tape_state_set(device, TS_NOT_OPER);
            __tape_discard_requests(device);
            spin_unlock_irq(get_ccwdev_lock(device->cdev));
            tape_cleanup_device(device);
    }

    device = dev_get_drvdata(&cdev->dev);
    if (device) {
        sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
        dev_set_drvdata(&cdev->dev, NULL);
        tape_put_device(device);
    }
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
    struct tape_request *request;

    BUG_ON(datasize > PAGE_SIZE || (cplength * sizeof(struct ccw1)) > PAGE_SIZE);

    DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

    request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
    if (request == NULL) {
        DBF_EXCEPTION(1, "cqra nomem\n");
        return ERR_PTR(-ENOMEM);
    }
    /* allocate channel program */
    if (cplength > 0) {
        request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
                      GFP_ATOMIC | GFP_DMA);
        if (request->cpaddr == NULL) {
            DBF_EXCEPTION(1, "cqra nomem\n");
            kfree(request);
            return ERR_PTR(-ENOMEM);
        }
    }
    /* alloc small kernel buffer */
    if (datasize > 0) {
        request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
        if (request->cpdata == NULL) {
            DBF_EXCEPTION(1, "cqra nomem\n");
            kfree(request->cpaddr);
            kfree(request);
            return ERR_PTR(-ENOMEM);
        }
    }
    DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
        request->cpdata);

    return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request(struct tape_request *request)
{
    DBF_LH(6, "Free request %p\n", request);

    if (request->device)
        tape_put_device(request->device);
    kfree(request->cpdata);
    kfree(request->cpaddr);
    kfree(request);
}
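
/*
 * Typical request life cycle as the disciplines use it; a minimal
 * sketch modelled on tape_std_mtnop() (tape_ccw_end() and the NOP
 * command code come from tape.h/tape_std.h):
 *
 *    struct tape_request *request;
 *    int rc;
 *
 *    request = tape_alloc_request(1, 0);
 *    if (IS_ERR(request))
 *        return PTR_ERR(request);
 *    request->op = TO_NOP;
 *    tape_ccw_end(request->cpaddr, NOP, 0, NULL);
 *    rc = tape_do_io(device, request);
 *    tape_free_request(request);
 *    return rc;
 */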

static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
    int rc;

    rc = ccw_device_start(
        device->cdev,
        request->cpaddr,
        (unsigned long) request,
        0x00,
        request->options
    );
    if (rc == 0) {
        request->status = TAPE_REQUEST_IN_IO;
    } else if (rc == -EBUSY) {
        /* The common I/O subsystem is currently busy. Retry later. */
        request->status = TAPE_REQUEST_QUEUED;
        schedule_delayed_work(&device->tape_dnr, 0);
        rc = 0;
    } else {
        /* Start failed. Remove request and indicate failure. */
        DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
    }
    return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
    struct list_head *l, *n;
    struct tape_request *request;
    int rc;

    DBF_LH(6, "__tape_start_next_request(%p)\n", device);
    /*
     * Try to start each request on the request queue until one is
     * started successfully.
     */
    list_for_each_safe(l, n, &device->req_queue) {
        request = list_entry(l, struct tape_request, list);

        /*
         * Avoid race condition if bottom-half was triggered more than
         * once.
         */
        if (request->status == TAPE_REQUEST_IN_IO)
            return;
        /*
         * Request has already been stopped. We have to wait until
         * the request is removed from the queue in the interrupt
         * handling.
         */
        if (request->status == TAPE_REQUEST_DONE)
            return;

        /*
         * We wanted to cancel the request but the common I/O layer
         * was busy at that time. This can only happen if this
         * function is called by delayed_next_request.
         * Otherwise we start the next request on the queue.
         */
        if (request->status == TAPE_REQUEST_CANCEL) {
            rc = __tape_cancel_io(device, request);
        } else {
            rc = __tape_start_io(device, request);
        }
        if (rc == 0)
            return;

        /* Set ending status. */
        request->rc = rc;
        request->status = TAPE_REQUEST_DONE;

        /* Remove from request queue. */
        list_del(&request->list);

        /* Do callback. */
        if (request->callback != NULL)
            request->callback(request, request->callback_data);
    }
}

static void
tape_delayed_next_request(struct work_struct *work)
{
    struct tape_device *device =
        container_of(work, struct tape_device, tape_dnr.work);

    DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
    spin_lock_irq(get_ccwdev_lock(device->cdev));
    __tape_start_next_request(device);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

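/*
 * Second half of the long-busy handling (see TAPE_IO_LONG_BUSY in
 * __tape_do_irq): the head request stays queued in state
 * TAPE_REQUEST_LONG_BUSY until either a not-ready-to-ready interrupt
 * deletes the timer or, after LONG_BUSY_TIMEOUT seconds, this handler
 * fires and restarts the queue.
 */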
static void tape_long_busy_timeout(struct timer_list *t)
{
    struct tape_device *device = from_timer(device, t, lb_timeout);
    struct tape_request *request;

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    request = list_entry(device->req_queue.next, struct tape_request, list);
    BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
    DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
    __tape_start_next_request(device);
    tape_put_device(device);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void
__tape_end_request(
    struct tape_device *device,
    struct tape_request *request,
    int rc)
{
    DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
    if (request) {
        request->rc = rc;
        request->status = TAPE_REQUEST_DONE;

        /* Remove from request queue. */
        list_del(&request->list);

        /* Do callback. */
        if (request->callback != NULL)
            request->callback(request, request->callback_data);
    }

    /* Start next request. */
    if (!list_empty(&device->req_queue))
        __tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
            struct irb *irb)
{
    unsigned int *sptr;
    const char *op;

    if (request != NULL)
        op = tape_op_verbose[request->op];
    else
        op = "---";
    DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
          irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
    DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
    sptr = (unsigned int *) irb->ecw;
    DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
    DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
    DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
    DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
    int rc;

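    /*
     * Gate the request on the current device state: medium-independent
     * operations (sense, assign/unassign, attention message, RDC) may
     * run on an unused or still-initializing drive, everything else
     * requires the device to be opened (TS_IN_USE) or in block use.
     */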
    switch (request->op) {
        case TO_MSEN:
        case TO_ASSIGN:
        case TO_UNASSIGN:
        case TO_READ_ATTMSG:
        case TO_RDC:
            if (device->tape_state == TS_INIT)
                break;
            if (device->tape_state == TS_UNUSED)
                break;
            fallthrough;
        default:
            if (device->tape_state == TS_BLKUSE)
                break;
            if (device->tape_state != TS_IN_USE)
                return -ENODEV;
    }

    /* Increase use count of device for the added request. */
    request->device = tape_get_device(device);

    if (list_empty(&device->req_queue)) {
        /* No other requests are on the queue. Start this one. */
        rc = __tape_start_io(device, request);
        if (rc)
            return rc;

        DBF_LH(5, "Request %p added for execution.\n", request);
        list_add(&request->list, &device->req_queue);
    } else {
        DBF_LH(5, "Request %p add to queue.\n", request);
        request->status = TAPE_REQUEST_QUEUED;
        list_add_tail(&request->list, &device->req_queue);
    }
    return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
    int rc;

    DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    /* Add request to request queue and try to start it. */
    rc = __tape_start_request(device, request);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
    return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
    request->callback = NULL;
    wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
    int rc;

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    /* Setup callback */
    request->callback = __tape_wake_up;
    request->callback_data = &device->wait_queue;
    /* Add request to request queue and try to start it. */
    rc = __tape_start_request(device, request);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
    if (rc)
        return rc;
    /* Request added to the queue. Wait for its completion. */
    wait_event(device->wait_queue, (request->callback == NULL));
    /* Get rc from request */
    return request->rc;
}

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
    request->callback = NULL;
    wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
             struct tape_request *request)
{
    int rc;

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    /* Setup callback */
    request->callback = __tape_wake_up_interruptible;
    request->callback_data = &device->wait_queue;
    rc = __tape_start_request(device, request);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
    if (rc)
        return rc;
    /* Request added to the queue. Wait for its completion. */
    rc = wait_event_interruptible(device->wait_queue,
                      (request->callback == NULL));
    if (rc != -ERESTARTSYS)
        /* Request finished normally. */
        return request->rc;

    /* Interrupted by a signal. We have to stop the current request. */
    spin_lock_irq(get_ccwdev_lock(device->cdev));
    rc = __tape_cancel_io(device, request);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
    if (rc == 0) {
        /* Wait for the interrupt that acknowledges the halt. */
        do {
            rc = wait_event_interruptible(
                device->wait_queue,
                (request->callback == NULL)
            );
        } while (rc == -ERESTARTSYS);

        DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
        rc = -ERESTARTSYS;
    }
    return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
    int rc;

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    rc = __tape_cancel_io(device, request);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
    return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
    struct tape_device *device;
    struct tape_request *request;
    int rc;

    device = dev_get_drvdata(&cdev->dev);
    if (device == NULL) {
        return;
    }
    request = (struct tape_request *) intparm;

    DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

    /* On special conditions irb is an error pointer */
    if (IS_ERR(irb)) {
        /* FIXME: What to do with the request? */
        switch (PTR_ERR(irb)) {
            case -ETIMEDOUT:
                DBF_LH(1, "(%08x): Request timed out\n",
                       device->cdev_id);
                fallthrough;
            case -EIO:
                __tape_end_request(device, request, -EIO);
                break;
            default:
                DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
                       device->cdev_id, PTR_ERR(irb));
        }
        return;
    }

    /*
     * If the condition code is not zero and the start function bit is
     * still set, this is a deferred error and the last start I/O did
     * not succeed. At this point the condition that caused the deferred
     * error might still apply. So we just schedule the request to be
     * started later.
     */
    if (irb->scsw.cmd.cc != 0 &&
        (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
        (request->status == TAPE_REQUEST_IN_IO)) {
        DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
            device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
        request->status = TAPE_REQUEST_QUEUED;
        schedule_delayed_work(&device->tape_dnr, HZ);
        return;
    }

    /* May be an unsolicited irq */
    if (request != NULL)
        request->rescnt = irb->scsw.cmd.count;
    else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
         !list_empty(&device->req_queue)) {
        /* Not Ready to Ready after long busy ? */
        struct tape_request *req;
        req = list_entry(device->req_queue.next,
                 struct tape_request, list);
        if (req->status == TAPE_REQUEST_LONG_BUSY) {
            DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
            if (del_timer(&device->lb_timeout)) {
                tape_put_device(device);
                __tape_start_next_request(device);
            }
            return;
        }
    }
    if (irb->scsw.cmd.dstat != 0x0c) {
        /* Set the 'ONLINE' flag depending on sense byte 1 */
        if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
            device->tape_generic_status |= GMT_ONLINE(~0);
        else
            device->tape_generic_status &= ~GMT_ONLINE(~0);

        /*
         * Any request that does not come back with channel end
         * and device end is unusual. Log the sense data.
         */
        DBF_EVENT(3, "-- Tape Interrupthandler --\n");
        tape_dump_sense_dbf(device, request, irb);
    } else {
        /* Upon normal completion the device _is_ online */
        device->tape_generic_status |= GMT_ONLINE(~0);
    }
    if (device->tape_state == TS_NOT_OPER) {
        DBF_EVENT(6, "tape:device is not operational\n");
        return;
    }

    /*
     * Requests that were canceled still come back with an interrupt.
     * To detect such requests, their status has been set to
     * TAPE_REQUEST_DONE.
     */
    if (request != NULL && request->status == TAPE_REQUEST_DONE) {
        __tape_end_request(device, request, -EIO);
        return;
    }

    rc = device->discipline->irq(device, request, irb);
    /*
     * rc < 0 : request finished unsuccessfully.
     * rc == TAPE_IO_SUCCESS: request finished successfully.
     * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
     * rc == TAPE_IO_RETRY: request finished but needs another go.
     * rc == TAPE_IO_STOP: request needs to get terminated.
     */
    switch (rc) {
        case TAPE_IO_SUCCESS:
            /* Upon normal completion the device _is_ online */
            device->tape_generic_status |= GMT_ONLINE(~0);
            __tape_end_request(device, request, rc);
            break;
        case TAPE_IO_PENDING:
            break;
        case TAPE_IO_LONG_BUSY:
            device->lb_timeout.expires = jiffies +
                LONG_BUSY_TIMEOUT * HZ;
            DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
            add_timer(&device->lb_timeout);
            request->status = TAPE_REQUEST_LONG_BUSY;
            break;
        case TAPE_IO_RETRY:
            rc = __tape_start_io(device, request);
            if (rc)
                __tape_end_request(device, request, rc);
            break;
        case TAPE_IO_STOP:
            rc = __tape_cancel_io(device, request);
            if (rc)
                __tape_end_request(device, request, rc);
            break;
        default:
            if (rc > 0) {
                DBF_EVENT(6, "xunknownrc\n");
                __tape_end_request(device, request, -EIO);
            } else {
                __tape_end_request(device, request, rc);
            }
            break;
    }
}

/*
 * Tape device open function used by tape_char frontend.
 */
int
tape_open(struct tape_device *device)
{
    int rc;

    spin_lock_irq(get_ccwdev_lock(device->cdev));
    if (device->tape_state == TS_NOT_OPER) {
        DBF_EVENT(6, "TAPE:nodev\n");
        rc = -ENODEV;
    } else if (device->tape_state == TS_IN_USE) {
        DBF_EVENT(6, "TAPE:dbusy\n");
        rc = -EBUSY;
    } else if (device->tape_state == TS_BLKUSE) {
        DBF_EVENT(6, "TAPE:dbusy\n");
        rc = -EBUSY;
    } else if (device->discipline != NULL &&
           !try_module_get(device->discipline->owner)) {
        DBF_EVENT(6, "TAPE:nodisc\n");
        rc = -ENODEV;
    } else {
        tape_state_set(device, TS_IN_USE);
        rc = 0;
    }
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
    return rc;
}

/*
 * Tape device release function used by tape_char frontend.
 */
int
tape_release(struct tape_device *device)
{
    spin_lock_irq(get_ccwdev_lock(device->cdev));
    if (device->tape_state == TS_IN_USE)
        tape_state_set(device, TS_UNUSED);
    module_put(device->discipline->owner);
    spin_unlock_irq(get_ccwdev_lock(device->cdev));
    return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
    tape_mtop_fn fn;
    int rc;

    DBF_EVENT(6, "TAPE:mtio\n");
    DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
    DBF_EVENT(6, "TAPE:arg:  %x\n", mt_count);

    if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
        return -EINVAL;
    fn = device->discipline->mtop_array[mt_op];
    if (fn == NULL)
        return -EINVAL;

    /* We assume that the backends can handle count up to 500. */
    if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
        mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
        rc = 0;
        for (; mt_count > 500; mt_count -= 500)
            if ((rc = fn(device, 500)) != 0)
                break;
        if (rc == 0)
            rc = fn(device, mt_count);
    } else
        rc = fn(device, mt_count);
    return rc;
}
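
/*
 * For example, an MTFSF with mt_count == 1200 is executed as
 * fn(device, 500), fn(device, 500) and finally fn(device, 200).
 */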

/*
 * Tape init function.
 */
static int
tape_init (void)
{
    TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
    debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
    debug_set_level(TAPE_DBF_AREA, 6);
#endif
    DBF_EVENT(3, "tape init\n");
    tape_proc_init();
    tapechar_init();
    return 0;
}
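
/*
 * The "tape" debug feature registered above is typically visible under
 * /sys/kernel/debug/s390dbf/tape/ once debugfs is mounted.
 */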

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
    DBF_EVENT(6, "tape exit\n");

    /* Get rid of the frontends */
    tapechar_exit();
    tape_proc_cleanup();
    debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
          "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);