// SPDX-License-Identifier: GPL-2.0
/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *      Michael Holzheu <holzheu@de.ibm.com>
 *      Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 * is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 * is available by reading sysfs attr reclen. Each write() to the device
 * must specify an integral multiple (maximal 511) of reclen.
 */
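
/*
 * Illustrative user-space sketch of the write path described above (a
 * minimal example, not part of the driver): read the record length from
 * the sysfs "reclen" attribute and pad each record to a multiple of it
 * before calling write().  The device node and sysfs paths below assume
 * a punch at device number 000d with a udev-created node; the space
 * padding is likewise just an example choice.
 */
#if 0   /* user-space example, not compiled into the driver */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    char record[512];
    size_t reclen;
    FILE *f;
    int fd;

    /* The reclen attribute is created on the ccw device (see below). */
    f = fopen("/sys/bus/ccw/devices/0.0.000d/reclen", "r");
    if (!f || fscanf(f, "%zu", &reclen) != 1 || reclen > sizeof(record))
        return 1;
    fclose(f);

    /* One record, space padded to exactly reclen bytes. */
    memset(record, ' ', reclen);
    memcpy(record, "HELLO PUNCH", strlen("HELLO PUNCH"));

    fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
    if (fd < 0)
        return 1;
    if (write(fd, record, reclen) != (ssize_t) reclen)
        perror("write");
    close(fd);
    return 0;
}
#endif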

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
    { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
    { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
    { /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
    .driver = {
        .name   = "vmur",
        .owner  = THIS_MODULE,
    },
    .ids        = ur_ids,
    .probe      = ur_probe,
    .remove     = ur_remove,
    .set_online = ur_set_online,
    .set_offline    = ur_set_offline,
    .int_class  = IRQIO_VMR,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   (dev_get_drvdata(&cdev->dev))
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock.
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
    struct urdev *urd;

    urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
    if (!urd)
        return NULL;
    urd->reclen = cdev->id.driver_info;
    ccw_device_get_id(cdev, &urd->dev_id);
    mutex_init(&urd->io_mutex);
    init_waitqueue_head(&urd->wait);
    spin_lock_init(&urd->open_lock);
    refcount_set(&urd->ref_count, 1);
    urd->cdev = cdev;
    get_device(&cdev->dev);
    return urd;
}

static void urdev_free(struct urdev *urd)
{
    TRACE("urdev_free: %p\n", urd);
    if (urd->cdev)
        put_device(&urd->cdev->dev);
    kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
    refcount_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
    struct urdev *urd;
    unsigned long flags;

    spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
    urd = dev_get_drvdata(&cdev->dev);
    if (urd)
        urdev_get(urd);
    spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
    return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
    char bus_id[16];
    struct ccw_device *cdev;
    struct urdev *urd;

    sprintf(bus_id, "0.0.%04x", devno);
    cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
    if (!cdev)
        return NULL;
    urd = urdev_get_from_cdev(cdev);
    put_device(&cdev->dev);
    return urd;
}

static void urdev_put(struct urdev *urd)
{
    if (refcount_dec_and_test(&urd->ref_count))
        urdev_free(urd);
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, derives a return code
 * from the interruption response block, stores it in urd->io_request_rc
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for freeing the channel program
 * that alloc_chan_prog returned, using free_chan_prog().
 */

static void free_chan_prog(struct ccw1 *cpa)
{
    struct ccw1 *ptr = cpa;

    while (ptr->cda) {
        kfree((void *)(addr_t) ptr->cda);
        ptr++;
    }
    kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
                    int reclen)
{
    struct ccw1 *cpa;
    void *kbuf;
    int i;

    TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

    /*
     * We chain a NOP onto the writes to force CE+DE together.
     * That means we allocate room for CCWs to cover count/reclen
     * records plus a NOP.
     */
    cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
              GFP_KERNEL | GFP_DMA);
    if (!cpa)
        return ERR_PTR(-ENOMEM);

    for (i = 0; i < rec_count; i++) {
        cpa[i].cmd_code = WRITE_CCW_CMD;
        cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
        cpa[i].count = reclen;
        kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
        if (!kbuf) {
            free_chan_prog(cpa);
            return ERR_PTR(-ENOMEM);
        }
        cpa[i].cda = (u32)(addr_t) kbuf;
        if (copy_from_user(kbuf, ubuf, reclen)) {
            free_chan_prog(cpa);
            return ERR_PTR(-EFAULT);
        }
        ubuf += reclen;
    }
    /* The following NOP CCW forces CE+DE to be presented together */
    cpa[i].cmd_code = CCW_CMD_NOOP;
    return cpa;
}
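
/*
 * For example, a two-record write builds the following chain (each data
 * CCW carries CCW_FLAG_CC | CCW_FLAG_SLI and reclen bytes of data, the
 * final NOP terminates the chain):
 *
 *    cpa[0]: WRITE_CCW_CMD, count=reclen, cda -> first record buffer
 *    cpa[1]: WRITE_CCW_CMD, count=reclen, cda -> second record buffer
 *    cpa[2]: CCW_CMD_NOOP
 */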

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
    int rc;
    struct ccw_device *cdev = urd->cdev;
    DECLARE_COMPLETION_ONSTACK(event);

    TRACE("do_ur_io: cpa=%p\n", cpa);

    rc = mutex_lock_interruptible(&urd->io_mutex);
    if (rc)
        return rc;

    urd->io_done = &event;

    spin_lock_irq(get_ccwdev_lock(cdev));
    rc = ccw_device_start(cdev, cpa, 1, 0, 0);
    spin_unlock_irq(get_ccwdev_lock(cdev));

    TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
    if (rc)
        goto out;

    wait_for_completion(&event);
    TRACE("do_ur_io: I/O complete\n");
    rc = 0;

out:
    mutex_unlock(&urd->io_mutex);
    return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
               struct irb *irb)
{
    struct urdev *urd;

    if (!IS_ERR(irb)) {
        TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
              intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
              irb->scsw.cmd.count);
    }
    if (!intparm) {
        TRACE("ur_int_handler: unsolicited interrupt\n");
        return;
    }
    urd = dev_get_drvdata(&cdev->dev);
    BUG_ON(!urd);
    /* On special conditions irb is an error pointer */
    if (IS_ERR(irb))
        urd->io_request_rc = PTR_ERR(irb);
    else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
        urd->io_request_rc = 0;
    else
        urd->io_request_rc = -EIO;

    complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
                   struct device_attribute *attr, char *buf)
{
    struct urdev *urd;
    int rc;

    urd = urdev_get_from_cdev(to_ccwdev(dev));
    if (!urd)
        return -ENODEV;
    rc = sprintf(buf, "%zu\n", urd->reclen);
    urdev_put(urd);
    return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
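
/* The attribute typically shows up as /sys/bus/ccw/devices/<bus id>/reclen. */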

static int ur_create_attributes(struct device *dev)
{
    return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
    device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
    static struct diag210 ur_diag210;
    int cc;

    ur_diag210.vrdcdvno = urd->dev_id.devno;
    ur_diag210.vrdclen = sizeof(struct diag210);

    cc = diag210(&ur_diag210);
    switch (cc) {
    case 0:
        return -EOPNOTSUPP;
    case 2:
        return ur_diag210.vrdcvcla; /* virtual device class */
    case 3:
        return -ENODEV;
    default:
        return -EIO;
    }
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
    struct urfile *urf;

    urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
    if (!urf)
        return NULL;
    urf->urd = urd;

    TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
          urf->dev_reclen);

    return urf;
}

static void urfile_free(struct urfile *urf)
{
    TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
    kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
            size_t count, size_t reclen, loff_t *ppos)
{
    struct ccw1 *cpa;
    int rc;

    cpa = alloc_chan_prog(udata, count / reclen, reclen);
    if (IS_ERR(cpa))
        return PTR_ERR(cpa);

    rc = do_ur_io(urd, cpa);
    if (rc)
        goto fail_kfree_cpa;

    if (urd->io_request_rc) {
        rc = urd->io_request_rc;
        goto fail_kfree_cpa;
    }
    *ppos += count;
    rc = count;

fail_kfree_cpa:
    free_chan_prog(cpa);
    return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
            size_t count, loff_t *ppos)
{
    struct urfile *urf = file->private_data;

    TRACE("ur_write: count=%zu\n", count);

    if (count == 0)
        return 0;

    if (count % urf->dev_reclen)
        return -EINVAL; /* count must be a multiple of reclen */

    if (count > urf->dev_reclen * MAX_RECS_PER_IO)
        count = urf->dev_reclen * MAX_RECS_PER_IO;

    return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *                     record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
    int cc;

    cc = diag14(record, devno, 0x28);
    switch (cc) {
    case 0:
        return 0;
    case 2:
        return -ENOMEDIUM;
    case 3:
        return -ENODATA; /* position beyond end of file */
    default:
        return -EIO;
    }
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *   reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
    int cc;

    cc = diag14((unsigned long) buf, devno, 0x00);
    switch (cc) {
    case 0:
        return 0;
    case 1:
        return -ENODATA;
    case 2:
        return -ENOMEDIUM;
    default:
        return -EIO;
    }
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
               loff_t *offs)
{
    size_t len, copied, res;
    char *buf;
    int rc;
    u16 reclen;
    struct urdev *urd;

    urd = ((struct urfile *) file->private_data)->urd;
    reclen = ((struct urfile *) file->private_data)->file_reclen;

    rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
    if (rc == -ENODATA)
        return 0;
    if (rc)
        return rc;

    len = min((size_t) PAGE_SIZE, count);
    buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
    if (!buf)
        return -ENOMEM;

    copied = 0;
    res = (size_t) (*offs % PAGE_SIZE);
    do {
        rc = diag_read_file(urd->dev_id.devno, buf);
        if (rc == -ENODATA)
            break;
        if (rc)
            goto fail;
        if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
            *((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
        len = min(count - copied, PAGE_SIZE - res);
        if (copy_to_user(ubuf + copied, buf + res, len)) {
            rc = -EFAULT;
            goto fail;
        }
        res = 0;
        copied += len;
    } while (copied != count);

    *offs += copied;
    rc = copied;
fail:
    free_page((unsigned long) buf);
    return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
               loff_t *offs)
{
    struct urdev *urd;
    int rc;

    TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

    if (count == 0)
        return 0;

    urd = ((struct urfile *) file->private_data)->urd;
    rc = mutex_lock_interruptible(&urd->io_mutex);
    if (rc)
        return rc;
    rc = diag14_read(file, ubuf, count, offs);
    mutex_unlock(&urd->io_mutex);
    return rc;
}

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
    int cc;

    cc = diag14((unsigned long) buf, spid, 0xfff);
    switch (cc) {
    case 0:
        return 0;
    default:
        return -ENODATA;
    }
}

static int verify_uri_device(struct urdev *urd)
{
    struct file_control_block *fcb;
    char *buf;
    int rc;

    fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
    if (!fcb)
        return -ENOMEM;

    /* check for empty reader device (beginning of chain) */
    rc = diag_read_next_file_info(fcb, 0);
    if (rc)
        goto fail_free_fcb;

    /* if file is in hold status, we do not read it */
    if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
        rc = -EPERM;
        goto fail_free_fcb;
    }

    /* open file on virtual reader */
    buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
    if (!buf) {
        rc = -ENOMEM;
        goto fail_free_fcb;
    }
    rc = diag_read_file(urd->dev_id.devno, buf);
    if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
        goto fail_free_buf;

    /* check if the file on top of the queue is open now */
    rc = diag_read_next_file_info(fcb, 0);
    if (rc)
        goto fail_free_buf;
    if (!(fcb->file_stat & FLG_IN_USE)) {
        rc = -EMFILE;
        goto fail_free_buf;
    }
    rc = 0;

fail_free_buf:
    free_page((unsigned long) buf);
fail_free_fcb:
    kfree(fcb);
    return rc;
}

static int verify_device(struct urdev *urd)
{
    switch (urd->class) {
    case DEV_CLASS_UR_O:
        return 0; /* no check needed here */
    case DEV_CLASS_UR_I:
        return verify_uri_device(urd);
    default:
        return -EOPNOTSUPP;
    }
}

static int get_uri_file_reclen(struct urdev *urd)
{
    struct file_control_block *fcb;
    int rc;

    fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
    if (!fcb)
        return -ENOMEM;
    rc = diag_read_next_file_info(fcb, 0);
    if (rc)
        goto fail_free;
    if (fcb->file_stat & FLG_CP_DUMP)
        rc = 0;
    else
        rc = fcb->rec_len;

fail_free:
    kfree(fcb);
    return rc;
}

static int get_file_reclen(struct urdev *urd)
{
    switch (urd->class) {
    case DEV_CLASS_UR_O:
        return 0;
    case DEV_CLASS_UR_I:
        return get_uri_file_reclen(urd);
    default:
        return -EOPNOTSUPP;
    }
}

static int ur_open(struct inode *inode, struct file *file)
{
    u16 devno;
    struct urdev *urd;
    struct urfile *urf;
    unsigned short accmode;
    int rc;

    accmode = file->f_flags & O_ACCMODE;

    if (accmode == O_RDWR)
        return -EACCES;
    /*
     * We treat the minor number as the devno of the ur device
     * to find in the driver tree.
     */
    devno = iminor(file_inode(file));

    urd = urdev_get_from_devno(devno);
    if (!urd) {
        rc = -ENXIO;
        goto out;
    }

    spin_lock(&urd->open_lock);
    while (urd->open_flag) {
        spin_unlock(&urd->open_lock);
        if (file->f_flags & O_NONBLOCK) {
            rc = -EBUSY;
            goto fail_put;
        }
        if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
            rc = -ERESTARTSYS;
            goto fail_put;
        }
        spin_lock(&urd->open_lock);
    }
    urd->open_flag++;
    spin_unlock(&urd->open_lock);

    TRACE("ur_open\n");

    if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
        ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
        TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
        rc = -EACCES;
        goto fail_unlock;
    }

    rc = verify_device(urd);
    if (rc)
        goto fail_unlock;

    urf = urfile_alloc(urd);
    if (!urf) {
        rc = -ENOMEM;
        goto fail_unlock;
    }

    urf->dev_reclen = urd->reclen;
    rc = get_file_reclen(urd);
    if (rc < 0)
        goto fail_urfile_free;
    urf->file_reclen = rc;
    file->private_data = urf;
    return 0;

fail_urfile_free:
    urfile_free(urf);
fail_unlock:
    spin_lock(&urd->open_lock);
    urd->open_flag--;
    spin_unlock(&urd->open_lock);
fail_put:
    urdev_put(urd);
out:
    return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
    struct urfile *urf = file->private_data;

    TRACE("ur_release\n");
    spin_lock(&urf->urd->open_lock);
    urf->urd->open_flag--;
    spin_unlock(&urf->urd->open_lock);
    wake_up_interruptible(&urf->urd->wait);
    urdev_put(urf->urd);
    urfile_free(urf);
    return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
    if ((file->f_flags & O_ACCMODE) != O_RDONLY)
        return -ESPIPE; /* seek allowed only for reader */
    if (offset % PAGE_SIZE)
        return -ESPIPE; /* only multiples of 4K allowed */
    return no_seek_end_llseek(file, offset, whence);
}

static const struct file_operations ur_fops = {
    .owner   = THIS_MODULE,
    .open    = ur_open,
    .release = ur_release,
    .read    = ur_read,
    .write   = ur_write,
    .llseek  = ur_llseek,
};
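
/*
 * Illustrative user-space sketch of the read path (a minimal example, not
 * part of the driver): spool data is delivered page-wise and seeks on the
 * reader node are only accepted in 4K multiples, so a simple consumer just
 * reads 4K chunks until EOF.  The device node name below assumes a reader
 * at device number 000c with a udev-created node.
 */
#if 0   /* user-space example, not compiled into the driver */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    ssize_t n;
    int fd;

    fd = open("/dev/vmrdr-0.0.000c", O_RDONLY);
    if (fd < 0)
        return 1;
    while ((n = read(fd, buf, sizeof(buf))) > 0)
        if (write(STDOUT_FILENO, buf, n) != n)
            break;
    close(fd);
    return 0;
}
#endif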

/*
 * ccw_device infrastructure:
 *     ur_probe creates the struct urdev (with refcount = 1), the device
 *     attributes, sets up the interrupt handler and validates the virtual
 *     unit record device.
 *     ur_remove removes the device attributes and drops the reference to
 *     struct urdev.
 *
 *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 *     by the vmur_mutex lock.
 *
 *     urd->char_device is used as indication that the online function has
 *     been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
    struct urdev *urd;
    int rc;

    TRACE("ur_probe: cdev=%p\n", cdev);

    mutex_lock(&vmur_mutex);
    urd = urdev_alloc(cdev);
    if (!urd) {
        rc = -ENOMEM;
        goto fail_unlock;
    }

    rc = ur_create_attributes(&cdev->dev);
    if (rc) {
        rc = -ENOMEM;
        goto fail_urdev_put;
    }
    cdev->handler = ur_int_handler;

    /* validate virtual unit record device */
    urd->class = get_urd_class(urd);
    if (urd->class < 0) {
        rc = urd->class;
        goto fail_remove_attr;
    }
    if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
        rc = -EOPNOTSUPP;
        goto fail_remove_attr;
    }
    spin_lock_irq(get_ccwdev_lock(cdev));
    dev_set_drvdata(&cdev->dev, urd);
    spin_unlock_irq(get_ccwdev_lock(cdev));

    mutex_unlock(&vmur_mutex);
    return 0;

fail_remove_attr:
    ur_remove_attributes(&cdev->dev);
fail_urdev_put:
    urdev_put(urd);
fail_unlock:
    mutex_unlock(&vmur_mutex);
    return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
    struct urdev *urd;
    int minor, major, rc;
    char node_id[16];

    TRACE("ur_set_online: cdev=%p\n", cdev);

    mutex_lock(&vmur_mutex);
    urd = urdev_get_from_cdev(cdev);
    if (!urd) {
        /* ur_remove already deleted our urd */
        rc = -ENODEV;
        goto fail_unlock;
    }

    if (urd->char_device) {
        /* Another ur_set_online was faster */
        rc = -EBUSY;
        goto fail_urdev_put;
    }

    minor = urd->dev_id.devno;
    major = MAJOR(ur_first_dev_maj_min);

    urd->char_device = cdev_alloc();
    if (!urd->char_device) {
        rc = -ENOMEM;
        goto fail_urdev_put;
    }

    urd->char_device->ops = &ur_fops;
    urd->char_device->owner = ur_fops.owner;

    rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
    if (rc)
        goto fail_free_cdev;
    if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
        if (urd->class == DEV_CLASS_UR_I)
            sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
        if (urd->class == DEV_CLASS_UR_O)
            sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
    } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
        sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
    } else {
        rc = -EOPNOTSUPP;
        goto fail_free_cdev;
    }

    urd->device = device_create(vmur_class, &cdev->dev,
                    urd->char_device->dev, NULL, "%s", node_id);
    if (IS_ERR(urd->device)) {
        rc = PTR_ERR(urd->device);
        TRACE("ur_set_online: device_create rc=%d\n", rc);
        goto fail_free_cdev;
    }
    urdev_put(urd);
    mutex_unlock(&vmur_mutex);
    return 0;

fail_free_cdev:
    cdev_del(urd->char_device);
    urd->char_device = NULL;
fail_urdev_put:
    urdev_put(urd);
fail_unlock:
    mutex_unlock(&vmur_mutex);
    return rc;
}

static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
    struct urdev *urd;
    int rc;

    TRACE("ur_set_offline: cdev=%p\n", cdev);
    urd = urdev_get_from_cdev(cdev);
    if (!urd)
        /* ur_remove already deleted our urd */
        return -ENODEV;
    if (!urd->char_device) {
        /* Another ur_set_offline was faster */
        rc = -EBUSY;
        goto fail_urdev_put;
    }
    if (!force && (refcount_read(&urd->ref_count) > 2)) {
        /* There is still a user of urd (e.g. ur_open) */
        TRACE("ur_set_offline: BUSY\n");
        rc = -EBUSY;
        goto fail_urdev_put;
    }
    device_destroy(vmur_class, urd->char_device->dev);
    cdev_del(urd->char_device);
    urd->char_device = NULL;
    rc = 0;

fail_urdev_put:
    urdev_put(urd);
    return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
    int rc;

    mutex_lock(&vmur_mutex);
    rc = ur_set_offline_force(cdev, 0);
    mutex_unlock(&vmur_mutex);
    return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
    unsigned long flags;

    TRACE("ur_remove\n");

    mutex_lock(&vmur_mutex);

    if (cdev->online)
        ur_set_offline_force(cdev, 1);
    ur_remove_attributes(&cdev->dev);

    spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
    urdev_put(dev_get_drvdata(&cdev->dev));
    dev_set_drvdata(&cdev->dev, NULL);
    spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

    mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
    int rc;
    dev_t dev;

    if (!MACHINE_IS_VM) {
        pr_err("The %s cannot be loaded without z/VM\n",
               ur_banner);
        return -ENODEV;
    }

    vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
    if (!vmur_dbf)
        return -ENOMEM;
    rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
    if (rc)
        goto fail_free_dbf;

    debug_set_level(vmur_dbf, 6);

    vmur_class = class_create(THIS_MODULE, "vmur");
    if (IS_ERR(vmur_class)) {
        rc = PTR_ERR(vmur_class);
        goto fail_free_dbf;
    }

    rc = ccw_driver_register(&ur_driver);
    if (rc)
        goto fail_class_destroy;

    rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
    if (rc) {
        pr_err("Kernel function alloc_chrdev_region failed with "
               "error code %d\n", rc);
        goto fail_unregister_driver;
    }
    ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

    pr_info("%s loaded.\n", ur_banner);
    return 0;

fail_unregister_driver:
    ccw_driver_unregister(&ur_driver);
fail_class_destroy:
    class_destroy(vmur_class);
fail_free_dbf:
    debug_unregister(vmur_dbf);
    return rc;
}

static void __exit ur_exit(void)
{
    unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
    ccw_driver_unregister(&ur_driver);
    class_destroy(vmur_class);
    debug_unregister(vmur_dbf);
    pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);