/*
 * Linux driver for s390 z/VM virtual unit record devices
 * (z/VM virtual reader, punch and printer)
 *
 * Copyright IBM Corp.
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * The character device minor number is mapped one to one to the z/VM
 * device number (devno) of the virtual unit record device.
 *
 * Writes build a channel program of WRITE CCWs and start it synchronously
 * with ccw_device_start(); reads from a virtual reader use the diagnose
 * 0x14 interface.
 */

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* The device record length for writes is stored in the id's driver_info */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.driver = {
		.name = "vmur",
		.owner = THIS_MODULE,
	},
	.ids = ur_ids,
	.probe = ur_probe,
	.remove = ur_remove,
	.set_online = ur_set_online,
	.set_offline = ur_set_offline,
	.int_class = IRQIO_VMR,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each urdev (urd) holds a reference to its ccw device (urd->cdev), taken
 * in urdev_alloc() and dropped in urdev_free(). The ccw device points back
 * to the urdev through dev_get_drvdata(&cdev->dev).
 *
 * urdev reference counting:
 * - ur_probe() creates the urdev with a reference count of 1 and publishes
 *   it as the ccw device's driver data; ur_remove() drops that reference.
 * - ur_open() takes an additional reference via urdev_get_from_devno();
 *   ur_release() drops it again.
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	refcount_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	refcount_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}
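
/* Look up a urdev by device number via the "0.0.xxxx" ccw bus id. */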
static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (refcount_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

/*
 * Low-level write I/O
 *
 * A write request is translated into a channel program of WRITE CCWs, one
 * per record, terminated by a NOP CCW so that channel end and device end
 * are presented in a single interrupt. alloc_chan_prog() builds the program
 * and copies the user data into kernel buffers, do_ur_io() starts the
 * program and waits for its completion, and ur_int_handler() signals the
 * waiter when the interrupt arrives.
 */
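
/*
 * free_chan_prog
 * Free a channel program built by alloc_chan_prog(), including the record
 * buffers chained to its CCWs (the terminating NOP has cda == 0).
 */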
static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * Build a channel program of rec_count WRITE CCWs, each transferring one
 * record of reclen bytes copied from user space, terminated by a NOP CCW.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * A NOP CCW is chained onto the writes so that channel end and
	 * device end are presented together, so allocate one CCW more
	 * than there are records.
	 */
	cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}

	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}
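
/*
 * do_ur_io
 * Start the channel program on the device and sleep until ur_int_handler()
 * signals completion through urd->io_done. I/O on the device is serialized
 * by urd->io_mutex.
 */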
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur_int_handler: interrupt handler, registered as cdev->handler in
 * ur_probe(). It records the outcome of the I/O request and completes
 * urd->io_done to wake up do_ur_io().
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	if (!IS_ERR(irb)) {
		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
		      irb->scsw.cmd.count);
	}
	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);

	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * sysfs attribute "reclen": the record length used for write CCWs.
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}

/*
 * Determine the device class with diagnose 0x210.
 * cc=0: a real device is attached; not supported by this driver
 * cc=2: the virtual device exists; return its device class (vrdcvcla)
 * cc=3: the device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla;
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * Write count bytes (count / reclen records) from user space to the device
 * by building and starting a channel program.
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}
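
/*
 * write fop: the byte count must be a multiple of the device record length;
 * a single call writes at most MAX_RECS_PER_IO records (short write).
 */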
static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * Position the spool file of the virtual reader to the given record
 * (diagnose 0x14 subcode 0x0028).
 * cc=0: success
 * cc=2: no file active on the virtual reader
 * cc=3: the record is beyond the end of the file
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA;
	default:
		return -EIO;
	}
}

/*
 * Read the next buffer of the currently open spool file
 * (diagnose 0x14 subcode 0x0000).
 * cc=0: success
 * cc=1: end of file reached
 * cc=2: no file active on the virtual reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}
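
/*
 * Read from the spool file in page-sized chunks: position the file to the
 * record containing *offs, then read buffers with diag_read_file() and copy
 * them to user space. If the file has a known record length, it is patched
 * into the FILE_RECLEN_OFFSET field of the first page.
 */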
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}
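
/*
 * read fop: serialize against other I/O on the device and delegate to
 * diag14_read().
 */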
static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * Retrieve the file descriptor of the next file on the reader queue
 * (diagnose 0x14 subcode 0x0fff). Any nonzero return code means that no
 * further file information is available.
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* get the file descriptor of the file on top of the reader queue */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if the file is in hold status, refuse to read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open the file on the virtual reader by reading its first buffer */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check that the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -EOPNOTSUPP;
	}
}
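
/*
 * Determine the record length of the file on top of the reader queue.
 * CP dump files are reported with a record length of 0.
 */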
static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -EOPNOTSUPP;
	}
}
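
/*
 * open fop: opens are exclusive per device, and the access mode must match
 * the device class (read-only for readers, write-only for punch and
 * printer devices); O_RDWR is rejected.
 */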
static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;

	/*
	 * The minor number of the character device is the devno of the
	 * unit record device to open.
	 */
	devno = iminor(file_inode(file));

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	spin_lock(&urf->urd->open_lock);
	urf->urd->open_flag--;
	spin_unlock(&urf->urd->open_lock);
	wake_up_interruptible(&urf->urd->wait);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}
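
/*
 * llseek fop: seeking is only supported on readers and only to offsets that
 * are a multiple of PAGE_SIZE, since reads are positioned in page-sized
 * spool buffers.
 */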
static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE;
	if (offset % PAGE_SIZE)
		return -ESPIPE;
	return no_seek_end_llseek(file, offset, whence);
}

static const struct file_operations ur_fops = {
	.owner = THIS_MODULE,
	.open = ur_open,
	.release = ur_release,
	.read = ur_read,
	.write = ur_write,
	.llseek = ur_llseek,
};

/*
 * ccw_device infrastructure:
 *     ur_probe creates the struct urdev (with reference count 1), creates
 *     the device attributes, sets up the interrupt handler and validates
 *     the virtual unit record device.
 *     ur_remove removes the attributes and drops the reference to struct
 *     urdev.
 *
 *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 *     by the vmur_mutex lock.
 *
 *     urd->char_device is used as indication that the online function has
 *     been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate the virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
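
/*
 * Online processing: allocate and register the character device and create
 * the device node (vmrdr-, vmpun- or vmprt- prefix depending on the device
 * class).
 */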
static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove() already removed the urdev */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* the device is already online */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	urd->char_device->ops = &ur_fops;
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, &cdev->dev,
				    urd->char_device->dev, NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
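
/*
 * Offline processing: remove the device node and character device. Unless
 * forced, refuse while the device is still in use (a reference count above
 * 2 means someone besides probe and this function, e.g. an open file, still
 * holds a reference).
 */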
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove() already removed the urdev */
		return -ENODEV;
	if (!urd->char_device) {
		/* the device is not online */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (refcount_read(&urd->ref_count) > 2)) {
		/* the device is still opened by a user (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialization and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);