0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #undef PARPORT_DEBUG_SHARING
0019
0020 #include <linux/module.h>
0021 #include <linux/string.h>
0022 #include <linux/threads.h>
0023 #include <linux/parport.h>
0024 #include <linux/delay.h>
0025 #include <linux/errno.h>
0026 #include <linux/interrupt.h>
0027 #include <linux/ioport.h>
0028 #include <linux/kernel.h>
0029 #include <linux/slab.h>
0030 #include <linux/sched/signal.h>
0031 #include <linux/kmod.h>
0032 #include <linux/device.h>
0033
0034 #include <linux/spinlock.h>
0035 #include <linux/mutex.h>
0036 #include <asm/irq.h>
0037
0038 #undef PARPORT_PARANOID
0039
/* Default sharing timeslice: how long a device may hold the port. */
#define PARPORT_DEFAULT_TIMESLICE (HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Ports that have been announced to drivers; guarded by parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* Every allocated port, kept sorted by ->number (see parport_register_port);
 * guarded by full_list_lock. */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Drivers registered on the private list (walked by attach/detach chains). */
static LIST_HEAD(drivers);

/* Serialises driver registration against port announce/remove. */
static DEFINE_MUTEX(registration_lock);
0055
0056
/*
 * No-op port operations.  parport_remove_port() installs these in place
 * of the lowlevel driver's real ops, so that device drivers which still
 * hold a reference to a removed port cannot call into unloaded code.
 * Every operation does nothing and/or returns 0.
 */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
0103
/* Device type that distinguishes ports from child devices on the bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};
0107
0108 static int is_parport(struct device *dev)
0109 {
0110 return dev->type == &parport_device_type;
0111 }
0112
0113 static int parport_probe(struct device *dev)
0114 {
0115 struct parport_driver *drv;
0116
0117 if (is_parport(dev))
0118 return -ENODEV;
0119
0120 drv = to_parport_driver(dev->driver);
0121 if (!drv->probe) {
0122
0123 struct pardevice *par_dev = to_pardevice(dev);
0124
0125 if (strcmp(par_dev->name, drv->name))
0126 return -ENODEV;
0127 return 0;
0128 }
0129
0130 return drv->probe(to_pardevice(dev));
0131 }
0132
/* The parport bus: hosts both port devices and their child devices. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
0137
0138 int parport_bus_init(void)
0139 {
0140 return bus_register(&parport_bus_type);
0141 }
0142
/* Tear down the parport bus (counterpart of parport_bus_init()). */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
0147
0148
0149
0150
0151
0152
0153
0154 static int driver_check(struct device_driver *dev_drv, void *_port)
0155 {
0156 struct parport *port = _port;
0157 struct parport_driver *drv = to_parport_driver(dev_drv);
0158
0159 if (drv->match_port)
0160 drv->match_port(port);
0161 return 0;
0162 }
0163
0164
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* NOTE(review): called under registration_lock by
	 * parport_announce_port — confirm all callers hold it. */
	struct parport_driver *drv;

	/* Drivers on the private list: ->attach is called unconditionally,
	 * so these drivers are presumably required to supply one. */
	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * Device-model drivers are offered the port via the bus walk;
	 * driver_check() skips drivers without a match_port() hook.
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
0180
0181 static int driver_detach(struct device_driver *_drv, void *_port)
0182 {
0183 struct parport *port = _port;
0184 struct parport_driver *drv = to_parport_driver(_drv);
0185
0186 if (drv->detach)
0187 drv->detach(port);
0188 return 0;
0189 }
0190
0191
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* Drivers on the private list: ->detach is called unconditionally
	 * here, so they are presumably required to supply one — TODO
	 * confirm against the legacy registration path. */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * Device-model drivers are notified via the bus walk;
	 * driver_detach() checks for a NULL ->detach.
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
0206
0207
/* Ask kmod for a hardware port driver when none is present. */
static void get_lowlevel_driver(void)
{
	/*
	 * "parport_lowlevel" is presumably a module alias that modprobe
	 * configuration maps to the platform's port driver — TODO
	 * confirm against the modprobe alias setup.
	 */
	request_module("parport_lowlevel");
}
0216
0217
0218
0219
0220
0221
0222
/*
 * Bus iterator callback: offer one bus device to a newly registered
 * driver.  Only port devices are offered, not their children.
 * ->match_port is called unconditionally here; the caller
 * (__parport_register_driver) only starts this walk when the driver
 * supplied a match_port() hook.
 */
static int port_check(struct device *dev, void *dev_drv)
{
	struct parport_driver *drv = dev_drv;

	/* only send ports, do not send other devices connected to the bus */
	if (is_parport(dev))
		drv->match_port(to_parport_dev(dev));
	return 0;
}
0232
0233
0234
0235
0236
0237
/*
 * Bus iterator callback used to answer "is any port registered?".
 * Returns 1 (stopping the walk) on the first port device found,
 * 0 otherwise.
 */
static int port_detect(struct device *dev, void *dev_drv)
{
	return is_parport(dev);
}
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278 int __parport_register_driver(struct parport_driver *drv, struct module *owner,
0279 const char *mod_name)
0280 {
0281
0282 int ret;
0283
0284
0285 drv->driver.name = drv->name;
0286 drv->driver.bus = &parport_bus_type;
0287 drv->driver.owner = owner;
0288 drv->driver.mod_name = mod_name;
0289 ret = driver_register(&drv->driver);
0290 if (ret)
0291 return ret;
0292
0293
0294
0295
0296
0297 ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
0298 port_detect);
0299 if (!ret)
0300 get_lowlevel_driver();
0301
0302 mutex_lock(®istration_lock);
0303 if (drv->match_port)
0304 bus_for_each_dev(&parport_bus_type, NULL, drv,
0305 port_check);
0306 mutex_unlock(®istration_lock);
0307
0308 return 0;
0309 }
0310 EXPORT_SYMBOL(__parport_register_driver);
0311
0312 static int port_detach(struct device *dev, void *_drv)
0313 {
0314 struct parport_driver *drv = _drv;
0315
0316 if (is_parport(dev) && drv->detach)
0317 drv->detach(to_parport_dev(dev));
0318
0319 return 0;
0320 }
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339 void parport_unregister_driver(struct parport_driver *drv)
0340 {
0341 mutex_lock(®istration_lock);
0342 bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
0343 driver_unregister(&drv->driver);
0344 mutex_unlock(®istration_lock);
0345 }
0346 EXPORT_SYMBOL(parport_unregister_driver);
0347
/*
 * Release callback for a port's struct device: runs when the last
 * reference (see parport_get_port/parport_put_port) is dropped.
 * Unlinks the port from the all-ports list and frees everything the
 * port owns, including the kmalloc'd name from parport_register_port.
 */
static void free_port(struct device *dev)
{
	int d;
	struct parport *port = to_parport_dev(dev);

	spin_lock(&full_list_lock);
	list_del(&port->full_list);
	spin_unlock(&full_list_lock);
	/* 5 slots: assumed to match the probe_info array size in
	 * struct parport — TODO confirm against the header. */
	for (d = 0; d < 5; d++) {
		kfree(port->probe_info[d].class_name);
		kfree(port->probe_info[d].mfr);
		kfree(port->probe_info[d].model);
		kfree(port->probe_info[d].cmdset);
		kfree(port->probe_info[d].description);
	}

	kfree(port->name);
	kfree(port);
}
0367
0368
0369
0370
0371
0372
0373
0374
0375
/**
 * parport_get_port - increment a port's reference count
 * @port: the port
 *
 * Takes a reference on the port's embedded struct device, keeping the
 * struct parport valid until the matching parport_put_port() call.
 * Returns @port.
 */
struct parport *parport_get_port(struct parport *port)
{
	struct device *dev = get_device(&port->bus_dev);

	return to_parport_dev(dev);
}
EXPORT_SYMBOL(parport_get_port);
0383
/**
 * parport_del_port - remove a port's device from the bus
 * @port: the port to delete
 *
 * Unregisters the port's embedded struct device.  The memory itself
 * is released later by free_port(), once the last reference is put.
 */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
/**
 * parport_put_port - decrement a port's reference count
 * @port: the port
 *
 * Drops the reference taken by parport_get_port().  When the count
 * reaches zero, the driver core invokes free_port() via the device
 * release callback.
 */
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434 struct parport *parport_register_port(unsigned long base, int irq, int dma,
0435 struct parport_operations *ops)
0436 {
0437 struct list_head *l;
0438 struct parport *tmp;
0439 int num;
0440 int device;
0441 char *name;
0442 int ret;
0443
0444 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
0445 if (!tmp)
0446 return NULL;
0447
0448
0449 tmp->base = base;
0450 tmp->irq = irq;
0451 tmp->dma = dma;
0452 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
0453 tmp->modes = 0;
0454 INIT_LIST_HEAD(&tmp->list);
0455 tmp->devices = tmp->cad = NULL;
0456 tmp->flags = 0;
0457 tmp->ops = ops;
0458 tmp->physport = tmp;
0459 memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
0460 rwlock_init(&tmp->cad_lock);
0461 spin_lock_init(&tmp->waitlist_lock);
0462 spin_lock_init(&tmp->pardevice_lock);
0463 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
0464 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
0465 sema_init(&tmp->ieee1284.irq, 0);
0466 tmp->spintime = parport_default_spintime;
0467 atomic_set(&tmp->ref_count, 1);
0468 INIT_LIST_HEAD(&tmp->full_list);
0469
0470 name = kmalloc(15, GFP_KERNEL);
0471 if (!name) {
0472 kfree(tmp);
0473 return NULL;
0474 }
0475
0476
0477 spin_lock(&full_list_lock);
0478 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
0479 struct parport *p = list_entry(l, struct parport, full_list);
0480 if (p->number != num)
0481 break;
0482 }
0483 tmp->portnum = tmp->number = num;
0484 list_add_tail(&tmp->full_list, l);
0485 spin_unlock(&full_list_lock);
0486
0487
0488
0489
0490 sprintf(name, "parport%d", tmp->portnum = tmp->number);
0491 tmp->name = name;
0492 tmp->bus_dev.bus = &parport_bus_type;
0493 tmp->bus_dev.release = free_port;
0494 dev_set_name(&tmp->bus_dev, name);
0495 tmp->bus_dev.type = &parport_device_type;
0496
0497 for (device = 0; device < 5; device++)
0498
0499 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
0500
0501 tmp->waithead = tmp->waittail = NULL;
0502
0503 ret = device_register(&tmp->bus_dev);
0504 if (ret) {
0505 put_device(&tmp->bus_dev);
0506 return NULL;
0507 }
0508
0509 return tmp;
0510 }
0511 EXPORT_SYMBOL(parport_register_port);
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525 void parport_announce_port(struct parport *port)
0526 {
0527 int i;
0528
0529 #ifdef CONFIG_PARPORT_1284
0530
0531 parport_daisy_init(port);
0532 #endif
0533
0534 if (!port->dev)
0535 pr_warn("%s: fix this legacy no-device port driver!\n",
0536 port->name);
0537
0538 parport_proc_register(port);
0539 mutex_lock(®istration_lock);
0540 spin_lock_irq(&parportlist_lock);
0541 list_add_tail(&port->list, &portlist);
0542 for (i = 1; i < 3; i++) {
0543 struct parport *slave = port->slaves[i-1];
0544 if (slave)
0545 list_add_tail(&slave->list, &portlist);
0546 }
0547 spin_unlock_irq(&parportlist_lock);
0548
0549
0550 attach_driver_chain(port);
0551 for (i = 1; i < 3; i++) {
0552 struct parport *slave = port->slaves[i-1];
0553 if (slave)
0554 attach_driver_chain(slave);
0555 }
0556 mutex_unlock(®istration_lock);
0557 }
0558 EXPORT_SYMBOL(parport_announce_port);
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
0579 void parport_remove_port(struct parport *port)
0580 {
0581 int i;
0582
0583 mutex_lock(®istration_lock);
0584
0585
0586 detach_driver_chain(port);
0587
0588 #ifdef CONFIG_PARPORT_1284
0589
0590 parport_daisy_fini(port);
0591 for (i = 1; i < 3; i++) {
0592 struct parport *slave = port->slaves[i-1];
0593 if (!slave)
0594 continue;
0595 detach_driver_chain(slave);
0596 parport_daisy_fini(slave);
0597 }
0598 #endif
0599
0600 port->ops = &dead_ops;
0601 spin_lock(&parportlist_lock);
0602 list_del_init(&port->list);
0603 for (i = 1; i < 3; i++) {
0604 struct parport *slave = port->slaves[i-1];
0605 if (slave)
0606 list_del_init(&slave->list);
0607 }
0608 spin_unlock(&parportlist_lock);
0609
0610 mutex_unlock(®istration_lock);
0611
0612 parport_proc_unregister(port);
0613
0614 for (i = 1; i < 3; i++) {
0615 struct parport *slave = port->slaves[i-1];
0616 if (slave)
0617 parport_put_port(slave);
0618 }
0619 }
0620 EXPORT_SYMBOL(parport_remove_port);
0621
0622 static void free_pardevice(struct device *dev)
0623 {
0624 struct pardevice *par_dev = to_pardevice(dev);
0625
0626 kfree(par_dev->name);
0627 kfree(par_dev);
0628 }
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
/**
 * parport_register_dev_model - register a device on a parallel port
 * @port: port the device is attached to
 * @name: name to refer to the device
 * @par_dev_cb: callbacks and flags for the device
 * @id: device number used when naming the device ("<name>.<id>")
 *
 * Allocates a struct pardevice, registers it on the parport bus as a
 * child of @port, and links it at the head of the physical port's
 * device list.  PARPORT_DEV_LURK requires both preempt and wakeup
 * callbacks; PARPORT_DEV_EXCL is refused if any device already exists
 * on the port.  Returns the new device, or NULL on any failure.
 */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is already registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			/* Lurkers need both callbacks to share sanely. */
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, we can't grant it,
			 * so bail out early.  (Re-checked under the lock
			 * below, since this read is unlocked.)
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
				port->name, name);
			return NULL;
		}
	}

	/* Pin the lowlevel driver and the port for this device's lifetime. */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;	/* not a daisy-chain device */
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/* After device_register(), free via put_device -> release. */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the port's device list. */
	par_dev->prev = NULL;

	spin_lock(&port->physport->pardevice_lock);

	/* Re-check exclusivity now that we hold the list lock. */
	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/* order the initialisation above before publishing the
		 * device on the list below */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * init_state is run last: it presumably may read other pardevice
	 * fields initialised above — TODO confirm against port drivers.
	 */
	port->ops->init_state(par_dev, par_dev->state);
	/* First device on the port also becomes the /proc device. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}

	return par_dev;

err_free_devname:
	kfree(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	/* Before device_register(), we still own par_dev directly. */
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
0832
0833
0834
0835
0836
0837
0838
0839
/**
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: device to deregister
 *
 * Undoes parport_register_dev_model(): removes the /proc entry if this
 * was the proc device, releases the port if the device forgot to, and
 * unlinks it from the device and wait lists before dropping the port
 * and module references taken at registration.
 */
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		pr_err("%s: passed NULL\n", __func__);
		return;
	}
#endif

	port = dev->port->physport;

	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	if (port->cad == dev) {
		/* The driver forgot parport_release(); do it for them. */
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
			port->name, dev->name);
		parport_release(dev);
	}

	/* Unlink from the port's device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	/* dev->name and dev itself are freed by the release callback. */
	device_unregister(&dev->dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
0902
0903
0904
0905
0906
0907
0908
0909
0910
0911
0912
0913
0914
0915 struct parport *parport_find_number(int number)
0916 {
0917 struct parport *port, *result = NULL;
0918
0919 if (list_empty(&portlist))
0920 get_lowlevel_driver();
0921
0922 spin_lock(&parportlist_lock);
0923 list_for_each_entry(port, &portlist, list) {
0924 if (port->number == number) {
0925 result = parport_get_port(port);
0926 break;
0927 }
0928 }
0929 spin_unlock(&parportlist_lock);
0930 return result;
0931 }
0932 EXPORT_SYMBOL(parport_find_number);
0933
0934
0935
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946 struct parport *parport_find_base(unsigned long base)
0947 {
0948 struct parport *port, *result = NULL;
0949
0950 if (list_empty(&portlist))
0951 get_lowlevel_driver();
0952
0953 spin_lock(&parportlist_lock);
0954 list_for_each_entry(port, &portlist, list) {
0955 if (port->base == base) {
0956 result = parport_get_port(port);
0957 break;
0958 }
0959 }
0960 spin_unlock(&parportlist_lock);
0961 return result;
0962 }
0963 EXPORT_SYMBOL(parport_find_base);
0964
0965
0966
0967
0968
0969
0970
0971
0972
0973
0974
0975
0976
/**
 * parport_claim - claim access to a parallel port device
 * @dev: device claiming the port
 *
 * Attempts to make @dev the current active device ("cad") of its
 * physical port, preempting any current owner via that owner's
 * preempt() callback.  Returns 0 on success.  Returns -EAGAIN if the
 * port cannot be claimed now; in that case, devices that can wait
 * (bit 1 of dev->waiting set, or a wakeup callback present) are
 * queued on the port's wait list.
 */
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		pr_info("%s: %s already owner\n", dev->port->name, dev->name);
		return 0;
	}

	/* Preempt any current device. */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			/* Nonzero from preempt() means "don't take it". */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * The preempt() callback apparently released the
			 * port itself (NOTE(review): presumably callbacks
			 * aren't supposed to — confirm the contract).
			 * If somebody else grabbed it meanwhile, give up.
			 */
			pr_warn("%s: %s released port when preempted!\n",
				port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* If we were queued on the wait list (bit 0), dequeue ourselves. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* We are now the current active device. */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, record the selection. */
	if (dev->port->muxport >= 0) {
		/* NOTE(review): only bookkeeping here, no hardware mux
		 * switching visible — confirm whether that's intended. */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy-chain device, select it. */
	if (dev->daisy >= 0) {
		/* parport_daisy_select() returns 0 on success. */
		if (!parport_daisy_select(port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* CONFIG_PARPORT_1284 */

	/* Restore control registers for the new owner. */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * The port is busy.  Register an interest in it — but only for
	 * devices that will actually be woken: those sleeping in
	 * parport_claim_or_block() (bit 1 of dev->waiting) or those with
	 * a wakeup callback.
	 */

	/* cad_lock is still held for writing here, so interrupts stay
	 * disabled and a plain spin_lock suffices. */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		/* Bit 0 marks "queued"; only enqueue once. */
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* Append ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/**
 * parport_claim_or_block - claim access to a parallel port device
 * @dev: device claiming the port
 *
 * Like parport_claim(), but sleeps (interruptibly) until the port can
 * be claimed.  Returns 0 if claimed without sleeping, 1 if it had to
 * sleep first, or -EINTR if a signal arrived while sleeping.
 */
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Bit 1 signals to parport_claim() that we may wait even
	 * without a wakeup callback.
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
			dev->name);
#endif
		/*
		 * parport_claim() queued us on the wait list (bit 0 of
		 * dev->waiting).  If the port was released meanwhile,
		 * parport_release() may already have claimed it on our
		 * behalf — parport_claim() clears dev->waiting on
		 * success — so sleeping now would never be woken.
		 * NOTE(review): dev->waiting is read here without a
		 * lock; presumably the wait_event condition makes the
		 * race benign — confirm.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			/* We slept before getting the port. */
			r = 1;
		} else {
			/* The port became ours before we could sleep. */
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
				dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
				dev->name, dev->port->physport->cad ?
				dev->port->physport->cad->name : "nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
/**
 * parport_release - give up access to a parallel port device
 * @dev: device releasing the port
 *
 * Gives the port up so other devices can use it.  The port state is
 * saved into @dev->state, then waiters are handed the port: first the
 * wait list in queue order (a parport_claim_or_block() sleeper is
 * claimed-for and woken directly), then — if nobody took it — every
 * other device with a wakeup callback is poked.
 */
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current owner before letting go. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		pr_warn("%s: %s tried to release parport when not owner\n",
			port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, clear the selection. */
	if (dev->port->muxport >= 0) {
		/* NOTE(review): bookkeeping only, no hardware deselect
		 * visible here — confirm whether that's intended. */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers. */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find who has been there longest and
	 * hand them the port.
	 * NOTE(review): the wait list is traversed here without
	 * waitlist_lock — looks racy against concurrent queueing;
	 * confirm whether some outer serialisation applies.
	 */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) {
			/* A claim_or_block() sleeper: claim for it, wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			/* Stop as soon as somebody has taken the port. */
			if (dev->port->cad)
				return;
		} else {
			pr_err("%s: don't know how to wake %s\n",
				port->name, pd->name);
		}
	}

	/*
	 * Nobody on the wait list took the port; poke every other device
	 * with a wakeup callback until one claims it (loop stops once
	 * port->cad becomes non-NULL).
	 */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
1223
/*
 * Generic interrupt handler for port drivers to pass to request_irq():
 * @dev_id is the struct parport, and the interrupt is forwarded to
 * parport_generic_irq().  Always reports IRQ_HANDLED.
 */
irqreturn_t parport_irq_handler(int irq, void *dev_id)
{
	struct parport *port = dev_id;

	parport_generic_irq(port);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(parport_irq_handler);
1233
1234 MODULE_LICENSE("GPL");