// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"

#define TCODE_PHY_PACKET        0x10
#define PCI_DEVICE_ID_TI_PCILYNX    0x8000

static char driver_name[] = KBUILD_MODNAME;

/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
    __le32 next;
    __le32 async_error_next;
    u32 user_data;
    __le32 pcl_status;
    __le32 remaining_transfer_count;
    __le32 next_data_buffer;
    struct {
        __le32 control;
        __le32 pointer;
    } buffer[13];
};
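
/*
 * Size check: the six leading quadlets (next, async_error_next, user_data,
 * pcl_status, remaining_transfer_count, next_data_buffer) plus 13 buffer
 * descriptors of two quadlets each give 6 * 4 + 13 * 8 = 128 bytes, matching
 * the comment above.
 */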

struct packet {
    unsigned int length;
    char data[];
};

struct packet_buffer {
    char *data;
    size_t capacity;
    long total_packet_count, lost_packet_count;
    atomic_t size;
    struct packet *head, *tail;
    wait_queue_head_t wait;
};
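
/*
 * The packet buffer is a byte ring: head and tail point at variable-length
 * struct packet records inside data[], wrapping at data + capacity.  The
 * number of queued bytes is tracked in the atomic size field, which is the
 * only state shared between the interrupt-context producer
 * (packet_buffer_put) and the reading process (packet_buffer_get).
 */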

struct pcilynx {
    struct pci_dev *pci_device;
    __iomem char *registers;

    struct pcl *rcv_start_pcl, *rcv_pcl;
    __le32 *rcv_buffer;

    dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

    spinlock_t client_list_lock;
    struct list_head client_list;

    struct miscdevice misc;
    struct list_head link;
    struct kref kref;
};

static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
    kref_get(&lynx->kref);

    return lynx;
}

static void
lynx_release(struct kref *kref)
{
    kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
    kref_put(&lynx->kref, lynx_release);
}

struct client {
    struct pcilynx *lynx;
    u32 tcode_mask;
    struct packet_buffer buffer;
    struct list_head link;
};
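
/*
 * One client is allocated per open file descriptor.  tcode_mask selects
 * which transaction codes are delivered to this client, and link is on the
 * card's client_list only between NOSY_IOC_START and NOSY_IOC_STOP (or
 * release).
 */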

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
    buffer->data = kmalloc(capacity, GFP_KERNEL);
    if (buffer->data == NULL)
        return -ENOMEM;
    buffer->head = (struct packet *) buffer->data;
    buffer->tail = (struct packet *) buffer->data;
    buffer->capacity = capacity;
    buffer->lost_packet_count = 0;
    atomic_set(&buffer->size, 0);
    init_waitqueue_head(&buffer->wait);

    return 0;
}

static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
    kfree(buffer->data);
}

static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
    struct packet_buffer *buffer = &client->buffer;
    size_t length;
    char *end;

    if (wait_event_interruptible(buffer->wait,
                     atomic_read(&buffer->size) > 0) ||
                     list_empty(&client->lynx->link))
        return -ERESTARTSYS;

    if (atomic_read(&buffer->size) == 0)
        return -ENODEV;

    /* FIXME: Check length <= user_length. */

    end = buffer->data + buffer->capacity;
    length = buffer->head->length;

    if (&buffer->head->data[length] < end) {
        if (copy_to_user(data, buffer->head->data, length))
            return -EFAULT;
        buffer->head = (struct packet *) &buffer->head->data[length];
    } else {
        size_t split = end - buffer->head->data;

        if (copy_to_user(data, buffer->head->data, split))
            return -EFAULT;
        if (copy_to_user(data + split, buffer->data, length - split))
            return -EFAULT;
        buffer->head = (struct packet *) &buffer->data[length - split];
    }

    /*
     * Decrease buffer->size as the last thing, since this is what
     * keeps the interrupt from overwriting the packet we are
     * retrieving from the buffer.
     */
    atomic_sub(sizeof(struct packet) + length, &buffer->size);

    return length;
}

static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
    char *end;

    buffer->total_packet_count++;

    if (buffer->capacity <
        atomic_read(&buffer->size) + sizeof(struct packet) + length) {
        buffer->lost_packet_count++;
        return;
    }

    end = buffer->data + buffer->capacity;
    buffer->tail->length = length;

    if (&buffer->tail->data[length] < end) {
        memcpy(buffer->tail->data, data, length);
        buffer->tail = (struct packet *) &buffer->tail->data[length];
    } else {
        size_t split = end - buffer->tail->data;

        memcpy(buffer->tail->data, data, split);
        memcpy(buffer->data, data + split, length - split);
        buffer->tail = (struct packet *) &buffer->data[length - split];
    }

    /* Finally, adjust buffer size and wake up userspace reader. */

    atomic_add(sizeof(struct packet) + length, &buffer->size);
    wake_up_interruptible(&buffer->wait);
}
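
/*
 * packet_buffer_put() runs from the interrupt handler while
 * packet_buffer_get() runs in process context; both split the copy in two
 * when a record wraps around the end of the ring.  A packet that does not
 * fit is dropped and only counted in lost_packet_count, so the snoop path
 * never blocks in interrupt context.
 */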

static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
    writel(data, lynx->registers + offset);
}

static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
    return readl(lynx->registers + offset);
}

static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
    reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}

/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
               int dmachan)
{
    reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
    reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
          DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
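
/*
 * The per-channel DMA register blocks are 0x20 bytes apart, so adding
 * dmachan * 0x20 to the DMA0_* offsets addresses channel dmachan.  Loading
 * the PCL bus address and then setting ENABLE | LINK starts the PCL program
 * on that channel.
 */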

static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
    if (addr > 15) {
        dev_err(&lynx->pci_device->dev,
            "PHY register address %d out of range\n", addr);
        return -1;
    }
    if (val > 0xff) {
        dev_err(&lynx->pci_device->dev,
            "PHY register value %d out of range\n", val);
        return -1;
    }
    reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
          LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

    return 0;
}

static int
nosy_open(struct inode *inode, struct file *file)
{
    int minor = iminor(inode);
    struct client *client;
    struct pcilynx *tmp, *lynx = NULL;

    mutex_lock(&card_mutex);
    list_for_each_entry(tmp, &card_list, link)
        if (tmp->misc.minor == minor) {
            lynx = lynx_get(tmp);
            break;
        }
    mutex_unlock(&card_mutex);
    if (lynx == NULL)
        return -ENODEV;

    client = kmalloc(sizeof *client, GFP_KERNEL);
    if (client == NULL)
        goto fail;

    client->tcode_mask = ~0;
    client->lynx = lynx;
    INIT_LIST_HEAD(&client->link);

    if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
        goto fail;

    file->private_data = client;

    return stream_open(inode, file);
fail:
    kfree(client);
    lynx_put(lynx);

    return -ENOMEM;
}

static int
nosy_release(struct inode *inode, struct file *file)
{
    struct client *client = file->private_data;
    struct pcilynx *lynx = client->lynx;

    spin_lock_irq(&lynx->client_list_lock);
    list_del_init(&client->link);
    spin_unlock_irq(&lynx->client_list_lock);

    packet_buffer_destroy(&client->buffer);
    kfree(client);
    lynx_put(lynx);

    return 0;
}

static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
    struct client *client = file->private_data;
    __poll_t ret = 0;

    poll_wait(file, &client->buffer.wait, pt);

    if (atomic_read(&client->buffer.size) > 0)
        ret = EPOLLIN | EPOLLRDNORM;

    if (list_empty(&client->lynx->link))
        ret |= EPOLLHUP;

    return ret;
}

static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
    struct client *client = file->private_data;

    return packet_buffer_get(client, buffer, count);
}

static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    struct client *client = file->private_data;
    spinlock_t *client_list_lock = &client->lynx->client_list_lock;
    struct nosy_stats stats;
    int ret;

    switch (cmd) {
    case NOSY_IOC_GET_STATS:
        spin_lock_irq(client_list_lock);
        stats.total_packet_count = client->buffer.total_packet_count;
        stats.lost_packet_count  = client->buffer.lost_packet_count;
        spin_unlock_irq(client_list_lock);

        if (copy_to_user((void __user *) arg, &stats, sizeof stats))
            return -EFAULT;
        else
            return 0;

    case NOSY_IOC_START:
        ret = -EBUSY;
        spin_lock_irq(client_list_lock);
        if (list_empty(&client->link)) {
            list_add_tail(&client->link, &client->lynx->client_list);
            ret = 0;
        }
        spin_unlock_irq(client_list_lock);

        return ret;

    case NOSY_IOC_STOP:
        spin_lock_irq(client_list_lock);
        list_del_init(&client->link);
        spin_unlock_irq(client_list_lock);

        return 0;

    case NOSY_IOC_FILTER:
        spin_lock_irq(client_list_lock);
        client->tcode_mask = arg;
        spin_unlock_irq(client_list_lock);

        return 0;

    default:
        return -EINVAL;
        /* Flush buffer, configure filter. */
    }
}
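
/*
 * ioctl() semantics as implemented above: NOSY_IOC_START and NOSY_IOC_STOP
 * add the client to and remove it from the card's client_list, which is
 * what enables or disables packet delivery; NOSY_IOC_FILTER takes a bitmask
 * of transaction codes (1 << tcode, with 1 << TCODE_PHY_PACKET standing in
 * for PHY packets); NOSY_IOC_GET_STATS copies a struct nosy_stats back to
 * userspace.
 */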

static const struct file_operations nosy_ops = {
    .owner =        THIS_MODULE,
    .read =         nosy_read,
    .unlocked_ioctl =   nosy_ioctl,
    .poll =         nosy_poll,
    .open =         nosy_open,
    .release =      nosy_release,
};

#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */

static void
packet_irq_handler(struct pcilynx *lynx)
{
    struct client *client;
    u32 tcode_mask, tcode, timestamp;
    size_t length;
    struct timespec64 ts64;

    /* FIXME: Also report rcv_speed. */

    length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
    tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

    ktime_get_real_ts64(&ts64);
    timestamp = ts64.tv_nsec / NSEC_PER_USEC;
    lynx->rcv_buffer[0] = (__force __le32)timestamp;

    if (length == PHY_PACKET_SIZE)
        tcode_mask = 1 << TCODE_PHY_PACKET;
    else
        tcode_mask = 1 << tcode;

    spin_lock(&lynx->client_list_lock);

    list_for_each_entry(client, &lynx->client_list, link)
        if (client->tcode_mask & tcode_mask)
            packet_buffer_put(&client->buffer,
                      lynx->rcv_buffer, length + 4);

    spin_unlock(&lynx->client_list_lock);
}
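
/*
 * The record handed to each interested client is the receive buffer from
 * quadlet 0 onwards: a 32-bit microsecond timestamp written above, followed
 * by the snooped packet itself, hence the length + 4 passed to
 * packet_buffer_put().  Bus resets are reported through the same path below
 * as a bare 4-byte timestamp.
 */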

static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
    struct client *client;
    struct timespec64 ts64;
    u32    timestamp;

    ktime_get_real_ts64(&ts64);
    timestamp = ts64.tv_nsec / NSEC_PER_USEC;

    spin_lock(&lynx->client_list_lock);

    list_for_each_entry(client, &lynx->client_list, link)
        packet_buffer_put(&client->buffer, &timestamp, 4);

    spin_unlock(&lynx->client_list_lock);
}

static irqreturn_t
irq_handler(int irq, void *device)
{
    struct pcilynx *lynx = device;
    u32 pci_int_status;

    pci_int_status = reg_read(lynx, PCI_INT_STATUS);

    if (pci_int_status == ~0)
        /* Card was ejected. */
        return IRQ_NONE;

    if ((pci_int_status & PCI_INT_INT_PEND) == 0)
        /* Not our interrupt, bail out quickly. */
        return IRQ_NONE;

    if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
        u32 link_int_status;

        link_int_status = reg_read(lynx, LINK_INT_STATUS);
        reg_write(lynx, LINK_INT_STATUS, link_int_status);

        if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
            bus_reset_irq_handler(lynx);
    }

    /* Clear the PCI_INT_STATUS register only after clearing the
     * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
     * be set again immediately. */

    reg_write(lynx, PCI_INT_STATUS, pci_int_status);

    if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
        packet_irq_handler(lynx);
        run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
    }

    return IRQ_HANDLED;
}

#define RCV_BUFFER_SIZE (16 * 1024)

static void
remove_card(struct pci_dev *dev)
{
    struct pcilynx *lynx = pci_get_drvdata(dev);
    struct client *client;

    mutex_lock(&card_mutex);
    list_del_init(&lynx->link);
    misc_deregister(&lynx->misc);
    mutex_unlock(&card_mutex);

    reg_write(lynx, PCI_INT_ENABLE, 0);
    free_irq(lynx->pci_device->irq, lynx);

    spin_lock_irq(&lynx->client_list_lock);
    list_for_each_entry(client, &lynx->client_list, link)
        wake_up_interruptible(&client->buffer.wait);
    spin_unlock_irq(&lynx->client_list_lock);

    dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
              lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
    dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
              lynx->rcv_pcl, lynx->rcv_pcl_bus);
    dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE, lynx->rcv_buffer,
              lynx->rcv_buffer_bus);

    iounmap(lynx->registers);
    pci_disable_device(dev);
    lynx_put(lynx);
}

static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
    struct pcilynx *lynx;
    u32 p, end;
    int ret, i;

    if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
        dev_err(&dev->dev,
            "DMA address limits not supported for PCILynx hardware\n");
        return -ENXIO;
    }
    if (pci_enable_device(dev)) {
        dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
        return -ENXIO;
    }
    pci_set_master(dev);

    lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
    if (lynx == NULL) {
        dev_err(&dev->dev, "Failed to allocate control structure\n");
        ret = -ENOMEM;
        goto fail_disable;
    }
    lynx->pci_device = dev;
    pci_set_drvdata(dev, lynx);

    spin_lock_init(&lynx->client_list_lock);
    INIT_LIST_HEAD(&lynx->client_list);
    kref_init(&lynx->kref);

    lynx->registers = ioremap(pci_resource_start(dev, 0),
                      PCILYNX_MAX_REGISTER);
    if (lynx->registers == NULL) {
        dev_err(&dev->dev, "Failed to map registers\n");
        ret = -ENOMEM;
        goto fail_deallocate_lynx;
    }

    lynx->rcv_start_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
                         sizeof(struct pcl),
                         &lynx->rcv_start_pcl_bus,
                         GFP_KERNEL);
    lynx->rcv_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
                       sizeof(struct pcl),
                       &lynx->rcv_pcl_bus, GFP_KERNEL);
    lynx->rcv_buffer = dma_alloc_coherent(&lynx->pci_device->dev,
                          RCV_BUFFER_SIZE,
                          &lynx->rcv_buffer_bus, GFP_KERNEL);
    if (lynx->rcv_start_pcl == NULL ||
        lynx->rcv_pcl == NULL ||
        lynx->rcv_buffer == NULL) {
        dev_err(&dev->dev, "Failed to allocate receive buffer\n");
        ret = -ENOMEM;
        goto fail_deallocate_buffers;
    }
    lynx->rcv_start_pcl->next   = cpu_to_le32(lynx->rcv_pcl_bus);
    lynx->rcv_pcl->next     = cpu_to_le32(PCL_NEXT_INVALID);
    lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);

    lynx->rcv_pcl->buffer[0].control =
            cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
    lynx->rcv_pcl->buffer[0].pointer =
            cpu_to_le32(lynx->rcv_buffer_bus + 4);
    p = lynx->rcv_buffer_bus + 2048;
    end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
    for (i = 1; p < end; i++, p += 2048) {
        lynx->rcv_pcl->buffer[i].control =
            cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
        lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
    }
    lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
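
    /*
     * With RCV_BUFFER_SIZE of 16 KiB this PCL programs one 2044-byte buffer
     * at offset 4 (quadlet 0 is reserved for the timestamp patched in by
     * packet_irq_handler()) followed by seven 2048-byte buffers, and marks
     * the last descriptor used with PCL_LAST_BUFF.
     */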

    reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
    /* Fix buggy cards with autoboot pin not tied low: */
    reg_write(lynx, DMA0_CHAN_CTRL, 0);
    reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
    /* now, looking for PHY register set */
    if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
        lynx->phyic.reg_1394a = 1;
        PRINT(KERN_INFO, lynx->id,
              "found 1394a conform PHY (using extended register set)");
        lynx->phyic.vendor = get_phy_vendorid(lynx);
        lynx->phyic.product = get_phy_productid(lynx);
    } else {
        lynx->phyic.reg_1394a = 0;
        PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
    }
#endif

    /* Setup the general receive FIFO max size. */
    reg_write(lynx, FIFO_SIZES, 255);

    reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

    reg_write(lynx, LINK_INT_ENABLE,
          LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
          LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
          LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
          LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
          LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

    /* Disable the L flag in self ID packets. */
    set_phy_reg(lynx, 4, 0);

    /* Put this baby into snoop mode */
    reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

    run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);

    if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
            driver_name, lynx)) {
        dev_err(&dev->dev,
            "Failed to allocate shared interrupt %d\n", dev->irq);
        ret = -EIO;
        goto fail_deallocate_buffers;
    }

    lynx->misc.parent = &dev->dev;
    lynx->misc.minor = MISC_DYNAMIC_MINOR;
    lynx->misc.name = "nosy";
    lynx->misc.fops = &nosy_ops;

    mutex_lock(&card_mutex);
    ret = misc_register(&lynx->misc);
    if (ret) {
        dev_err(&dev->dev, "Failed to register misc char device\n");
        mutex_unlock(&card_mutex);
        goto fail_free_irq;
    }
    list_add_tail(&lynx->link, &card_list);
    mutex_unlock(&card_mutex);

    dev_info(&dev->dev,
         "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

    return 0;

fail_free_irq:
    reg_write(lynx, PCI_INT_ENABLE, 0);
    free_irq(lynx->pci_device->irq, lynx);

fail_deallocate_buffers:
    if (lynx->rcv_start_pcl)
        dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
                  lynx->rcv_start_pcl,
                  lynx->rcv_start_pcl_bus);
    if (lynx->rcv_pcl)
        dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
                  lynx->rcv_pcl, lynx->rcv_pcl_bus);
    if (lynx->rcv_buffer)
        dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE,
                  lynx->rcv_buffer, lynx->rcv_buffer_bus);
    iounmap(lynx->registers);

fail_deallocate_lynx:
    kfree(lynx);

fail_disable:
    pci_disable_device(dev);

    return ret;
}

static const struct pci_device_id pci_table[] = {
    {
        .vendor =    PCI_VENDOR_ID_TI,
        .device =    PCI_DEVICE_ID_TI_PCILYNX,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
    },
    { } /* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver lynx_pci_driver = {
    .name =     driver_name,
    .id_table = pci_table,
    .probe =    add_card,
    .remove =   remove_card,
};

module_pci_driver(lynx_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");
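
/*
 * A minimal userspace sketch of how this device is used, assuming the
 * NOSY_IOC_* ioctls and struct nosy_stats from nosy-user.h and the /dev/nosy
 * node that udev normally creates for the misc device registered above (the
 * buffer size below is an arbitrary illustration, not part of the ABI):
 *
 *     #include <fcntl.h>
 *     #include <stdio.h>
 *     #include <sys/ioctl.h>
 *     #include <unistd.h>
 *     #include "nosy-user.h"
 *
 *     int main(void)
 *     {
 *         char buf[16 * 1024];            // arbitrary; must hold one record
 *         struct nosy_stats stats;
 *         ssize_t len;
 *         int fd = open("/dev/nosy", O_RDONLY);
 *
 *         if (fd < 0)
 *             return 1;
 *         ioctl(fd, NOSY_IOC_FILTER, ~0); // all tcodes, incl. PHY packets
 *         ioctl(fd, NOSY_IOC_START);      // join the card's client list
 *         while ((len = read(fd, buf, sizeof(buf))) > 0) {
 *             // buf[0..3] is the timestamp, buf[4..len) the snooped packet
 *         }
 *         ioctl(fd, NOSY_IOC_STOP);
 *         ioctl(fd, NOSY_IOC_GET_STATS, &stats);
 *         printf("%lu packets, %lu lost\n",
 *                (unsigned long)stats.total_packet_count,
 *                (unsigned long)stats.lost_packet_count);
 *         close(fd);
 *         return 0;
 *     }
 *
 * The in-tree consumer of this interface is nosy-dump under tools/firewire/.
 */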