// SPDX-License-Identifier: GPL-2.0
/*
 * PLX ExpressLane PEX PCI Switch DMA Engine driver
 *
 * Author: Logan Gunthorpe
 */
#include "dmaengine.h"

#include <linux/circ_buf.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logan Gunthorpe");

#define PLX_REG_DESC_RING_ADDR 0x214
#define PLX_REG_DESC_RING_ADDR_HI 0x218
#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C
#define PLX_REG_DESC_RING_COUNT 0x220
#define PLX_REG_DESC_RING_LAST_ADDR 0x224
#define PLX_REG_DESC_RING_LAST_SIZE 0x228
#define PLX_REG_PREF_LIMIT 0x234
#define PLX_REG_CTRL 0x238
#define PLX_REG_CTRL2 0x23A
#define PLX_REG_INTR_CTRL 0x23C
#define PLX_REG_INTR_STATUS 0x23E

#define PLX_REG_PREF_LIMIT_PREF_FOUR 8

#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0)
#define PLX_REG_CTRL_ABORT BIT(1)
#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2)
#define PLX_REG_CTRL_START BIT(3)
#define PLX_REG_CTRL_RING_STOP_MODE BIT(4)
#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5)
#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5)
#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5)
#define PLX_REG_CTRL_DESC_INVALID BIT(8)
#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9)
#define PLX_REG_CTRL_ABORT_DONE BIT(10)
#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12)
#define PLX_REG_CTRL_IN_PROGRESS BIT(30)

#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \
                                PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
                                PLX_REG_CTRL_ABORT_DONE | \
                                PLX_REG_CTRL_IMM_PAUSE_DONE)

#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \
                                PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
                                PLX_REG_CTRL_START | \
                                PLX_REG_CTRL_RESET_VAL)

#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7

#define PLX_REG_INTR_CRTL_ERROR_EN BIT(0)
#define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1)
#define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3)
#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4)
#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5)

#define PLX_REG_INTR_STATUS_ERROR BIT(0)
#define PLX_REG_INTR_STATUS_INV_DESC BIT(1)
#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2)
#define PLX_REG_INTR_CRTL_ABORT_DONE BIT(3)

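/*
 * Layout of an in-memory hardware descriptor. flags_and_size carries the
 * transfer length (PLX_DESC_SIZE_MASK) plus the valid/interrupt flags on
 * submission; with write-back enabled the engine rewrites the word with the
 * completion status (PLX_DESC_WB_*). Addresses are split into 32-bit low and
 * 16-bit high halves, matching the 48-bit DMA mask requested in probe.
 */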
struct plx_dma_hw_std_desc {
        __le32 flags_and_size;
        __le16 dst_addr_hi;
        __le16 src_addr_hi;
        __le32 dst_addr_lo;
        __le32 src_addr_lo;
};

#define PLX_DESC_SIZE_MASK 0x7ffffff
#define PLX_DESC_FLAG_VALID BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30)

#define PLX_DESC_WB_SUCCESS BIT(30)
#define PLX_DESC_WB_RD_FAIL BIT(29)
#define PLX_DESC_WB_WR_FAIL BIT(28)

#define PLX_DMA_RING_COUNT 2048

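/*
 * Software descriptor: wraps the dmaengine transaction descriptor around a
 * pointer into the hardware ring. orig_size is kept so a residue can be
 * reported if a transfer completes short or is aborted.
 */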
struct plx_dma_desc {
        struct dma_async_tx_descriptor txd;
        struct plx_dma_hw_std_desc *hw;
        u32 orig_size;
};

struct plx_dma_dev {
        struct dma_device dma_dev;
        struct dma_chan dma_chan;
        struct pci_dev __rcu *pdev;
        void __iomem *bar;
        struct tasklet_struct desc_task;

        spinlock_t ring_lock;
        bool ring_active;
        int head;
        int tail;
        struct plx_dma_hw_std_desc *hw_ring;
        dma_addr_t hw_ring_dma;
        struct plx_dma_desc **desc_ring;
};

static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
        return container_of(c, struct plx_dma_dev, dma_chan);
}

static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct plx_dma_desc, txd);
}

static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
{
        return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
}

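/*
 * Walk the ring from tail to head and complete every descriptor the hardware
 * has finished with (valid bit cleared by write-back), reporting residue and
 * success/read-fail/write-fail status through the dmaengine callback.
 */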
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
        struct dmaengine_result res;
        struct plx_dma_desc *desc;
        u32 flags;

        spin_lock(&plxdev->ring_lock);

        while (plxdev->tail != plxdev->head) {
                desc = plx_dma_get_desc(plxdev, plxdev->tail);

                flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

                if (flags & PLX_DESC_FLAG_VALID)
                        break;

                res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

                if (flags & PLX_DESC_WB_SUCCESS)
                        res.result = DMA_TRANS_NOERROR;
                else if (flags & PLX_DESC_WB_WR_FAIL)
                        res.result = DMA_TRANS_WRITE_FAILED;
                else
                        res.result = DMA_TRANS_READ_FAILED;

                dma_cookie_complete(&desc->txd);
                dma_descriptor_unmap(&desc->txd);
                dmaengine_desc_get_callback_invoke(&desc->txd, &res);
                desc->txd.callback = NULL;
                desc->txd.callback_result = NULL;

                plxdev->tail++;
        }

        spin_unlock(&plxdev->ring_lock);
}

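/*
 * Complete whatever the hardware already finished, then fail every remaining
 * outstanding descriptor with DMA_TRANS_ABORTED. Called only after the ring
 * has been marked inactive and the engine stopped.
 */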
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
        struct dmaengine_result res;
        struct plx_dma_desc *desc;

        plx_dma_process_desc(plxdev);

        spin_lock_bh(&plxdev->ring_lock);

        while (plxdev->tail != plxdev->head) {
                desc = plx_dma_get_desc(plxdev, plxdev->tail);

                res.residue = desc->orig_size;
                res.result = DMA_TRANS_ABORTED;

                dma_cookie_complete(&desc->txd);
                dma_descriptor_unmap(&desc->txd);
                dmaengine_desc_get_callback_invoke(&desc->txd, &res);
                desc->txd.callback = NULL;
                desc->txd.callback_result = NULL;

                plxdev->tail++;
        }

        spin_unlock_bh(&plxdev->ring_lock);
}

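/*
 * Request a graceful pause, poll up to a second for the pause-done bit, then
 * clear the descriptor ring registers. The caller must guarantee the PCI
 * device is still present (see plx_dma_stop() for the RCU-protected wrapper).
 */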
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        u32 val;

        val = readl(plxdev->bar + PLX_REG_CTRL);
        if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
                return;

        writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
               plxdev->bar + PLX_REG_CTRL);

        while (!time_after(jiffies, timeout)) {
                val = readl(plxdev->bar + PLX_REG_CTRL);
                if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
                        break;

                cpu_relax();
        }

        if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
                dev_err(plxdev->dma_dev.dev,
                        "Timeout waiting for graceful pause!\n");

        writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
               plxdev->bar + PLX_REG_CTRL);

        writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
        writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
        writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
        writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}

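/*
 * Stop the engine only if the PCI device has not already been removed; the
 * RCU read section keeps the remove path from tearing the device down while
 * the MMIO accesses in __plx_dma_stop() are in flight.
 */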
static void plx_dma_stop(struct plx_dma_dev *plxdev)
{
        rcu_read_lock();
        if (!rcu_dereference(plxdev->pdev)) {
                rcu_read_unlock();
                return;
        }

        __plx_dma_stop(plxdev);

        rcu_read_unlock();
}

static void plx_dma_desc_task(struct tasklet_struct *t)
{
        struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);

        plx_dma_process_desc(plxdev);
}

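/*
 * Prepare a memcpy descriptor at the head of the ring. On success this
 * returns with ring_lock still held (note the __acquires annotation); the
 * lock is released in plx_dma_tx_submit() so that prep and submit are atomic
 * with respect to other submitters.
 */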
static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
                dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
                unsigned long flags)
        __acquires(plxdev->ring_lock)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
        struct plx_dma_desc *plxdesc;

        spin_lock_bh(&plxdev->ring_lock);
        if (!plxdev->ring_active)
                goto err_unlock;

        if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
                goto err_unlock;

        if (len > PLX_DESC_SIZE_MASK)
                goto err_unlock;

        plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
        plxdev->head++;

        plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
        plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
        plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
        plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));

        plxdesc->orig_size = len;

        if (flags & DMA_PREP_INTERRUPT)
                len |= PLX_DESC_FLAG_INT_WHEN_DONE;

        plxdesc->hw->flags_and_size = cpu_to_le32(len);
        plxdesc->txd.flags = flags;

        /* return with the lock held; it will be released in tx_submit */

        return &plxdesc->txd;

err_unlock:
        /*
         * Keep sparse happy by restoring an even lock count on
         * this lock.
         */
        __acquire(plxdev->ring_lock);

        spin_unlock_bh(&plxdev->ring_lock);
        return NULL;
}

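/*
 * Second half of the prep/submit pair: assign the cookie, make sure the
 * descriptor body is globally visible, and only then set the valid bit that
 * hands the descriptor to the hardware. Releases the ring_lock taken in
 * plx_dma_prep_memcpy().
 */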
static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
        __releases(plxdev->ring_lock)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
        struct plx_dma_desc *plxdesc = to_plx_desc(desc);
        dma_cookie_t cookie;

        cookie = dma_cookie_assign(desc);

        /*
         * Ensure the descriptor updates are visible to the hardware
         * before setting the valid bit.
         */
        wmb();

        plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);

        spin_unlock_bh(&plxdev->ring_lock);

        return cookie;
}

static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        plx_dma_process_desc(plxdev);

        return dma_cookie_status(chan, cookie, txstate);
}

static void plx_dma_issue_pending(struct dma_chan *chan)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);

        rcu_read_lock();
        if (!rcu_dereference(plxdev->pdev)) {
                rcu_read_unlock();
                return;
        }

        /*
         * Ensure the valid bits are visible before starting the
         * DMA engine.
         */
        wmb();

        writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);

        rcu_read_unlock();
}

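/*
 * Interrupt handler: schedule the completion tasklet for finished
 * descriptors while the ring is still active, then acknowledge the interrupt
 * by writing the status bits back.
 */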
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
        struct plx_dma_dev *plxdev = devid;
        u32 status;

        status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

        if (!status)
                return IRQ_NONE;

        if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
                tasklet_schedule(&plxdev->desc_task);

        writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

        return IRQ_HANDLED;
}

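/*
 * Allocate the array of software descriptors and bind each one to its slot
 * in the coherent hardware ring.
 */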
static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
        struct plx_dma_desc *desc;
        int i;

        plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
                                    sizeof(*plxdev->desc_ring), GFP_KERNEL);
        if (!plxdev->desc_ring)
                return -ENOMEM;

        for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc)
                        goto free_and_exit;

                dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
                desc->txd.tx_submit = plx_dma_tx_submit;
                desc->hw = &plxdev->hw_ring[i];

                plxdev->desc_ring[i] = desc;
        }

        return 0;

free_and_exit:
        for (i = 0; i < PLX_DMA_RING_COUNT; i++)
                kfree(plxdev->desc_ring[i]);
        kfree(plxdev->desc_ring);
        return -ENOMEM;
}

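/*
 * Channel setup: allocate the coherent hardware ring and the software
 * descriptors, then program the ring address, size, and prefetch limit into
 * the engine. Returns the number of descriptors available on success.
 */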
static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
        size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
        int rc;

        plxdev->head = plxdev->tail = 0;
        plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
                                             &plxdev->hw_ring_dma, GFP_KERNEL);
        if (!plxdev->hw_ring)
                return -ENOMEM;

        rc = plx_dma_alloc_desc(plxdev);
        if (rc)
                goto out_free_hw_ring;

        rcu_read_lock();
        if (!rcu_dereference(plxdev->pdev)) {
                rcu_read_unlock();
                rc = -ENODEV;
                goto out_free_hw_ring;
        }

        writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
        writel(lower_32_bits(plxdev->hw_ring_dma),
               plxdev->bar + PLX_REG_DESC_RING_ADDR);
        writel(upper_32_bits(plxdev->hw_ring_dma),
               plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
        writel(lower_32_bits(plxdev->hw_ring_dma),
               plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
        writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
        writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);

        plxdev->ring_active = true;

        rcu_read_unlock();

        return PLX_DMA_RING_COUNT;

out_free_hw_ring:
        dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
                          plxdev->hw_ring_dma);
        return rc;
}

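/*
 * Channel teardown: deactivate the ring, quiesce the engine, the interrupt
 * handler, and the tasklet, abort anything still outstanding, and free both
 * the software descriptors and the coherent hardware ring.
 */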
static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
        struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
        size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
        struct pci_dev *pdev;
        int irq = -1;
        int i;

        spin_lock_bh(&plxdev->ring_lock);
        plxdev->ring_active = false;
        spin_unlock_bh(&plxdev->ring_lock);

        plx_dma_stop(plxdev);

        rcu_read_lock();
        pdev = rcu_dereference(plxdev->pdev);
        if (pdev)
                irq = pci_irq_vector(pdev, 0);
        rcu_read_unlock();

        if (irq > 0)
                synchronize_irq(irq);

        tasklet_kill(&plxdev->desc_task);

        plx_dma_abort_desc(plxdev);

        for (i = 0; i < PLX_DMA_RING_COUNT; i++)
                kfree(plxdev->desc_ring[i]);

        kfree(plxdev->desc_ring);
        dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
                          plxdev->hw_ring_dma);
}

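/*
 * device_release callback: invoked once the dmaengine core has dropped its
 * last reference to the dma_device, at which point it is safe to drop the
 * PCI device reference taken in plx_dma_create() and free the driver state.
 */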
static void plx_dma_release(struct dma_device *dma_dev)
{
        struct plx_dma_dev *plxdev =
                container_of(dma_dev, struct plx_dma_dev, dma_dev);

        put_device(dma_dev->dev);
        kfree(plxdev);
}

static int plx_dma_create(struct pci_dev *pdev)
{
        struct plx_dma_dev *plxdev;
        struct dma_device *dma;
        struct dma_chan *chan;
        int rc;

        plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
        if (!plxdev)
                return -ENOMEM;

        rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
                         KBUILD_MODNAME, plxdev);
        if (rc)
                goto free_plx;

        spin_lock_init(&plxdev->ring_lock);
        tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

        RCU_INIT_POINTER(plxdev->pdev, pdev);
        plxdev->bar = pcim_iomap_table(pdev)[0];

        dma = &plxdev->dma_dev;
        dma->chancnt = 1;
        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
        dma->dev = get_device(&pdev->dev);

        dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
        dma->device_free_chan_resources = plx_dma_free_chan_resources;
        dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
        dma->device_issue_pending = plx_dma_issue_pending;
        dma->device_tx_status = plx_dma_tx_status;
        dma->device_release = plx_dma_release;

        chan = &plxdev->dma_chan;
        chan->device = dma;
        dma_cookie_init(chan);
        list_add_tail(&chan->device_node, &dma->channels);

        rc = dma_async_device_register(dma);
        if (rc) {
                pci_err(pdev, "Failed to register dma device: %d\n", rc);
                goto put_device;
        }

        pci_set_drvdata(pdev, plxdev);

        return 0;

put_device:
        put_device(&pdev->dev);
        free_irq(pci_irq_vector(pdev, 0), plxdev);
free_plx:
        kfree(plxdev);

        return rc;
}

static int plx_dma_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
        if (rc)
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
        if (rc)
                return rc;

        rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (rc <= 0)
                return rc;

        pci_set_master(pdev);

        rc = plx_dma_create(pdev);
        if (rc)
                goto err_free_irq_vectors;

        pci_info(pdev, "PLX DMA Channel Registered\n");

        return 0;

err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
        return rc;
}

static void plx_dma_remove(struct pci_dev *pdev)
{
        struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

        free_irq(pci_irq_vector(pdev, 0), plxdev);

        /*
         * Clear the pdev pointer and wait for in-flight RCU readers so that
         * the other channel operations stop touching the BAR before it is
         * torn down.
         */
        rcu_assign_pointer(plxdev->pdev, NULL);
        synchronize_rcu();

        spin_lock_bh(&plxdev->ring_lock);
        plxdev->ring_active = false;
        spin_unlock_bh(&plxdev->ring_lock);

        __plx_dma_stop(plxdev);
        plx_dma_abort_desc(plxdev);

        plxdev->bar = NULL;
        dma_async_device_unregister(&plxdev->dma_dev);

        pci_free_irq_vectors(pdev);
}

static const struct pci_device_id plx_dma_pci_tbl[] = {
        {
                .vendor = PCI_VENDOR_ID_PLX,
                .device = 0x87D0,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
                .class = PCI_CLASS_SYSTEM_OTHER << 8,
                .class_mask = 0xFFFFFFFF,
        },
        {0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);

static struct pci_driver plx_dma_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = plx_dma_pci_tbl,
        .probe = plx_dma_probe,
        .remove = plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);