// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
         "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
         "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
         "DMA Channel Selection Mask (default: 0x7f = all)");

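/*
 * Usage sketch (an assumption: in mainline kernels this file is linked
 * into the tsi721_mport module, so the parameters are set at load time):
 *
 *   modprobe tsi721_mport dma_desc_per_channel=256 dma_sel=0x0f
 *
 * enlarges each channel's BD ring to 256 descriptors and restricts the
 * driver to DMA channels 0-3.
 */
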
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
    return container_of(chan, struct tsi721_bdma_chan, dchan);
}

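/*
 * The mport's struct dma_device is embedded in struct rio_mport, and the
 * mport's private data points back to the owning struct tsi721_device,
 * so one container_of() plus the ->priv hop recovers the device below.
 */
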
static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
    return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
    return container_of(txd, struct tsi721_tx_desc, txd);
}

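/*
 * Per-channel hardware resources set up below: a ring of bd_num buffer
 * descriptors plus one extra DTYPE3 "link" descriptor that points back
 * to the start of the ring, and a descriptor status FIFO whose depth is
 * rounded up to a power of two (at least TSI721_DMA_MINSTSSZ entries).
 */
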
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
    struct tsi721_dma_desc *bd_ptr;
    struct device *dev = bdma_chan->dchan.device->dev;
    u64     *sts_ptr;
    dma_addr_t  bd_phys;
    dma_addr_t  sts_phys;
    int     sts_size;
#ifdef CONFIG_PCI_MSI
    struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

    tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

    /*
     * Allocate space for DMA descriptors
     * (add an extra element for link descriptor)
     */
    bd_ptr = dma_alloc_coherent(dev,
                    (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                    &bd_phys, GFP_ATOMIC);
    if (!bd_ptr)
        return -ENOMEM;

    bdma_chan->bd_num = bd_num;
    bdma_chan->bd_phys = bd_phys;
    bdma_chan->bd_base = bd_ptr;

    tsi_debug(DMA, &bdma_chan->dchan.dev->device,
          "DMAC%d descriptors @ %p (phys = %pad)",
          bdma_chan->id, bd_ptr, &bd_phys);

    /* Allocate space for descriptor status FIFO */
    sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
                    (bd_num + 1) : TSI721_DMA_MINSTSSZ;
    sts_size = roundup_pow_of_two(sts_size);
    sts_ptr = dma_alloc_coherent(dev,
                     sts_size * sizeof(struct tsi721_dma_sts),
                     &sts_phys, GFP_ATOMIC);
    if (!sts_ptr) {
        /* Free space allocated for DMA descriptors */
        dma_free_coherent(dev,
                  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                  bd_ptr, bd_phys);
        bdma_chan->bd_base = NULL;
        return -ENOMEM;
    }

    bdma_chan->sts_phys = sts_phys;
    bdma_chan->sts_base = sts_ptr;
    bdma_chan->sts_size = sts_size;

    tsi_debug(DMA, &bdma_chan->dchan.dev->device,
        "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
        bdma_chan->id, sts_ptr, &sts_phys, sts_size);

    /* Initialize DMA descriptors ring using added link descriptor */
    bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
    bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
                         TSI721_DMAC_DPTRL_MASK);
    bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

    /* Setup DMA descriptor pointers */
    iowrite32(((u64)bd_phys >> 32),
        bdma_chan->regs + TSI721_DMAC_DPTRH);
    iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
        bdma_chan->regs + TSI721_DMAC_DPTRL);

    /* Setup descriptor status FIFO */
    iowrite32(((u64)sts_phys >> 32),
        bdma_chan->regs + TSI721_DMAC_DSBH);
    iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
        bdma_chan->regs + TSI721_DMAC_DSBL);
    iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
        bdma_chan->regs + TSI721_DMAC_DSSZ);

    /* Clear interrupt bits */
    iowrite32(TSI721_DMAC_INT_ALL,
        bdma_chan->regs + TSI721_DMAC_INT);

    ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
    /* Request interrupt service if we are in MSI-X mode */
    if (priv->flags & TSI721_USING_MSIX) {
        int rc, idx;

        idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

        rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
                 priv->msix[idx].irq_name, (void *)bdma_chan);

        if (rc) {
            tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                  "Unable to get MSI-X for DMAC%d-DONE",
                  bdma_chan->id);
            goto err_out;
        }

        idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

        rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
                priv->msix[idx].irq_name, (void *)bdma_chan);

        if (rc) {
            tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                  "Unable to get MSI-X for DMAC%d-INT",
                  bdma_chan->id);
            free_irq(
                priv->msix[TSI721_VECT_DMA0_DONE +
                        bdma_chan->id].vector,
                (void *)bdma_chan);
        }

err_out:
        if (rc) {
            /* Free space allocated for DMA descriptors */
            dma_free_coherent(dev,
                (bd_num + 1) * sizeof(struct tsi721_dma_desc),
                bd_ptr, bd_phys);
            bdma_chan->bd_base = NULL;

            /* Free space allocated for status descriptors */
            dma_free_coherent(dev,
                sts_size * sizeof(struct tsi721_dma_sts),
                sts_ptr, sts_phys);
            bdma_chan->sts_base = NULL;

            return -EIO;
        }
    }
#endif /* CONFIG_PCI_MSI */

    /* Toggle DMA channel initialization */
    iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
    ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
    bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
    bdma_chan->sts_rdptr = 0;
    udelay(10);

    return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
    u32 ch_stat;
#ifdef CONFIG_PCI_MSI
    struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

    if (!bdma_chan->bd_base)
        return 0;

    /* Check if DMA channel still running */
    ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
    if (ch_stat & TSI721_DMAC_STS_RUN)
        return -EFAULT;

    /* Put DMA channel into init state */
    iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
    if (priv->flags & TSI721_USING_MSIX) {
        free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
                    bdma_chan->id].vector, (void *)bdma_chan);
        free_irq(priv->msix[TSI721_VECT_DMA0_INT +
                    bdma_chan->id].vector, (void *)bdma_chan);
    }
#endif /* CONFIG_PCI_MSI */

    /* Free space allocated for DMA descriptors */
    dma_free_coherent(bdma_chan->dchan.device->dev,
        (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
        bdma_chan->bd_base, bdma_chan->bd_phys);
    bdma_chan->bd_base = NULL;

    /* Free space allocated for status FIFO */
    dma_free_coherent(bdma_chan->dchan.device->dev,
        bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
        bdma_chan->sts_base, bdma_chan->sts_phys);
    bdma_chan->sts_base = NULL;
    return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
    if (enable) {
        /* Clear pending BDMA channel interrupts */
        iowrite32(TSI721_DMAC_INT_ALL,
            bdma_chan->regs + TSI721_DMAC_INT);
        ioread32(bdma_chan->regs + TSI721_DMAC_INT);
        /* Enable BDMA channel interrupts */
        iowrite32(TSI721_DMAC_INT_ALL,
            bdma_chan->regs + TSI721_DMAC_INTE);
    } else {
        /* Disable BDMA channel interrupts */
        iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
        /* Clear pending BDMA channel interrupts */
        iowrite32(TSI721_DMAC_INT_ALL,
            bdma_chan->regs + TSI721_DMAC_INT);
    }
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
    u32 sts;

    sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
    return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

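/*
 * Interrupt handling is split: the entry points below only mask the
 * channel's interrupts and schedule the tasklet; tsi721_dma_tasklet()
 * performs the actual servicing and re-enables interrupts when done.
 */
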
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
    /* Disable BDMA channel interrupts */
    iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
    if (bdma_chan->active)
        tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
    struct tsi721_bdma_chan *bdma_chan = ptr;

    if (bdma_chan->active)
        tasklet_hi_schedule(&bdma_chan->tasklet);
    return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

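/*
 * The write to DWRCNT below acts as a doorbell: the hardware fetches and
 * executes descriptors up to the new write count, i.e. everything queued
 * since the previous wr_count value.
 */
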
/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
    if (!tsi721_dma_is_idle(bdma_chan)) {
        tsi_err(&bdma_chan->dchan.dev->device,
            "DMAC%d Attempt to start non-idle channel",
            bdma_chan->id);
        return;
    }

    if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
        tsi_err(&bdma_chan->dchan.dev->device,
            "DMAC%d Attempt to start DMA with no BDs ready %d",
            bdma_chan->id, task_pid_nr(current));
        return;
    }

    tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
          bdma_chan->id, bdma_chan->wr_count_next,
          task_pid_nr(current));

    iowrite32(bdma_chan->wr_count_next,
        bdma_chan->regs + TSI721_DMAC_DWRCNT);
    ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

    bdma_chan->wr_count = bdma_chan->wr_count_next;
}

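/*
 * Hardware descriptors are filled in two steps: tsi721_desc_fill_init()
 * programs the type, destination and address fields, while the byte count
 * is deferred to tsi721_desc_fill_end() so that physically contiguous SG
 * entries can first be merged into a single descriptor.
 */
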
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
              struct tsi721_dma_desc *bd_ptr,
              struct scatterlist *sg, u32 sys_size)
{
    u64 rio_addr;

    if (!bd_ptr)
        return -EINVAL;

    /* Initialize DMA descriptor */
    bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
                      (desc->rtype << 19) | desc->destid);
    bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
                     (sys_size << 26));
    rio_addr = (desc->rio_addr >> 2) |
                ((u64)(desc->rio_addr_u & 0x3) << 62);
    bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
    bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
    bd_ptr->t1.bufptr_lo = cpu_to_le32(
                    (u64)sg_dma_address(sg) & 0xffffffff);
    bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
    bd_ptr->t1.s_dist = 0;
    bd_ptr->t1.s_size = 0;

    return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
    if (!bd_ptr)
        return -EINVAL;

    /* Update DMA descriptor */
    if (interrupt)
        bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
    bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

    return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
                  struct tsi721_tx_desc *desc)
{
    struct dma_async_tx_descriptor *txd = &desc->txd;
    dma_async_tx_callback callback = txd->callback;
    void *param = txd->callback_param;

    list_move(&desc->desc_node, &bdma_chan->free_list);

    if (callback)
        callback(param);
}

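/*
 * Each descriptor status FIFO entry occupies eight 64-bit words, hence
 * the "* 8" index scaling below; a non-zero word marks a valid entry.
 */
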
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
    u32 srd_ptr;
    u64 *sts_ptr;
    int i, j;

    /* Check and clear descriptor status FIFO entries */
    srd_ptr = bdma_chan->sts_rdptr;
    sts_ptr = bdma_chan->sts_base;
    j = srd_ptr * 8;
    while (sts_ptr[j]) {
        for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
            sts_ptr[j] = 0;

        ++srd_ptr;
        srd_ptr %= bdma_chan->sts_size;
        j = srd_ptr * 8;
    }

    iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
    bdma_chan->sts_rdptr = srd_ptr;
}

/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
    struct dma_chan *dchan = desc->txd.chan;
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
    u32 sys_size;
    u64 rio_addr;
    dma_addr_t next_addr;
    u32 bcount;
    struct scatterlist *sg;
    unsigned int i;
    int err = 0;
    struct tsi721_dma_desc *bd_ptr = NULL;
    u32 idx, rd_idx;
    u32 add_count = 0;
    struct device *ch_dev = &dchan->dev->device;

    if (!tsi721_dma_is_idle(bdma_chan)) {
        tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
            bdma_chan->id);
        return -EIO;
    }

    /*
     * Fill DMA channel's hardware buffer descriptors.
     * (NOTE: RapidIO destination address is limited to 64 bits for now)
     */
    rio_addr = desc->rio_addr;
    next_addr = -1;
    bcount = 0;
    sys_size = dma_to_mport(dchan->device)->sys_size;

    rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
    rd_idx %= (bdma_chan->bd_num + 1);

    idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
    if (idx == bdma_chan->bd_num) {
        /* wrap around link descriptor */
        idx = 0;
        add_count++;
    }

    tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
          bdma_chan->id, rd_idx, idx);

    for_each_sg(desc->sg, sg, desc->sg_len, i) {

        tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
            bdma_chan->id, i, desc->sg_len,
            (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

        if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
            tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
                bdma_chan->id, i);
            err = -EINVAL;
            break;
        }

        /*
         * If this sg entry forms contiguous block with previous one,
         * try to merge it into existing DMA descriptor
         */
        if (next_addr == sg_dma_address(sg) &&
            bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
            /* Adjust byte count of the descriptor */
            bcount += sg_dma_len(sg);
            goto entry_done;
        } else if (next_addr != -1) {
            /* Finalize descriptor using total byte count value */
            tsi721_desc_fill_end(bd_ptr, bcount, 0);
            tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
                  bdma_chan->id, bcount);
        }

        desc->rio_addr = rio_addr;

        if (i && idx == rd_idx) {
            tsi_debug(DMAV, ch_dev,
                  "DMAC%d HW descriptor ring is full @ %d",
                  bdma_chan->id, i);
            desc->sg = sg;
            desc->sg_len -= i;
            break;
        }

        bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
        err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
        if (err) {
            tsi_err(ch_dev, "Failed to build desc: err=%d", err);
            break;
        }

        tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
              bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

        next_addr = sg_dma_address(sg);
        bcount = sg_dma_len(sg);

        add_count++;
        if (++idx == bdma_chan->bd_num) {
            /* wrap around link descriptor */
            idx = 0;
            add_count++;
        }

entry_done:
        if (sg_is_last(sg)) {
            tsi721_desc_fill_end(bd_ptr, bcount, 0);
            tsi_debug(DMAV, ch_dev,
                  "DMAC%d last desc final len: %d",
                  bdma_chan->id, bcount);
            desc->sg_len = 0;
        } else {
            rio_addr += sg_dma_len(sg);
            next_addr += sg_dma_len(sg);
        }
    }

    if (!err)
        bdma_chan->wr_count_next += add_count;

    return err;
}

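/*
 * Note that tsi721_submit_sg() may queue only part of a request: when the
 * BD ring fills up it points desc->sg/desc->sg_len at the unsubmitted
 * remainder, and the completion tasklet resubmits the same transaction
 * until desc->sg_len reaches zero.
 */
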
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
                struct tsi721_tx_desc *desc)
{
    int err;

    tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

    if (!tsi721_dma_is_idle(bdma_chan))
        return;

    /*
     * If there is no data transfer in progress, fetch new descriptor from
     * the pending queue.
     */
    if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
        desc = list_first_entry(&bdma_chan->queue,
                    struct tsi721_tx_desc, desc_node);
        list_del_init(&desc->desc_node);
        bdma_chan->active_tx = desc;
    }

    if (desc) {
        err = tsi721_submit_sg(desc);
        if (!err)
            tsi721_start_dma(bdma_chan);
        else {
            tsi721_dma_tx_err(bdma_chan, desc);
            tsi_debug(DMA, &bdma_chan->dchan.dev->device,
                "DMAC%d ERR: tsi721_submit_sg failed with err=%d",
                bdma_chan->id, err);
        }
    }

    tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
          bdma_chan->id);
}

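/*
 * Tasklet servicing order: latch and clear DMAC_INT, recover an aborted
 * channel by re-running the init sequence, then on DONE either complete
 * the active transaction or resubmit its remaining SG entries.
 */
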
static void tsi721_dma_tasklet(unsigned long data)
{
    struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
    u32 dmac_int, dmac_sts;

    dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
    tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
          bdma_chan->id, dmac_int);
    /* Clear channel interrupts */
    iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

    if (dmac_int & TSI721_DMAC_INT_ERR) {
        int i = 10000;
        struct tsi721_tx_desc *desc;

        desc = bdma_chan->active_tx;
        dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
        tsi_err(&bdma_chan->dchan.dev->device,
            "DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
            bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

        /* Re-initialize DMA channel if possible */

        if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
            goto err_out;

        tsi721_clr_stat(bdma_chan);

        spin_lock(&bdma_chan->lock);

        /* Put DMA channel into init state */
        iowrite32(TSI721_DMAC_CTL_INIT,
              bdma_chan->regs + TSI721_DMAC_CTL);
        do {
            udelay(1);
            dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
            i--;
        } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

        if (dmac_sts & TSI721_DMAC_STS_ABORT) {
            tsi_err(&bdma_chan->dchan.dev->device,
                "Failed to re-initiate DMAC%d", bdma_chan->id);
            spin_unlock(&bdma_chan->lock);
            goto err_out;
        }

        /* Setup DMA descriptor pointers */
        iowrite32(((u64)bdma_chan->bd_phys >> 32),
            bdma_chan->regs + TSI721_DMAC_DPTRH);
        iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
            bdma_chan->regs + TSI721_DMAC_DPTRL);

        /* Setup descriptor status FIFO */
        iowrite32(((u64)bdma_chan->sts_phys >> 32),
            bdma_chan->regs + TSI721_DMAC_DSBH);
        iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
            bdma_chan->regs + TSI721_DMAC_DSBL);
        iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
            bdma_chan->regs + TSI721_DMAC_DSSZ);

        /* Clear interrupt bits */
        iowrite32(TSI721_DMAC_INT_ALL,
            bdma_chan->regs + TSI721_DMAC_INT);

        ioread32(bdma_chan->regs + TSI721_DMAC_INT);

        bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
        bdma_chan->sts_rdptr = 0;
        udelay(10);

        desc = bdma_chan->active_tx;
        desc->status = DMA_ERROR;
        dma_cookie_complete(&desc->txd);
        list_add(&desc->desc_node, &bdma_chan->free_list);
        bdma_chan->active_tx = NULL;
        if (bdma_chan->active)
            tsi721_advance_work(bdma_chan, NULL);
        spin_unlock(&bdma_chan->lock);
    }

    if (dmac_int & TSI721_DMAC_INT_STFULL) {
        tsi_err(&bdma_chan->dchan.dev->device,
            "DMAC%d descriptor status FIFO is full",
            bdma_chan->id);
    }

    if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
        struct tsi721_tx_desc *desc;

        tsi721_clr_stat(bdma_chan);
        spin_lock(&bdma_chan->lock);
        desc = bdma_chan->active_tx;

        if (desc->sg_len == 0) {
            dma_async_tx_callback callback = NULL;
            void *param = NULL;

            desc->status = DMA_COMPLETE;
            dma_cookie_complete(&desc->txd);
            if (desc->txd.flags & DMA_PREP_INTERRUPT) {
                callback = desc->txd.callback;
                param = desc->txd.callback_param;
            }
            list_add(&desc->desc_node, &bdma_chan->free_list);
            bdma_chan->active_tx = NULL;
            if (bdma_chan->active)
                tsi721_advance_work(bdma_chan, NULL);
            spin_unlock(&bdma_chan->lock);
            if (callback)
                callback(param);
        } else {
            if (bdma_chan->active)
                tsi721_advance_work(bdma_chan,
                            bdma_chan->active_tx);
            spin_unlock(&bdma_chan->lock);
        }
    }
err_out:
    /* Re-Enable BDMA channel interrupts */
    iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
    struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
    dma_cookie_t cookie;

    /* Check if the descriptor is detached from any lists */
    if (!list_empty(&desc->desc_node)) {
        tsi_err(&bdma_chan->dchan.dev->device,
            "DMAC%d wrong state of descriptor %p",
            bdma_chan->id, txd);
        return -EIO;
    }

    spin_lock_bh(&bdma_chan->lock);

    if (!bdma_chan->active) {
        spin_unlock_bh(&bdma_chan->lock);
        return -ENODEV;
    }

    cookie = dma_cookie_assign(txd);
    desc->status = DMA_IN_PROGRESS;
    list_add_tail(&desc->desc_node, &bdma_chan->queue);
    tsi721_advance_work(bdma_chan, NULL);

    spin_unlock_bh(&bdma_chan->lock);
    return cookie;
}

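/*
 * Channel resources (BD ring, status FIFO and the software TX descriptor
 * queue) are allocated lazily on the first dmaengine allocation call and
 * released again in tsi721_free_chan_resources().
 */
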
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
    struct tsi721_tx_desc *desc;
    int i;

    tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

    if (bdma_chan->bd_base)
        return dma_txqueue_sz;

    /* Initialize BDMA channel */
    if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
        tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
            bdma_chan->id);
        return -ENODEV;
    }

    /* Allocate queue of transaction descriptors */
    desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
            GFP_ATOMIC);
    if (!desc) {
        tsi721_bdma_ch_free(bdma_chan);
        return -ENOMEM;
    }

    bdma_chan->tx_desc = desc;

    for (i = 0; i < dma_txqueue_sz; i++) {
        dma_async_tx_descriptor_init(&desc[i].txd, dchan);
        desc[i].txd.tx_submit = tsi721_tx_submit;
        desc[i].txd.flags = DMA_CTRL_ACK;
        list_add(&desc[i].desc_node, &bdma_chan->free_list);
    }

    dma_cookie_init(dchan);

    bdma_chan->active = true;
    tsi721_bdma_interrupt_enable(bdma_chan, 1);

    return dma_txqueue_sz;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
    struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
    if (priv->flags & TSI721_USING_MSIX) {
        synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
                       bdma_chan->id].vector);
        synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
                       bdma_chan->id].vector);
    } else
#endif
    synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

    tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

    if (!bdma_chan->bd_base)
        return;

    tsi721_bdma_interrupt_enable(bdma_chan, 0);
    bdma_chan->active = false;
    tsi721_sync_dma_irq(bdma_chan);
    tasklet_kill(&bdma_chan->tasklet);
    INIT_LIST_HEAD(&bdma_chan->free_list);
    kfree(bdma_chan->tx_desc);
    tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                 struct dma_tx_state *txstate)
{
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
    enum dma_status status;

    spin_lock_bh(&bdma_chan->lock);
    status = dma_cookie_status(dchan, cookie, txstate);
    spin_unlock_bh(&bdma_chan->lock);
    return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

    tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

    spin_lock_bh(&bdma_chan->lock);
    if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
        tsi721_advance_work(bdma_chan, NULL);
    }
    spin_unlock_bh(&bdma_chan->lock);
}

static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
            struct scatterlist *sgl, unsigned int sg_len,
            enum dma_transfer_direction dir, unsigned long flags,
            void *tinfo)
{
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
    struct tsi721_tx_desc *desc;
    struct rio_dma_ext *rext = tinfo;
    enum dma_rtype rtype;
    struct dma_async_tx_descriptor *txd = NULL;

    if (!sgl || !sg_len) {
        tsi_err(&dchan->dev->device, "DMAC%d No SG list",
            bdma_chan->id);
        return ERR_PTR(-EINVAL);
    }

    tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
          (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

    if (dir == DMA_DEV_TO_MEM)
        rtype = NREAD;
    else if (dir == DMA_MEM_TO_DEV) {
        switch (rext->wr_type) {
        case RDW_ALL_NWRITE:
            rtype = ALL_NWRITE;
            break;
        case RDW_ALL_NWRITE_R:
            rtype = ALL_NWRITE_R;
            break;
        case RDW_LAST_NWRITE_R:
        default:
            rtype = LAST_NWRITE_R;
            break;
        }
    } else {
        tsi_err(&dchan->dev->device,
            "DMAC%d Unsupported DMA direction option",
            bdma_chan->id);
        return ERR_PTR(-EINVAL);
    }

    spin_lock_bh(&bdma_chan->lock);

    if (!list_empty(&bdma_chan->free_list)) {
        desc = list_first_entry(&bdma_chan->free_list,
                struct tsi721_tx_desc, desc_node);
        list_del_init(&desc->desc_node);
        desc->destid = rext->destid;
        desc->rio_addr = rext->rio_addr;
        desc->rio_addr_u = 0;
        desc->rtype = rtype;
        desc->sg_len    = sg_len;
        desc->sg    = sgl;
        txd     = &desc->txd;
        txd->flags  = flags;
    }

    spin_unlock_bh(&bdma_chan->lock);

    if (!txd) {
        tsi_debug(DMA, &dchan->dev->device,
              "DMAC%d free TXD is not available", bdma_chan->id);
        return ERR_PTR(-EBUSY);
    }

    return txd;
}

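/*
 * A hedged sketch of the typical client flow (rio_request_mport_dma() and
 * rio_dma_prep_xfer() live in drivers/rapidio/rio.c; the latter packages
 * the destination ID and a struct rio_dma_data into the tinfo argument
 * consumed above):
 *
 *   chan = rio_request_mport_dma(mport);
 *   txd = rio_dma_prep_xfer(chan, destid, &rio_data,
 *                           DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *   cookie = dmaengine_submit(txd);
 *   dma_async_issue_pending(chan);
 */
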
static int tsi721_terminate_all(struct dma_chan *dchan)
{
    struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
    struct tsi721_tx_desc *desc, *_d;
    LIST_HEAD(list);

    tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

    spin_lock_bh(&bdma_chan->lock);

    bdma_chan->active = false;

    while (!tsi721_dma_is_idle(bdma_chan)) {

        udelay(5);
#if (0)
        /* make sure to stop the transfer */
        iowrite32(TSI721_DMAC_CTL_SUSP,
              bdma_chan->regs + TSI721_DMAC_CTL);

        /* Wait until DMA channel stops */
        do {
            dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
        } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
    }

    if (bdma_chan->active_tx)
        list_add(&bdma_chan->active_tx->desc_node, &list);
    list_splice_init(&bdma_chan->queue, &list);

    list_for_each_entry_safe(desc, _d, &list, desc_node)
        tsi721_dma_tx_err(bdma_chan, desc);

    spin_unlock_bh(&bdma_chan->lock);

    return 0;
}

static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
    if (!bdma_chan->active)
        return;
    spin_lock_bh(&bdma_chan->lock);
    if (!tsi721_dma_is_idle(bdma_chan)) {
        int timeout = 100000;

        /* stop the transfer in progress */
        iowrite32(TSI721_DMAC_CTL_SUSP,
              bdma_chan->regs + TSI721_DMAC_CTL);

        /* Wait until DMA channel stops */
        while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
            udelay(1);
    }

    spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
    int i;

    for (i = 0; i < TSI721_DMA_MAXCH; i++) {
        if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
            tsi721_dma_stop(&priv->bdma[i]);
    }
}

int tsi721_register_dma(struct tsi721_device *priv)
{
    int i;
    int nr_channels = 0;
    int err;
    struct rio_mport *mport = &priv->mport;

    INIT_LIST_HEAD(&mport->dma.channels);

    for (i = 0; i < TSI721_DMA_MAXCH; i++) {
        struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

        if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
            continue;

        bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

        bdma_chan->dchan.device = &mport->dma;
        bdma_chan->dchan.cookie = 1;
        bdma_chan->dchan.chan_id = i;
        bdma_chan->id = i;
        bdma_chan->active = false;

        spin_lock_init(&bdma_chan->lock);

        bdma_chan->active_tx = NULL;
        INIT_LIST_HEAD(&bdma_chan->queue);
        INIT_LIST_HEAD(&bdma_chan->free_list);

        tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
                 (unsigned long)bdma_chan);
        list_add_tail(&bdma_chan->dchan.device_node,
                  &mport->dma.channels);
        nr_channels++;
    }

    mport->dma.chancnt = nr_channels;
    dma_cap_zero(mport->dma.cap_mask);
    dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
    dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

    mport->dma.dev = &priv->pdev->dev;
    mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
    mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
    mport->dma.device_tx_status = tsi721_tx_status;
    mport->dma.device_issue_pending = tsi721_issue_pending;
    mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
    mport->dma.device_terminate_all = tsi721_terminate_all;

    err = dma_async_device_register(&mport->dma);
    if (err)
        tsi_err(&priv->pdev->dev, "Failed to register DMA device");

    return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
    struct rio_mport *mport = &priv->mport;
    struct dma_chan *chan, *_c;
    struct tsi721_bdma_chan *bdma_chan;

    tsi721_dma_stop_all(priv);
    dma_async_device_unregister(&mport->dma);

    list_for_each_entry_safe(chan, _c, &mport->dma.channels,
                    device_node) {
        bdma_chan = to_tsi721_chan(chan);
        if (bdma_chan->active) {
            tsi721_bdma_interrupt_enable(bdma_chan, 0);
            bdma_chan->active = false;
            tsi721_sync_dma_irq(bdma_chan);
            tasklet_kill(&bdma_chan->tasklet);
            INIT_LIST_HEAD(&bdma_chan->free_list);
            kfree(bdma_chan->tx_desc);
            tsi721_bdma_ch_free(bdma_chan);
        }

        list_del(&chan->device_node);
    }
}