// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR         0x00
#define EDMA_ES         0x04
#define EDMA_ERQ        0x0C
#define EDMA_EEI        0x14
#define EDMA_SERQ       0x1B
#define EDMA_CERQ       0x1A
#define EDMA_SEEI       0x19
#define EDMA_CEEI       0x18
#define EDMA_CINT       0x1F
#define EDMA_CERR       0x1E
#define EDMA_SSRT       0x1D
#define EDMA_CDNE       0x1C
#define EDMA_INTR       0x24
#define EDMA_ERR        0x2C

#define EDMA64_ERQH     0x08
#define EDMA64_EEIH     0x10
#define EDMA64_SERQ     0x18
#define EDMA64_CERQ     0x19
#define EDMA64_SEEI     0x1a
#define EDMA64_CEEI     0x1b
#define EDMA64_CINT     0x1c
#define EDMA64_CERR     0x1d
#define EDMA64_SSRT     0x1e
#define EDMA64_CDNE     0x1f
#define EDMA64_INTH     0x20
#define EDMA64_INTL     0x24
#define EDMA64_ERRH     0x28
#define EDMA64_ERRL     0x2c

#define EDMA_TCD        0x1000

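/*
 * Enable the error interrupt (SEEI) and the peripheral request line
 * (SERQ) for this channel. v1 goes through the edma_writeb() helper,
 * which accounts for the engine's endian model; ColdFire (v2) accesses
 * its peripherals natively big endian, so plain iowrite8() is enough.
 */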
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
    struct edma_regs *regs = &fsl_chan->edma->regs;
    u32 ch = fsl_chan->vchan.chan.chan_id;

    if (fsl_chan->edma->drvdata->version == v1) {
        edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
        edma_writeb(fsl_chan->edma, ch, regs->serq);
    } else {
        /*
         * ColdFire is big endian, and accesses natively
         * big endian I/O peripherals
         */
        iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
        iowrite8(ch, regs->serq);
    }
}

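/*
 * Counterpart of fsl_edma_enable_request(): clear the request enable
 * (CERQ) and the error interrupt enable (CEEI) for this channel.
 */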
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
    struct edma_regs *regs = &fsl_chan->edma->regs;
    u32 ch = fsl_chan->vchan.chan.chan_id;

    if (fsl_chan->edma->drvdata->version == v1) {
        edma_writeb(fsl_chan->edma, ch, regs->cerq);
        edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
    } else {
        /*
         * ColdFire is big endian, and accesses natively
         * big endian I/O peripherals
         */
        iowrite8(ch, regs->cerq);
        iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
    }
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

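/* Program one 8-bit DMAMUX channel-configuration (CHCFG) register. */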
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
               u32 off, u32 slot, bool enable)
{
    u8 val8;

    if (enable)
        val8 = EDMAMUX_CHCFG_ENBL | slot;
    else
        val8 = EDMAMUX_CHCFG_DIS;

    iowrite8(val8, addr + off);
}

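/* Same as mux_configure8(), but for the 32-bit CHCFG layout (v3). */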
static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                u32 off, u32 slot, bool enable)
{
    u32 val;

    if (enable)
        val = EDMAMUX_CHCFG_ENBL << 24 | slot;
    else
        val = EDMAMUX_CHCFG_DIS;

    iowrite32(val, addr + off * 4);
}

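/*
 * Route request source @slot to this channel's DMAMUX input, or
 * disconnect it when @enable is false. On mux_swap hardware the 8-bit
 * mux registers are byte-swapped within each 32-bit word, which the
 * endian_diff[] fixup accounts for.
 */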
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
               unsigned int slot, bool enable)
{
    u32 ch = fsl_chan->vchan.chan.chan_id;
    void __iomem *muxaddr;
    unsigned int chans_per_mux, ch_off;
    int endian_diff[4] = {3, 1, -1, -3};
    u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

    chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
    ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

    if (fsl_chan->edma->drvdata->mux_swap)
        ch_off += endian_diff[ch_off % 4];

    muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
    slot = EDMAMUX_CHCFG_SOURCE(slot);

    if (fsl_chan->edma->drvdata->version == v3)
        mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
    else
        mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

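/*
 * Translate a dma_slave_buswidth into the TCD ATTR source/destination
 * size encoding; unsupported widths fall back to 32 bit.
 */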
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
    switch (addr_width) {
    case 1:
        return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
    case 2:
        return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
    case 4:
        return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
    case 8:
        return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
    default:
        return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
    }
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
    struct fsl_edma_desc *fsl_desc;
    int i;

    fsl_desc = to_fsl_edma_desc(vdesc);
    for (i = 0; i < fsl_desc->n_tcds; i++)
        dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
                  fsl_desc->tcd[i].ptcd);
    kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    unsigned long flags;
    LIST_HEAD(head);

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
    fsl_edma_disable_request(fsl_chan);
    fsl_chan->edesc = NULL;
    fsl_chan->idle = true;
    vchan_get_all_descriptors(&fsl_chan->vchan, &head);
    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
    vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
    return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    unsigned long flags;

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
    if (fsl_chan->edesc) {
        fsl_edma_disable_request(fsl_chan);
        fsl_chan->status = DMA_PAUSED;
        fsl_chan->idle = true;
    }
    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
    return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    unsigned long flags;

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
    if (fsl_chan->edesc) {
        fsl_edma_enable_request(fsl_chan);
        fsl_chan->status = DMA_IN_PROGRESS;
        fsl_chan->idle = false;
    }
    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
    return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
    if (fsl_chan->dma_dir != DMA_NONE)
        dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
                   fsl_chan->dma_dev_addr,
                   fsl_chan->dma_dev_size,
                   fsl_chan->dma_dir, 0);
    fsl_chan->dma_dir = DMA_NONE;
}

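/*
 * Map the device-side address from the channel's dma_slave_config as a
 * DMA resource for @dir, reusing the existing mapping when the
 * direction has not changed since the last call.
 */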
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
                    enum dma_transfer_direction dir)
{
    struct device *dev = fsl_chan->vchan.chan.device->dev;
    enum dma_data_direction dma_dir;
    phys_addr_t addr = 0;
    u32 size = 0;

    switch (dir) {
    case DMA_MEM_TO_DEV:
        dma_dir = DMA_FROM_DEVICE;
        addr = fsl_chan->cfg.dst_addr;
        size = fsl_chan->cfg.dst_maxburst;
        break;
    case DMA_DEV_TO_MEM:
        dma_dir = DMA_TO_DEVICE;
        addr = fsl_chan->cfg.src_addr;
        size = fsl_chan->cfg.src_maxburst;
        break;
    default:
        dma_dir = DMA_NONE;
        break;
    }

    /* Already mapped for this config? */
    if (fsl_chan->dma_dir == dma_dir)
        return true;

    fsl_edma_unprep_slave_dma(fsl_chan);

    fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
    if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
        return false;
    fsl_chan->dma_dev_size = size;
    fsl_chan->dma_dir = dma_dir;

    return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
                 struct dma_slave_config *cfg)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

    memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
    fsl_edma_unprep_slave_dma(fsl_chan);

    return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

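/*
 * Return the number of bytes not yet transferred for @vdesc. For the
 * in-flight descriptor, read the channel's current source (or
 * destination) address from the hardware TCD and locate it in the
 * software TCD list to refine the count.
 */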
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
        struct virt_dma_desc *vdesc, bool in_progress)
{
    struct fsl_edma_desc *edesc = fsl_chan->edesc;
    struct edma_regs *regs = &fsl_chan->edma->regs;
    u32 ch = fsl_chan->vchan.chan.chan_id;
    enum dma_transfer_direction dir = edesc->dirn;
    dma_addr_t cur_addr, dma_addr;
    size_t len, size;
    int i;

    /* calculate the total size in this desc */
    for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
        len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
            * le16_to_cpu(edesc->tcd[i].vtcd->biter);

    if (!in_progress)
        return len;

    if (dir == DMA_MEM_TO_DEV)
        cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
    else
        cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

    /* figure out which TCDs are finished and calculate the residue */
    for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
        size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
            * le16_to_cpu(edesc->tcd[i].vtcd->biter);
        if (dir == DMA_MEM_TO_DEV)
            dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
        else
            dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

        len -= size;
        if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
            len += dma_addr + size - cur_addr;
            break;
        }
    }

    return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    struct virt_dma_desc *vdesc;
    enum dma_status status;
    unsigned long flags;

    status = dma_cookie_status(chan, cookie, txstate);
    if (status == DMA_COMPLETE)
        return status;

    if (!txstate)
        return fsl_chan->status;

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
    vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
    if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
        txstate->residue =
            fsl_edma_desc_residue(fsl_chan, vdesc, true);
    else if (vdesc)
        txstate->residue =
            fsl_edma_desc_residue(fsl_chan, vdesc, false);
    else
        txstate->residue = 0;

    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

    return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

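/* Load a software TCD into this channel's hardware TCD registers. */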
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
                  struct fsl_edma_hw_tcd *tcd)
{
    struct fsl_edma_engine *edma = fsl_chan->edma;
    struct edma_regs *regs = &fsl_chan->edma->regs;
    u32 ch = fsl_chan->vchan.chan.chan_id;
    u16 csr = 0;

    /*
     * TCD parameters are stored in struct fsl_edma_hw_tcd in little
     * endian format. However, the TCD registers must be loaded in big
     * or little endian according to the eDMA engine's endian model,
     * which the edma_write helpers take care of.
     */
    edma_writew(edma, 0,  &regs->tcd[ch].csr);

    edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
    edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

    edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
    edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

    edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
    edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

    edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
    edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
    edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

    edma_writel(edma, (s32)tcd->dlast_sga,
            &regs->tcd[ch].dlast_sga);

    if (fsl_chan->is_sw) {
        csr = le16_to_cpu(tcd->csr);
        csr |= EDMA_TCD_CSR_START;
        tcd->csr = cpu_to_le16(csr);
    }

    edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}

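/*
 * Fill one software TCD. @major_int requests an interrupt on major
 * loop completion, @disable_req clears the request line when the TCD
 * completes, and @enable_sg links this TCD to the next one at
 * @dlast_sga.
 */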
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
               u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
               u16 biter, u16 doff, u32 dlast_sga, bool major_int,
               bool disable_req, bool enable_sg)
{
    u16 csr = 0;

    /*
     * eDMA hardware SGs require the TCDs to be stored in little
     * endian format irrespective of the register endian model.
     * So we store the values in little endian in memory and let
     * fsl_edma_set_tcd_regs() do any swapping when loading the
     * registers.
     */
    tcd->saddr = cpu_to_le32(src);
    tcd->daddr = cpu_to_le32(dst);

    tcd->attr = cpu_to_le16(attr);

    tcd->soff = cpu_to_le16(soff);

    tcd->nbytes = cpu_to_le32(nbytes);
    tcd->slast = cpu_to_le32(slast);

    tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
    tcd->doff = cpu_to_le16(doff);

    tcd->dlast_sga = cpu_to_le32(dlast_sga);

    tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
    if (major_int)
        csr |= EDMA_TCD_CSR_INT_MAJOR;

    if (disable_req)
        csr |= EDMA_TCD_CSR_D_REQ;

    if (enable_sg)
        csr |= EDMA_TCD_CSR_E_SG;

    tcd->csr = cpu_to_le16(csr);
}

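/*
 * Allocate a descriptor with @sg_len hardware TCDs from the channel's
 * DMA pool, unwinding the allocations already made on failure.
 */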
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
        int sg_len)
{
    struct fsl_edma_desc *fsl_desc;
    int i;

    fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
    if (!fsl_desc)
        return NULL;

    fsl_desc->echan = fsl_chan;
    fsl_desc->n_tcds = sg_len;
    for (i = 0; i < sg_len; i++) {
        fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                    GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
        if (!fsl_desc->tcd[i].vtcd)
            goto err;
    }
    return fsl_desc;

err:
    while (--i >= 0)
        dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                fsl_desc->tcd[i].ptcd);
    kfree(fsl_desc);
    return NULL;
}

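/*
 * Prepare a cyclic transfer: one TCD per period, each linked to the
 * next via scatter-gather, with the last TCD pointing back to the
 * first so the transfer loops until terminated.
 */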
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    struct fsl_edma_desc *fsl_desc;
    dma_addr_t dma_buf_next;
    int sg_len, i;
    u32 src_addr, dst_addr, last_sg, nbytes;
    u16 soff, doff, iter;

    if (!is_slave_direction(direction))
        return NULL;

    if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
        return NULL;

    sg_len = buf_len / period_len;
    fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
    if (!fsl_desc)
        return NULL;
    fsl_desc->iscyclic = true;
    fsl_desc->dirn = direction;

    dma_buf_next = dma_addr;
    if (direction == DMA_MEM_TO_DEV) {
        fsl_chan->attr =
            fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
        nbytes = fsl_chan->cfg.dst_addr_width *
            fsl_chan->cfg.dst_maxburst;
    } else {
        fsl_chan->attr =
            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
        nbytes = fsl_chan->cfg.src_addr_width *
            fsl_chan->cfg.src_maxburst;
    }

    iter = period_len / nbytes;

    for (i = 0; i < sg_len; i++) {
        if (dma_buf_next >= dma_addr + buf_len)
            dma_buf_next = dma_addr;

        /* get next sg's physical address */
        last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

        if (direction == DMA_MEM_TO_DEV) {
            src_addr = dma_buf_next;
            dst_addr = fsl_chan->dma_dev_addr;
            soff = fsl_chan->cfg.dst_addr_width;
            doff = 0;
        } else {
            src_addr = fsl_chan->dma_dev_addr;
            dst_addr = dma_buf_next;
            soff = 0;
            doff = fsl_chan->cfg.src_addr_width;
        }

        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
                  fsl_chan->attr, soff, nbytes, 0, iter,
                  iter, doff, last_sg, true, false, true);
        dma_buf_next += period_len;
    }

    return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

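/*
 * Prepare a slave scatter-gather transfer: intermediate TCDs link to
 * their successor via e_sg, while the last TCD raises the major-loop
 * interrupt and disables the request line instead.
 */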
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    struct fsl_edma_desc *fsl_desc;
    struct scatterlist *sg;
    u32 src_addr, dst_addr, last_sg, nbytes;
    u16 soff, doff, iter;
    int i;

    if (!is_slave_direction(direction))
        return NULL;

    if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
        return NULL;

    fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
    if (!fsl_desc)
        return NULL;
    fsl_desc->iscyclic = false;
    fsl_desc->dirn = direction;

    if (direction == DMA_MEM_TO_DEV) {
        fsl_chan->attr =
            fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
        nbytes = fsl_chan->cfg.dst_addr_width *
            fsl_chan->cfg.dst_maxburst;
    } else {
        fsl_chan->attr =
            fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
        nbytes = fsl_chan->cfg.src_addr_width *
            fsl_chan->cfg.src_maxburst;
    }

    for_each_sg(sgl, sg, sg_len, i) {
        if (direction == DMA_MEM_TO_DEV) {
            src_addr = sg_dma_address(sg);
            dst_addr = fsl_chan->dma_dev_addr;
            soff = fsl_chan->cfg.dst_addr_width;
            doff = 0;
        } else {
            src_addr = fsl_chan->dma_dev_addr;
            dst_addr = sg_dma_address(sg);
            soff = 0;
            doff = fsl_chan->cfg.src_addr_width;
        }

        iter = sg_dma_len(sg) / nbytes;
        if (i < sg_len - 1) {
            last_sg = fsl_desc->tcd[(i + 1)].ptcd;
            fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                      dst_addr, fsl_chan->attr, soff,
                      nbytes, 0, iter, iter, doff, last_sg,
                      false, false, true);
        } else {
            last_sg = 0;
            fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                      dst_addr, fsl_chan->attr, soff,
                      nbytes, 0, iter, iter, doff, last_sg,
                      true, true, false);
        }
    }

    return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

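/*
 * Memory-to-memory copy through a single, software-started TCD using
 * 32-byte source and destination bursts.
 */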
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
                             dma_addr_t dma_dst, dma_addr_t dma_src,
                             size_t len, unsigned long flags)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    struct fsl_edma_desc *fsl_desc;

    fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
    if (!fsl_desc)
        return NULL;
    fsl_desc->iscyclic = false;

    fsl_chan->is_sw = true;

    /* Matching copy_align and max_seg_size, one TCD is enough */
    fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
            EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
            32, len, 0, 1, 1, 32, 0, true, true, false);

    return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);

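/* Kick off the next queued descriptor; the vchan lock must be held. */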
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
    struct virt_dma_desc *vdesc;

    lockdep_assert_held(&fsl_chan->vchan.lock);

    vdesc = vchan_next_desc(&fsl_chan->vchan);
    if (!vdesc)
        return;
    fsl_chan->edesc = to_fsl_edma_desc(vdesc);
    fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
    fsl_edma_enable_request(fsl_chan);
    fsl_chan->status = DMA_IN_PROGRESS;
    fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    unsigned long flags;

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

    if (unlikely(fsl_chan->pm_state != RUNNING)) {
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        /* cannot submit due to suspend */
        return;
    }

    if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
        fsl_edma_xfer_desc(fsl_chan);

    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

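/* Create the per-channel pool of 32-byte-aligned hardware TCDs. */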
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

    fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                sizeof(struct fsl_edma_hw_tcd),
                32, 0);
    return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
    struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
    struct fsl_edma_engine *edma = fsl_chan->edma;
    unsigned long flags;
    LIST_HEAD(head);

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
    fsl_edma_disable_request(fsl_chan);
    if (edma->drvdata->dmamuxs)
        fsl_edma_chan_mux(fsl_chan, 0, false);
    fsl_chan->edesc = NULL;
    vchan_get_all_descriptors(&fsl_chan->vchan, &head);
    fsl_edma_unprep_slave_dma(fsl_chan);
    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

    vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
    dma_pool_destroy(fsl_chan->tcd_pool);
    fsl_chan->tcd_pool = NULL;
    fsl_chan->is_sw = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
    struct fsl_edma_chan *chan, *_chan;

    list_for_each_entry_safe(chan, _chan,
                &dmadev->channels, vchan.chan.device_node) {
        list_del(&chan->vchan.chan.device_node);
        tasklet_kill(&chan->vchan.task);
    }
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
 * register offsets differ from those of the 64-channel ColdFire
 * mcf5441x eDMA (here called "v2").
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called in xxx_edma_probe() just after setting the eDMA
 * "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
    edma->regs.cr = edma->membase + EDMA_CR;
    edma->regs.es = edma->membase + EDMA_ES;
    edma->regs.erql = edma->membase + EDMA_ERQ;
    edma->regs.eeil = edma->membase + EDMA_EEI;

    edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_SERQ : EDMA_SERQ);
    edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_CERQ : EDMA_CERQ);
    edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_SEEI : EDMA_SEEI);
    edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_CEEI : EDMA_CEEI);
    edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_CINT : EDMA_CINT);
    edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_CERR : EDMA_CERR);
    edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_SSRT : EDMA_SSRT);
    edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_CDNE : EDMA_CDNE);
    edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_INTL : EDMA_INTR);
    edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
            EDMA64_ERRL : EDMA_ERR);

    if (edma->drvdata->version == v2) {
        edma->regs.erqh = edma->membase + EDMA64_ERQH;
        edma->regs.eeih = edma->membase + EDMA64_EEIH;
        edma->regs.errh = edma->membase + EDMA64_ERRH;
        edma->regs.inth = edma->membase + EDMA64_INTH;
    }

    edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");