0001 // SPDX-License-Identifier: GPL-2.0+
0002 //
0003 // drivers/dma/imx-dma.c
0004 //
0005 // This file contains a driver for the Freescale i.MX DMA engine
0006 // found on i.MX1/21/27
0007 //
0008 // Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
0009 // Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
0010 
0011 #include <linux/err.h>
0012 #include <linux/init.h>
0013 #include <linux/types.h>
0014 #include <linux/mm.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/spinlock.h>
0017 #include <linux/device.h>
0018 #include <linux/dma-mapping.h>
0019 #include <linux/slab.h>
0020 #include <linux/platform_device.h>
0021 #include <linux/clk.h>
0022 #include <linux/dmaengine.h>
0023 #include <linux/module.h>
0024 #include <linux/of_device.h>
0025 #include <linux/of_dma.h>
0026 
0027 #include <asm/irq.h>
0028 #include <linux/dma/imx-dma.h>
0029 
0030 #include "dmaengine.h"
0031 #define IMXDMA_MAX_CHAN_DESCRIPTORS 16
0032 #define IMX_DMA_CHANNELS  16
0033 
0034 #define IMX_DMA_2D_SLOTS    2
0035 #define IMX_DMA_2D_SLOT_A   0
0036 #define IMX_DMA_2D_SLOT_B   1
0037 
0038 #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
0039 #define IMX_DMA_MEMSIZE_32  (0 << 4)
0040 #define IMX_DMA_MEMSIZE_8   (1 << 4)
0041 #define IMX_DMA_MEMSIZE_16  (2 << 4)
0042 #define IMX_DMA_TYPE_LINEAR (0 << 10)
0043 #define IMX_DMA_TYPE_2D     (1 << 10)
0044 #define IMX_DMA_TYPE_FIFO   (2 << 10)
0045 
0046 #define IMX_DMA_ERR_BURST     (1 << 0)
0047 #define IMX_DMA_ERR_REQUEST   (1 << 1)
0048 #define IMX_DMA_ERR_TRANSFER  (1 << 2)
0049 #define IMX_DMA_ERR_BUFFER    (1 << 3)
0050 #define IMX_DMA_ERR_TIMEOUT   (1 << 4)
0051 
0052 #define DMA_DCR     0x00        /* Control Register */
0053 #define DMA_DISR    0x04        /* Interrupt status Register */
0054 #define DMA_DIMR    0x08        /* Interrupt mask Register */
0055 #define DMA_DBTOSR  0x0c        /* Burst timeout status Register */
0056 #define DMA_DRTOSR  0x10        /* Request timeout Register */
0057 #define DMA_DSESR   0x14        /* Transfer Error Status Register */
0058 #define DMA_DBOSR   0x18        /* Buffer overflow status Register */
0059 #define DMA_DBTOCR  0x1c        /* Burst timeout control Register */
0060 #define DMA_WSRA    0x40        /* W-Size Register A */
0061 #define DMA_XSRA    0x44        /* X-Size Register A */
0062 #define DMA_YSRA    0x48        /* Y-Size Register A */
0063 #define DMA_WSRB    0x4c        /* W-Size Register B */
0064 #define DMA_XSRB    0x50        /* X-Size Register B */
0065 #define DMA_YSRB    0x54        /* Y-Size Register B */
0066 #define DMA_SAR(x)  (0x80 + ((x) << 6)) /* Source Address Registers */
0067 #define DMA_DAR(x)  (0x84 + ((x) << 6)) /* Destination Address Registers */
0068 #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
0069 #define DMA_CCR(x)  (0x8c + ((x) << 6)) /* Control Registers */
0070 #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
0071 #define DMA_BLR(x)  (0x94 + ((x) << 6)) /* Burst length Registers */
0072 #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
0073 #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
0074 #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
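/*
 * Each channel owns a 0x40-byte register window starting at offset 0x80,
 * which is why the per-channel macros above shift the channel number left
 * by 6 ((x) << 6 == x * 0x40).  For example, channel 2's control register
 * sits at 0x8c + (2 << 6) = 0x10c.
 */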
0075 
0076 #define DCR_DRST           (1<<1)
0077 #define DCR_DEN            (1<<0)
0078 #define DBTOCR_EN          (1<<15)
0079 #define DBTOCR_CNT(x)      ((x) & 0x7fff)
0080 #define CNTR_CNT(x)        ((x) & 0xffffff)
0081 #define CCR_ACRPT          (1<<14)
0082 #define CCR_DMOD_LINEAR    (0x0 << 12)
0083 #define CCR_DMOD_2D        (0x1 << 12)
0084 #define CCR_DMOD_FIFO      (0x2 << 12)
0085 #define CCR_DMOD_EOBFIFO   (0x3 << 12)
0086 #define CCR_SMOD_LINEAR    (0x0 << 10)
0087 #define CCR_SMOD_2D        (0x1 << 10)
0088 #define CCR_SMOD_FIFO      (0x2 << 10)
0089 #define CCR_SMOD_EOBFIFO   (0x3 << 10)
0090 #define CCR_MDIR_DEC       (1<<9)
0091 #define CCR_MSEL_B         (1<<8)
0092 #define CCR_DSIZ_32        (0x0 << 6)
0093 #define CCR_DSIZ_8         (0x1 << 6)
0094 #define CCR_DSIZ_16        (0x2 << 6)
0095 #define CCR_SSIZ_32        (0x0 << 4)
0096 #define CCR_SSIZ_8         (0x1 << 4)
0097 #define CCR_SSIZ_16        (0x2 << 4)
0098 #define CCR_REN            (1<<3)
0099 #define CCR_RPT            (1<<2)
0100 #define CCR_FRC            (1<<1)
0101 #define CCR_CEN            (1<<0)
0102 #define RTOR_EN            (1<<15)
0103 #define RTOR_CLK           (1<<14)
0104 #define RTOR_PSC           (1<<13)
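/*
 * The IMX_DMA_MEMSIZE_* and IMX_DMA_TYPE_* values above line up with the
 * CCR source-side fields (SSIZ at bits 4-5, SMOD at bits 10-11); shifting
 * them left by 2 turns them into the destination-side fields (DSIZ/DMOD),
 * which is how imxdma_config_write() and imxdma_xfer_desc() compose CCR
 * values.  For a 16-bit peripheral read (DMA_DEV_TO_MEM) the result is
 * equivalent to:
 *
 *	CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_REN
 */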
0105 
0106 enum imxdma_prep_type {
0107     IMXDMA_DESC_MEMCPY,
0108     IMXDMA_DESC_INTERLEAVED,
0109     IMXDMA_DESC_SLAVE_SG,
0110     IMXDMA_DESC_CYCLIC,
0111 };
0112 
0113 struct imx_dma_2d_config {
0114     u16     xsr;
0115     u16     ysr;
0116     u16     wsr;
0117     int     count;
0118 };
0119 
0120 struct imxdma_desc {
0121     struct list_head        node;
0122     struct dma_async_tx_descriptor  desc;
0123     enum dma_status         status;
0124     dma_addr_t          src;
0125     dma_addr_t          dest;
0126     size_t              len;
0127     enum dma_transfer_direction direction;
0128     enum imxdma_prep_type       type;
0129     /* For memcpy and interleaved */
0130     unsigned int            config_port;
0131     unsigned int            config_mem;
0132     /* For interleaved transfers */
0133     unsigned int            x;
0134     unsigned int            y;
0135     unsigned int            w;
0136     /* For slave sg and cyclic */
0137     struct scatterlist      *sg;
0138     unsigned int            sgcount;
0139 };
0140 
0141 struct imxdma_channel {
0142     int             hw_chaining;
0143     struct timer_list       watchdog;
0144     struct imxdma_engine        *imxdma;
0145     unsigned int            channel;
0146 
0147     struct tasklet_struct       dma_tasklet;
0148     struct list_head        ld_free;
0149     struct list_head        ld_queue;
0150     struct list_head        ld_active;
0151     int             descs_allocated;
0152     enum dma_slave_buswidth     word_size;
0153     dma_addr_t          per_address;
0154     u32             watermark_level;
0155     struct dma_chan         chan;
0156     struct dma_async_tx_descriptor  desc;
0157     enum dma_status         status;
0158     int             dma_request;
0159     struct scatterlist      *sg_list;
0160     u32             ccr_from_device;
0161     u32             ccr_to_device;
0162     bool                enabled_2d;
0163     int             slot_2d;
0164     unsigned int            irq;
0165     struct dma_slave_config     config;
0166 };
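/*
 * Descriptor lifecycle: imxdma_alloc_chan_resources() pre-allocates
 * descriptors onto ld_free, tx_submit moves one onto ld_queue, and
 * imxdma_issue_pending() (or the completion tasklet) moves it onto
 * ld_active while programming the hardware.  On completion the tasklet
 * returns it to ld_free; cyclic descriptors stay on ld_active until the
 * channel is terminated.
 */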
0167 
0168 enum imx_dma_type {
0169     IMX1_DMA,
0170     IMX21_DMA,
0171     IMX27_DMA,
0172 };
0173 
0174 struct imxdma_engine {
0175     struct device           *dev;
0176     struct dma_device       dma_device;
0177     void __iomem            *base;
0178     struct clk          *dma_ahb;
0179     struct clk          *dma_ipg;
0180     spinlock_t          lock;
0181     struct imx_dma_2d_config    slots_2d[IMX_DMA_2D_SLOTS];
0182     struct imxdma_channel       channel[IMX_DMA_CHANNELS];
0183     enum imx_dma_type       devtype;
0184     unsigned int            irq;
0185     unsigned int            irq_err;
0186 
0187 };
0188 
0189 struct imxdma_filter_data {
0190     struct imxdma_engine    *imxdma;
0191     int          request;
0192 };
0193 
0194 static const struct of_device_id imx_dma_of_dev_id[] = {
0195     {
0196         .compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
0197     }, {
0198         .compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
0199     }, {
0200         .compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
0201     }, {
0202         /* sentinel */
0203     }
0204 };
0205 MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
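/*
 * Illustrative (not normative) device tree node for this controller; see
 * the fsl,imx-dma devicetree binding for the authoritative description.
 * Addresses and interrupt numbers below are examples only:
 *
 *	dma: dma-controller@10001000 {
 *		compatible = "fsl,imx27-dma";
 *		reg = <0x10001000 0x1000>;
 *		interrupts = <32 33>;
 *		#dma-cells = <1>;
 *	};
 *
 * The single specifier cell is the request line, consumed by
 * imxdma_xlate() further down.
 */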
0206 
0207 static inline int is_imx1_dma(struct imxdma_engine *imxdma)
0208 {
0209     return imxdma->devtype == IMX1_DMA;
0210 }
0211 
0212 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
0213 {
0214     return imxdma->devtype == IMX27_DMA;
0215 }
0216 
0217 static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
0218 {
0219     return container_of(chan, struct imxdma_channel, chan);
0220 }
0221 
0222 static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
0223 {
0224     struct imxdma_desc *desc;
0225 
0226     if (!list_empty(&imxdmac->ld_active)) {
0227         desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
0228                     node);
0229         if (desc->type == IMXDMA_DESC_CYCLIC)
0230             return true;
0231     }
0232     return false;
0233 }
0234 
0235 
0236 
0237 static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
0238                  unsigned offset)
0239 {
0240     __raw_writel(val, imxdma->base + offset);
0241 }
0242 
0243 static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
0244 {
0245     return __raw_readl(imxdma->base + offset);
0246 }
0247 
0248 static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
0249 {
0250     struct imxdma_engine *imxdma = imxdmac->imxdma;
0251 
0252     if (is_imx27_dma(imxdma))
0253         return imxdmac->hw_chaining;
0254     else
0255         return 0;
0256 }
0257 
0258 /*
0259  * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
0260  */
0261 static inline void imxdma_sg_next(struct imxdma_desc *d)
0262 {
0263     struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
0264     struct imxdma_engine *imxdma = imxdmac->imxdma;
0265     struct scatterlist *sg = d->sg;
0266     size_t now;
0267 
0268     now = min_t(size_t, d->len, sg_dma_len(sg));
0269     if (d->len != IMX_DMA_LENGTH_LOOP)
0270         d->len -= now;
0271 
0272     if (d->direction == DMA_DEV_TO_MEM)
0273         imx_dmav1_writel(imxdma, sg->dma_address,
0274                  DMA_DAR(imxdmac->channel));
0275     else
0276         imx_dmav1_writel(imxdma, sg->dma_address,
0277                  DMA_SAR(imxdmac->channel));
0278 
0279     imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));
0280 
0281     dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
0282         "size 0x%08x\n", __func__, imxdmac->channel,
0283          imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
0284          imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
0285          imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
0286 }
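/*
 * For cyclic transfers d->len is IMX_DMA_LENGTH_LOOP, so the length is
 * never decremented above and the (self-chained) scatterlist is walked
 * indefinitely until the channel is terminated.
 */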
0287 
0288 static void imxdma_enable_hw(struct imxdma_desc *d)
0289 {
0290     struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
0291     struct imxdma_engine *imxdma = imxdmac->imxdma;
0292     int channel = imxdmac->channel;
0293     unsigned long flags;
0294 
0295     dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
0296 
0297     local_irq_save(flags);
0298 
0299     imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
0300     imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
0301              ~(1 << channel), DMA_DIMR);
0302     imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
0303              CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
0304 
0305     if (!is_imx1_dma(imxdma) &&
0306             d->sg && imxdma_hw_chain(imxdmac)) {
0307         d->sg = sg_next(d->sg);
0308         if (d->sg) {
0309             u32 tmp;
0310             imxdma_sg_next(d);
0311             tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
0312             imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
0313                      DMA_CCR(channel));
0314         }
0315     }
0316 
0317     local_irq_restore(flags);
0318 }
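/*
 * With hardware chaining (i.MX27 only), the next scatterlist entry is
 * pre-programmed here with CCR_RPT | CCR_ACRPT set, so the controller can
 * continue with the already-loaded parameters instead of waiting for the
 * interrupt handler to set up the next chunk.
 */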
0319 
0320 static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
0321 {
0322     struct imxdma_engine *imxdma = imxdmac->imxdma;
0323     int channel = imxdmac->channel;
0324     unsigned long flags;
0325 
0326     dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
0327 
0328     if (imxdma_hw_chain(imxdmac))
0329         del_timer(&imxdmac->watchdog);
0330 
0331     local_irq_save(flags);
0332     imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
0333              (1 << channel), DMA_DIMR);
0334     imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
0335              ~CCR_CEN, DMA_CCR(channel));
0336     imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
0337     local_irq_restore(flags);
0338 }
0339 
0340 static void imxdma_watchdog(struct timer_list *t)
0341 {
0342     struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
0343     struct imxdma_engine *imxdma = imxdmac->imxdma;
0344     int channel = imxdmac->channel;
0345 
0346     imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
0347 
0348     /* Tasklet watchdog error handler */
0349     tasklet_schedule(&imxdmac->dma_tasklet);
0350     dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
0351         imxdmac->channel);
0352 }
0353 
0354 static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
0355 {
0356     struct imxdma_engine *imxdma = dev_id;
0357     unsigned int err_mask;
0358     int i, disr;
0359     int errcode;
0360 
0361     disr = imx_dmav1_readl(imxdma, DMA_DISR);
0362 
0363     err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
0364            imx_dmav1_readl(imxdma, DMA_DRTOSR) |
0365            imx_dmav1_readl(imxdma, DMA_DSESR)  |
0366            imx_dmav1_readl(imxdma, DMA_DBOSR);
0367 
0368     if (!err_mask)
0369         return IRQ_HANDLED;
0370 
0371     imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
0372 
0373     for (i = 0; i < IMX_DMA_CHANNELS; i++) {
0374         if (!(err_mask & (1 << i)))
0375             continue;
0376         errcode = 0;
0377 
0378         if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
0379             imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
0380             errcode |= IMX_DMA_ERR_BURST;
0381         }
0382         if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
0383             imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
0384             errcode |= IMX_DMA_ERR_REQUEST;
0385         }
0386         if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
0387             imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
0388             errcode |= IMX_DMA_ERR_TRANSFER;
0389         }
0390         if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
0391             imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
0392             errcode |= IMX_DMA_ERR_BUFFER;
0393         }
0394         /* Tasklet error handler */
0395         tasklet_schedule(&imxdma->channel[i].dma_tasklet);
0396 
0397         dev_warn(imxdma->dev,
0398              "DMA timeout on channel %d -%s%s%s%s\n", i,
0399              errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
0400              errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
0401              errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
0402              errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
0403     }
0404     return IRQ_HANDLED;
0405 }
0406 
0407 static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
0408 {
0409     struct imxdma_engine *imxdma = imxdmac->imxdma;
0410     int chno = imxdmac->channel;
0411     struct imxdma_desc *desc;
0412     unsigned long flags;
0413 
0414     spin_lock_irqsave(&imxdma->lock, flags);
0415     if (list_empty(&imxdmac->ld_active)) {
0416         spin_unlock_irqrestore(&imxdma->lock, flags);
0417         goto out;
0418     }
0419 
0420     desc = list_first_entry(&imxdmac->ld_active,
0421                 struct imxdma_desc,
0422                 node);
0423     spin_unlock_irqrestore(&imxdma->lock, flags);
0424 
0425     if (desc->sg) {
0426         u32 tmp;
0427         desc->sg = sg_next(desc->sg);
0428 
0429         if (desc->sg) {
0430             imxdma_sg_next(desc);
0431 
0432             tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
0433 
0434             if (imxdma_hw_chain(imxdmac)) {
0435                 /* FIXME: The timeout should probably be
0436                  * configurable
0437                  */
0438                 mod_timer(&imxdmac->watchdog,
0439                     jiffies + msecs_to_jiffies(500));
0440 
0441                 tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
0442                 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
0443             } else {
0444                 imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
0445                          DMA_CCR(chno));
0446                 tmp |= CCR_CEN;
0447             }
0448 
0449             imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
0450 
0451             if (imxdma_chan_is_doing_cyclic(imxdmac))
0452                 /* Tasklet progression */
0453                 tasklet_schedule(&imxdmac->dma_tasklet);
0454 
0455             return;
0456         }
0457 
0458         if (imxdma_hw_chain(imxdmac)) {
0459             del_timer(&imxdmac->watchdog);
0460             return;
0461         }
0462     }
0463 
0464 out:
0465     imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
0466     /* Tasklet irq */
0467     tasklet_schedule(&imxdmac->dma_tasklet);
0468 }
0469 
0470 static irqreturn_t dma_irq_handler(int irq, void *dev_id)
0471 {
0472     struct imxdma_engine *imxdma = dev_id;
0473     int i, disr;
0474 
0475     if (!is_imx1_dma(imxdma))
0476         imxdma_err_handler(irq, dev_id);
0477 
0478     disr = imx_dmav1_readl(imxdma, DMA_DISR);
0479 
0480     dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);
0481 
0482     imx_dmav1_writel(imxdma, disr, DMA_DISR);
0483     for (i = 0; i < IMX_DMA_CHANNELS; i++) {
0484         if (disr & (1 << i))
0485             dma_irq_handle_channel(&imxdma->channel[i]);
0486     }
0487 
0488     return IRQ_HANDLED;
0489 }
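/*
 * Interrupt topology differs per SoC: i.MX1 exposes one shared DMA
 * interrupt plus a dedicated error interrupt (both requested in
 * imxdma_probe()), while i.MX21/27 provide one interrupt per channel
 * (requested as irq + i) and no separate error line, which is why this
 * handler also calls imxdma_err_handler() itself on those parts.
 */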
0490 
0491 static int imxdma_xfer_desc(struct imxdma_desc *d)
0492 {
0493     struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
0494     struct imxdma_engine *imxdma = imxdmac->imxdma;
0495     int slot = -1;
0496     int i;
0497 
0498     /* Configure and enable */
0499     switch (d->type) {
0500     case IMXDMA_DESC_INTERLEAVED:
0501         /* Try to get a free 2D slot */
0502         for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
0503             if ((imxdma->slots_2d[i].count > 0) &&
0504             ((imxdma->slots_2d[i].xsr != d->x) ||
0505             (imxdma->slots_2d[i].ysr != d->y) ||
0506             (imxdma->slots_2d[i].wsr != d->w)))
0507                 continue;
0508             slot = i;
0509             break;
0510         }
0511         if (slot < 0)
0512             return -EBUSY;
0513 
0514         imxdma->slots_2d[slot].xsr = d->x;
0515         imxdma->slots_2d[slot].ysr = d->y;
0516         imxdma->slots_2d[slot].wsr = d->w;
0517         imxdma->slots_2d[slot].count++;
0518 
0519         imxdmac->slot_2d = slot;
0520         imxdmac->enabled_2d = true;
0521 
0522         if (slot == IMX_DMA_2D_SLOT_A) {
0523             d->config_mem &= ~CCR_MSEL_B;
0524             d->config_port &= ~CCR_MSEL_B;
0525             imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
0526             imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
0527             imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
0528         } else {
0529             d->config_mem |= CCR_MSEL_B;
0530             d->config_port |= CCR_MSEL_B;
0531             imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
0532             imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
0533             imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
0534         }
0535         /*
0536          * We fall through here intentionally, since a 2D transfer is
0537          * simply a MEMCPY with the 2D slot configuration added on top.
0538          */
0539         fallthrough;
0540     case IMXDMA_DESC_MEMCPY:
0541         imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
0542         imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
0543         imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
0544              DMA_CCR(imxdmac->channel));
0545 
0546         imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
0547 
0548         dev_dbg(imxdma->dev,
0549             "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
0550             __func__, imxdmac->channel,
0551             (unsigned long long)d->dest,
0552             (unsigned long long)d->src, d->len);
0553 
0554         break;
0555     /* Cyclic transfer is the same as slave_sg with special sg configuration. */
0556     case IMXDMA_DESC_CYCLIC:
0557     case IMXDMA_DESC_SLAVE_SG:
0558         if (d->direction == DMA_DEV_TO_MEM) {
0559             imx_dmav1_writel(imxdma, imxdmac->per_address,
0560                      DMA_SAR(imxdmac->channel));
0561             imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
0562                      DMA_CCR(imxdmac->channel));
0563 
0564             dev_dbg(imxdma->dev,
0565                 "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
0566                 __func__, imxdmac->channel,
0567                 d->sg, d->sgcount, d->len,
0568                 (unsigned long long)imxdmac->per_address);
0569         } else if (d->direction == DMA_MEM_TO_DEV) {
0570             imx_dmav1_writel(imxdma, imxdmac->per_address,
0571                      DMA_DAR(imxdmac->channel));
0572             imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
0573                      DMA_CCR(imxdmac->channel));
0574 
0575             dev_dbg(imxdma->dev,
0576                 "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
0577                 __func__, imxdmac->channel,
0578                 d->sg, d->sgcount, d->len,
0579                 (unsigned long long)imxdmac->per_address);
0580         } else {
0581             dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
0582                 __func__, imxdmac->channel);
0583             return -EINVAL;
0584         }
0585 
0586         imxdma_sg_next(d);
0587 
0588         break;
0589     default:
0590         return -EINVAL;
0591     }
0592     imxdma_enable_hw(d);
0593     return 0;
0594 }
0595 
0596 static void imxdma_tasklet(struct tasklet_struct *t)
0597 {
0598     struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
0599     struct imxdma_engine *imxdma = imxdmac->imxdma;
0600     struct imxdma_desc *desc, *next_desc;
0601     unsigned long flags;
0602 
0603     spin_lock_irqsave(&imxdma->lock, flags);
0604 
0605     if (list_empty(&imxdmac->ld_active)) {
0606         /* Someone might have called terminate all */
0607         spin_unlock_irqrestore(&imxdma->lock, flags);
0608         return;
0609     }
0610     desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
0611 
0612     /* If we are dealing with a cyclic descriptor, keep it on ld_active
0613      * and don't mark the descriptor as complete.
0614      * Only in the non-cyclic case is it marked as complete.
0615      */
0616     if (imxdma_chan_is_doing_cyclic(imxdmac))
0617         goto out;
0618     else
0619         dma_cookie_complete(&desc->desc);
0620 
0621     /* Free 2D slot if it was an interleaved transfer */
0622     if (imxdmac->enabled_2d) {
0623         imxdma->slots_2d[imxdmac->slot_2d].count--;
0624         imxdmac->enabled_2d = false;
0625     }
0626 
0627     list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
0628 
0629     if (!list_empty(&imxdmac->ld_queue)) {
0630         next_desc = list_first_entry(&imxdmac->ld_queue,
0631                          struct imxdma_desc, node);
0632         list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
0633         if (imxdma_xfer_desc(next_desc) < 0)
0634             dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
0635                  __func__, imxdmac->channel);
0636     }
0637 out:
0638     spin_unlock_irqrestore(&imxdma->lock, flags);
0639 
0640     dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
0641 }
0642 
0643 static int imxdma_terminate_all(struct dma_chan *chan)
0644 {
0645     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0646     struct imxdma_engine *imxdma = imxdmac->imxdma;
0647     unsigned long flags;
0648 
0649     imxdma_disable_hw(imxdmac);
0650 
0651     spin_lock_irqsave(&imxdma->lock, flags);
0652     list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
0653     list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
0654     spin_unlock_irqrestore(&imxdma->lock, flags);
0655     return 0;
0656 }
0657 
0658 static int imxdma_config_write(struct dma_chan *chan,
0659                    struct dma_slave_config *dmaengine_cfg,
0660                    enum dma_transfer_direction direction)
0661 {
0662     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0663     struct imxdma_engine *imxdma = imxdmac->imxdma;
0664     unsigned int mode = 0;
0665 
0666     if (direction == DMA_DEV_TO_MEM) {
0667         imxdmac->per_address = dmaengine_cfg->src_addr;
0668         imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
0669         imxdmac->word_size = dmaengine_cfg->src_addr_width;
0670     } else {
0671         imxdmac->per_address = dmaengine_cfg->dst_addr;
0672         imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
0673         imxdmac->word_size = dmaengine_cfg->dst_addr_width;
0674     }
0675 
0676     switch (imxdmac->word_size) {
0677     case DMA_SLAVE_BUSWIDTH_1_BYTE:
0678         mode = IMX_DMA_MEMSIZE_8;
0679         break;
0680     case DMA_SLAVE_BUSWIDTH_2_BYTES:
0681         mode = IMX_DMA_MEMSIZE_16;
0682         break;
0683     default:
0684     case DMA_SLAVE_BUSWIDTH_4_BYTES:
0685         mode = IMX_DMA_MEMSIZE_32;
0686         break;
0687     }
0688 
0689     imxdmac->hw_chaining = 0;
0690 
0691     imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
0692         ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
0693         CCR_REN;
0694     imxdmac->ccr_to_device =
0695         (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
0696         ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
0697     imx_dmav1_writel(imxdma, imxdmac->dma_request,
0698              DMA_RSSR(imxdmac->channel));
0699 
0700     /* Set burst length */
0701     imx_dmav1_writel(imxdma, imxdmac->watermark_level *
0702              imxdmac->word_size, DMA_BLR(imxdmac->channel));
0703 
0704     return 0;
0705 }
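/*
 * Client-side sketch (not part of this driver) of the dma_slave_config a
 * peripheral driver would pass in before preparing transfers; the FIFO
 * address and burst size below are purely illustrative:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr       = fifo_phys_addr,	// peripheral FIFO address
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst   = 8,			// BLR gets maxburst * width bytes
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * imxdma_config() below only caches the configuration;
 * imxdma_config_write() above applies it per direction when a transfer is
 * actually prepared.
 */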
0706 
0707 static int imxdma_config(struct dma_chan *chan,
0708              struct dma_slave_config *dmaengine_cfg)
0709 {
0710     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0711 
0712     memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));
0713 
0714     return 0;
0715 }
0716 
0717 static enum dma_status imxdma_tx_status(struct dma_chan *chan,
0718                         dma_cookie_t cookie,
0719                         struct dma_tx_state *txstate)
0720 {
0721     return dma_cookie_status(chan, cookie, txstate);
0722 }
0723 
0724 static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
0725 {
0726     struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
0727     struct imxdma_engine *imxdma = imxdmac->imxdma;
0728     dma_cookie_t cookie;
0729     unsigned long flags;
0730 
0731     spin_lock_irqsave(&imxdma->lock, flags);
0732     list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
0733     cookie = dma_cookie_assign(tx);
0734     spin_unlock_irqrestore(&imxdma->lock, flags);
0735 
0736     return cookie;
0737 }
0738 
0739 static int imxdma_alloc_chan_resources(struct dma_chan *chan)
0740 {
0741     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0742     struct imx_dma_data *data = chan->private;
0743 
0744     if (data != NULL)
0745         imxdmac->dma_request = data->dma_request;
0746 
0747     while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
0748         struct imxdma_desc *desc;
0749 
0750         desc = kzalloc(sizeof(*desc), GFP_KERNEL);
0751         if (!desc)
0752             break;
0753         memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
0754         dma_async_tx_descriptor_init(&desc->desc, chan);
0755         desc->desc.tx_submit = imxdma_tx_submit;
0756         /* txd.flags will be overwritten in prep funcs */
0757         desc->desc.flags = DMA_CTRL_ACK;
0758         desc->status = DMA_COMPLETE;
0759 
0760         list_add_tail(&desc->node, &imxdmac->ld_free);
0761         imxdmac->descs_allocated++;
0762     }
0763 
0764     if (!imxdmac->descs_allocated)
0765         return -ENOMEM;
0766 
0767     return imxdmac->descs_allocated;
0768 }
0769 
0770 static void imxdma_free_chan_resources(struct dma_chan *chan)
0771 {
0772     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0773     struct imxdma_engine *imxdma = imxdmac->imxdma;
0774     struct imxdma_desc *desc, *_desc;
0775     unsigned long flags;
0776 
0777     spin_lock_irqsave(&imxdma->lock, flags);
0778 
0779     imxdma_disable_hw(imxdmac);
0780     list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
0781     list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
0782 
0783     spin_unlock_irqrestore(&imxdma->lock, flags);
0784 
0785     list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
0786         kfree(desc);
0787         imxdmac->descs_allocated--;
0788     }
0789     INIT_LIST_HEAD(&imxdmac->ld_free);
0790 
0791     kfree(imxdmac->sg_list);
0792     imxdmac->sg_list = NULL;
0793 }
0794 
0795 static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
0796         struct dma_chan *chan, struct scatterlist *sgl,
0797         unsigned int sg_len, enum dma_transfer_direction direction,
0798         unsigned long flags, void *context)
0799 {
0800     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0801     struct scatterlist *sg;
0802     int i, dma_length = 0;
0803     struct imxdma_desc *desc;
0804 
0805     if (list_empty(&imxdmac->ld_free) ||
0806         imxdma_chan_is_doing_cyclic(imxdmac))
0807         return NULL;
0808 
0809     desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
0810 
0811     for_each_sg(sgl, sg, sg_len, i) {
0812         dma_length += sg_dma_len(sg);
0813     }
0814 
0815     imxdma_config_write(chan, &imxdmac->config, direction);
0816 
0817     switch (imxdmac->word_size) {
0818     case DMA_SLAVE_BUSWIDTH_4_BYTES:
0819         if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
0820             return NULL;
0821         break;
0822     case DMA_SLAVE_BUSWIDTH_2_BYTES:
0823         if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
0824             return NULL;
0825         break;
0826     case DMA_SLAVE_BUSWIDTH_1_BYTE:
0827         break;
0828     default:
0829         return NULL;
0830     }
0831 
0832     desc->type = IMXDMA_DESC_SLAVE_SG;
0833     desc->sg = sgl;
0834     desc->sgcount = sg_len;
0835     desc->len = dma_length;
0836     desc->direction = direction;
0837     if (direction == DMA_DEV_TO_MEM) {
0838         desc->src = imxdmac->per_address;
0839     } else {
0840         desc->dest = imxdmac->per_address;
0841     }
0842     desc->desc.callback = NULL;
0843     desc->desc.callback_param = NULL;
0844 
0845     return &desc->desc;
0846 }
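/*
 * Typical client usage (sketch, assuming a channel already obtained via
 * dma_request_chan() and a DMA-mapped scatterlist; my_done_cb is
 * hypothetical):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_done_cb;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * Note the bus-width check above: the first segment's address and length
 * must be aligned to the configured word size.
 */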
0847 
0848 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
0849         struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
0850         size_t period_len, enum dma_transfer_direction direction,
0851         unsigned long flags)
0852 {
0853     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0854     struct imxdma_engine *imxdma = imxdmac->imxdma;
0855     struct imxdma_desc *desc;
0856     int i;
0857     unsigned int periods = buf_len / period_len;
0858 
0859     dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
0860             __func__, imxdmac->channel, buf_len, period_len);
0861 
0862     if (list_empty(&imxdmac->ld_free) ||
0863         imxdma_chan_is_doing_cyclic(imxdmac))
0864         return NULL;
0865 
0866     desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
0867 
0868     kfree(imxdmac->sg_list);
0869 
0870     imxdmac->sg_list = kcalloc(periods + 1,
0871             sizeof(struct scatterlist), GFP_ATOMIC);
0872     if (!imxdmac->sg_list)
0873         return NULL;
0874 
0875     sg_init_table(imxdmac->sg_list, periods);
0876 
0877     for (i = 0; i < periods; i++) {
0878         sg_assign_page(&imxdmac->sg_list[i], NULL);
0879         imxdmac->sg_list[i].offset = 0;
0880         imxdmac->sg_list[i].dma_address = dma_addr;
0881         sg_dma_len(&imxdmac->sg_list[i]) = period_len;
0882         dma_addr += period_len;
0883     }
0884 
0885     /* close the loop */
0886     sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);
0887 
0888     desc->type = IMXDMA_DESC_CYCLIC;
0889     desc->sg = imxdmac->sg_list;
0890     desc->sgcount = periods;
0891     desc->len = IMX_DMA_LENGTH_LOOP;
0892     desc->direction = direction;
0893     if (direction == DMA_DEV_TO_MEM) {
0894         desc->src = imxdmac->per_address;
0895     } else {
0896         desc->dest = imxdmac->per_address;
0897     }
0898     desc->desc.callback = NULL;
0899     desc->desc.callback_param = NULL;
0900 
0901     imxdma_config_write(chan, &imxdmac->config, direction);
0902 
0903     return &desc->desc;
0904 }
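/*
 * Cyclic transfers (typically audio) are thus implemented as a slave-sg
 * transfer over a self-chained scatterlist: one entry per period, looped
 * back onto itself with sg_chain() above, so the hardware keeps cycling
 * through the buffer until the channel is terminated.
 */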
0905 
0906 static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
0907     struct dma_chan *chan, dma_addr_t dest,
0908     dma_addr_t src, size_t len, unsigned long flags)
0909 {
0910     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0911     struct imxdma_engine *imxdma = imxdmac->imxdma;
0912     struct imxdma_desc *desc;
0913 
0914     dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
0915         __func__, imxdmac->channel, (unsigned long long)src,
0916         (unsigned long long)dest, len);
0917 
0918     if (list_empty(&imxdmac->ld_free) ||
0919         imxdma_chan_is_doing_cyclic(imxdmac))
0920         return NULL;
0921 
0922     desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
0923 
0924     desc->type = IMXDMA_DESC_MEMCPY;
0925     desc->src = src;
0926     desc->dest = dest;
0927     desc->len = len;
0928     desc->direction = DMA_MEM_TO_MEM;
0929     desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
0930     desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
0931     desc->desc.callback = NULL;
0932     desc->desc.callback_param = NULL;
0933 
0934     return &desc->desc;
0935 }
0936 
0937 static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
0938     struct dma_chan *chan, struct dma_interleaved_template *xt,
0939     unsigned long flags)
0940 {
0941     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0942     struct imxdma_engine *imxdma = imxdmac->imxdma;
0943     struct imxdma_desc *desc;
0944 
0945     dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
0946         "   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
0947         imxdmac->channel, (unsigned long long)xt->src_start,
0948         (unsigned long long) xt->dst_start,
0949         xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
0950         xt->numf, xt->frame_size);
0951 
0952     if (list_empty(&imxdmac->ld_free) ||
0953         imxdma_chan_is_doing_cyclic(imxdmac))
0954         return NULL;
0955 
0956     if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
0957         return NULL;
0958 
0959     desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
0960 
0961     desc->type = IMXDMA_DESC_INTERLEAVED;
0962     desc->src = xt->src_start;
0963     desc->dest = xt->dst_start;
0964     desc->x = xt->sgl[0].size;
0965     desc->y = xt->numf;
0966     desc->w = xt->sgl[0].icg + desc->x;
0967     desc->len = desc->x * desc->y;
0968     desc->direction = DMA_MEM_TO_MEM;
0969     desc->config_port = IMX_DMA_MEMSIZE_32;
0970     desc->config_mem = IMX_DMA_MEMSIZE_32;
0971     if (xt->src_sgl)
0972         desc->config_mem |= IMX_DMA_TYPE_2D;
0973     if (xt->dst_sgl)
0974         desc->config_port |= IMX_DMA_TYPE_2D;
0975     desc->desc.callback = NULL;
0976     desc->desc.callback_param = NULL;
0977 
0978     return &desc->desc;
0979 }
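/*
 * Mapping of the interleaved template onto the 2D engine: x is the line
 * length in bytes, y the number of lines and w the line stride (icg + x).
 * For example, copying a 320-byte-wide sub-rectangle out of a
 * 480-byte-wide frame over 240 lines gives x = 320, w = 480, y = 240 and
 * len = x * y = 76800 bytes actually transferred.
 */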
0980 
0981 static void imxdma_issue_pending(struct dma_chan *chan)
0982 {
0983     struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
0984     struct imxdma_engine *imxdma = imxdmac->imxdma;
0985     struct imxdma_desc *desc;
0986     unsigned long flags;
0987 
0988     spin_lock_irqsave(&imxdma->lock, flags);
0989     if (list_empty(&imxdmac->ld_active) &&
0990         !list_empty(&imxdmac->ld_queue)) {
0991         desc = list_first_entry(&imxdmac->ld_queue,
0992                     struct imxdma_desc, node);
0993 
0994         if (imxdma_xfer_desc(desc) < 0) {
0995             dev_warn(imxdma->dev,
0996                  "%s: channel: %d couldn't issue DMA xfer\n",
0997                  __func__, imxdmac->channel);
0998         } else {
0999             list_move_tail(imxdmac->ld_queue.next,
1000                        &imxdmac->ld_active);
1001         }
1002     }
1003     spin_unlock_irqrestore(&imxdma->lock, flags);
1004 }
1005 
1006 static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
1007 {
1008     struct imxdma_filter_data *fdata = param;
1009     struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);
1010 
1011     if (chan->device->dev != fdata->imxdma->dev)
1012         return false;
1013 
1014     imxdma_chan->dma_request = fdata->request;
1015     chan->private = NULL;
1016 
1017     return true;
1018 }
1019 
1020 static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
1021                         struct of_dma *ofdma)
1022 {
1023     int count = dma_spec->args_count;
1024     struct imxdma_engine *imxdma = ofdma->of_dma_data;
1025     struct imxdma_filter_data fdata = {
1026         .imxdma = imxdma,
1027     };
1028 
1029     if (count != 1)
1030         return NULL;
1031 
1032     fdata.request = dma_spec->args[0];
1033 
1034     return dma_request_channel(imxdma->dma_device.cap_mask,
1035                     imxdma_filter_fn, &fdata);
1036 }
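/*
 * Illustrative consumer node (the request line numbers are hypothetical):
 *
 *	uart1: serial@1000a000 {
 *		...
 *		dmas = <&dma 26>, <&dma 27>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * The cell after the phandle arrives here as dma_spec->args[0] and is
 * stored as the channel's dma_request line by imxdma_filter_fn().
 */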
1037 
1038 static int __init imxdma_probe(struct platform_device *pdev)
1039 {
1040     struct imxdma_engine *imxdma;
1041     struct resource *res;
1042     int ret, i;
1043     int irq, irq_err;
1044 
1045     imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
1046     if (!imxdma)
1047         return -ENOMEM;
1048 
1049     imxdma->dev = &pdev->dev;
1050     imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
1051 
1052     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1053     imxdma->base = devm_ioremap_resource(&pdev->dev, res);
1054     if (IS_ERR(imxdma->base))
1055         return PTR_ERR(imxdma->base);
1056 
1057     irq = platform_get_irq(pdev, 0);
1058     if (irq < 0)
1059         return irq;
1060 
1061     imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
1062     if (IS_ERR(imxdma->dma_ipg))
1063         return PTR_ERR(imxdma->dma_ipg);
1064 
1065     imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
1066     if (IS_ERR(imxdma->dma_ahb))
1067         return PTR_ERR(imxdma->dma_ahb);
1068 
1069     ret = clk_prepare_enable(imxdma->dma_ipg);
1070     if (ret)
1071         return ret;
1072     ret = clk_prepare_enable(imxdma->dma_ahb);
1073     if (ret)
1074         goto disable_dma_ipg_clk;
1075 
1076     /* reset DMA module */
1077     imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
1078 
1079     if (is_imx1_dma(imxdma)) {
1080         ret = devm_request_irq(&pdev->dev, irq,
1081                        dma_irq_handler, 0, "DMA", imxdma);
1082         if (ret) {
1083             dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
1084             goto disable_dma_ahb_clk;
1085         }
1086         imxdma->irq = irq;
1087 
1088         irq_err = platform_get_irq(pdev, 1);
1089         if (irq_err < 0) {
1090             ret = irq_err;
1091             goto disable_dma_ahb_clk;
1092         }
1093 
1094         ret = devm_request_irq(&pdev->dev, irq_err,
1095                        imxdma_err_handler, 0, "DMA", imxdma);
1096         if (ret) {
1097             dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
1098             goto disable_dma_ahb_clk;
1099         }
1100         imxdma->irq_err = irq_err;
1101     }
1102 
1103     /* enable DMA module */
1104     imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);
1105 
1106     /* clear all interrupts */
1107     imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
1108 
1109     /* disable interrupts */
1110     imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
1111 
1112     INIT_LIST_HEAD(&imxdma->dma_device.channels);
1113 
1114     dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
1115     dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
1116     dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
1117     dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
1118 
1119     /* Initialize 2D global parameters */
1120     for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
1121         imxdma->slots_2d[i].count = 0;
1122 
1123     spin_lock_init(&imxdma->lock);
1124 
1125     /* Initialize channel parameters */
1126     for (i = 0; i < IMX_DMA_CHANNELS; i++) {
1127         struct imxdma_channel *imxdmac = &imxdma->channel[i];
1128 
1129         if (!is_imx1_dma(imxdma)) {
1130             ret = devm_request_irq(&pdev->dev, irq + i,
1131                     dma_irq_handler, 0, "DMA", imxdma);
1132             if (ret) {
1133                 dev_warn(imxdma->dev, "Can't register IRQ %d "
1134                      "for DMA channel %d\n",
1135                      irq + i, i);
1136                 goto disable_dma_ahb_clk;
1137             }
1138 
1139             imxdmac->irq = irq + i;
1140             timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
1141         }
1142 
1143         imxdmac->imxdma = imxdma;
1144 
1145         INIT_LIST_HEAD(&imxdmac->ld_queue);
1146         INIT_LIST_HEAD(&imxdmac->ld_free);
1147         INIT_LIST_HEAD(&imxdmac->ld_active);
1148 
1149         tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
1150         imxdmac->chan.device = &imxdma->dma_device;
1151         dma_cookie_init(&imxdmac->chan);
1152         imxdmac->channel = i;
1153 
1154         /* Add the channel to the DMAC list */
1155         list_add_tail(&imxdmac->chan.device_node,
1156                   &imxdma->dma_device.channels);
1157     }
1158 
1159     imxdma->dma_device.dev = &pdev->dev;
1160 
1161     imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
1162     imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
1163     imxdma->dma_device.device_tx_status = imxdma_tx_status;
1164     imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
1165     imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
1166     imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
1167     imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
1168     imxdma->dma_device.device_config = imxdma_config;
1169     imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
1170     imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
1171 
1172     platform_set_drvdata(pdev, imxdma);
1173 
1174     imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
1175     dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
1176 
1177     ret = dma_async_device_register(&imxdma->dma_device);
1178     if (ret) {
1179         dev_err(&pdev->dev, "unable to register\n");
1180         goto disable_dma_ahb_clk;
1181     }
1182 
1183     if (pdev->dev.of_node) {
1184         ret = of_dma_controller_register(pdev->dev.of_node,
1185                 imxdma_xlate, imxdma);
1186         if (ret) {
1187             dev_err(&pdev->dev, "unable to register of_dma_controller\n");
1188             goto err_of_dma_controller;
1189         }
1190     }
1191 
1192     return 0;
1193 
1194 err_of_dma_controller:
1195     dma_async_device_unregister(&imxdma->dma_device);
1196 disable_dma_ahb_clk:
1197     clk_disable_unprepare(imxdma->dma_ahb);
1198 disable_dma_ipg_clk:
1199     clk_disable_unprepare(imxdma->dma_ipg);
1200     return ret;
1201 }
1202 
1203 static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
1204 {
1205     int i;
1206 
1207     if (is_imx1_dma(imxdma)) {
1208         disable_irq(imxdma->irq);
1209         disable_irq(imxdma->irq_err);
1210     }
1211 
1212     for (i = 0; i < IMX_DMA_CHANNELS; i++) {
1213         struct imxdma_channel *imxdmac = &imxdma->channel[i];
1214 
1215         if (!is_imx1_dma(imxdma))
1216             disable_irq(imxdmac->irq);
1217 
1218         tasklet_kill(&imxdmac->dma_tasklet);
1219     }
1220 }
1221 
1222 static int imxdma_remove(struct platform_device *pdev)
1223 {
1224     struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
1225 
1226     imxdma_free_irq(pdev, imxdma);
1227 
1228     dma_async_device_unregister(&imxdma->dma_device);
1229 
1230     if (pdev->dev.of_node)
1231         of_dma_controller_free(pdev->dev.of_node);
1232 
1233     clk_disable_unprepare(imxdma->dma_ipg);
1234     clk_disable_unprepare(imxdma->dma_ahb);
1235 
1236     return 0;
1237 }
1238 
1239 static struct platform_driver imxdma_driver = {
1240     .driver     = {
1241         .name   = "imx-dma",
1242         .of_match_table = imx_dma_of_dev_id,
1243     },
1244     .remove     = imxdma_remove,
1245 };
1246 
1247 static int __init imxdma_module_init(void)
1248 {
1249     return platform_driver_probe(&imxdma_driver, imxdma_probe);
1250 }
1251 subsys_initcall(imxdma_module_init);
1252 
1253 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1254 MODULE_DESCRIPTION("i.MX dma driver");
1255 MODULE_LICENSE("GPL");