// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
//
// Refer to drivers/dma/imx-sdma.c

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <linux/dma/mxs-dma.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means the
 * PIO mode of mxs apbh-dma and apbx-dma.  In this mode, the DMA engine
 * can program the controller registers of peripheral devices.
 */
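
/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver uses PIO mode by passing an array of register words as the
 * "scatterlist" of a slave_sg transfer with direction DMA_TRANS_NONE;
 * mxs_dma_prep_slave_sg() below then copies them into ccw->pio_words.
 * The names pio_chan and pio_words are hypothetical.
 *
 *	u32 pio_words[2] = { ctrl0_value, compare_value };
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_sg(pio_chan,
 *			(struct scatterlist *)pio_words, 2,
 *			DMA_TRANS_NONE, MXS_DMA_CTRL_WAIT4END);
 *	if (desc)
 *		dmaengine_submit(desc);
 */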

#define dma_is_apbh(mxs_dma)    ((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)    ((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0              0x000
#define BM_APBH_CTRL0_APB_BURST8_EN     (1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN      (1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL     16
#define HW_APBHX_CTRL1              0x010
#define HW_APBHX_CTRL2              0x020
#define HW_APBHX_CHANNEL_CTRL           0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
/*
 * The offset of the NXTCMDAR register differs by DMA type and version,
 * while the per-channel stride is always 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
    (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
    (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
#define HW_APBHX_CHn_BAR(d, n) \
    (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70)
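
/*
 * Worked example (illustrative): on i.MX23 APBH (the old layout), channel
 * 4's NXTCMDAR register lives at 0x050 + 4 * 0x70 = 0x210; on i.MX28 or on
 * APBX, the same channel uses 0x110 + 4 * 0x70 = 0x2d0.
 */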

/*
 * ccw bits definitions
 *
 * COMMAND:           0..1   (2)
 * CHAIN:             2      (1)
 * IRQ:               3      (1)
 * NAND_LOCK:         4      (1) - not implemented
 * NAND_WAIT4READY:   5      (1) - not implemented
 * DEC_SEM:           6      (1)
 * WAIT4END:          7      (1)
 * HALT_ON_TERMINATE: 8      (1)
 * TERMINATE_FLUSH:   9      (1)
 * RESERVED:          10..11 (2)
 * PIO_NUM:           12..15 (4)
 */
#define BP_CCW_COMMAND      0
#define BM_CCW_COMMAND      (3 << 0)
#define CCW_CHAIN       (1 << 2)
#define CCW_IRQ         (1 << 3)
#define CCW_WAIT4RDY        (1 << 5)
#define CCW_DEC_SEM     (1 << 6)
#define CCW_WAIT4END        (1 << 7)
#define CCW_HALT_ON_TERM    (1 << 8)
#define CCW_TERM_FLUSH      (1 << 9)
#define BP_CCW_PIO_NUM      12
#define BM_CCW_PIO_NUM      (0xf << 12)

#define BF_CCW(value, field)    (((value) << BP_CCW_##field) & BM_CCW_##field)
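
/*
 * Worked example (illustrative): BF_CCW(3, PIO_NUM) expands to
 * ((3 << BP_CCW_PIO_NUM) & BM_CCW_PIO_NUM) = (3 << 12) & (0xf << 12)
 * = 0x3000, i.e. the value 3 placed in the PIO_NUM field of ccw->bits.
 */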

#define MXS_DMA_CMD_NO_XFER 0
#define MXS_DMA_CMD_WRITE   1
#define MXS_DMA_CMD_READ    2
#define MXS_DMA_CMD_DMA_SENSE   3   /* not implemented */

struct mxs_dma_ccw {
    u32     next;
    u16     bits;
    u16     xfer_bytes;
#define MAX_XFER_BYTES  0xff00
    u32     bufaddr;
#define MXS_PIO_WORDS   16
    u32     pio_words[MXS_PIO_WORDS];
};

#define CCW_BLOCK_SIZE  (4 * PAGE_SIZE)
#define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
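
/*
 * Worked example (illustrative, assuming 4 KiB pages): CCW_BLOCK_SIZE is
 * 16384 bytes and sizeof(struct mxs_dma_ccw) is 76 bytes (4 + 2 + 2 + 4 +
 * 16 * 4), so NUM_CCW is 16384 / 76 = 215 commands per channel.
 */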

struct mxs_dma_chan {
    struct mxs_dma_engine       *mxs_dma;
    struct dma_chan         chan;
    struct dma_async_tx_descriptor  desc;
    struct tasklet_struct       tasklet;
    unsigned int            chan_irq;
    struct mxs_dma_ccw      *ccw;
    dma_addr_t          ccw_phys;
    int             desc_count;
    enum dma_status         status;
    unsigned int            flags;
    bool                reset;
#define MXS_DMA_SG_LOOP         (1 << 0)
#define MXS_DMA_USE_SEMAPHORE       (1 << 1)
};

#define MXS_DMA_CHANNELS        16
#define MXS_DMA_CHANNELS_MASK       0xffff

enum mxs_dma_devtype {
    MXS_DMA_APBH,
    MXS_DMA_APBX,
};

enum mxs_dma_id {
    IMX23_DMA,
    IMX28_DMA,
};

struct mxs_dma_engine {
    enum mxs_dma_id         dev_id;
    enum mxs_dma_devtype        type;
    void __iomem            *base;
    struct clk          *clk;
    struct dma_device       dma_device;
    struct mxs_dma_chan     mxs_chans[MXS_DMA_CHANNELS];
    struct platform_device      *pdev;
    unsigned int            nr_channels;
};

struct mxs_dma_type {
    enum mxs_dma_id id;
    enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
    {
        .id = IMX23_DMA,
        .type = MXS_DMA_APBH,
    }, {
        .id = IMX23_DMA,
        .type = MXS_DMA_APBX,
    }, {
        .id = IMX28_DMA,
        .type = MXS_DMA_APBH,
    }, {
        .id = IMX28_DMA,
        .type = MXS_DMA_APBX,
    }
};

static const struct of_device_id mxs_dma_dt_ids[] = {
    { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], },
    { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], },
    { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], },
    { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
    return container_of(chan, struct mxs_dma_chan, chan);
}

static void mxs_dma_reset_chan(struct dma_chan *chan)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    int chan_id = mxs_chan->chan.chan_id;

    /*
     * mxs dma channel resets can cause a channel stall. To recover from a
     * channel stall, we have to reset the whole DMA engine. To avoid this,
     * we use cyclic DMA with semaphores, which are incremented in
     * mxs_dma_int_handler. To reset the channel, we can simply stop writing
     * into the semaphore counter.
     */
    if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
            mxs_chan->flags & MXS_DMA_SG_LOOP) {
        mxs_chan->reset = true;
    } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
        writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
            mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
    } else {
        unsigned long elapsed = 0;
        const unsigned long max_wait = 50000; /* 50ms */
        void __iomem *reg_dbg1 = mxs_dma->base +
                HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);

        /*
         * On i.MX28 APBX, the DMA channel can stop working if we reset
         * the channel while it is in READ_FLUSH (0x08) state.
         * We wait here until we leave the state. Then we trigger the
         * reset. Waiting a maximum of 50ms, the kernel shouldn't crash
         * because of this.
         */
        while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
            udelay(100);
            elapsed += 100;
        }

        if (elapsed >= max_wait)
            dev_err(&mxs_chan->mxs_dma->pdev->dev,
                    "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
                    chan_id);

        writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
            mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
    }

    mxs_chan->status = DMA_COMPLETE;
}

static void mxs_dma_enable_chan(struct dma_chan *chan)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    int chan_id = mxs_chan->chan.chan_id;

    /* set cmd_addr up */
    writel(mxs_chan->ccw_phys,
        mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

    /* write 1 to SEMA to kick off the channel */
    if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
            mxs_chan->flags & MXS_DMA_SG_LOOP) {
        /*
         * A cyclic DMA consists of at least 2 segments, so initialize
         * the semaphore with 2 so we have enough time to add 1 to the
         * semaphore if we need to.
         */
        writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
    } else {
        writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
    }
    mxs_chan->reset = false;
}

static void mxs_dma_disable_chan(struct dma_chan *chan)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

    mxs_chan->status = DMA_COMPLETE;
}

static int mxs_dma_pause_chan(struct dma_chan *chan)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    int chan_id = mxs_chan->chan.chan_id;

    /* freeze the channel */
    if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
        writel(1 << chan_id,
            mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
    else
        writel(1 << chan_id,
            mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

    mxs_chan->status = DMA_PAUSED;
    return 0;
}

static int mxs_dma_resume_chan(struct dma_chan *chan)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    int chan_id = mxs_chan->chan.chan_id;

    /* unfreeze the channel */
    if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
        writel(1 << chan_id,
            mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
    else
        writel(1 << chan_id,
            mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

    mxs_chan->status = DMA_IN_PROGRESS;
    return 0;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
    return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(struct tasklet_struct *t)
{
    struct mxs_dma_chan *mxs_chan = from_tasklet(mxs_chan, t, tasklet);

    dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
}

static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
    int i;

    for (i = 0; i != mxs_dma->nr_channels; ++i)
        if (mxs_dma->mxs_chans[i].chan_irq == irq)
            return i;

    return -EINVAL;
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
    struct mxs_dma_engine *mxs_dma = dev_id;
    struct mxs_dma_chan *mxs_chan;
    u32 completed;
    u32 err;
    int chan = mxs_dma_irq_to_chan(mxs_dma, irq);

    if (chan < 0)
        return IRQ_NONE;

    /* completion status */
    completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
    completed = (completed >> chan) & 0x1;

    /* Clear interrupt */
    writel((1 << chan),
            mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

    /* error status */
    err = readl(mxs_dma->base + HW_APBHX_CTRL2);
    err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);

    /*
     * error status bit is in the upper 16 bits, error irq bit in the lower
     * 16 bits. We transform it into a simpler error code:
     * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
     */
    err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);

    /* Clear error irq */
    writel((1 << chan),
            mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

    /*
     * When the completion bit and the termination-error bit are set at the
     * same time, we do not treat it as an error.  In other words, it only
     * becomes an error we need to handle here if it is either a bus error
     * or a termination error with no completion.  0x01 is the termination
     * error code, so subtracting (err & completed) leaves the real error.
     */
    err -= err & completed;

    mxs_chan = &mxs_dma->mxs_chans[chan];

    if (err) {
        dev_dbg(mxs_dma->dma_device.dev,
            "%s: error in channel %d\n", __func__,
            chan);
        mxs_chan->status = DMA_ERROR;
        mxs_dma_reset_chan(&mxs_chan->chan);
    } else if (mxs_chan->status != DMA_COMPLETE) {
        if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
            mxs_chan->status = DMA_IN_PROGRESS;
            if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
                writel(1, mxs_dma->base +
                    HW_APBHX_CHn_SEMA(mxs_dma, chan));
        } else {
            mxs_chan->status = DMA_COMPLETE;
        }
    }

    if (mxs_chan->status == DMA_COMPLETE) {
        if (mxs_chan->reset)
            return IRQ_HANDLED;
        dma_cookie_complete(&mxs_chan->desc);
    }

    /* schedule tasklet on this channel */
    tasklet_schedule(&mxs_chan->tasklet);

    return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    int ret;

    mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
                       CCW_BLOCK_SIZE,
                       &mxs_chan->ccw_phys, GFP_KERNEL);
    if (!mxs_chan->ccw) {
        ret = -ENOMEM;
        goto err_alloc;
    }

    ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
              0, "mxs-dma", mxs_dma);
    if (ret)
        goto err_irq;

    ret = clk_prepare_enable(mxs_dma->clk);
    if (ret)
        goto err_clk;

    mxs_dma_reset_chan(chan);

    dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
    mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

    /* the descriptor is ready */
    async_tx_ack(&mxs_chan->desc);

    return 0;

err_clk:
    free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
    dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
            mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
    return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

    mxs_dma_disable_chan(chan);

    free_irq(mxs_chan->chan_irq, mxs_dma);

    dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
            mxs_chan->ccw, mxs_chan->ccw_phys);

    clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 *    [1] If there is only one DMA command in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *    [2] If there are two DMA commands in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(0);
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *    [3] If there are more than two DMA commands in the DMA chain, the code
 *        should be:
 *            ......
 *            ->device_prep_slave_sg(0);                                // First
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);                    // Last
 *            ......
 */
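
/*
 * Illustrative client-side sketch (not part of this driver): chaining a
 * command descriptor and a data transfer into one DMA chain, as a
 * NAND-style client might, following the pattern above.  The names
 * nand_chan, cmd_sgl and data_sgl are hypothetical.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	// First command: flags 0, so the chain stays open for appending.
 *	desc = dmaengine_prep_slave_sg(nand_chan, cmd_sgl, 1,
 *			DMA_MEM_TO_DEV, 0);
 *	// Last command: DMA_CTRL_ACK closes the chain.
 *	desc = dmaengine_prep_slave_sg(nand_chan, data_sgl, 1,
 *			DMA_MEM_TO_DEV, MXS_DMA_CTRL_WAIT4END | DMA_CTRL_ACK);
 *	if (desc)
 *		dmaengine_submit(desc);
 *	dma_async_issue_pending(nand_chan);
 */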
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    struct mxs_dma_ccw *ccw;
    struct scatterlist *sg;
    u32 i, j;
    u32 *pio;
    int idx = 0;

    if (mxs_chan->status == DMA_IN_PROGRESS)
        idx = mxs_chan->desc_count;

    if (sg_len + idx > NUM_CCW) {
        dev_err(mxs_dma->dma_device.dev,
                "maximum number of sg exceeded: %d > %d\n",
                sg_len, NUM_CCW);
        goto err_out;
    }

    mxs_chan->status = DMA_IN_PROGRESS;
    mxs_chan->flags = 0;

    /*
     * If the sg is prepared with append flag set, the sg
     * will be appended to the last prepared sg.
     */
    if (idx) {
        BUG_ON(idx < 1);
        ccw = &mxs_chan->ccw[idx - 1];
        ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
        ccw->bits |= CCW_CHAIN;
        ccw->bits &= ~CCW_IRQ;
        ccw->bits &= ~CCW_DEC_SEM;
    } else {
        idx = 0;
    }

    if (direction == DMA_TRANS_NONE) {
        ccw = &mxs_chan->ccw[idx++];
        pio = (u32 *) sgl;

        for (j = 0; j < sg_len;)
            ccw->pio_words[j++] = *pio++;

        ccw->bits = 0;
        ccw->bits |= CCW_IRQ;
        ccw->bits |= CCW_DEC_SEM;
        if (flags & MXS_DMA_CTRL_WAIT4END)
            ccw->bits |= CCW_WAIT4END;
        ccw->bits |= CCW_HALT_ON_TERM;
        ccw->bits |= CCW_TERM_FLUSH;
        ccw->bits |= BF_CCW(sg_len, PIO_NUM);
        ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
        if (flags & MXS_DMA_CTRL_WAIT4RDY)
            ccw->bits |= CCW_WAIT4RDY;
    } else {
        for_each_sg(sgl, sg, sg_len, i) {
            if (sg_dma_len(sg) > MAX_XFER_BYTES) {
                dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
                        sg_dma_len(sg), MAX_XFER_BYTES);
                goto err_out;
            }

            ccw = &mxs_chan->ccw[idx++];

            ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
            ccw->bufaddr = sg->dma_address;
            ccw->xfer_bytes = sg_dma_len(sg);

            ccw->bits = 0;
            ccw->bits |= CCW_CHAIN;
            ccw->bits |= CCW_HALT_ON_TERM;
            ccw->bits |= CCW_TERM_FLUSH;
            ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
                    MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
                    COMMAND);

            if (i + 1 == sg_len) {
                ccw->bits &= ~CCW_CHAIN;
                ccw->bits |= CCW_IRQ;
                ccw->bits |= CCW_DEC_SEM;
                if (flags & MXS_DMA_CTRL_WAIT4END)
                    ccw->bits |= CCW_WAIT4END;
            }
        }
    }
    mxs_chan->desc_count = idx;

    return &mxs_chan->desc;

err_out:
    mxs_chan->status = DMA_ERROR;
    return NULL;
}

static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    u32 num_periods = buf_len / period_len;
    u32 i = 0, buf = 0;

    if (mxs_chan->status == DMA_IN_PROGRESS)
        return NULL;

    mxs_chan->status = DMA_IN_PROGRESS;
    mxs_chan->flags |= MXS_DMA_SG_LOOP;
    mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;

    if (num_periods > NUM_CCW) {
        dev_err(mxs_dma->dma_device.dev,
                "maximum number of sg exceeded: %d > %d\n",
                num_periods, NUM_CCW);
        goto err_out;
    }

    if (period_len > MAX_XFER_BYTES) {
        dev_err(mxs_dma->dma_device.dev,
                "maximum period size exceeded: %zu > %d\n",
                period_len, MAX_XFER_BYTES);
        goto err_out;
    }

    while (buf < buf_len) {
        struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

        if (i + 1 == num_periods)
            ccw->next = mxs_chan->ccw_phys;
        else
            ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

        ccw->bufaddr = dma_addr;
        ccw->xfer_bytes = period_len;

        ccw->bits = 0;
        ccw->bits |= CCW_CHAIN;
        ccw->bits |= CCW_IRQ;
        ccw->bits |= CCW_HALT_ON_TERM;
        ccw->bits |= CCW_TERM_FLUSH;
        ccw->bits |= CCW_DEC_SEM;
        ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
                MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

        dma_addr += period_len;
        buf += period_len;

        i++;
    }
    mxs_chan->desc_count = i;

    return &mxs_chan->desc;

err_out:
    mxs_chan->status = DMA_ERROR;
    return NULL;
}
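
/*
 * Illustrative client-side sketch (not part of this driver): an audio-style
 * client sets up cyclic DMA through the generic dmaengine API, which lands
 * in mxs_dma_prep_dma_cyclic() above.  The names pcm_chan, buf_phys,
 * BUF_LEN, PERIOD_LEN and period_done are hypothetical.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(pcm_chan, buf_phys, BUF_LEN,
 *			PERIOD_LEN, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = period_done;	// invoked once per period
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(pcm_chan);
 *	}
 */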

static int mxs_dma_terminate_all(struct dma_chan *chan)
{
    mxs_dma_reset_chan(chan);
    mxs_dma_disable_chan(chan);

    return 0;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
            dma_cookie_t cookie, struct dma_tx_state *txstate)
{
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    u32 residue = 0;

    if (mxs_chan->status == DMA_IN_PROGRESS &&
            mxs_chan->flags & MXS_DMA_SG_LOOP) {
        struct mxs_dma_ccw *last_ccw;
        u32 bar;

        last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
        residue = last_ccw->xfer_bytes + last_ccw->bufaddr;

        bar = readl(mxs_dma->base +
                HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
        residue -= bar;
    }

    dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
            residue);

    return mxs_chan->status;
}
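
/*
 * Worked example (illustrative): for a cyclic transfer, the BAR register
 * holds the channel's current bus address.  If the last ccw covers
 * bufaddr = 0x1000 with xfer_bytes = 0x100, the end of the ring buffer is
 * 0x1100; with BAR = 0x1040 the reported residue is 0x1100 - 0x1040 = 0xc0
 * bytes.
 */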

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
    int ret;

    ret = clk_prepare_enable(mxs_dma->clk);
    if (ret)
        return ret;

    ret = stmp_reset_block(mxs_dma->base);
    if (ret)
        goto err_out;

    /* enable apbh burst */
    if (dma_is_apbh(mxs_dma)) {
        writel(BM_APBH_CTRL0_APB_BURST_EN,
            mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
        writel(BM_APBH_CTRL0_APB_BURST8_EN,
            mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
    }

    /* enable irq for all the channels */
    writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
        mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
    clk_disable_unprepare(mxs_dma->clk);
    return ret;
}

struct mxs_dma_filter_param {
    unsigned int chan_id;
};

static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
    struct mxs_dma_filter_param *param = fn_param;
    struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
    struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
    int chan_irq;

    if (chan->chan_id != param->chan_id)
        return false;

    chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
    if (chan_irq < 0)
        return false;

    mxs_chan->chan_irq = chan_irq;

    return true;
}

static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
                   struct of_dma *ofdma)
{
    struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
    dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
    struct mxs_dma_filter_param param;

    if (dma_spec->args_count != 1)
        return NULL;

    param.chan_id = dma_spec->args[0];

    if (param.chan_id >= mxs_dma->nr_channels)
        return NULL;

    return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
                     ofdma->of_node);
}
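
/*
 * Illustrative devicetree sketch (not part of this driver): a consumer
 * references a channel with a single cell holding the channel id, which
 * mxs_dma_xlate() above decodes.  The node layout below is a sketch
 * modelled on imx28; exact addresses and channel numbers come from the
 * SoC dtsi.
 *
 *	dma_apbx: dma-apbx@80024000 {
 *		compatible = "fsl,imx28-dma-apbx";
 *		...
 *	};
 *
 *	auart0: serial@8006a000 {
 *		dmas = <&dma_apbx 8>, <&dma_apbx 9>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * The client then calls dma_request_chan(dev, "rx") to resolve a channel
 * through this xlate.
 */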

static int __init mxs_dma_probe(struct platform_device *pdev)
{
    struct device_node *np = pdev->dev.of_node;
    const struct mxs_dma_type *dma_type;
    struct mxs_dma_engine *mxs_dma;
    struct resource *iores;
    int ret, i;

    mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
    if (!mxs_dma)
        return -ENOMEM;

    ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
    if (ret) {
        dev_err(&pdev->dev, "failed to read dma-channels\n");
        return ret;
    }

    dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
    mxs_dma->type = dma_type->type;
    mxs_dma->dev_id = dma_type->id;

    iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
    if (IS_ERR(mxs_dma->base))
        return PTR_ERR(mxs_dma->base);

    mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(mxs_dma->clk))
        return PTR_ERR(mxs_dma->clk);

    dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
    dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

    INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

    /* Initialize channel parameters */
    for (i = 0; i < MXS_DMA_CHANNELS; i++) {
        struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

        mxs_chan->mxs_dma = mxs_dma;
        mxs_chan->chan.device = &mxs_dma->dma_device;
        dma_cookie_init(&mxs_chan->chan);

        tasklet_setup(&mxs_chan->tasklet, mxs_dma_tasklet);

        /* Add the channel to mxs_chan list */
        list_add_tail(&mxs_chan->chan.device_node,
            &mxs_dma->dma_device.channels);
    }

    ret = mxs_dma_init(mxs_dma);
    if (ret)
        return ret;

    mxs_dma->pdev = pdev;
    mxs_dma->dma_device.dev = &pdev->dev;

    /* mxs_dma takes a maximum sg segment size of 0xff00 (65280) bytes */
    dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

    mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
    mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
    mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
    mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
    mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
    mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
    mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
    mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
    mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
    mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
    mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
    mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
    mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;

    ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
    if (ret) {
        dev_err(mxs_dma->dma_device.dev, "unable to register\n");
        return ret;
    }

    ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
    if (ret) {
        dev_err(mxs_dma->dma_device.dev,
            "failed to register controller\n");
    }

    dev_info(mxs_dma->dma_device.dev, "initialized\n");

    return 0;
}

static struct platform_driver mxs_dma_driver = {
    .driver     = {
        .name   = "mxs-dma",
        .of_match_table = mxs_dma_dt_ids,
    },
};

static int __init mxs_dma_module_init(void)
{
    return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);