0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Driver for the Cirrus Logic EP93xx DMA Controller
0004  *
0005  * Copyright (C) 2011 Mika Westerberg
0006  *
0007  * DMA M2P implementation is based on the original
0008  * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
0009  *
0010  *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
0011  *   Copyright (C) 2006 Applied Data Systems
0012  *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
0013  *
0014  * This driver is based on dw_dmac and amba-pl08x drivers.
0015  */
0016 
0017 #include <linux/clk.h>
0018 #include <linux/init.h>
0019 #include <linux/interrupt.h>
0020 #include <linux/dmaengine.h>
0021 #include <linux/module.h>
0022 #include <linux/mod_devicetable.h>
0023 #include <linux/platform_device.h>
0024 #include <linux/slab.h>
0025 
0026 #include <linux/platform_data/dma-ep93xx.h>
0027 
0028 #include "dmaengine.h"
0029 
0030 /* M2P registers */
0031 #define M2P_CONTROL         0x0000
0032 #define M2P_CONTROL_STALLINT        BIT(0)
0033 #define M2P_CONTROL_NFBINT      BIT(1)
0034 #define M2P_CONTROL_CH_ERROR_INT    BIT(3)
0035 #define M2P_CONTROL_ENABLE      BIT(4)
0036 #define M2P_CONTROL_ICE         BIT(6)
0037 
0038 #define M2P_INTERRUPT           0x0004
0039 #define M2P_INTERRUPT_STALL     BIT(0)
0040 #define M2P_INTERRUPT_NFB       BIT(1)
0041 #define M2P_INTERRUPT_ERROR     BIT(3)
0042 
0043 #define M2P_PPALLOC         0x0008
0044 #define M2P_STATUS          0x000c
0045 
0046 #define M2P_MAXCNT0         0x0020
0047 #define M2P_BASE0           0x0024
0048 #define M2P_MAXCNT1         0x0030
0049 #define M2P_BASE1           0x0034
0050 
0051 #define M2P_STATE_IDLE          0
0052 #define M2P_STATE_STALL         1
0053 #define M2P_STATE_ON            2
0054 #define M2P_STATE_NEXT          3
0055 
0056 /* M2M registers */
0057 #define M2M_CONTROL         0x0000
0058 #define M2M_CONTROL_DONEINT     BIT(2)
0059 #define M2M_CONTROL_ENABLE      BIT(3)
0060 #define M2M_CONTROL_START       BIT(4)
0061 #define M2M_CONTROL_DAH         BIT(11)
0062 #define M2M_CONTROL_SAH         BIT(12)
0063 #define M2M_CONTROL_PW_SHIFT        9
0064 #define M2M_CONTROL_PW_8        (0 << M2M_CONTROL_PW_SHIFT)
0065 #define M2M_CONTROL_PW_16       (1 << M2M_CONTROL_PW_SHIFT)
0066 #define M2M_CONTROL_PW_32       (2 << M2M_CONTROL_PW_SHIFT)
0067 #define M2M_CONTROL_PW_MASK     (3 << M2M_CONTROL_PW_SHIFT)
0068 #define M2M_CONTROL_TM_SHIFT        13
0069 #define M2M_CONTROL_TM_TX       (1 << M2M_CONTROL_TM_SHIFT)
0070 #define M2M_CONTROL_TM_RX       (2 << M2M_CONTROL_TM_SHIFT)
0071 #define M2M_CONTROL_NFBINT      BIT(21)
0072 #define M2M_CONTROL_RSS_SHIFT       22
0073 #define M2M_CONTROL_RSS_SSPRX       (1 << M2M_CONTROL_RSS_SHIFT)
0074 #define M2M_CONTROL_RSS_SSPTX       (2 << M2M_CONTROL_RSS_SHIFT)
0075 #define M2M_CONTROL_RSS_IDE     (3 << M2M_CONTROL_RSS_SHIFT)
0076 #define M2M_CONTROL_NO_HDSK     BIT(24)
0077 #define M2M_CONTROL_PWSC_SHIFT      25
0078 
0079 #define M2M_INTERRUPT           0x0004
0080 #define M2M_INTERRUPT_MASK      6
0081 
0082 #define M2M_STATUS          0x000c
0083 #define M2M_STATUS_CTL_SHIFT        1
0084 #define M2M_STATUS_CTL_IDLE     (0 << M2M_STATUS_CTL_SHIFT)
0085 #define M2M_STATUS_CTL_STALL        (1 << M2M_STATUS_CTL_SHIFT)
0086 #define M2M_STATUS_CTL_MEMRD        (2 << M2M_STATUS_CTL_SHIFT)
0087 #define M2M_STATUS_CTL_MEMWR        (3 << M2M_STATUS_CTL_SHIFT)
0088 #define M2M_STATUS_CTL_BWCWAIT      (4 << M2M_STATUS_CTL_SHIFT)
0089 #define M2M_STATUS_CTL_MASK     (7 << M2M_STATUS_CTL_SHIFT)
0090 #define M2M_STATUS_BUF_SHIFT        4
0091 #define M2M_STATUS_BUF_NO       (0 << M2M_STATUS_BUF_SHIFT)
0092 #define M2M_STATUS_BUF_ON       (1 << M2M_STATUS_BUF_SHIFT)
0093 #define M2M_STATUS_BUF_NEXT     (2 << M2M_STATUS_BUF_SHIFT)
0094 #define M2M_STATUS_BUF_MASK     (3 << M2M_STATUS_BUF_SHIFT)
0095 #define M2M_STATUS_DONE         BIT(6)
0096 
0097 #define M2M_BCR0            0x0010
0098 #define M2M_BCR1            0x0014
0099 #define M2M_SAR_BASE0           0x0018
0100 #define M2M_SAR_BASE1           0x001c
0101 #define M2M_DAR_BASE0           0x002c
0102 #define M2M_DAR_BASE1           0x0030
0103 
0104 #define DMA_MAX_CHAN_BYTES      0xffff
0105 #define DMA_MAX_CHAN_DESCRIPTORS    32
0106 
0107 struct ep93xx_dma_engine;
0108 static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
0109                      enum dma_transfer_direction dir,
0110                      struct dma_slave_config *config);
0111 
0112 /**
0113  * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
0114  * @src_addr: source address of the transaction
0115  * @dst_addr: destination address of the transaction
0116  * @size: size of the transaction (in bytes)
0117  * @complete: this descriptor is completed
0118  * @txd: dmaengine API descriptor
0119  * @tx_list: list of linked descriptors
0120  * @node: link used for putting this into a channel queue
0121  */
0122 struct ep93xx_dma_desc {
0123     u32             src_addr;
0124     u32             dst_addr;
0125     size_t              size;
0126     bool                complete;
0127     struct dma_async_tx_descriptor  txd;
0128     struct list_head        tx_list;
0129     struct list_head        node;
0130 };
0131 
0132 /**
0133  * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
0134  * @chan: dmaengine API channel
0135  * @edma: pointer to the engine device
0136  * @regs: memory mapped registers
0137  * @irq: interrupt number of the channel
0138  * @clk: clock used by this channel
0139  * @tasklet: channel specific tasklet used for callbacks
0140  * @lock: lock protecting the fields following
0141  * @flags: flags for the channel
0142  * @buffer: which buffer to use next (0/1)
0143  * @active: flattened chain of descriptors currently being processed
0144  * @queue: pending descriptors which are handled next
0145  * @free_list: list of free descriptors which can be used
0146  * @runtime_addr: physical address currently used as dest/src (M2M only). This
0147  *                is set via .device_config before the slave operation is
0148  *                prepared
0149  * @runtime_ctrl: M2M runtime values for the control register.
0150  * @slave_config: slave configuration
0151  *
0152  * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
0153  * we use a slightly different scheme here: @active points to the head of a
0154  * flattened DMA descriptor chain.
0155  *
0156  * @queue holds pending transactions. These are linked through the first
0157  * descriptor in the chain. When a descriptor is moved to the @active queue,
0158  * the first and chained descriptors are flattened into a single list.
0159  *
0160  * @chan.private holds pointer to &struct ep93xx_dma_data which contains
0161  * necessary channel configuration information. For memcpy channels this must
0162  * be %NULL.
0163  */
0164 struct ep93xx_dma_chan {
0165     struct dma_chan         chan;
0166     const struct ep93xx_dma_engine  *edma;
0167     void __iomem            *regs;
0168     int             irq;
0169     struct clk          *clk;
0170     struct tasklet_struct       tasklet;
0171     /* protects the fields following */
0172     spinlock_t          lock;
0173     unsigned long           flags;
0174 /* Channel is configured for cyclic transfers */
0175 #define EP93XX_DMA_IS_CYCLIC        0
0176 
0177     int             buffer;
0178     struct list_head        active;
0179     struct list_head        queue;
0180     struct list_head        free_list;
0181     u32             runtime_addr;
0182     u32             runtime_ctrl;
0183     struct dma_slave_config     slave_config;
0184 };
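
/*
 * Editor's note - illustrative sketch, not part of the driver: one way a
 * client might hand &struct ep93xx_dma_data to this driver is through
 * @chan.private from a dma_request_channel() filter function. The struct,
 * constants and dmaengine calls below are real kernel APIs used in this file;
 * the function and variable names are hypothetical.
 *
 *	static bool ep93xx_client_dma_filter(struct dma_chan *chan, void *param)
 *	{
 *		// Accept the channel and attach the EP93xx specific data.
 *		chan->private = param;
 *		return true;
 *	}
 *
 *	static struct dma_chan *ep93xx_client_request_tx_chan(void)
 *	{
 *		static struct ep93xx_dma_data data = {
 *			.port		= EP93XX_DMA_SSP,
 *			.direction	= DMA_MEM_TO_DEV,
 *			.name		= "ssp-tx",
 *		};
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_SLAVE, mask);
 *		return dma_request_channel(mask, ep93xx_client_dma_filter, &data);
 *	}
 */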
0185 
0186 /**
0187  * struct ep93xx_dma_engine - the EP93xx DMA engine instance
0188  * @dma_dev: holds the dmaengine device
0189  * @m2m: is this an M2M or M2P device
0190  * @hw_setup: method which sets the channel up for operation
0191  * @hw_synchronize: synchronizes DMA channel termination to current context
0192  * @hw_shutdown: shuts the channel down and flushes whatever is left
0193  * @hw_submit: pushes active descriptor(s) to the hardware
0194  * @hw_interrupt: handle the interrupt
0195  * @num_channels: number of channels for this instance
0196  * @channels: array of channels
0197  *
0198  * There is one instance of this struct for the M2P channels and one for the
0199  * M2M channels. hw_xxx() methods are used to perform operations which are
0200  * different on M2M and M2P channels. These methods are called with channel
0201  * lock held and interrupts disabled so they cannot sleep.
0202  */
0203 struct ep93xx_dma_engine {
0204     struct dma_device   dma_dev;
0205     bool            m2m;
0206     int         (*hw_setup)(struct ep93xx_dma_chan *);
0207     void            (*hw_synchronize)(struct ep93xx_dma_chan *);
0208     void            (*hw_shutdown)(struct ep93xx_dma_chan *);
0209     void            (*hw_submit)(struct ep93xx_dma_chan *);
0210     int         (*hw_interrupt)(struct ep93xx_dma_chan *);
0211 #define INTERRUPT_UNKNOWN   0
0212 #define INTERRUPT_DONE      1
0213 #define INTERRUPT_NEXT_BUFFER   2
0214 
0215     size_t          num_channels;
0216     struct ep93xx_dma_chan  channels[];
0217 };
0218 
0219 static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
0220 {
0221     return &edmac->chan.dev->device;
0222 }
0223 
0224 static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
0225 {
0226     return container_of(chan, struct ep93xx_dma_chan, chan);
0227 }
0228 
0229 /**
0230  * ep93xx_dma_set_active - set new active descriptor chain
0231  * @edmac: channel
0232  * @desc: head of the new active descriptor chain
0233  *
0234  * Sets @desc to be the head of the new active descriptor chain. This is the
0235  * chain which is processed next. The active list must be empty before calling
0236  * this function.
0237  *
0238  * Called with @edmac->lock held and interrupts disabled.
0239  */
0240 static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
0241                   struct ep93xx_dma_desc *desc)
0242 {
0243     BUG_ON(!list_empty(&edmac->active));
0244 
0245     list_add_tail(&desc->node, &edmac->active);
0246 
0247     /* Flatten the @desc->tx_list chain into @edmac->active list */
0248     while (!list_empty(&desc->tx_list)) {
0249         struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
0250             struct ep93xx_dma_desc, node);
0251 
0252         /*
0253          * We copy the callback parameters from the first descriptor
0254          * to all the chained descriptors. This way we can call the
0255          * callback without having to find out the first descriptor in
0256          * the chain. Useful for cyclic transfers.
0257          */
0258         d->txd.callback = desc->txd.callback;
0259         d->txd.callback_param = desc->txd.callback_param;
0260 
0261         list_move_tail(&d->node, &edmac->active);
0262     }
0263 }
0264 
0265 /* Called with @edmac->lock held and interrupts disabled */
0266 static struct ep93xx_dma_desc *
0267 ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
0268 {
0269     return list_first_entry_or_null(&edmac->active,
0270                     struct ep93xx_dma_desc, node);
0271 }
0272 
0273 /**
0274  * ep93xx_dma_advance_active - advances to the next active descriptor
0275  * @edmac: channel
0276  *
0277  * Advances the active descriptor to the next one in @edmac->active and
0278  * returns %true if we still have descriptors in the chain to process.
0279  * Otherwise returns %false.
0280  *
0281  * When the channel is in cyclic mode this always returns %true.
0282  *
0283  * Called with @edmac->lock held and interrupts disabled.
0284  */
0285 static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
0286 {
0287     struct ep93xx_dma_desc *desc;
0288 
0289     list_rotate_left(&edmac->active);
0290 
0291     if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
0292         return true;
0293 
0294     desc = ep93xx_dma_get_active(edmac);
0295     if (!desc)
0296         return false;
0297 
0298     /*
0299      * If txd.cookie is set it means that we are back in the first
0300      * descriptor in the chain and hence done with it.
0301      */
0302     return !desc->txd.cookie;
0303 }
0304 
0305 /*
0306  * M2P DMA implementation
0307  */
0308 
0309 static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
0310 {
0311     writel(control, edmac->regs + M2P_CONTROL);
0312     /*
0313      * EP93xx User's Guide states that we must perform a dummy read after
0314      * write to the control register.
0315      */
0316     readl(edmac->regs + M2P_CONTROL);
0317 }
0318 
0319 static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
0320 {
0321     struct ep93xx_dma_data *data = edmac->chan.private;
0322     u32 control;
0323 
0324     writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
0325 
0326     control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
0327         | M2P_CONTROL_ENABLE;
0328     m2p_set_control(edmac, control);
0329 
0330     edmac->buffer = 0;
0331 
0332     return 0;
0333 }
0334 
0335 static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
0336 {
0337     return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
0338 }
0339 
0340 static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
0341 {
0342     unsigned long flags;
0343     u32 control;
0344 
0345     spin_lock_irqsave(&edmac->lock, flags);
0346     control = readl(edmac->regs + M2P_CONTROL);
0347     control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
0348     m2p_set_control(edmac, control);
0349     spin_unlock_irqrestore(&edmac->lock, flags);
0350 
0351     while (m2p_channel_state(edmac) >= M2P_STATE_ON)
0352         schedule();
0353 }
0354 
0355 static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
0356 {
0357     m2p_set_control(edmac, 0);
0358 
0359     while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
0360         dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
0361 }
0362 
0363 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
0364 {
0365     struct ep93xx_dma_desc *desc;
0366     u32 bus_addr;
0367 
0368     desc = ep93xx_dma_get_active(edmac);
0369     if (!desc) {
0370         dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
0371         return;
0372     }
0373 
0374     if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
0375         bus_addr = desc->src_addr;
0376     else
0377         bus_addr = desc->dst_addr;
0378 
0379     if (edmac->buffer == 0) {
0380         writel(desc->size, edmac->regs + M2P_MAXCNT0);
0381         writel(bus_addr, edmac->regs + M2P_BASE0);
0382     } else {
0383         writel(desc->size, edmac->regs + M2P_MAXCNT1);
0384         writel(bus_addr, edmac->regs + M2P_BASE1);
0385     }
0386 
0387     edmac->buffer ^= 1;
0388 }
0389 
0390 static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
0391 {
0392     u32 control = readl(edmac->regs + M2P_CONTROL);
0393 
0394     m2p_fill_desc(edmac);
0395     control |= M2P_CONTROL_STALLINT;
0396 
0397     if (ep93xx_dma_advance_active(edmac)) {
0398         m2p_fill_desc(edmac);
0399         control |= M2P_CONTROL_NFBINT;
0400     }
0401 
0402     m2p_set_control(edmac, control);
0403 }
0404 
0405 static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
0406 {
0407     u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
0408     u32 control;
0409 
0410     if (irq_status & M2P_INTERRUPT_ERROR) {
0411         struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
0412 
0413         /* Clear the error interrupt */
0414         writel(1, edmac->regs + M2P_INTERRUPT);
0415 
0416         /*
0417          * It seems that there is no easy way of reporting errors back
0418          * to the client, so we just report the error here and continue
0419          * as usual.
0420          *
0421          * Revisit this when there is a mechanism to report back the
0422          * errors.
0423          */
0424         dev_err(chan2dev(edmac),
0425             "DMA transfer failed! Details:\n"
0426             "\tcookie   : %d\n"
0427             "\tsrc_addr : 0x%08x\n"
0428             "\tdst_addr : 0x%08x\n"
0429             "\tsize     : %zu\n",
0430             desc->txd.cookie, desc->src_addr, desc->dst_addr,
0431             desc->size);
0432     }
0433 
0434     /*
0435      * Even the latest E2 silicon revision sometimes asserts the STALL
0436      * interrupt instead of NFB. Therefore we treat them equally, based on
0437      * the amount of data we still have to transfer.
0438      */
0439     if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
0440         return INTERRUPT_UNKNOWN;
0441 
0442     if (ep93xx_dma_advance_active(edmac)) {
0443         m2p_fill_desc(edmac);
0444         return INTERRUPT_NEXT_BUFFER;
0445     }
0446 
0447     /* Disable interrupts */
0448     control = readl(edmac->regs + M2P_CONTROL);
0449     control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
0450     m2p_set_control(edmac, control);
0451 
0452     return INTERRUPT_DONE;
0453 }
0454 
0455 /*
0456  * M2M DMA implementation
0457  */
0458 
0459 static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
0460 {
0461     const struct ep93xx_dma_data *data = edmac->chan.private;
0462     u32 control = 0;
0463 
0464     if (!data) {
0465         /* This is memcpy channel, nothing to configure */
0466         writel(control, edmac->regs + M2M_CONTROL);
0467         return 0;
0468     }
0469 
0470     switch (data->port) {
0471     case EP93XX_DMA_SSP:
0472         /*
0473          * This was found by experimenting - anything less than 5
0474          * causes the channel to perform only a partial transfer, which
0475          * leads to problems since we then don't get the DONE interrupt.
0476          */
0477         control = (5 << M2M_CONTROL_PWSC_SHIFT);
0478         control |= M2M_CONTROL_NO_HDSK;
0479 
0480         if (data->direction == DMA_MEM_TO_DEV) {
0481             control |= M2M_CONTROL_DAH;
0482             control |= M2M_CONTROL_TM_TX;
0483             control |= M2M_CONTROL_RSS_SSPTX;
0484         } else {
0485             control |= M2M_CONTROL_SAH;
0486             control |= M2M_CONTROL_TM_RX;
0487             control |= M2M_CONTROL_RSS_SSPRX;
0488         }
0489         break;
0490 
0491     case EP93XX_DMA_IDE:
0492         /*
0493          * This IDE part is totally untested. Values below are taken
0494          * from the EP93xx User's Guide and might not be correct.
0495          */
0496         if (data->direction == DMA_MEM_TO_DEV) {
0497             /* Worst case from the UG */
0498             control = (3 << M2M_CONTROL_PWSC_SHIFT);
0499             control |= M2M_CONTROL_DAH;
0500             control |= M2M_CONTROL_TM_TX;
0501         } else {
0502             control = (2 << M2M_CONTROL_PWSC_SHIFT);
0503             control |= M2M_CONTROL_SAH;
0504             control |= M2M_CONTROL_TM_RX;
0505         }
0506 
0507         control |= M2M_CONTROL_NO_HDSK;
0508         control |= M2M_CONTROL_RSS_IDE;
0509         control |= M2M_CONTROL_PW_16;
0510         break;
0511 
0512     default:
0513         return -EINVAL;
0514     }
0515 
0516     writel(control, edmac->regs + M2M_CONTROL);
0517     return 0;
0518 }
0519 
0520 static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
0521 {
0522     /* Just disable the channel */
0523     writel(0, edmac->regs + M2M_CONTROL);
0524 }
0525 
0526 static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
0527 {
0528     struct ep93xx_dma_desc *desc;
0529 
0530     desc = ep93xx_dma_get_active(edmac);
0531     if (!desc) {
0532         dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
0533         return;
0534     }
0535 
0536     if (edmac->buffer == 0) {
0537         writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
0538         writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
0539         writel(desc->size, edmac->regs + M2M_BCR0);
0540     } else {
0541         writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
0542         writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
0543         writel(desc->size, edmac->regs + M2M_BCR1);
0544     }
0545 
0546     edmac->buffer ^= 1;
0547 }
0548 
0549 static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
0550 {
0551     struct ep93xx_dma_data *data = edmac->chan.private;
0552     u32 control = readl(edmac->regs + M2M_CONTROL);
0553 
0554     /*
0555      * Since we allow clients to configure PW (peripheral width) we always
0556      * clear PW bits here and then set them according to what is given in
0557      * the runtime configuration.
0558      */
0559     control &= ~M2M_CONTROL_PW_MASK;
0560     control |= edmac->runtime_ctrl;
0561 
0562     m2m_fill_desc(edmac);
0563     control |= M2M_CONTROL_DONEINT;
0564 
0565     if (ep93xx_dma_advance_active(edmac)) {
0566         m2m_fill_desc(edmac);
0567         control |= M2M_CONTROL_NFBINT;
0568     }
0569 
0570     /*
0571      * Now we can finally enable the channel. For M2M channel this must be
0572      * done _after_ the BCRx registers are programmed.
0573      */
0574     control |= M2M_CONTROL_ENABLE;
0575     writel(control, edmac->regs + M2M_CONTROL);
0576 
0577     if (!data) {
0578         /*
0579          * For memcpy channels the software trigger must be asserted
0580          * in order to start the memcpy operation.
0581          */
0582         control |= M2M_CONTROL_START;
0583         writel(control, edmac->regs + M2M_CONTROL);
0584     }
0585 }
0586 
0587 /*
0588  * According to the EP93xx User's Guide, we should receive the DONE interrupt
0589  * when all M2M DMA controller transactions complete normally. This is not
0590  * always the case - sometimes the EP93xx M2M DMA asserts DONE while the DMA
0591  * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
0592  * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
0593  * operation). In effect, disabling the channel when only the DONE bit is set
0594  * could stop a currently running DMA transfer. To avoid this, we use the
0595  * Buffer FSM and Control FSM to check the current state of the DMA channel.
0597 static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
0598 {
0599     u32 status = readl(edmac->regs + M2M_STATUS);
0600     u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
0601     u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
0602     bool done = status & M2M_STATUS_DONE;
0603     bool last_done;
0604     u32 control;
0605     struct ep93xx_dma_desc *desc;
0606 
0607     /* Accept only DONE and NFB interrupts */
0608     if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
0609         return INTERRUPT_UNKNOWN;
0610 
0611     if (done) {
0612         /* Clear the DONE bit */
0613         writel(0, edmac->regs + M2M_INTERRUPT);
0614     }
0615 
0616     /*
0617      * Check whether we are done with descriptors or not. This, together
0618      * with the DMA channel state, determines the action to take in the interrupt.
0619      */
0620     desc = ep93xx_dma_get_active(edmac);
0621     last_done = !desc || desc->txd.cookie;
0622 
0623     /*
0624      * Use M2M DMA Buffer FSM and Control FSM to check current state of
0625      * DMA channel. Using DONE and NFB bits from channel status register
0626      * or bits from channel interrupt register is not reliable.
0627      */
0628     if (!last_done &&
0629         (buf_fsm == M2M_STATUS_BUF_NO ||
0630          buf_fsm == M2M_STATUS_BUF_ON)) {
0631         /*
0632          * Two buffers are ready for update when Buffer FSM is in
0633          * DMA_NO_BUF state. Only one buffer can be prepared without
0634          * disabling the channel or polling the DONE bit.
0635          * To simplify things, always prepare only one buffer.
0636          */
0637         if (ep93xx_dma_advance_active(edmac)) {
0638             m2m_fill_desc(edmac);
0639             if (done && !edmac->chan.private) {
0640                 /* Software trigger for memcpy channel */
0641                 control = readl(edmac->regs + M2M_CONTROL);
0642                 control |= M2M_CONTROL_START;
0643                 writel(control, edmac->regs + M2M_CONTROL);
0644             }
0645             return INTERRUPT_NEXT_BUFFER;
0646         } else {
0647             last_done = true;
0648         }
0649     }
0650 
0651     /*
0652      * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
0653      * and Control FSM is in DMA_STALL state.
0654      */
0655     if (last_done &&
0656         buf_fsm == M2M_STATUS_BUF_NO &&
0657         ctl_fsm == M2M_STATUS_CTL_STALL) {
0658         /* Disable interrupts and the channel */
0659         control = readl(edmac->regs + M2M_CONTROL);
0660         control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
0661                 | M2M_CONTROL_ENABLE);
0662         writel(control, edmac->regs + M2M_CONTROL);
0663         return INTERRUPT_DONE;
0664     }
0665 
0666     /*
0667      * Nothing to do this time.
0668      */
0669     return INTERRUPT_NEXT_BUFFER;
0670 }
0671 
0672 /*
0673  * DMA engine API implementation
0674  */
0675 
0676 static struct ep93xx_dma_desc *
0677 ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
0678 {
0679     struct ep93xx_dma_desc *desc, *_desc;
0680     struct ep93xx_dma_desc *ret = NULL;
0681     unsigned long flags;
0682 
0683     spin_lock_irqsave(&edmac->lock, flags);
0684     list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
0685         if (async_tx_test_ack(&desc->txd)) {
0686             list_del_init(&desc->node);
0687 
0688             /* Re-initialize the descriptor */
0689             desc->src_addr = 0;
0690             desc->dst_addr = 0;
0691             desc->size = 0;
0692             desc->complete = false;
0693             desc->txd.cookie = 0;
0694             desc->txd.callback = NULL;
0695             desc->txd.callback_param = NULL;
0696 
0697             ret = desc;
0698             break;
0699         }
0700     }
0701     spin_unlock_irqrestore(&edmac->lock, flags);
0702     return ret;
0703 }
0704 
0705 static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
0706                 struct ep93xx_dma_desc *desc)
0707 {
0708     if (desc) {
0709         unsigned long flags;
0710 
0711         spin_lock_irqsave(&edmac->lock, flags);
0712         list_splice_init(&desc->tx_list, &edmac->free_list);
0713         list_add(&desc->node, &edmac->free_list);
0714         spin_unlock_irqrestore(&edmac->lock, flags);
0715     }
0716 }
0717 
0718 /**
0719  * ep93xx_dma_advance_work - start processing the next pending transaction
0720  * @edmac: channel
0721  *
0722  * If we have pending transactions queued and we are currently idling, this
0723  * function takes the next queued transaction from the @edmac->queue and
0724  * pushes it to the hardware for execution.
0725  */
0726 static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
0727 {
0728     struct ep93xx_dma_desc *new;
0729     unsigned long flags;
0730 
0731     spin_lock_irqsave(&edmac->lock, flags);
0732     if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
0733         spin_unlock_irqrestore(&edmac->lock, flags);
0734         return;
0735     }
0736 
0737     /* Take the next descriptor from the pending queue */
0738     new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
0739     list_del_init(&new->node);
0740 
0741     ep93xx_dma_set_active(edmac, new);
0742 
0743     /* Push it to the hardware */
0744     edmac->edma->hw_submit(edmac);
0745     spin_unlock_irqrestore(&edmac->lock, flags);
0746 }
0747 
0748 static void ep93xx_dma_tasklet(struct tasklet_struct *t)
0749 {
0750     struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
0751     struct ep93xx_dma_desc *desc, *d;
0752     struct dmaengine_desc_callback cb;
0753     LIST_HEAD(list);
0754 
0755     memset(&cb, 0, sizeof(cb));
0756     spin_lock_irq(&edmac->lock);
0757     /*
0758      * If dma_terminate_all() was called before we get to run, the active
0759      * list has become empty. If that happens we aren't supposed to do
0760      * anything more than call ep93xx_dma_advance_work().
0761      */
0762     desc = ep93xx_dma_get_active(edmac);
0763     if (desc) {
0764         if (desc->complete) {
0765             /* mark descriptor complete for non cyclic case only */
0766             if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
0767                 dma_cookie_complete(&desc->txd);
0768             list_splice_init(&edmac->active, &list);
0769         }
0770         dmaengine_desc_get_callback(&desc->txd, &cb);
0771     }
0772     spin_unlock_irq(&edmac->lock);
0773 
0774     /* Pick up the next descriptor from the queue */
0775     ep93xx_dma_advance_work(edmac);
0776 
0777     /* Now we can release all the chained descriptors */
0778     list_for_each_entry_safe(desc, d, &list, node) {
0779         dma_descriptor_unmap(&desc->txd);
0780         ep93xx_dma_desc_put(edmac, desc);
0781     }
0782 
0783     dmaengine_desc_callback_invoke(&cb, NULL);
0784 }
0785 
0786 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
0787 {
0788     struct ep93xx_dma_chan *edmac = dev_id;
0789     struct ep93xx_dma_desc *desc;
0790     irqreturn_t ret = IRQ_HANDLED;
0791 
0792     spin_lock(&edmac->lock);
0793 
0794     desc = ep93xx_dma_get_active(edmac);
0795     if (!desc) {
0796         dev_warn(chan2dev(edmac),
0797              "got interrupt while active list is empty\n");
0798         spin_unlock(&edmac->lock);
0799         return IRQ_NONE;
0800     }
0801 
0802     switch (edmac->edma->hw_interrupt(edmac)) {
0803     case INTERRUPT_DONE:
0804         desc->complete = true;
0805         tasklet_schedule(&edmac->tasklet);
0806         break;
0807 
0808     case INTERRUPT_NEXT_BUFFER:
0809         if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
0810             tasklet_schedule(&edmac->tasklet);
0811         break;
0812 
0813     default:
0814         dev_warn(chan2dev(edmac), "unknown interrupt!\n");
0815         ret = IRQ_NONE;
0816         break;
0817     }
0818 
0819     spin_unlock(&edmac->lock);
0820     return ret;
0821 }
0822 
0823 /**
0824  * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
0825  * @tx: descriptor to be executed
0826  *
0827  * Executes the given descriptor on the hardware or, if the hardware is busy,
0828  * queues the descriptor to be executed later. Returns a cookie which can be
0829  * used to poll the status of the descriptor.
0830  */
0831 static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
0832 {
0833     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
0834     struct ep93xx_dma_desc *desc;
0835     dma_cookie_t cookie;
0836     unsigned long flags;
0837 
0838     spin_lock_irqsave(&edmac->lock, flags);
0839     cookie = dma_cookie_assign(tx);
0840 
0841     desc = container_of(tx, struct ep93xx_dma_desc, txd);
0842 
0843     /*
0844      * If nothing is currently being processed, we push this descriptor
0845      * directly to the hardware. Otherwise we add the descriptor
0846      * to the pending queue.
0847      */
0848     if (list_empty(&edmac->active)) {
0849         ep93xx_dma_set_active(edmac, desc);
0850         edmac->edma->hw_submit(edmac);
0851     } else {
0852         list_add_tail(&desc->node, &edmac->queue);
0853     }
0854 
0855     spin_unlock_irqrestore(&edmac->lock, flags);
0856     return cookie;
0857 }
0858 
0859 /**
0860  * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
0861  * @chan: channel to allocate resources
0862  *
0863  * Allocates the necessary resources for the given DMA channel and returns
0864  * the number of allocated descriptors for the channel. A negative errno
0865  * is returned in case of failure.
0866  */
0867 static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
0868 {
0869     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
0870     struct ep93xx_dma_data *data = chan->private;
0871     const char *name = dma_chan_name(chan);
0872     int ret, i;
0873 
0874     /* Sanity check the channel parameters */
0875     if (!edmac->edma->m2m) {
0876         if (!data)
0877             return -EINVAL;
0878         if (data->port < EP93XX_DMA_I2S1 ||
0879             data->port > EP93XX_DMA_IRDA)
0880             return -EINVAL;
0881         if (data->direction != ep93xx_dma_chan_direction(chan))
0882             return -EINVAL;
0883     } else {
0884         if (data) {
0885             switch (data->port) {
0886             case EP93XX_DMA_SSP:
0887             case EP93XX_DMA_IDE:
0888                 if (!is_slave_direction(data->direction))
0889                     return -EINVAL;
0890                 break;
0891             default:
0892                 return -EINVAL;
0893             }
0894         }
0895     }
0896 
0897     if (data && data->name)
0898         name = data->name;
0899 
0900     ret = clk_prepare_enable(edmac->clk);
0901     if (ret)
0902         return ret;
0903 
0904     ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
0905     if (ret)
0906         goto fail_clk_disable;
0907 
0908     spin_lock_irq(&edmac->lock);
0909     dma_cookie_init(&edmac->chan);
0910     ret = edmac->edma->hw_setup(edmac);
0911     spin_unlock_irq(&edmac->lock);
0912 
0913     if (ret)
0914         goto fail_free_irq;
0915 
0916     for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
0917         struct ep93xx_dma_desc *desc;
0918 
0919         desc = kzalloc(sizeof(*desc), GFP_KERNEL);
0920         if (!desc) {
0921             dev_warn(chan2dev(edmac), "not enough descriptors\n");
0922             break;
0923         }
0924 
0925         INIT_LIST_HEAD(&desc->tx_list);
0926 
0927         dma_async_tx_descriptor_init(&desc->txd, chan);
0928         desc->txd.flags = DMA_CTRL_ACK;
0929         desc->txd.tx_submit = ep93xx_dma_tx_submit;
0930 
0931         ep93xx_dma_desc_put(edmac, desc);
0932     }
0933 
0934     return i;
0935 
0936 fail_free_irq:
0937     free_irq(edmac->irq, edmac);
0938 fail_clk_disable:
0939     clk_disable_unprepare(edmac->clk);
0940 
0941     return ret;
0942 }
0943 
0944 /**
0945  * ep93xx_dma_free_chan_resources - release resources for the channel
0946  * @chan: channel
0947  *
0948  * Function releases all the resources allocated for the given channel.
0949  * The channel must be idle when this is called.
0950  */
0951 static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
0952 {
0953     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
0954     struct ep93xx_dma_desc *desc, *d;
0955     unsigned long flags;
0956     LIST_HEAD(list);
0957 
0958     BUG_ON(!list_empty(&edmac->active));
0959     BUG_ON(!list_empty(&edmac->queue));
0960 
0961     spin_lock_irqsave(&edmac->lock, flags);
0962     edmac->edma->hw_shutdown(edmac);
0963     edmac->runtime_addr = 0;
0964     edmac->runtime_ctrl = 0;
0965     edmac->buffer = 0;
0966     list_splice_init(&edmac->free_list, &list);
0967     spin_unlock_irqrestore(&edmac->lock, flags);
0968 
0969     list_for_each_entry_safe(desc, d, &list, node)
0970         kfree(desc);
0971 
0972     clk_disable_unprepare(edmac->clk);
0973     free_irq(edmac->irq, edmac);
0974 }
0975 
0976 /**
0977  * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
0978  * @chan: channel
0979  * @dest: destination bus address
0980  * @src: source bus address
0981  * @len: size of the transaction
0982  * @flags: flags for the descriptor
0983  *
0984  * Returns a valid DMA descriptor or %NULL in case of failure.
0985  */
0986 static struct dma_async_tx_descriptor *
0987 ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
0988                dma_addr_t src, size_t len, unsigned long flags)
0989 {
0990     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
0991     struct ep93xx_dma_desc *desc, *first;
0992     size_t bytes, offset;
0993 
0994     first = NULL;
0995     for (offset = 0; offset < len; offset += bytes) {
0996         desc = ep93xx_dma_desc_get(edmac);
0997         if (!desc) {
0998             dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
0999             goto fail;
1000         }
1001 
1002         bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
1003 
1004         desc->src_addr = src + offset;
1005         desc->dst_addr = dest + offset;
1006         desc->size = bytes;
1007 
1008         if (!first)
1009             first = desc;
1010         else
1011             list_add_tail(&desc->node, &first->tx_list);
1012     }
1013 
1014     first->txd.cookie = -EBUSY;
1015     first->txd.flags = flags;
1016 
1017     return &first->txd;
1018 fail:
1019     ep93xx_dma_desc_put(edmac, first);
1020     return NULL;
1021 }
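
/*
 * Editor's note - illustrative sketch, not part of the driver: a memcpy
 * client goes through the generic dmaengine API rather than calling the prep
 * function above directly. The buffer handles and the completion callback
 * are hypothetical; the dmaengine calls are the standard kernel API.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_memcpy_done;		// hypothetical callback
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */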
1022 
1023 /**
1024  * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
1025  * @chan: channel
1026  * @sgl: list of buffers to transfer
1027  * @sg_len: number of entries in @sgl
1028  * @dir: direction of the DMA transfer
1029  * @flags: flags for the descriptor
1030  * @context: operation context (ignored)
1031  *
1032  * Returns a valid DMA descriptor or %NULL in case of failure.
1033  */
1034 static struct dma_async_tx_descriptor *
1035 ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1036              unsigned int sg_len, enum dma_transfer_direction dir,
1037              unsigned long flags, void *context)
1038 {
1039     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1040     struct ep93xx_dma_desc *desc, *first;
1041     struct scatterlist *sg;
1042     int i;
1043 
1044     if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1045         dev_warn(chan2dev(edmac),
1046              "channel was configured with different direction\n");
1047         return NULL;
1048     }
1049 
1050     if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1051         dev_warn(chan2dev(edmac),
1052              "channel is already used for cyclic transfers\n");
1053         return NULL;
1054     }
1055 
1056     ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1057 
1058     first = NULL;
1059     for_each_sg(sgl, sg, sg_len, i) {
1060         size_t len = sg_dma_len(sg);
1061 
1062         if (len > DMA_MAX_CHAN_BYTES) {
1063             dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
1064                  len);
1065             goto fail;
1066         }
1067 
1068         desc = ep93xx_dma_desc_get(edmac);
1069         if (!desc) {
1070             dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1071             goto fail;
1072         }
1073 
1074         if (dir == DMA_MEM_TO_DEV) {
1075             desc->src_addr = sg_dma_address(sg);
1076             desc->dst_addr = edmac->runtime_addr;
1077         } else {
1078             desc->src_addr = edmac->runtime_addr;
1079             desc->dst_addr = sg_dma_address(sg);
1080         }
1081         desc->size = len;
1082 
1083         if (!first)
1084             first = desc;
1085         else
1086             list_add_tail(&desc->node, &first->tx_list);
1087     }
1088 
1089     first->txd.cookie = -EBUSY;
1090     first->txd.flags = flags;
1091 
1092     return &first->txd;
1093 
1094 fail:
1095     ep93xx_dma_desc_put(edmac, first);
1096     return NULL;
1097 }
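
/*
 * Editor's note - illustrative sketch, not part of the driver: a slave
 * client typically sets the peripheral address and bus width first (used on
 * the M2M path via ep93xx_dma_slave_config_write()) and then prepares the
 * scatter-gather transfer. The register address, scatterlist and callback
 * names are hypothetical; the dmaengine calls are the standard kernel API.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= ssp_phys_base + SSP_DATA_REG,	// hypothetical
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_tx_done;		// hypothetical callback
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */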
1098 
1099 /**
1100  * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1101  * @chan: channel
1102  * @dma_addr: DMA mapped address of the buffer
1103  * @buf_len: length of the buffer (in bytes)
1104  * @period_len: length of a single period
1105  * @dir: direction of the operation
1106  * @flags: tx descriptor status flags
1107  *
1108  * Prepares a descriptor for a cyclic DMA operation. This means that once the
1109  * descriptor is submitted, we will keep submitting @period_len sized
1110  * buffers and calling the callback once each period has elapsed. The
1111  * transfer terminates only when the client calls dmaengine_terminate_all()
1112  * for this channel.
1113  *
1114  * Returns a valid DMA descriptor or %NULL in case of failure.
1115  */
1116 static struct dma_async_tx_descriptor *
1117 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1118                size_t buf_len, size_t period_len,
1119                enum dma_transfer_direction dir, unsigned long flags)
1120 {
1121     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1122     struct ep93xx_dma_desc *desc, *first;
1123     size_t offset = 0;
1124 
1125     if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1126         dev_warn(chan2dev(edmac),
1127              "channel was configured with different direction\n");
1128         return NULL;
1129     }
1130 
1131     if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1132         dev_warn(chan2dev(edmac),
1133              "channel is already used for cyclic transfers\n");
1134         return NULL;
1135     }
1136 
1137     if (period_len > DMA_MAX_CHAN_BYTES) {
1138         dev_warn(chan2dev(edmac), "too big period length %zu\n",
1139              period_len);
1140         return NULL;
1141     }
1142 
1143     ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1144 
1145     /* Split the buffer into period size chunks */
1146     first = NULL;
1147     for (offset = 0; offset < buf_len; offset += period_len) {
1148         desc = ep93xx_dma_desc_get(edmac);
1149         if (!desc) {
1150             dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1151             goto fail;
1152         }
1153 
1154         if (dir == DMA_MEM_TO_DEV) {
1155             desc->src_addr = dma_addr + offset;
1156             desc->dst_addr = edmac->runtime_addr;
1157         } else {
1158             desc->src_addr = edmac->runtime_addr;
1159             desc->dst_addr = dma_addr + offset;
1160         }
1161 
1162         desc->size = period_len;
1163 
1164         if (!first)
1165             first = desc;
1166         else
1167             list_add_tail(&desc->node, &first->tx_list);
1168     }
1169 
1170     first->txd.cookie = -EBUSY;
1171 
1172     return &first->txd;
1173 
1174 fail:
1175     ep93xx_dma_desc_put(edmac, first);
1176     return NULL;
1177 }
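
/*
 * Editor's note - illustrative sketch, not part of the driver: a cyclic
 * client (typically audio) submits one descriptor covering the whole ring
 * buffer and gets the callback once per period until it terminates the
 * channel. The buffer handles and callback are hypothetical; the dmaengine
 * calls are the standard kernel API.
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_elapsed;	// hypothetical, called per period
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_sync(chan);		// stop and synchronize
 */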
1178 
1179 /**
1180  * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1181  * current context.
1182  * @chan: channel
1183  *
1184  * Synchronizes the DMA channel termination to the current context. When this
1185  * function returns it is guaranteed that all transfers for previously issued
1186  * descriptors have stopped and it is safe to free the memory associated
1187  * with them. Furthermore it is guaranteed that all complete callback functions
1188  * for a previously submitted descriptor have finished running and it is safe to
1189  * free resources accessed from within the complete callbacks.
1190  */
1191 static void ep93xx_dma_synchronize(struct dma_chan *chan)
1192 {
1193     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1194 
1195     if (edmac->edma->hw_synchronize)
1196         edmac->edma->hw_synchronize(edmac);
1197 }
1198 
1199 /**
1200  * ep93xx_dma_terminate_all - terminate all transactions
1201  * @chan: channel
1202  *
1203  * Stops all DMA transactions. All descriptors are put back to the
1204  * @edmac->free_list and callbacks are _not_ called.
1205  */
1206 static int ep93xx_dma_terminate_all(struct dma_chan *chan)
1207 {
1208     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1209     struct ep93xx_dma_desc *desc, *_d;
1210     unsigned long flags;
1211     LIST_HEAD(list);
1212 
1213     spin_lock_irqsave(&edmac->lock, flags);
1214     /* First we disable and flush the DMA channel */
1215     edmac->edma->hw_shutdown(edmac);
1216     clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
1217     list_splice_init(&edmac->active, &list);
1218     list_splice_init(&edmac->queue, &list);
1219     /*
1220      * We then re-enable the channel. This way we can continue submitting
1221      * the descriptors by just calling ->hw_submit() again.
1222      */
1223     edmac->edma->hw_setup(edmac);
1224     spin_unlock_irqrestore(&edmac->lock, flags);
1225 
1226     list_for_each_entry_safe(desc, _d, &list, node)
1227         ep93xx_dma_desc_put(edmac, desc);
1228 
1229     return 0;
1230 }
1231 
1232 static int ep93xx_dma_slave_config(struct dma_chan *chan,
1233                    struct dma_slave_config *config)
1234 {
1235     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1236 
1237     memcpy(&edmac->slave_config, config, sizeof(*config));
1238 
1239     return 0;
1240 }
1241 
1242 static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
1243                      enum dma_transfer_direction dir,
1244                      struct dma_slave_config *config)
1245 {
1246     struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1247     enum dma_slave_buswidth width;
1248     unsigned long flags;
1249     u32 addr, ctrl;
1250 
1251     if (!edmac->edma->m2m)
1252         return -EINVAL;
1253 
1254     switch (dir) {
1255     case DMA_DEV_TO_MEM:
1256         width = config->src_addr_width;
1257         addr = config->src_addr;
1258         break;
1259 
1260     case DMA_MEM_TO_DEV:
1261         width = config->dst_addr_width;
1262         addr = config->dst_addr;
1263         break;
1264 
1265     default:
1266         return -EINVAL;
1267     }
1268 
1269     switch (width) {
1270     case DMA_SLAVE_BUSWIDTH_1_BYTE:
1271         ctrl = 0;
1272         break;
1273     case DMA_SLAVE_BUSWIDTH_2_BYTES:
1274         ctrl = M2M_CONTROL_PW_16;
1275         break;
1276     case DMA_SLAVE_BUSWIDTH_4_BYTES:
1277         ctrl = M2M_CONTROL_PW_32;
1278         break;
1279     default:
1280         return -EINVAL;
1281     }
1282 
1283     spin_lock_irqsave(&edmac->lock, flags);
1284     edmac->runtime_addr = addr;
1285     edmac->runtime_ctrl = ctrl;
1286     spin_unlock_irqrestore(&edmac->lock, flags);
1287 
1288     return 0;
1289 }
1290 
1291 /**
1292  * ep93xx_dma_tx_status - check if a transaction is completed
1293  * @chan: channel
1294  * @cookie: transaction specific cookie
1295  * @state: state of the transaction is stored here if given
1296  *
1297  * This function can be used to query state of a given transaction.
1298  */
1299 static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1300                         dma_cookie_t cookie,
1301                         struct dma_tx_state *state)
1302 {
1303     return dma_cookie_status(chan, cookie, state);
1304 }
1305 
1306 /**
1307  * ep93xx_dma_issue_pending - push pending transactions to the hardware
1308  * @chan: channel
1309  *
1310  * When this function is called, all pending transactions are pushed to the
1311  * hardware and executed.
1312  */
1313 static void ep93xx_dma_issue_pending(struct dma_chan *chan)
1314 {
1315     ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
1316 }
1317 
1318 static int __init ep93xx_dma_probe(struct platform_device *pdev)
1319 {
1320     struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1321     struct ep93xx_dma_engine *edma;
1322     struct dma_device *dma_dev;
1323     size_t edma_size;
1324     int ret, i;
1325 
1326     edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
1327     edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
1328     if (!edma)
1329         return -ENOMEM;
1330 
1331     dma_dev = &edma->dma_dev;
1332     edma->m2m = platform_get_device_id(pdev)->driver_data;
1333     edma->num_channels = pdata->num_channels;
1334 
1335     INIT_LIST_HEAD(&dma_dev->channels);
1336     for (i = 0; i < pdata->num_channels; i++) {
1337         const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
1338         struct ep93xx_dma_chan *edmac = &edma->channels[i];
1339 
1340         edmac->chan.device = dma_dev;
1341         edmac->regs = cdata->base;
1342         edmac->irq = cdata->irq;
1343         edmac->edma = edma;
1344 
1345         edmac->clk = clk_get(NULL, cdata->name);
1346         if (IS_ERR(edmac->clk)) {
1347             dev_warn(&pdev->dev, "failed to get clock for %s\n",
1348                  cdata->name);
1349             continue;
1350         }
1351 
1352         spin_lock_init(&edmac->lock);
1353         INIT_LIST_HEAD(&edmac->active);
1354         INIT_LIST_HEAD(&edmac->queue);
1355         INIT_LIST_HEAD(&edmac->free_list);
1356         tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);
1357 
1358         list_add_tail(&edmac->chan.device_node,
1359                   &dma_dev->channels);
1360     }
1361 
1362     dma_cap_zero(dma_dev->cap_mask);
1363     dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1364     dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1365 
1366     dma_dev->dev = &pdev->dev;
1367     dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1368     dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1369     dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1370     dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1371     dma_dev->device_config = ep93xx_dma_slave_config;
1372     dma_dev->device_synchronize = ep93xx_dma_synchronize;
1373     dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1374     dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1375     dma_dev->device_tx_status = ep93xx_dma_tx_status;
1376 
1377     dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1378 
1379     if (edma->m2m) {
1380         dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1381         dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1382 
1383         edma->hw_setup = m2m_hw_setup;
1384         edma->hw_shutdown = m2m_hw_shutdown;
1385         edma->hw_submit = m2m_hw_submit;
1386         edma->hw_interrupt = m2m_hw_interrupt;
1387     } else {
1388         dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1389 
1390         edma->hw_synchronize = m2p_hw_synchronize;
1391         edma->hw_setup = m2p_hw_setup;
1392         edma->hw_shutdown = m2p_hw_shutdown;
1393         edma->hw_submit = m2p_hw_submit;
1394         edma->hw_interrupt = m2p_hw_interrupt;
1395     }
1396 
1397     ret = dma_async_device_register(dma_dev);
1398     if (unlikely(ret)) {
1399         for (i = 0; i < edma->num_channels; i++) {
1400             struct ep93xx_dma_chan *edmac = &edma->channels[i];
1401             if (!IS_ERR_OR_NULL(edmac->clk))
1402                 clk_put(edmac->clk);
1403         }
1404         kfree(edma);
1405     } else {
1406         dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
1407              edma->m2m ? "M" : "P");
1408     }
1409 
1410     return ret;
1411 }
1412 
1413 static const struct platform_device_id ep93xx_dma_driver_ids[] = {
1414     { "ep93xx-dma-m2p", 0 },
1415     { "ep93xx-dma-m2m", 1 },
1416     { },
1417 };
1418 
1419 static struct platform_driver ep93xx_dma_driver = {
1420     .driver     = {
1421         .name   = "ep93xx-dma",
1422     },
1423     .id_table   = ep93xx_dma_driver_ids,
1424 };
1425 
1426 static int __init ep93xx_dma_module_init(void)
1427 {
1428     return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
1429 }
1430 subsys_initcall(ep93xx_dma_module_init);
1431 
1432 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1433 MODULE_DESCRIPTION("EP93xx DMA driver");
1434 MODULE_LICENSE("GPL");