/* (LXR source-browser page chrome removed — file content begins below) */

0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Copyright (C) 2014 Emilio López
0004  * Emilio López <emilio@elopez.com.ar>
0005  */
0006 
0007 #include <linux/bitmap.h>
0008 #include <linux/bitops.h>
0009 #include <linux/clk.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/dmapool.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/module.h>
0015 #include <linux/of_dma.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/slab.h>
0018 #include <linux/spinlock.h>
0019 
0020 #include "virt-dma.h"
0021 
/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING			BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
#define SUN4I_NDMA_ADDR_MODE_IO			1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
#define SUN4I_DDMA_ADDR_MODE_IO			1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY			BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)

/*
 * Dedicated DMA parameter register layout. All fields are programmed
 * minus one (a value of n means n+1 units to the hardware).
 */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4

/* Normal DMA register offsets (relative to the channel register base) */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG			0x0
#define SUN4I_NDMA_SRC_ADDR_REG			0x4
#define SUN4I_NDMA_DST_ADDR_REG			0x8
#define SUN4I_NDMA_BYTE_COUNT_REG		0xC

/* Dedicated DMA register offsets (relative to the channel register base) */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG			0x0
#define SUN4I_DDMA_SRC_ADDR_REG			0x4
#define SUN4I_DDMA_DST_ADDR_REG			0x8
#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
#define SUN4I_DDMA_PARA_REG			0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there's 29 and 21
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS					\
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS						\
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)

/*
 * This set of SUN4I_DDMA timing parameters were found experimentally while
 * working with the SPI driver and seem to make it behave correctly
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS		\
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |			\
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |			\
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |			\
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))

/*
 * Normal DMA supports individual transfers (segments) up to 128k.
 * Dedicated DMA supports transfers up to 16M. We can only report
 * one size limit, so we have to use the smaller value.
 */
#define SUN4I_NDMA_MAX_SEG_SIZE		SZ_128K
#define SUN4I_DDMA_MAX_SEG_SIZE		SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE		SUN4I_NDMA_MAX_SEG_SIZE
/* State of one physical (hardware) DMA channel */
struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced, NULL when the pchan is idle */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};
0143 
/* Virtual DMA channel, as handed out to dmaengine clients */
struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	/* Slave configuration for this channel */
	struct dma_slave_config		cfg;
	/* pchan currently backing this vchan, NULL when not running */
	struct sun4i_dma_pchan		*pchan;
	/* Promise currently programmed into the pchan */
	struct sun4i_dma_promise	*processing;
	/* Contract the processing promise belongs to */
	struct sun4i_dma_contract	*contract;
	/* DRQ endpoint number this vchan talks to */
	u8				endpoint;
	/* Must this vchan run on a dedicated pchan? */
	int				is_dedicated;
};
0153 
/*
 * A promise describes a single hardware transfer: the pre-computed
 * register values (cfg, para) plus the source, destination and length
 * to program into a pchan.
 */
struct sun4i_dma_promise {
	u32				cfg;	/* channel configuration register value */
	u32				para;	/* DDMA timing parameter register value */
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;	/* node in a contract's demand lists */
};
0162 
/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	/* Promises not yet executed */
	struct list_head		demands;
	/* Promises already executed, kept for cyclic recycling and freeing */
	struct list_head		completed_demands;
	/* Does this contract loop over its promises forever? */
	bool				is_cyclic : 1;
	/* Use half-done interrupts to pack two periods per promise */
	bool				use_half_int : 1;
};
0171 
/* Per-controller driver state */
struct sun4i_dma_dev {
	/* Which pchans are currently claimed by a vchan */
	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
	struct dma_device		slave;
	struct sun4i_dma_pchan		*pchans;
	struct sun4i_dma_vchan		*vchans;
	/* Controller register base */
	void __iomem			*base;
	struct clk			*clk;
	int				irq;
	/* Protects pchans_used and read-modify-writes of the IRQ enable reg */
	spinlock_t			lock;
};
0182 
/* Get the driver-private device state from the generic dma_device */
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}
0187 
/* Get the driver-private vchan from the generic dma_chan */
static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}
0192 
/* Get the contract embedding a given virt-dma descriptor */
static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}
0197 
/* Device to report dev_dbg()/dev_err() messages against for a channel */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
0202 
0203 static int convert_burst(u32 maxburst)
0204 {
0205     if (maxburst > 8)
0206         return -EINVAL;
0207 
0208     /* 1 -> 0, 4 -> 1, 8 -> 2 */
0209     return (maxburst >> 2);
0210 }
0211 
0212 static int convert_buswidth(enum dma_slave_buswidth addr_width)
0213 {
0214     if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
0215         return -EINVAL;
0216 
0217     /* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
0218     return (addr_width >> 1);
0219 }
0220 
/* dmaengine device_free_chan_resources callback */
static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}
0227 
/*
 * Find a free pchan of the kind required by @vchan and mark it as
 * used. Returns the claimed pchan, or NULL if none is available.
 */
static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = SUN4I_NDMA_NR_MAX_CHANNELS;
		max = SUN4I_DMA_NR_MAX_CHANNELS;
	} else {
		i = 0;
		max = SUN4I_NDMA_NR_MAX_CHANNELS;
	}

	spin_lock_irqsave(&priv->lock, flags);
	/* Claim the first unused pchan in the [i, max) range, if any */
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}
0258 
/* Return a pchan to the free pool */
static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	/* Index of this pchan in the pchans array / used bitmap */
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}
0272 
/*
 * Program a promise's precomputed values into a pchan's registers.
 *
 * Configure addresses and misc parameters depending on type;
 * SUN4I_DDMA has an extra field with timing parameters. The cfg
 * register is written last — presumably its LOADING bit is what kicks
 * off the transfer (TODO: confirm against the datasheet).
 */
static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}
0293 
/*
 * Enable or disable the half-done and end-of-transfer interrupts for
 * one pchan. Each pchan owns two consecutive bits in the IRQ enable
 * register: bit 2n for half-done, bit 2n+1 for end-of-transfer.
 */
static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	/* priv->lock serializes the read-modify-write of the shared register */
	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}
0320 
/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 *
 * Returns 0 on success (including "nothing pending"), -EBUSY when no
 * pchan is free or the vchan is already processing a promise.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"processing something to this endpoint already\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	/* Skip over (and complete) any contracts with no demands left */
	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* The contract has been completed so mark it as such */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it reality */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}
0396 
0397 static int sanitize_config(struct dma_slave_config *sconfig,
0398                enum dma_transfer_direction direction)
0399 {
0400     switch (direction) {
0401     case DMA_MEM_TO_DEV:
0402         if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
0403             !sconfig->dst_maxburst)
0404             return -EINVAL;
0405 
0406         if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
0407             sconfig->src_addr_width = sconfig->dst_addr_width;
0408 
0409         if (!sconfig->src_maxburst)
0410             sconfig->src_maxburst = sconfig->dst_maxburst;
0411 
0412         break;
0413 
0414     case DMA_DEV_TO_MEM:
0415         if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
0416             !sconfig->src_maxburst)
0417             return -EINVAL;
0418 
0419         if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
0420             sconfig->dst_addr_width = sconfig->src_addr_width;
0421 
0422         if (!sconfig->dst_maxburst)
0423             sconfig->dst_maxburst = sconfig->src_maxburst;
0424 
0425         break;
0426     default:
0427         return 0;
0428     }
0429 
0430     return 0;
0431 }
0432 
/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * A NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 *
 * Returns NULL on invalid configuration or allocation failure. Note
 * that sanitize_config() may fill in missing fields of @sconfig.
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction)
{
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig, direction);
	if (ret)
		return NULL;

	/*
	 * NOTE(review): len is not checked against SUN4I_NDMA_MAX_SEG_SIZE
	 * here; callers are trusted to respect the advertised limit.
	 */
	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}
0499 
/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 *
 * Unlike generate_ndma_promise(), this does not run sanitize_config()
 * on @sconfig — callers provide an already-usable configuration.
 * Returns NULL on allocation failure or invalid burst/width values.
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}
0556 
0557 /*
0558  * Generate a contract
0559  *
0560  * Contracts function as DMA descriptors. As our hardware does not support
0561  * linked lists, we need to implement SG via software. We use a contract
0562  * to hold all the pieces of the request and process them serially one
0563  * after another. Each piece is represented as a promise.
0564  */
0565 static struct sun4i_dma_contract *generate_dma_contract(void)
0566 {
0567     struct sun4i_dma_contract *contract;
0568 
0569     contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
0570     if (!contract)
0571         return NULL;
0572 
0573     INIT_LIST_HEAD(&contract->demands);
0574     INIT_LIST_HEAD(&contract->completed_demands);
0575 
0576     return contract;
0577 }
0578 
/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 *
 * When the demands list is exhausted, the completed promises are
 * recycled back into it. Assumes the contract holds at least one
 * promise overall (list_first_entry() on a fully empty contract would
 * return a bogus pointer).
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		/* Start the cycle over: move completed promises back */
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}
0602 
/*
 * Free a contract and all its associated promises
 * (virt-dma descriptor free callback)
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/* Free all the demands and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}
0620 
/*
 * Prepare a memory-to-memory copy of @len bytes from @src to @dest.
 *
 * NOTE(review): this overwrites the vchan's stored slave config
 * (widths and maxbursts) to maximize throughput; a subsequent slave
 * transfer on the same channel relies on device_config being called
 * again first — confirm callers do so.
 */
static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus aligned addresses, so
	 * choose the best one so we get decent performance. We also
	 * maximize the burst size for this same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = 8;
	sconfig->dst_maxburst = 8;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode: both endpoints are SDRAM */
	if (vchan->is_dedicated) {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
	} else {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}
0670 
0671 static struct dma_async_tx_descriptor *
0672 sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
0673               size_t period_len, enum dma_transfer_direction dir,
0674               unsigned long flags)
0675 {
0676     struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
0677     struct dma_slave_config *sconfig = &vchan->cfg;
0678     struct sun4i_dma_promise *promise;
0679     struct sun4i_dma_contract *contract;
0680     dma_addr_t src, dest;
0681     u32 endpoints;
0682     int nr_periods, offset, plength, i;
0683     u8 ram_type, io_mode, linear_mode;
0684 
0685     if (!is_slave_direction(dir)) {
0686         dev_err(chan2dev(chan), "Invalid DMA direction\n");
0687         return NULL;
0688     }
0689 
0690     contract = generate_dma_contract();
0691     if (!contract)
0692         return NULL;
0693 
0694     contract->is_cyclic = 1;
0695 
0696     if (vchan->is_dedicated) {
0697         io_mode = SUN4I_DDMA_ADDR_MODE_IO;
0698         linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
0699         ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
0700     } else {
0701         io_mode = SUN4I_NDMA_ADDR_MODE_IO;
0702         linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
0703         ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
0704     }
0705 
0706     if (dir == DMA_MEM_TO_DEV) {
0707         src = buf;
0708         dest = sconfig->dst_addr;
0709         endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
0710                 SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
0711                 SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
0712                 SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
0713     } else {
0714         src = sconfig->src_addr;
0715         dest = buf;
0716         endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
0717                 SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
0718                 SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
0719                 SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
0720     }
0721 
0722     /*
0723      * We will be using half done interrupts to make two periods
0724      * out of a promise, so we need to program the DMA engine less
0725      * often
0726      */
0727 
0728     /*
0729      * The engine can interrupt on half-transfer, so we can use
0730      * this feature to program the engine half as often as if we
0731      * didn't use it (keep in mind the hardware doesn't support
0732      * linked lists).
0733      *
0734      * Say you have a set of periods (| marks the start/end, I for
0735      * interrupt, P for programming the engine to do a new
0736      * transfer), the easy but slow way would be to do
0737      *
0738      *  |---|---|---|---| (periods / promises)
0739      *  P  I,P I,P I,P  I
0740      *
0741      * Using half transfer interrupts you can do
0742      *
0743      *  |-------|-------| (promises as configured on hw)
0744      *  |---|---|---|---| (periods)
0745      *  P   I  I,P  I   I
0746      *
0747      * Which requires half the engine programming for the same
0748      * functionality.
0749      *
0750      * This only works if two periods fit in a single promise. That will
0751      * always be the case for dedicated DMA, where the hardware has a much
0752      * larger maximum transfer size than advertised to clients.
0753      */
0754     if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
0755         period_len *= 2;
0756         contract->use_half_int = 1;
0757     }
0758 
0759     nr_periods = DIV_ROUND_UP(len, period_len);
0760     for (i = 0; i < nr_periods; i++) {
0761         /* Calculate the offset in the buffer and the length needed */
0762         offset = i * period_len;
0763         plength = min((len - offset), period_len);
0764         if (dir == DMA_MEM_TO_DEV)
0765             src = buf + offset;
0766         else
0767             dest = buf + offset;
0768 
0769         /* Make the promise */
0770         if (vchan->is_dedicated)
0771             promise = generate_ddma_promise(chan, src, dest,
0772                             plength, sconfig);
0773         else
0774             promise = generate_ndma_promise(chan, src, dest,
0775                             plength, sconfig, dir);
0776 
0777         if (!promise) {
0778             /* TODO: should we free everything? */
0779             return NULL;
0780         }
0781         promise->cfg |= endpoints;
0782 
0783         /* Then add it to the contract */
0784         list_add_tail(&promise->list, &contract->demands);
0785     }
0786 
0787     /* And add it to the vchan */
0788     return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
0789 }
0790 
0791 static struct dma_async_tx_descriptor *
0792 sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
0793             unsigned int sg_len, enum dma_transfer_direction dir,
0794             unsigned long flags, void *context)
0795 {
0796     struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
0797     struct dma_slave_config *sconfig = &vchan->cfg;
0798     struct sun4i_dma_promise *promise;
0799     struct sun4i_dma_contract *contract;
0800     u8 ram_type, io_mode, linear_mode;
0801     struct scatterlist *sg;
0802     dma_addr_t srcaddr, dstaddr;
0803     u32 endpoints, para;
0804     int i;
0805 
0806     if (!sgl)
0807         return NULL;
0808 
0809     if (!is_slave_direction(dir)) {
0810         dev_err(chan2dev(chan), "Invalid DMA direction\n");
0811         return NULL;
0812     }
0813 
0814     contract = generate_dma_contract();
0815     if (!contract)
0816         return NULL;
0817 
0818     if (vchan->is_dedicated) {
0819         io_mode = SUN4I_DDMA_ADDR_MODE_IO;
0820         linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
0821         ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
0822     } else {
0823         io_mode = SUN4I_NDMA_ADDR_MODE_IO;
0824         linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
0825         ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
0826     }
0827 
0828     if (dir == DMA_MEM_TO_DEV)
0829         endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
0830                 SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
0831                 SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
0832                 SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
0833     else
0834         endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
0835                 SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
0836                 SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
0837                 SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
0838 
0839     for_each_sg(sgl, sg, sg_len, i) {
0840         /* Figure out addresses */
0841         if (dir == DMA_MEM_TO_DEV) {
0842             srcaddr = sg_dma_address(sg);
0843             dstaddr = sconfig->dst_addr;
0844         } else {
0845             srcaddr = sconfig->src_addr;
0846             dstaddr = sg_dma_address(sg);
0847         }
0848 
0849         /*
0850          * These are the magic DMA engine timings that keep SPI going.
0851          * I haven't seen any interface on DMAEngine to configure
0852          * timings, and so far they seem to work for everything we
0853          * support, so I've kept them here. I don't know if other
0854          * devices need different timings because, as usual, we only
0855          * have the "para" bitfield meanings, but no comment on what
0856          * the values should be when doing a certain operation :|
0857          */
0858         para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;
0859 
0860         /* And make a suitable promise */
0861         if (vchan->is_dedicated)
0862             promise = generate_ddma_promise(chan, srcaddr, dstaddr,
0863                             sg_dma_len(sg),
0864                             sconfig);
0865         else
0866             promise = generate_ndma_promise(chan, srcaddr, dstaddr,
0867                             sg_dma_len(sg),
0868                             sconfig, dir);
0869 
0870         if (!promise)
0871             return NULL; /* TODO: should we free everything? */
0872 
0873         promise->cfg |= endpoints;
0874         promise->para = para;
0875 
0876         /* Then add it to the contract */
0877         list_add_tail(&promise->list, &contract->demands);
0878     }
0879 
0880     /*
0881      * Once we've got all the promises ready, add the contract
0882      * to the pending list on the vchan
0883      */
0884     return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
0885 }
0886 
/*
 * dmaengine device_terminate_all callback: abort all activity on the
 * channel. Halts and releases the bound pchan (if any), clears the
 * vchan bookkeeping so the channel can be reused, and frees every
 * queued descriptor. Always returns 0.
 */
static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
    struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
    struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
    struct sun4i_dma_pchan *pchan = vchan->pchan;
    LIST_HEAD(head);
    unsigned long flags;

    /* Collect every descriptor still owned by this vchan */
    spin_lock_irqsave(&vchan->vc.lock, flags);
    vchan_get_all_descriptors(&vchan->vc, &head);
    spin_unlock_irqrestore(&vchan->vc.lock, flags);

    /*
     * Clearing the configuration register will halt the pchan. Interrupts
     * may still trigger, so don't forget to disable them.
     */
    if (pchan) {
        if (pchan->is_dedicated)
            writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
        else
            writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
        set_pchan_interrupt(priv, pchan, 0, 0);
        release_pchan(priv, pchan);
    }

    spin_lock_irqsave(&vchan->vc.lock, flags);
    /* Clear these so the vchan is usable again */
    vchan->processing = NULL;
    vchan->pchan = NULL;
    spin_unlock_irqrestore(&vchan->vc.lock, flags);

    /* Free the collected descriptors outside the vchan lock */
    vchan_dma_desc_free_list(&vchan->vc, &head);

    return 0;
}
0922 
0923 static int sun4i_dma_config(struct dma_chan *chan,
0924                 struct dma_slave_config *config)
0925 {
0926     struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
0927 
0928     memcpy(&vchan->cfg, config, sizeof(*config));
0929 
0930     return 0;
0931 }
0932 
0933 static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
0934                        struct of_dma *ofdma)
0935 {
0936     struct sun4i_dma_dev *priv = ofdma->of_dma_data;
0937     struct sun4i_dma_vchan *vchan;
0938     struct dma_chan *chan;
0939     u8 is_dedicated = dma_spec->args[0];
0940     u8 endpoint = dma_spec->args[1];
0941 
0942     /* Check if type is Normal or Dedicated */
0943     if (is_dedicated != 0 && is_dedicated != 1)
0944         return NULL;
0945 
0946     /* Make sure the endpoint looks sane */
0947     if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
0948         (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
0949         return NULL;
0950 
0951     chan = dma_get_any_slave_channel(&priv->slave);
0952     if (!chan)
0953         return NULL;
0954 
0955     /* Assign the endpoint to the vchan */
0956     vchan = to_sun4i_dma_vchan(chan);
0957     vchan->is_dedicated = is_dedicated;
0958     vchan->endpoint = endpoint;
0959 
0960     return chan;
0961 }
0962 
/*
 * dmaengine device_tx_status callback: report the completion state of
 * a cookie and, when the caller provides a state, an estimate of the
 * remaining (residue) bytes for the matching descriptor.
 */
static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
                       dma_cookie_t cookie,
                       struct dma_tx_state *state)
{
    struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
    struct sun4i_dma_pchan *pchan = vchan->pchan;
    struct sun4i_dma_contract *contract;
    struct sun4i_dma_promise *promise;
    struct virt_dma_desc *vd;
    unsigned long flags;
    enum dma_status ret;
    size_t bytes = 0;

    /* No residue to compute if complete or the caller didn't ask */
    ret = dma_cookie_status(chan, cookie, state);
    if (!state || (ret == DMA_COMPLETE))
        return ret;

    spin_lock_irqsave(&vchan->vc.lock, flags);
    vd = vchan_find_desc(&vchan->vc, cookie);
    if (!vd) /* descriptor no longer queued; report a residue of 0 */
        goto exit;
    contract = to_sun4i_dma_contract(vd);

    /* Start from the sum of all still-pending promises */
    list_for_each_entry(promise, &contract->demands, list)
        bytes += promise->len;

    /*
     * The hardware is configured to return the remaining byte
     * quantity. If possible, replace the first listed element's
     * full size with the actual remaining amount
     */
    promise = list_first_entry_or_null(&contract->demands,
                       struct sun4i_dma_promise, list);
    if (promise && pchan) {
        bytes -= promise->len;
        if (pchan->is_dedicated)
            bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
        else
            bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
    }

exit:

    dma_set_residue(state, bytes);
    spin_unlock_irqrestore(&vchan->vc.lock, flags);

    return ret;
}
1011 
1012 static void sun4i_dma_issue_pending(struct dma_chan *chan)
1013 {
1014     struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
1015     struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
1016     unsigned long flags;
1017 
1018     spin_lock_irqsave(&vchan->vc.lock, flags);
1019 
1020     /*
1021      * If there are pending transactions for this vchan, push one of
1022      * them into the engine to get the ball rolling.
1023      */
1024     if (vchan_issue_pending(&vchan->vc))
1025         __execute_vchan_pending(priv, vchan);
1026 
1027     spin_unlock_irqrestore(&vchan->vc.lock, flags);
1028 }
1029 
/*
 * Top-level interrupt handler for the controller. Each pchan owns two
 * consecutive bits in the pending/enable registers: an even "half done"
 * bit and an odd "end" bit — hence the bit >> 1 / bit & 1 decoding
 * below. After servicing, the handler re-reads the pending register
 * once (mitigation) to pick up interrupts that arrived meanwhile.
 */
static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
    struct sun4i_dma_dev *priv = dev_id;
    struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
    struct sun4i_dma_vchan *vchan;
    struct sun4i_dma_contract *contract;
    struct sun4i_dma_promise *promise;
    unsigned long pendirq, irqs, disableirqs;
    int bit, i, free_room, allow_mitigation = 1;

    /* Snapshot the currently pending interrupts */
    pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

    disableirqs = 0;
    free_room = 0;

    for_each_set_bit(bit, &pendirq, 32) {
        /* Two IRQ bits per pchan: map the bit back to its pchan */
        pchan = &pchans[bit >> 1];
        vchan = pchan->vchan;
        if (!vchan) /* a terminated channel may still interrupt */
            continue;
        contract = vchan->contract;

        /*
         * Disable the IRQ and free the pchan if it's an end
         * interrupt (odd bit)
         */
        if (bit & 1) {
            spin_lock(&vchan->vc.lock);

            /*
             * Move the promise into the completed list now that
             * we're done with it
             */
            list_move_tail(&vchan->processing->list,
                       &contract->completed_demands);

            /*
             * Cyclic DMA transfers are special:
             * - There's always something we can dispatch
             * - We need to run the callback
             * - Latency is very important, as this is used by audio
             * We therefore just cycle through the list and dispatch
             * whatever we have here, reusing the pchan. There's
             * no need to run the thread after this.
             *
             * For non-cyclic transfers we need to look around,
             * so we can program some more work, or notify the
             * client that their transfers have been completed.
             */
            if (contract->is_cyclic) {
                promise = get_next_cyclic_promise(contract);
                vchan->processing = promise;
                configure_pchan(pchan, promise);
                vchan_cyclic_callback(&contract->vd);
            } else {
                vchan->processing = NULL;
                vchan->pchan = NULL;

                free_room = 1;
                disableirqs |= BIT(bit);
                release_pchan(priv, pchan);
            }

            spin_unlock(&vchan->vc.lock);
        } else {
            /* Half done interrupt */
            if (contract->is_cyclic)
                vchan_cyclic_callback(&contract->vd);
            else
                disableirqs |= BIT(bit);
        }
    }

    /* Disable the IRQs for events we handled */
    spin_lock(&priv->lock);
    irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
    writel_relaxed(irqs & ~disableirqs,
               priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
    spin_unlock(&priv->lock);

    /* Writing 1 to the pending field will clear the pending interrupt */
    writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

    /*
     * If a pchan was freed, we may be able to schedule something else,
     * so have a look around
     */
    if (free_room) {
        for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
            vchan = &priv->vchans[i];
            spin_lock(&vchan->vc.lock);
            __execute_vchan_pending(priv, vchan);
            spin_unlock(&vchan->vc.lock);
        }
    }

    /*
     * Handle newer interrupts if some showed up, but only do it once
     * to avoid a too long a loop
     */
    if (allow_mitigation) {
        pendirq = readl_relaxed(priv->base +
                    SUN4I_DMA_IRQ_PENDING_STATUS_REG);
        if (pendirq) {
            allow_mitigation = 0;
            goto handle_pending;
        }
    }

    return IRQ_HANDLED;
}
1143 
1144 static int sun4i_dma_probe(struct platform_device *pdev)
1145 {
1146     struct sun4i_dma_dev *priv;
1147     struct resource *res;
1148     int i, j, ret;
1149 
1150     priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1151     if (!priv)
1152         return -ENOMEM;
1153 
1154     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1155     priv->base = devm_ioremap_resource(&pdev->dev, res);
1156     if (IS_ERR(priv->base))
1157         return PTR_ERR(priv->base);
1158 
1159     priv->irq = platform_get_irq(pdev, 0);
1160     if (priv->irq < 0)
1161         return priv->irq;
1162 
1163     priv->clk = devm_clk_get(&pdev->dev, NULL);
1164     if (IS_ERR(priv->clk)) {
1165         dev_err(&pdev->dev, "No clock specified\n");
1166         return PTR_ERR(priv->clk);
1167     }
1168 
1169     platform_set_drvdata(pdev, priv);
1170     spin_lock_init(&priv->lock);
1171 
1172     dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);
1173 
1174     dma_cap_zero(priv->slave.cap_mask);
1175     dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
1176     dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
1177     dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
1178     dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);
1179 
1180     INIT_LIST_HEAD(&priv->slave.channels);
1181     priv->slave.device_free_chan_resources  = sun4i_dma_free_chan_resources;
1182     priv->slave.device_tx_status        = sun4i_dma_tx_status;
1183     priv->slave.device_issue_pending    = sun4i_dma_issue_pending;
1184     priv->slave.device_prep_slave_sg    = sun4i_dma_prep_slave_sg;
1185     priv->slave.device_prep_dma_memcpy  = sun4i_dma_prep_dma_memcpy;
1186     priv->slave.device_prep_dma_cyclic  = sun4i_dma_prep_dma_cyclic;
1187     priv->slave.device_config       = sun4i_dma_config;
1188     priv->slave.device_terminate_all    = sun4i_dma_terminate_all;
1189     priv->slave.copy_align          = 2;
1190     priv->slave.src_addr_widths     = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1191                           BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1192                           BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1193     priv->slave.dst_addr_widths     = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1194                           BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1195                           BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1196     priv->slave.directions          = BIT(DMA_DEV_TO_MEM) |
1197                           BIT(DMA_MEM_TO_DEV);
1198     priv->slave.residue_granularity     = DMA_RESIDUE_GRANULARITY_BURST;
1199 
1200     priv->slave.dev = &pdev->dev;
1201 
1202     priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
1203                     sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
1204     priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
1205                     sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
1206     if (!priv->vchans || !priv->pchans)
1207         return -ENOMEM;
1208 
1209     /*
1210      * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
1211      * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
1212      * dedicated ones
1213      */
1214     for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
1215         priv->pchans[i].base = priv->base +
1216             SUN4I_NDMA_CHANNEL_REG_BASE(i);
1217 
1218     for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
1219         priv->pchans[i].base = priv->base +
1220             SUN4I_DDMA_CHANNEL_REG_BASE(j);
1221         priv->pchans[i].is_dedicated = 1;
1222     }
1223 
1224     for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
1225         struct sun4i_dma_vchan *vchan = &priv->vchans[i];
1226 
1227         spin_lock_init(&vchan->vc.lock);
1228         vchan->vc.desc_free = sun4i_dma_free_contract;
1229         vchan_init(&vchan->vc, &priv->slave);
1230     }
1231 
1232     ret = clk_prepare_enable(priv->clk);
1233     if (ret) {
1234         dev_err(&pdev->dev, "Couldn't enable the clock\n");
1235         return ret;
1236     }
1237 
1238     /*
1239      * Make sure the IRQs are all disabled and accounted for. The bootloader
1240      * likes to leave these dirty
1241      */
1242     writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
1243     writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
1244 
1245     ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
1246                    0, dev_name(&pdev->dev), priv);
1247     if (ret) {
1248         dev_err(&pdev->dev, "Cannot request IRQ\n");
1249         goto err_clk_disable;
1250     }
1251 
1252     ret = dma_async_device_register(&priv->slave);
1253     if (ret) {
1254         dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
1255         goto err_clk_disable;
1256     }
1257 
1258     ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
1259                      priv);
1260     if (ret) {
1261         dev_err(&pdev->dev, "of_dma_controller_register failed\n");
1262         goto err_dma_unregister;
1263     }
1264 
1265     dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
1266 
1267     return 0;
1268 
1269 err_dma_unregister:
1270     dma_async_device_unregister(&priv->slave);
1271 err_clk_disable:
1272     clk_disable_unprepare(priv->clk);
1273     return ret;
1274 }
1275 
1276 static int sun4i_dma_remove(struct platform_device *pdev)
1277 {
1278     struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
1279 
1280     /* Disable IRQ so no more work is scheduled */
1281     disable_irq(priv->irq);
1282 
1283     of_dma_controller_free(pdev->dev.of_node);
1284     dma_async_device_unregister(&priv->slave);
1285 
1286     clk_disable_unprepare(priv->clk);
1287 
1288     return 0;
1289 }
1290 
/* Device tree match table; also exported for module autoloading */
static const struct of_device_id sun4i_dma_match[] = {
    { .compatible = "allwinner,sun4i-a10-dma" },
    { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);
1296 
/* Platform driver glue; devices are matched via the OF table */
static struct platform_driver sun4i_dma_driver = {
    .probe  = sun4i_dma_probe,
    .remove = sun4i_dma_remove,
    .driver = {
        .name       = "sun4i-dma",
        .of_match_table = sun4i_dma_match,
    },
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_LICENSE("GPL");