0001 // SPDX-License-Identifier: GPL-2.0+
0002 //
0003 // Actions Semi Owl SoCs DMA driver
0004 //
0005 // Copyright (c) 2014 Actions Semi Inc.
0006 // Author: David Liu <liuwei@actions-semi.com>
0007 //
0008 // Copyright (c) 2018 Linaro Ltd.
0009 // Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
0010 
0011 #include <linux/bitops.h>
0012 #include <linux/clk.h>
0013 #include <linux/delay.h>
0014 #include <linux/dmaengine.h>
0015 #include <linux/dma-mapping.h>
0016 #include <linux/dmapool.h>
0017 #include <linux/err.h>
0018 #include <linux/init.h>
0019 #include <linux/interrupt.h>
0020 #include <linux/io.h>
0021 #include <linux/mm.h>
0022 #include <linux/module.h>
0023 #include <linux/of_device.h>
0024 #include <linux/of_dma.h>
0025 #include <linux/slab.h>
0026 #include "virt-dma.h"
0027 
0028 #define OWL_DMA_FRAME_MAX_LENGTH        0xfffff
0029 
0030 /* Global DMA Controller Registers */
0031 #define OWL_DMA_IRQ_PD0             0x00
0032 #define OWL_DMA_IRQ_PD1             0x04
0033 #define OWL_DMA_IRQ_PD2             0x08
0034 #define OWL_DMA_IRQ_PD3             0x0C
0035 #define OWL_DMA_IRQ_EN0             0x10
0036 #define OWL_DMA_IRQ_EN1             0x14
0037 #define OWL_DMA_IRQ_EN2             0x18
0038 #define OWL_DMA_IRQ_EN3             0x1C
0039 #define OWL_DMA_SECURE_ACCESS_CTL       0x20
0040 #define OWL_DMA_NIC_QOS             0x24
0041 #define OWL_DMA_DBGSEL              0x28
0042 #define OWL_DMA_IDLE_STAT           0x2C
0043 
0044 /* Channel Registers */
0045 #define OWL_DMA_CHAN_BASE(i)            (0x100 + (i) * 0x100)
0046 #define OWL_DMAX_MODE               0x00
0047 #define OWL_DMAX_SOURCE             0x04
0048 #define OWL_DMAX_DESTINATION            0x08
0049 #define OWL_DMAX_FRAME_LEN          0x0C
0050 #define OWL_DMAX_FRAME_CNT          0x10
0051 #define OWL_DMAX_REMAIN_FRAME_CNT       0x14
0052 #define OWL_DMAX_REMAIN_CNT         0x18
0053 #define OWL_DMAX_SOURCE_STRIDE          0x1C
0054 #define OWL_DMAX_DESTINATION_STRIDE     0x20
0055 #define OWL_DMAX_START              0x24
0056 #define OWL_DMAX_PAUSE              0x28
0057 #define OWL_DMAX_CHAINED_CTL            0x2C
0058 #define OWL_DMAX_CONSTANT           0x30
0059 #define OWL_DMAX_LINKLIST_CTL           0x34
0060 #define OWL_DMAX_NEXT_DESCRIPTOR        0x38
0061 #define OWL_DMAX_CURRENT_DESCRIPTOR_NUM     0x3C
0062 #define OWL_DMAX_INT_CTL            0x40
0063 #define OWL_DMAX_INT_STATUS         0x44
0064 #define OWL_DMAX_CURRENT_SOURCE_POINTER     0x48
0065 #define OWL_DMAX_CURRENT_DESTINATION_POINTER    0x4C
0066 
0067 /* OWL_DMAX_MODE Bits */
0068 #define OWL_DMA_MODE_TS(x)          (((x) & GENMASK(5, 0)) << 0)
0069 #define OWL_DMA_MODE_ST(x)          (((x) & GENMASK(1, 0)) << 8)
0070 #define OWL_DMA_MODE_ST_DEV         OWL_DMA_MODE_ST(0)
0071 #define OWL_DMA_MODE_ST_DCU         OWL_DMA_MODE_ST(2)
0072 #define OWL_DMA_MODE_ST_SRAM            OWL_DMA_MODE_ST(3)
0073 #define OWL_DMA_MODE_DT(x)          (((x) & GENMASK(1, 0)) << 10)
0074 #define OWL_DMA_MODE_DT_DEV         OWL_DMA_MODE_DT(0)
0075 #define OWL_DMA_MODE_DT_DCU         OWL_DMA_MODE_DT(2)
0076 #define OWL_DMA_MODE_DT_SRAM            OWL_DMA_MODE_DT(3)
0077 #define OWL_DMA_MODE_SAM(x)         (((x) & GENMASK(1, 0)) << 16)
0078 #define OWL_DMA_MODE_SAM_CONST          OWL_DMA_MODE_SAM(0)
0079 #define OWL_DMA_MODE_SAM_INC            OWL_DMA_MODE_SAM(1)
0080 #define OWL_DMA_MODE_SAM_STRIDE         OWL_DMA_MODE_SAM(2)
0081 #define OWL_DMA_MODE_DAM(x)         (((x) & GENMASK(1, 0)) << 18)
0082 #define OWL_DMA_MODE_DAM_CONST          OWL_DMA_MODE_DAM(0)
0083 #define OWL_DMA_MODE_DAM_INC            OWL_DMA_MODE_DAM(1)
0084 #define OWL_DMA_MODE_DAM_STRIDE         OWL_DMA_MODE_DAM(2)
0085 #define OWL_DMA_MODE_PW(x)          (((x) & GENMASK(2, 0)) << 20)
0086 #define OWL_DMA_MODE_CB             BIT(23)
0087 #define OWL_DMA_MODE_NDDBW(x)           (((x) & 0x1) << 28)
0088 #define OWL_DMA_MODE_NDDBW_32BIT        OWL_DMA_MODE_NDDBW(0)
0089 #define OWL_DMA_MODE_NDDBW_8BIT         OWL_DMA_MODE_NDDBW(1)
0090 #define OWL_DMA_MODE_CFE            BIT(29)
0091 #define OWL_DMA_MODE_LME            BIT(30)
0092 #define OWL_DMA_MODE_CME            BIT(31)
0093 
0094 /* OWL_DMAX_LINKLIST_CTL Bits */
0095 #define OWL_DMA_LLC_SAV(x)          (((x) & GENMASK(1, 0)) << 8)
0096 #define OWL_DMA_LLC_SAV_INC         OWL_DMA_LLC_SAV(0)
0097 #define OWL_DMA_LLC_SAV_LOAD_NEXT       OWL_DMA_LLC_SAV(1)
0098 #define OWL_DMA_LLC_SAV_LOAD_PREV       OWL_DMA_LLC_SAV(2)
0099 #define OWL_DMA_LLC_DAV(x)          (((x) & GENMASK(1, 0)) << 10)
0100 #define OWL_DMA_LLC_DAV_INC         OWL_DMA_LLC_DAV(0)
0101 #define OWL_DMA_LLC_DAV_LOAD_NEXT       OWL_DMA_LLC_DAV(1)
0102 #define OWL_DMA_LLC_DAV_LOAD_PREV       OWL_DMA_LLC_DAV(2)
0103 #define OWL_DMA_LLC_SUSPEND         BIT(16)
0104 
0105 /* OWL_DMAX_INT_CTL Bits */
0106 #define OWL_DMA_INTCTL_BLOCK            BIT(0)
0107 #define OWL_DMA_INTCTL_SUPER_BLOCK      BIT(1)
0108 #define OWL_DMA_INTCTL_FRAME            BIT(2)
0109 #define OWL_DMA_INTCTL_HALF_FRAME       BIT(3)
0110 #define OWL_DMA_INTCTL_LAST_FRAME       BIT(4)
0111 
0112 /* OWL_DMAX_INT_STATUS Bits */
0113 #define OWL_DMA_INTSTAT_BLOCK           BIT(0)
0114 #define OWL_DMA_INTSTAT_SUPER_BLOCK     BIT(1)
0115 #define OWL_DMA_INTSTAT_FRAME           BIT(2)
0116 #define OWL_DMA_INTSTAT_HALF_FRAME      BIT(3)
0117 #define OWL_DMA_INTSTAT_LAST_FRAME      BIT(4)
0118 
0119 /* Extract a field of 'width' bits at 'shift' from 'val' and place it at 'newshift' */
0120 #define BIT_FIELD(val, width, shift, newshift)  \
0121         ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
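/* e.g. BIT_FIELD(x, 2, 8, 6) moves the 2-bit field at bits 9:8 of x down to bits 7:6 */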
0122 
0123 /* Frame count value is fixed as 1 */
0124 #define FCNT_VAL                0x1
0125 
0126 /**
0127  * enum owl_dmadesc_offsets - Word offsets within the hardware link list
0128  * descriptor used for DMA transfer
0129  * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
0130  * @OWL_DMADESC_SADDR: source physical address
0131  * @OWL_DMADESC_DADDR: destination physical address
0132  * @OWL_DMADESC_FLEN: frame length
0133  * @OWL_DMADESC_SRC_STRIDE: source stride
0134  * @OWL_DMADESC_DST_STRIDE: destination stride
0135  * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
0136  * @OWL_DMADESC_CTRLB: interrupt config
0137  * @OWL_DMADESC_CONST_NUM: data for constant fill
0138  * @OWL_DMADESC_SIZE: max size of this enum
0139  */
0140 enum owl_dmadesc_offsets {
0141     OWL_DMADESC_NEXT_LLI = 0,
0142     OWL_DMADESC_SADDR,
0143     OWL_DMADESC_DADDR,
0144     OWL_DMADESC_FLEN,
0145     OWL_DMADESC_SRC_STRIDE,
0146     OWL_DMADESC_DST_STRIDE,
0147     OWL_DMADESC_CTRLA,
0148     OWL_DMADESC_CTRLB,
0149     OWL_DMADESC_CONST_NUM,
0150     OWL_DMADESC_SIZE
0151 };
0152 
0153 enum owl_dma_id {
0154     S900_DMA,
0155     S700_DMA,
0156 };
0157 
0158 /**
0159  * struct owl_dma_lli - Link list for dma transfer
0160  * @hw: hardware link list
0161  * @phys: physical address of hardware link list
0162  * @node: node for txd's lli_list
0163  */
0164 struct owl_dma_lli {
0165     u32         hw[OWL_DMADESC_SIZE];
0166     dma_addr_t      phys;
0167     struct list_head    node;
0168 };
0169 
0170 /**
0171  * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
0172  * @vd: virtual DMA descriptor
0173  * @lli_list: link list of lli nodes
0174  * @cyclic: flag to indicate cyclic transfers
0175  */
0176 struct owl_dma_txd {
0177     struct virt_dma_desc    vd;
0178     struct list_head    lli_list;
0179     bool            cyclic;
0180 };
0181 
0182 /**
0183  * struct owl_dma_pchan - Holder for the physical channels
0184  * @id: physical index to this channel
0185  * @base: virtual memory base for the dma channel
0186  * @vchan: the virtual channel currently being served by this physical channel
0187  */
0188 struct owl_dma_pchan {
0189     u32         id;
0190     void __iomem        *base;
0191     struct owl_dma_vchan    *vchan;
0192 };
0193 
0194 /**
0195  * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
0196  * @vc: wrapped virtual channel
0197  * @pchan: the physical channel utilized by this channel
0198  * @txd: active transaction on this channel
0199  * @cfg: slave configuration for this channel
0200  * @drq: physical DMA request ID for this channel
0201  */
0202 struct owl_dma_vchan {
0203     struct virt_dma_chan    vc;
0204     struct owl_dma_pchan    *pchan;
0205     struct owl_dma_txd  *txd;
0206     struct dma_slave_config cfg;
0207     u8          drq;
0208 };
0209 
0210 /**
0211  * struct owl_dma - Holder for the Owl DMA controller
0212  * @dma: dma engine for this instance
0213  * @base: virtual memory base for the DMA controller
0214  * @clk: clock for the DMA controller
0215  * @lock: a lock to use when changing DMA controller global registers
0216  * @lli_pool: a pool for the LLI descriptors
0217  * @irq: interrupt ID for the DMA controller
0218  * @nr_pchans: the number of physical channels
0219  * @pchans: array of data for the physical channels
0220  * @nr_vchans: the number of virtual channels
0221  * @vchans: array of data for the virtual channels
0222  * @devid: device id based on OWL SoC
0223  */
0224 struct owl_dma {
0225     struct dma_device   dma;
0226     void __iomem        *base;
0227     struct clk      *clk;
0228     spinlock_t      lock;
0229     struct dma_pool     *lli_pool;
0230     int         irq;
0231 
0232     unsigned int        nr_pchans;
0233     struct owl_dma_pchan    *pchans;
0234 
0235     unsigned int        nr_vchans;
0236     struct owl_dma_vchan    *vchans;
0237     enum owl_dma_id     devid;
0238 };
0239 
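/*
 * Register access helpers: pchan_update() and dma_update() do a
 * read-modify-write to set or clear the bits in @val, while the
 * readl/writel wrappers access a channel or global register directly.
 */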
0240 static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
0241              u32 val, bool state)
0242 {
0243     u32 regval;
0244 
0245     regval = readl(pchan->base + reg);
0246 
0247     if (state)
0248         regval |= val;
0249     else
0250         regval &= ~val;
0251 
0252     writel(regval, pchan->base + reg);
0253 }
0254 
0255 static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
0256 {
0257     writel(data, pchan->base + reg);
0258 }
0259 
0260 static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
0261 {
0262     return readl(pchan->base + reg);
0263 }
0264 
0265 static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
0266 {
0267     u32 regval;
0268 
0269     regval = readl(od->base + reg);
0270 
0271     if (state)
0272         regval |= val;
0273     else
0274         regval &= ~val;
0275 
0276     writel(regval, od->base + reg);
0277 }
0278 
0279 static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
0280 {
0281     writel(data, od->base + reg);
0282 }
0283 
0284 static u32 dma_readl(struct owl_dma *od, u32 reg)
0285 {
0286     return readl(od->base + reg);
0287 }
0288 
0289 static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
0290 {
0291     return container_of(dd, struct owl_dma, dma);
0292 }
0293 
0294 static struct device *chan2dev(struct dma_chan *chan)
0295 {
0296     return &chan->dev->device;
0297 }
0298 
0299 static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
0300 {
0301     return container_of(chan, struct owl_dma_vchan, vc.chan);
0302 }
0303 
0304 static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
0305 {
0306     return container_of(tx, struct owl_dma_txd, vd.tx);
0307 }
0308 
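/*
 * llc_hw_ctrla() repacks the relevant OWL_DMAX_MODE and
 * OWL_DMAX_LINKLIST_CTL register fields into the layout expected by
 * the CTRLA word of a hardware link list descriptor; llc_hw_ctrlb()
 * does the same for the interrupt control bits in CTRLB.
 */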
0309 static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
0310 {
0311     u32 ctl;
0312 
0313     ctl = BIT_FIELD(mode, 4, 28, 28) |
0314           BIT_FIELD(mode, 8, 16, 20) |
0315           BIT_FIELD(mode, 4, 8, 16) |
0316           BIT_FIELD(mode, 6, 0, 10) |
0317           BIT_FIELD(llc_ctl, 2, 10, 8) |
0318           BIT_FIELD(llc_ctl, 2, 8, 6);
0319 
0320     return ctl;
0321 }
0322 
0323 static inline u32 llc_hw_ctrlb(u32 int_ctl)
0324 {
0325     u32 ctl;
0326 
0327     /*
0328      * Irrespective of the SoC, the interrupt control bits are packed
0329      * into ctrlb starting at bit 18.
0330      */
0331     ctl = BIT_FIELD(int_ctl, 7, 0, 18);
0332 
0333     return ctl;
0334 }
0335 
0336 static u32 llc_hw_flen(struct owl_dma_lli *lli)
0337 {
0338     return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0);
0339 }
0340 
0341 static void owl_dma_free_lli(struct owl_dma *od,
0342                  struct owl_dma_lli *lli)
0343 {
0344     list_del(&lli->node);
0345     dma_pool_free(od->lli_pool, lli, lli->phys);
0346 }
0347 
0348 static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
0349 {
0350     struct owl_dma_lli *lli;
0351     dma_addr_t phys;
0352 
0353     lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
0354     if (!lli)
0355         return NULL;
0356 
0357     INIT_LIST_HEAD(&lli->node);
0358     lli->phys = phys;
0359 
0360     return lli;
0361 }
0362 
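/*
 * Add @next to the descriptor's lli_list (unless it closes a cyclic
 * chain) and, if @prev exists, chain it to @next in hardware by
 * setting prev's NEXT_LLI pointer and enabling link list mode.
 */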
0363 static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
0364                        struct owl_dma_lli *prev,
0365                        struct owl_dma_lli *next,
0366                        bool is_cyclic)
0367 {
0368     if (!is_cyclic)
0369         list_add_tail(&next->node, &txd->lli_list);
0370 
0371     if (prev) {
0372         prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;
0373         prev->hw[OWL_DMADESC_CTRLA] |=
0374                     llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
0375     }
0376 
0377     return next;
0378 }
0379 
0380 static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
0381                   struct owl_dma_lli *lli,
0382                   dma_addr_t src, dma_addr_t dst,
0383                   u32 len, enum dma_transfer_direction dir,
0384                   struct dma_slave_config *sconfig,
0385                   bool is_cyclic)
0386 {
0387     struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
0388     u32 mode, ctrlb;
0389 
0390     mode = OWL_DMA_MODE_PW(0);
0391 
0392     switch (dir) {
0393     case DMA_MEM_TO_MEM:
0394         mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
0395             OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
0396             OWL_DMA_MODE_DAM_INC;
0397 
0398         break;
0399     case DMA_MEM_TO_DEV:
0400         mode |= OWL_DMA_MODE_TS(vchan->drq)
0401             | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
0402             | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
0403 
0404         /*
0405          * Hardware only supports 32bit and 8bit buswidth. Since the
0406          * default is 32bit, select 8bit only when requested.
0407          */
0408         if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
0409             mode |= OWL_DMA_MODE_NDDBW_8BIT;
0410 
0411         break;
0412     case DMA_DEV_TO_MEM:
0413         mode |= OWL_DMA_MODE_TS(vchan->drq)
0414             | OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
0415             | OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
0416 
0417         /*
0418          * Hardware only supports 32bit and 8bit buswidth. Since the
0419          * default is 32bit, select 8bit only when requested.
0420          */
0421         if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
0422             mode |= OWL_DMA_MODE_NDDBW_8BIT;
0423 
0424         break;
0425     default:
0426         return -EINVAL;
0427     }
0428 
0429     lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode,
0430                           OWL_DMA_LLC_SAV_LOAD_NEXT |
0431                           OWL_DMA_LLC_DAV_LOAD_NEXT);
0432 
0433     if (is_cyclic)
0434         ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
0435     else
0436         ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
0437 
0438     lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */
0439     lli->hw[OWL_DMADESC_SADDR] = src;
0440     lli->hw[OWL_DMADESC_DADDR] = dst;
0441     lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
0442     lli->hw[OWL_DMADESC_DST_STRIDE] = 0;
0443 
0444     if (od->devid == S700_DMA) {
0445         /* Max frame length is 1MB */
0446         lli->hw[OWL_DMADESC_FLEN] = len;
0447         /*
0448          * On S700, the word at offset 0x1C is shared between the
0449          * frame count and ctrlb: the first 12 bits hold the frame
0450          * count and the remaining 20 bits hold ctrlb.
0451          */
0452         lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
0453     } else {
0454         /*
0455          * On S900, the word at offset 0xC is shared between the
0456          * frame length (max frame length is 1MB) and the frame
0457          * count: the first 20 bits hold the frame length and the
0458          * remaining 12 bits hold the frame count.
0459          */
0460         lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
0461         lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
0462     }
0463 
0464     return 0;
0465 }
0466 
0467 static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
0468                            struct owl_dma_vchan *vchan)
0469 {
0470     struct owl_dma_pchan *pchan = NULL;
0471     unsigned long flags;
0472     int i;
0473 
0474     for (i = 0; i < od->nr_pchans; i++) {
0475         pchan = &od->pchans[i];
0476 
0477         spin_lock_irqsave(&od->lock, flags);
0478         if (!pchan->vchan) {
0479             pchan->vchan = vchan;
0480             spin_unlock_irqrestore(&od->lock, flags);
0481             break;
0482         }
0483 
0484         spin_unlock_irqrestore(&od->lock, flags);
0485     }
0486 
0487     return pchan;
0488 }
0489 
0490 static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
0491 {
0492     unsigned int val;
0493 
0494     val = dma_readl(od, OWL_DMA_IDLE_STAT);
0495 
0496     return !(val & (1 << pchan->id));
0497 }
0498 
0499 static void owl_dma_terminate_pchan(struct owl_dma *od,
0500                     struct owl_dma_pchan *pchan)
0501 {
0502     unsigned long flags;
0503     u32 irq_pd;
0504 
0505     pchan_writel(pchan, OWL_DMAX_START, 0);
0506     pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
0507 
0508     spin_lock_irqsave(&od->lock, flags);
0509     dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);
0510 
0511     irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
0512     if (irq_pd & (1 << pchan->id)) {
0513         dev_warn(od->dma.dev,
0514              "terminating pchan %d that still has pending irq\n",
0515              pchan->id);
0516         dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
0517     }
0518 
0519     pchan->vchan = NULL;
0520 
0521     spin_unlock_irqrestore(&od->lock, flags);
0522 }
0523 
0524 static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
0525 {
0526     pchan_writel(pchan, OWL_DMAX_PAUSE, 1);
0527 }
0528 
0529 static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
0530 {
0531     pchan_writel(pchan, OWL_DMAX_PAUSE, 0);
0532 }
0533 
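/*
 * Program the physical channel attached to @vchan with the next
 * queued descriptor and kick off the transfer. Called with the
 * vchan lock held.
 */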
0534 static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
0535 {
0536     struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
0537     struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
0538     struct owl_dma_pchan *pchan = vchan->pchan;
0539     struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
0540     struct owl_dma_lli *lli;
0541     unsigned long flags;
0542     u32 int_ctl;
0543 
0544     list_del(&vd->node);
0545 
0546     vchan->txd = txd;
0547 
0548     /* Wait for channel inactive */
0549     while (owl_dma_pchan_busy(od, pchan))
0550         cpu_relax();
0551 
0552     lli = list_first_entry(&txd->lli_list,
0553                    struct owl_dma_lli, node);
0554 
0555     if (txd->cyclic)
0556         int_ctl = OWL_DMA_INTCTL_BLOCK;
0557     else
0558         int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
0559 
0560     pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
0561     pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
0562              OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
0563     pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
0564     pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);
0565 
0566     /* Clear IRQ status for this pchan */
0567     pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
0568 
0569     spin_lock_irqsave(&od->lock, flags);
0570 
0571     dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);
0572 
0573     spin_unlock_irqrestore(&od->lock, flags);
0574 
0575     dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);
0576 
0577     /* Start DMA transfer for this pchan */
0578     pchan_writel(pchan, OWL_DMAX_START, 0x1);
0579 
0580     return 0;
0581 }
0582 
0583 static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
0584 {
0585     /* Ensure that the physical channel is stopped */
0586     owl_dma_terminate_pchan(od, vchan->pchan);
0587 
0588     vchan->pchan = NULL;
0589 }
0590 
0591 static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
0592 {
0593     struct owl_dma *od = dev_id;
0594     struct owl_dma_vchan *vchan;
0595     struct owl_dma_pchan *pchan;
0596     unsigned long pending;
0597     int i;
0598     unsigned int global_irq_pending, chan_irq_pending;
0599 
0600     spin_lock(&od->lock);
0601 
0602     pending = dma_readl(od, OWL_DMA_IRQ_PD0);
0603 
0604     /* Clear IRQ status for each pchan */
0605     for_each_set_bit(i, &pending, od->nr_pchans) {
0606         pchan = &od->pchans[i];
0607         pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
0608     }
0609 
0610     /* Clear pending IRQ */
0611     dma_writel(od, OWL_DMA_IRQ_PD0, pending);
0612 
0613     /* Check missed pending IRQ */
0614     for (i = 0; i < od->nr_pchans; i++) {
0615         pchan = &od->pchans[i];
0616         chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
0617                    pchan_readl(pchan, OWL_DMAX_INT_STATUS);
0618 
0619         /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
0620         dma_readl(od, OWL_DMA_IRQ_PD0);
0621 
0622         global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
0623 
0624         if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
0625             dev_dbg(od->dma.dev,
0626                 "global and channel IRQ pending match err\n");
0627 
0628             /* Clear IRQ status for this pchan */
0629             pchan_update(pchan, OWL_DMAX_INT_STATUS,
0630                      0xff, false);
0631 
0632             /* Update global IRQ pending */
0633             pending |= BIT(i);
0634         }
0635     }
0636 
0637     spin_unlock(&od->lock);
0638 
0639     for_each_set_bit(i, &pending, od->nr_pchans) {
0640         struct owl_dma_txd *txd;
0641 
0642         pchan = &od->pchans[i];
0643 
0644         vchan = pchan->vchan;
0645         if (!vchan) {
0646             dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
0647                  pchan->id);
0648             continue;
0649         }
0650 
0651         spin_lock(&vchan->vc.lock);
0652 
0653         txd = vchan->txd;
0654         if (txd) {
0655             vchan->txd = NULL;
0656 
0657             vchan_cookie_complete(&txd->vd);
0658 
0659             /*
0660              * Start the next descriptor (if any),
0661              * otherwise free this channel.
0662              */
0663             if (vchan_next_desc(&vchan->vc))
0664                 owl_dma_start_next_txd(vchan);
0665             else
0666                 owl_dma_phy_free(od, vchan);
0667         }
0668 
0669         spin_unlock(&vchan->vc.lock);
0670     }
0671 
0672     return IRQ_HANDLED;
0673 }
0674 
0675 static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
0676 {
0677     struct owl_dma_lli *lli, *_lli;
0678 
0679     if (unlikely(!txd))
0680         return;
0681 
0682     list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
0683         owl_dma_free_lli(od, lli);
0684 
0685     kfree(txd);
0686 }
0687 
0688 static void owl_dma_desc_free(struct virt_dma_desc *vd)
0689 {
0690     struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
0691     struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
0692 
0693     owl_dma_free_txd(od, txd);
0694 }
0695 
0696 static int owl_dma_terminate_all(struct dma_chan *chan)
0697 {
0698     struct owl_dma *od = to_owl_dma(chan->device);
0699     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0700     unsigned long flags;
0701     LIST_HEAD(head);
0702 
0703     spin_lock_irqsave(&vchan->vc.lock, flags);
0704 
0705     if (vchan->pchan)
0706         owl_dma_phy_free(od, vchan);
0707 
0708     if (vchan->txd) {
0709         owl_dma_desc_free(&vchan->txd->vd);
0710         vchan->txd = NULL;
0711     }
0712 
0713     vchan_get_all_descriptors(&vchan->vc, &head);
0714 
0715     spin_unlock_irqrestore(&vchan->vc.lock, flags);
0716 
0717     vchan_dma_desc_free_list(&vchan->vc, &head);
0718 
0719     return 0;
0720 }
0721 
0722 static int owl_dma_config(struct dma_chan *chan,
0723               struct dma_slave_config *config)
0724 {
0725     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0726 
0727     /* Reject definitely invalid configurations */
0728     if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
0729         config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
0730         return -EINVAL;
0731 
0732     memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
0733 
0734     return 0;
0735 }
0736 
0737 static int owl_dma_pause(struct dma_chan *chan)
0738 {
0739     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0740     unsigned long flags;
0741 
0742     spin_lock_irqsave(&vchan->vc.lock, flags);
0743 
0744     owl_dma_pause_pchan(vchan->pchan);
0745 
0746     spin_unlock_irqrestore(&vchan->vc.lock, flags);
0747 
0748     return 0;
0749 }
0750 
0751 static int owl_dma_resume(struct dma_chan *chan)
0752 {
0753     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0754     unsigned long flags;
0755 
0756     if (!vchan->pchan && !vchan->txd)
0757         return 0;
0758 
0759     dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
0760 
0761     spin_lock_irqsave(&vchan->vc.lock, flags);
0762 
0763     owl_dma_resume_pchan(vchan->pchan);
0764 
0765     spin_unlock_irqrestore(&vchan->vc.lock, flags);
0766 
0767     return 0;
0768 }
0769 
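/*
 * Estimate the residue of the transfer currently running on the
 * channel from the hardware remain count and the frame lengths of
 * the descriptors in the link list.
 */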
0770 static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
0771 {
0772     struct owl_dma_pchan *pchan;
0773     struct owl_dma_txd *txd;
0774     struct owl_dma_lli *lli;
0775     unsigned int next_lli_phy;
0776     size_t bytes;
0777 
0778     pchan = vchan->pchan;
0779     txd = vchan->txd;
0780 
0781     if (!pchan || !txd)
0782         return 0;
0783 
0784     /* Get the remaining count of the current node in the link list */
0785     bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);
0786 
0787     /* Loop through the preceding nodes to get total remaining bytes */
0788     if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
0789         next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
0790         list_for_each_entry(lli, &txd->lli_list, node) {
0791             /* Start from the next active node */
0792             if (lli->phys == next_lli_phy) {
0793                 list_for_each_entry(lli, &txd->lli_list, node)
0794                     bytes += llc_hw_flen(lli);
0795                 break;
0796             }
0797         }
0798     }
0799 
0800     return bytes;
0801 }
0802 
0803 static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
0804                      dma_cookie_t cookie,
0805                      struct dma_tx_state *state)
0806 {
0807     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0808     struct owl_dma_lli *lli;
0809     struct virt_dma_desc *vd;
0810     struct owl_dma_txd *txd;
0811     enum dma_status ret;
0812     unsigned long flags;
0813     size_t bytes = 0;
0814 
0815     ret = dma_cookie_status(chan, cookie, state);
0816     if (ret == DMA_COMPLETE || !state)
0817         return ret;
0818 
0819     spin_lock_irqsave(&vchan->vc.lock, flags);
0820 
0821     vd = vchan_find_desc(&vchan->vc, cookie);
0822     if (vd) {
0823         txd = to_owl_txd(&vd->tx);
0824         list_for_each_entry(lli, &txd->lli_list, node)
0825             bytes += llc_hw_flen(lli);
0826     } else {
0827         bytes = owl_dma_getbytes_chan(vchan);
0828     }
0829 
0830     spin_unlock_irqrestore(&vchan->vc.lock, flags);
0831 
0832     dma_set_residue(state, bytes);
0833 
0834     return ret;
0835 }
0836 
0837 static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
0838 {
0839     struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
0840     struct owl_dma_pchan *pchan;
0841 
0842     pchan = owl_dma_get_pchan(od, vchan);
0843     if (!pchan)
0844         return;
0845 
0846     dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);
0847 
0848     vchan->pchan = pchan;
0849     owl_dma_start_next_txd(vchan);
0850 }
0851 
0852 static void owl_dma_issue_pending(struct dma_chan *chan)
0853 {
0854     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0855     unsigned long flags;
0856 
0857     spin_lock_irqsave(&vchan->vc.lock, flags);
0858     if (vchan_issue_pending(&vchan->vc)) {
0859         if (!vchan->pchan)
0860             owl_dma_phy_alloc_and_start(vchan);
0861     }
0862     spin_unlock_irqrestore(&vchan->vc.lock, flags);
0863 }
0864 
0865 static struct dma_async_tx_descriptor
0866         *owl_dma_prep_memcpy(struct dma_chan *chan,
0867                      dma_addr_t dst, dma_addr_t src,
0868                      size_t len, unsigned long flags)
0869 {
0870     struct owl_dma *od = to_owl_dma(chan->device);
0871     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0872     struct owl_dma_txd *txd;
0873     struct owl_dma_lli *lli, *prev = NULL;
0874     size_t offset, bytes;
0875     int ret;
0876 
0877     if (!len)
0878         return NULL;
0879 
0880     txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
0881     if (!txd)
0882         return NULL;
0883 
0884     INIT_LIST_HEAD(&txd->lli_list);
0885 
0886     /* Process the transfer frame by frame */
0887     for (offset = 0; offset < len; offset += bytes) {
0888         lli = owl_dma_alloc_lli(od);
0889         if (!lli) {
0890             dev_warn(chan2dev(chan), "failed to allocate lli\n");
0891             goto err_txd_free;
0892         }
0893 
0894         bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
0895 
0896         ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
0897                       bytes, DMA_MEM_TO_MEM,
0898                       &vchan->cfg, txd->cyclic);
0899         if (ret) {
0900             dev_warn(chan2dev(chan), "failed to config lli\n");
0901             goto err_txd_free;
0902         }
0903 
0904         prev = owl_dma_add_lli(txd, prev, lli, false);
0905     }
0906 
0907     return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
0908 
0909 err_txd_free:
0910     owl_dma_free_txd(od, txd);
0911     return NULL;
0912 }
0913 
0914 static struct dma_async_tx_descriptor
0915         *owl_dma_prep_slave_sg(struct dma_chan *chan,
0916                        struct scatterlist *sgl,
0917                        unsigned int sg_len,
0918                        enum dma_transfer_direction dir,
0919                        unsigned long flags, void *context)
0920 {
0921     struct owl_dma *od = to_owl_dma(chan->device);
0922     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0923     struct dma_slave_config *sconfig = &vchan->cfg;
0924     struct owl_dma_txd *txd;
0925     struct owl_dma_lli *lli, *prev = NULL;
0926     struct scatterlist *sg;
0927     dma_addr_t addr, src = 0, dst = 0;
0928     size_t len;
0929     int ret, i;
0930 
0931     txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
0932     if (!txd)
0933         return NULL;
0934 
0935     INIT_LIST_HEAD(&txd->lli_list);
0936 
0937     for_each_sg(sgl, sg, sg_len, i) {
0938         addr = sg_dma_address(sg);
0939         len = sg_dma_len(sg);
0940 
0941         if (len > OWL_DMA_FRAME_MAX_LENGTH) {
0942             dev_err(od->dma.dev,
0943                 "frame length exceeds max supported length");
0944             goto err_txd_free;
0945         }
0946 
0947         lli = owl_dma_alloc_lli(od);
0948         if (!lli) {
0949             dev_err(chan2dev(chan), "failed to allocate lli");
0950             goto err_txd_free;
0951         }
0952 
0953         if (dir == DMA_MEM_TO_DEV) {
0954             src = addr;
0955             dst = sconfig->dst_addr;
0956         } else {
0957             src = sconfig->src_addr;
0958             dst = addr;
0959         }
0960 
0961         ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
0962                       txd->cyclic);
0963         if (ret) {
0964             dev_warn(chan2dev(chan), "failed to config lli");
0965             goto err_txd_free;
0966         }
0967 
0968         prev = owl_dma_add_lli(txd, prev, lli, false);
0969     }
0970 
0971     return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
0972 
0973 err_txd_free:
0974     owl_dma_free_txd(od, txd);
0975 
0976     return NULL;
0977 }
0978 
0979 static struct dma_async_tx_descriptor
0980         *owl_prep_dma_cyclic(struct dma_chan *chan,
0981                      dma_addr_t buf_addr, size_t buf_len,
0982                      size_t period_len,
0983                      enum dma_transfer_direction dir,
0984                      unsigned long flags)
0985 {
0986     struct owl_dma *od = to_owl_dma(chan->device);
0987     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
0988     struct dma_slave_config *sconfig = &vchan->cfg;
0989     struct owl_dma_txd *txd;
0990     struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
0991     dma_addr_t src = 0, dst = 0;
0992     unsigned int periods = buf_len / period_len;
0993     int ret, i;
0994 
0995     txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
0996     if (!txd)
0997         return NULL;
0998 
0999     INIT_LIST_HEAD(&txd->lli_list);
1000     txd->cyclic = true;
1001 
1002     for (i = 0; i < periods; i++) {
1003         lli = owl_dma_alloc_lli(od);
1004         if (!lli) {
1005             dev_warn(chan2dev(chan), "failed to allocate lli");
1006             goto err_txd_free;
1007         }
1008 
1009         if (dir == DMA_MEM_TO_DEV) {
1010             src = buf_addr + (period_len * i);
1011             dst = sconfig->dst_addr;
1012         } else if (dir == DMA_DEV_TO_MEM) {
1013             src = sconfig->src_addr;
1014             dst = buf_addr + (period_len * i);
1015         }
1016 
1017         ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
1018                       dir, sconfig, txd->cyclic);
1019         if (ret) {
1020             dev_warn(chan2dev(chan), "failed to config lli");
1021             goto err_txd_free;
1022         }
1023 
1024         if (!first)
1025             first = lli;
1026 
1027         prev = owl_dma_add_lli(txd, prev, lli, false);
1028     }
1029 
1030     /* close the cyclic list */
1031     owl_dma_add_lli(txd, prev, first, true);
1032 
1033     return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
1034 
1035 err_txd_free:
1036     owl_dma_free_txd(od, txd);
1037 
1038     return NULL;
1039 }
1040 
1041 static void owl_dma_free_chan_resources(struct dma_chan *chan)
1042 {
1043     struct owl_dma_vchan *vchan = to_owl_vchan(chan);
1044 
1045     /* Ensure all queued descriptors are freed */
1046     vchan_free_chan_resources(&vchan->vc);
1047 }
1048 
1049 static inline void owl_dma_free(struct owl_dma *od)
1050 {
1051     struct owl_dma_vchan *vchan = NULL;
1052     struct owl_dma_vchan *next;
1053 
1054     list_for_each_entry_safe(vchan,
1055                  next, &od->dma.channels, vc.chan.device_node) {
1056         list_del(&vchan->vc.chan.device_node);
1057         tasklet_kill(&vchan->vc.task);
1058     }
1059 }
1060 
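/*
 * Device tree translation: the first cell of a client's "dmas"
 * specifier is taken as the DRQ (DMA request line) number and any
 * free virtual channel is bound to it, e.g. (illustrative values):
 *
 *	dmas = <&dma 22>;
 *	dma-names = "tx";
 */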
1061 static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
1062                      struct of_dma *ofdma)
1063 {
1064     struct owl_dma *od = ofdma->of_dma_data;
1065     struct owl_dma_vchan *vchan;
1066     struct dma_chan *chan;
1067     u8 drq = dma_spec->args[0];
1068 
1069     if (drq > od->nr_vchans)
1070         return NULL;
1071 
1072     chan = dma_get_any_slave_channel(&od->dma);
1073     if (!chan)
1074         return NULL;
1075 
1076     vchan = to_owl_vchan(chan);
1077     vchan->drq = drq;
1078 
1079     return chan;
1080 }
1081 
1082 static const struct of_device_id owl_dma_match[] = {
1083     { .compatible = "actions,s500-dma", .data = (void *)S900_DMA,},
1084     { .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
1085     { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
1086     { /* sentinel */ },
1087 };
1088 MODULE_DEVICE_TABLE(of, owl_dma_match);
1089 
1090 static int owl_dma_probe(struct platform_device *pdev)
1091 {
1092     struct device_node *np = pdev->dev.of_node;
1093     struct owl_dma *od;
1094     int ret, i, nr_channels, nr_requests;
1095 
1096     od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1097     if (!od)
1098         return -ENOMEM;
1099 
1100     od->base = devm_platform_ioremap_resource(pdev, 0);
1101     if (IS_ERR(od->base))
1102         return PTR_ERR(od->base);
1103 
1104     ret = of_property_read_u32(np, "dma-channels", &nr_channels);
1105     if (ret) {
1106         dev_err(&pdev->dev, "can't get dma-channels\n");
1107         return ret;
1108     }
1109 
1110     ret = of_property_read_u32(np, "dma-requests", &nr_requests);
1111     if (ret) {
1112         dev_err(&pdev->dev, "can't get dma-requests\n");
1113         return ret;
1114     }
1115 
1116     dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
1117          nr_channels, nr_requests);
1118 
1119     od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
1120 
1121     od->nr_pchans = nr_channels;
1122     od->nr_vchans = nr_requests;
1123 
1124     pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
1125 
1126     platform_set_drvdata(pdev, od);
1127     spin_lock_init(&od->lock);
1128 
1129     dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
1130     dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
1131     dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
1132 
1133     od->dma.dev = &pdev->dev;
1134     od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
1135     od->dma.device_tx_status = owl_dma_tx_status;
1136     od->dma.device_issue_pending = owl_dma_issue_pending;
1137     od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
1138     od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
1139     od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
1140     od->dma.device_config = owl_dma_config;
1141     od->dma.device_pause = owl_dma_pause;
1142     od->dma.device_resume = owl_dma_resume;
1143     od->dma.device_terminate_all = owl_dma_terminate_all;
1144     od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1145     od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1146     od->dma.directions = BIT(DMA_MEM_TO_MEM);
1147     od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1148 
1149     INIT_LIST_HEAD(&od->dma.channels);
1150 
1151     od->clk = devm_clk_get(&pdev->dev, NULL);
1152     if (IS_ERR(od->clk)) {
1153         dev_err(&pdev->dev, "unable to get clock\n");
1154         return PTR_ERR(od->clk);
1155     }
1156 
1157     /*
1158      * Even though the DMA controller is capable of generating 4
1159      * IRQs for the DMA priority feature, we only use 1 IRQ for
1160      * simplicity.
1161      */
1162     od->irq = platform_get_irq(pdev, 0);
1163     ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
1164                    dev_name(&pdev->dev), od);
1165     if (ret) {
1166         dev_err(&pdev->dev, "unable to request IRQ\n");
1167         return ret;
1168     }
1169 
1170     /* Init physical channels */
1171     od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
1172                   sizeof(struct owl_dma_pchan), GFP_KERNEL);
1173     if (!od->pchans)
1174         return -ENOMEM;
1175 
1176     for (i = 0; i < od->nr_pchans; i++) {
1177         struct owl_dma_pchan *pchan = &od->pchans[i];
1178 
1179         pchan->id = i;
1180         pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
1181     }
1182 
1183     /* Init virtual channels */
1184     od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
1185                   sizeof(struct owl_dma_vchan), GFP_KERNEL);
1186     if (!od->vchans)
1187         return -ENOMEM;
1188 
1189     for (i = 0; i < od->nr_vchans; i++) {
1190         struct owl_dma_vchan *vchan = &od->vchans[i];
1191 
1192         vchan->vc.desc_free = owl_dma_desc_free;
1193         vchan_init(&vchan->vc, &od->dma);
1194     }
1195 
1196     /* Create a pool of consistent memory blocks for hardware descriptors */
1197     od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
1198                        sizeof(struct owl_dma_lli),
1199                        __alignof__(struct owl_dma_lli),
1200                        0);
1201     if (!od->lli_pool) {
1202         dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
1203         return -ENOMEM;
1204     }
1205 
1206     clk_prepare_enable(od->clk);
1207 
1208     ret = dma_async_device_register(&od->dma);
1209     if (ret) {
1210         dev_err(&pdev->dev, "failed to register DMA engine device\n");
1211         goto err_pool_free;
1212     }
1213 
1214     /* Device-tree DMA controller registration */
1215     ret = of_dma_controller_register(pdev->dev.of_node,
1216                      owl_dma_of_xlate, od);
1217     if (ret) {
1218         dev_err(&pdev->dev, "of_dma_controller_register failed\n");
1219         goto err_dma_unregister;
1220     }
1221 
1222     return 0;
1223 
1224 err_dma_unregister:
1225     dma_async_device_unregister(&od->dma);
1226 err_pool_free:
1227     clk_disable_unprepare(od->clk);
1228     dma_pool_destroy(od->lli_pool);
1229 
1230     return ret;
1231 }
1232 
1233 static int owl_dma_remove(struct platform_device *pdev)
1234 {
1235     struct owl_dma *od = platform_get_drvdata(pdev);
1236 
1237     of_dma_controller_free(pdev->dev.of_node);
1238     dma_async_device_unregister(&od->dma);
1239 
1240     /* Mask all interrupts for this execution environment */
1241     dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);
1242 
1243     /* Make sure we won't have any further interrupts */
1244     devm_free_irq(od->dma.dev, od->irq, od);
1245 
1246     owl_dma_free(od);
1247 
1248     clk_disable_unprepare(od->clk);
1249     dma_pool_destroy(od->lli_pool);
1250 
1251     return 0;
1252 }
1253 
1254 static struct platform_driver owl_dma_driver = {
1255     .probe  = owl_dma_probe,
1256     .remove = owl_dma_remove,
1257     .driver = {
1258         .name = "dma-owl",
1259         .of_match_table = of_match_ptr(owl_dma_match),
1260     },
1261 };
1262 
1263 static int owl_dma_init(void)
1264 {
1265     return platform_driver_register(&owl_dma_driver);
1266 }
1267 subsys_initcall(owl_dma_init);
1268 
1269 static void __exit owl_dma_exit(void)
1270 {
1271     platform_driver_unregister(&owl_dma_driver);
1272 }
1273 module_exit(owl_dma_exit);
1274 
1275 MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
1276 MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
1277 MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
1278 MODULE_LICENSE("GPL");