0001 // SPDX-License-Identifier: GPL-2.0
0002 // Copyright (c) 2018-2019 MediaTek Inc.
0003 
0004 /*
0005  * Driver for MediaTek Command-Queue DMA Controller
0006  *
0007  * Author: Shun-Chih Yu <shun-chih.yu@mediatek.com>
0008  *
0009  */
0010 
0011 #include <linux/bitops.h>
0012 #include <linux/clk.h>
0013 #include <linux/dmaengine.h>
0014 #include <linux/dma-mapping.h>
0015 #include <linux/err.h>
0016 #include <linux/iopoll.h>
0017 #include <linux/interrupt.h>
0018 #include <linux/list.h>
0019 #include <linux/module.h>
0020 #include <linux/of.h>
0021 #include <linux/of_device.h>
0022 #include <linux/of_dma.h>
0023 #include <linux/platform_device.h>
0024 #include <linux/pm_runtime.h>
0025 #include <linux/refcount.h>
0026 #include <linux/slab.h>
0027 
0028 #include "../virt-dma.h"
0029 
0030 #define MTK_CQDMA_USEC_POLL     10
0031 #define MTK_CQDMA_TIMEOUT_POLL      1000
0032 #define MTK_CQDMA_DMA_BUSWIDTHS     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
0033 #define MTK_CQDMA_ALIGN_SIZE        1
0034 
0035 /* The default number of virtual channels */
0036 #define MTK_CQDMA_NR_VCHANS     32
0037 
0038 /* The default number of physical channels */
0039 #define MTK_CQDMA_NR_PCHANS     3
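
/*
 * Note: these defaults are only fallbacks.  mtk_cqdma_probe() first tries to
 * read the optional "dma-requests" and "dma-channels" devicetree properties
 * and only uses the values above when those properties are absent.
 */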
0040 
0041 /* Registers for underlying dma manipulation */
0042 #define MTK_CQDMA_INT_FLAG      0x0
0043 #define MTK_CQDMA_INT_EN        0x4
0044 #define MTK_CQDMA_EN            0x8
0045 #define MTK_CQDMA_RESET         0xc
0046 #define MTK_CQDMA_FLUSH         0x14
0047 #define MTK_CQDMA_SRC           0x1c
0048 #define MTK_CQDMA_DST           0x20
0049 #define MTK_CQDMA_LEN1          0x24
0050 #define MTK_CQDMA_LEN2          0x28
0051 #define MTK_CQDMA_SRC2          0x60
0052 #define MTK_CQDMA_DST2          0x64
0053 
0054 /* Registers setting */
0055 #define MTK_CQDMA_EN_BIT        BIT(0)
0056 #define MTK_CQDMA_INT_FLAG_BIT      BIT(0)
0057 #define MTK_CQDMA_INT_EN_BIT        BIT(0)
0058 #define MTK_CQDMA_FLUSH_BIT     BIT(0)
0059 
0060 #define MTK_CQDMA_WARM_RST_BIT      BIT(0)
0061 #define MTK_CQDMA_HARD_RST_BIT      BIT(1)
0062 
0063 #define MTK_CQDMA_MAX_LEN       GENMASK(27, 0)
0064 #define MTK_CQDMA_ADDR_LIMIT        GENMASK(31, 0)
0065 #define MTK_CQDMA_ADDR2_SHFIT       (32)
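
/*
 * A single hardware transfer is limited to MTK_CQDMA_MAX_LEN bytes
 * (GENMASK(27, 0), i.e. 256 MiB - 1).  Larger memcpy requests are split by
 * mtk_cqdma_prep_dma_memcpy() into DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN)
 * descriptors, e.g. a 600 MiB copy is described by three CVDs.  DMA addresses
 * wider than 32 bits are programmed in two halves: the low 32 bits go into
 * MTK_CQDMA_SRC/MTK_CQDMA_DST and the upper bits into
 * MTK_CQDMA_SRC2/MTK_CQDMA_DST2 (see mtk_cqdma_start()).
 */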
0066 
0067 /**
0068  * struct mtk_cqdma_vdesc - The struct holding info describing virtual
0069  *                         descriptor (CVD)
0070  * @vd:                    An instance for struct virt_dma_desc
0071  * @len:                   The total data size the device wants to move
0072  * @residue:               The remaining data size the device will move
0073  * @dest:                  The destination address the device wants to move to
0074  * @src:                   The source address the device wants to move from
0075  * @ch:                    The pointer to the corresponding dma channel
0076  * @node:                  The list_head struct used to build the link list of VDs
0077  * @parent:                The pointer to the parent CVD
0078  */
0079 struct mtk_cqdma_vdesc {
0080     struct virt_dma_desc vd;
0081     size_t len;
0082     size_t residue;
0083     dma_addr_t dest;
0084     dma_addr_t src;
0085     struct dma_chan *ch;
0086 
0087     struct list_head node;
0088     struct mtk_cqdma_vdesc *parent;
0089 };
0090 
0091 /**
0092  * struct mtk_cqdma_pchan - The struct holding info describing physical
0093  *                         channel (PC)
0094  * @queue:                 Queue for the CVDs issued to this PC
0095  * @base:                  The mapped register I/O base of this PC
0096  * @irq:                   The IRQ that this PC is using
0097  * @refcnt:                Track how many VCs are using this PC
0098  * @tasklet:               Tasklet for this PC
0099  * @lock:                  Lock to protect against multiple VCs accessing the PC
0100  */
0101 struct mtk_cqdma_pchan {
0102     struct list_head queue;
0103     void __iomem *base;
0104     u32 irq;
0105 
0106     refcount_t refcnt;
0107 
0108     struct tasklet_struct tasklet;
0109 
0110     /* lock to protect PC */
0111     spinlock_t lock;
0112 };
0113 
0114 /**
0115  * struct mtk_cqdma_vchan - The struct holding info describing virtual
0116  *                         channel (VC)
0117  * @vc:                    An instance for struct virt_dma_chan
0118  * @pc:                    The pointer to the underlying PC
0119  * @issue_completion:      Completion used to wait for all issued descriptors to complete
0120  * @issue_synchronize:     Bool indicating channel synchronization starts
0121  */
0122 struct mtk_cqdma_vchan {
0123     struct virt_dma_chan vc;
0124     struct mtk_cqdma_pchan *pc;
0125     struct completion issue_completion;
0126     bool issue_synchronize;
0127 };
0128 
0129 /**
0130  * struct mtk_cqdma_device - The struct holding info describing CQDMA
0131  *                          device
0132  * @ddev:                   An instance for struct dma_device
0133  * @clk:                    The clock the device is using internally
0134  * @dma_requests:           The number of VCs the device supports
0135  * @dma_channels:           The number of PCs the device supports
0136  * @vc:                     The pointer to all available VCs
0137  * @pc:                     The pointer to all the underlying PCs
0138  */
0139 struct mtk_cqdma_device {
0140     struct dma_device ddev;
0141     struct clk *clk;
0142 
0143     u32 dma_requests;
0144     u32 dma_channels;
0145     struct mtk_cqdma_vchan *vc;
0146     struct mtk_cqdma_pchan **pc;
0147 };
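
/*
 * The driver exposes @dma_requests virtual channels (default
 * MTK_CQDMA_NR_VCHANS) multiplexed onto @dma_channels physical channels
 * (default MTK_CQDMA_NR_PCHANS).  A VC is bound to the PC with the lowest
 * refcount in mtk_cqdma_alloc_chan_resources(), and the PC is only flushed
 * and its interrupt disabled once that refcount drops back to zero in
 * mtk_cqdma_free_chan_resources().
 */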
0148 
0149 static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan)
0150 {
0151     return container_of(chan->device, struct mtk_cqdma_device, ddev);
0152 }
0153 
0154 static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan)
0155 {
0156     return container_of(chan, struct mtk_cqdma_vchan, vc.chan);
0157 }
0158 
0159 static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd)
0160 {
0161     return container_of(vd, struct mtk_cqdma_vdesc, vd);
0162 }
0163 
0164 static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
0165 {
0166     return cqdma->ddev.dev;
0167 }
0168 
0169 static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg)
0170 {
0171     return readl(pc->base + reg);
0172 }
0173 
0174 static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
0175 {
0176     writel_relaxed(val, pc->base + reg);
0177 }
0178 
0179 static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg,
0180             u32 mask, u32 set)
0181 {
0182     u32 val;
0183 
0184     val = mtk_dma_read(pc, reg);
0185     val &= ~mask;
0186     val |= set;
0187     mtk_dma_write(pc, reg, val);
0188 }
0189 
0190 static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
0191 {
0192     mtk_dma_rmw(pc, reg, 0, val);
0193 }
0194 
0195 static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
0196 {
0197     mtk_dma_rmw(pc, reg, val, 0);
0198 }
0199 
0200 static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd)
0201 {
0202     kfree(to_cqdma_vdesc(vd));
0203 }
0204 
0205 static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic)
0206 {
0207     u32 status = 0;
0208 
0209     if (!atomic)
0210         return readl_poll_timeout(pc->base + MTK_CQDMA_EN,
0211                       status,
0212                       !(status & MTK_CQDMA_EN_BIT),
0213                       MTK_CQDMA_USEC_POLL,
0214                       MTK_CQDMA_TIMEOUT_POLL);
0215 
0216     return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN,
0217                      status,
0218                      !(status & MTK_CQDMA_EN_BIT),
0219                      MTK_CQDMA_USEC_POLL,
0220                      MTK_CQDMA_TIMEOUT_POLL);
0221 }
0222 
0223 static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
0224 {
0225     mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
0226     mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
0227 
0228     return mtk_cqdma_poll_engine_done(pc, true);
0229 }
0230 
0231 static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
0232                 struct mtk_cqdma_vdesc *cvd)
0233 {
0234     /* wait for the previous transaction done */
0235     if (mtk_cqdma_poll_engine_done(pc, true) < 0)
0236         dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n");
0237 
0238     /* warm reset the dma engine for the new transaction */
0239     mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT);
0240     if (mtk_cqdma_poll_engine_done(pc, true) < 0)
0241         dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n");
0242 
0243     /* setup the source */
0244     mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT);
0245 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
0246     mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT);
0247 #else
0248     mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
0249 #endif
0250 
0251     /* setup the destination */
0252     mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT);
0253 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
0254     mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
0255 #else
0256     mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
0257 #endif
0258 
0259     /* setup the length */
0260     mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len);
0261 
0262     /* start dma engine */
0263     mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT);
0264 }
0265 
0266 static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc)
0267 {
0268     struct virt_dma_desc *vd, *vd2;
0269     struct mtk_cqdma_pchan *pc = cvc->pc;
0270     struct mtk_cqdma_vdesc *cvd;
0271     bool trigger_engine = false;
0272 
0273     lockdep_assert_held(&cvc->vc.lock);
0274     lockdep_assert_held(&pc->lock);
0275 
0276     list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) {
0277         /* need to trigger dma engine if PC's queue is empty */
0278         if (list_empty(&pc->queue))
0279             trigger_engine = true;
0280 
0281         cvd = to_cqdma_vdesc(vd);
0282 
0283         /* add VD into PC's queue */
0284         list_add_tail(&cvd->node, &pc->queue);
0285 
0286         /* start the dma engine */
0287         if (trigger_engine)
0288             mtk_cqdma_start(pc, cvd);
0289 
0290         /* remove VD from list desc_issued */
0291         list_del(&vd->node);
0292     }
0293 }
0294 
0295 /*
0296  * return true if this VC is active,
0297  * meaning that there are VDs being processed by the PC
0298  */
0299 static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc)
0300 {
0301     struct mtk_cqdma_vdesc *cvd;
0302 
0303     list_for_each_entry(cvd, &cvc->pc->queue, node)
0304         if (cvc == to_cqdma_vchan(cvd->ch))
0305             return true;
0306 
0307     return false;
0308 }
0309 
0310 /*
0311  * return the pointer to the CVD that was just consumed by the PC
0312  */
0313 static struct mtk_cqdma_vdesc
0314 *mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc)
0315 {
0316     struct mtk_cqdma_vchan *cvc;
0317     struct mtk_cqdma_vdesc *cvd, *ret = NULL;
0318 
0319     /* consume a CVD from PC's queue */
0320     cvd = list_first_entry_or_null(&pc->queue,
0321                        struct mtk_cqdma_vdesc, node);
0322     if (unlikely(!cvd || !cvd->parent))
0323         return NULL;
0324 
0325     cvc = to_cqdma_vchan(cvd->ch);
0326     ret = cvd;
0327 
0328     /* update residue of the parent CVD */
0329     cvd->parent->residue -= cvd->len;
0330 
0331     /* delete CVD from PC's queue */
0332     list_del(&cvd->node);
0333 
0334     spin_lock(&cvc->vc.lock);
0335 
0336     /* check whether all the child CVDs completed */
0337     if (!cvd->parent->residue) {
0338         /* add the parent VD into list desc_completed */
0339         vchan_cookie_complete(&cvd->parent->vd);
0340 
0341         /* setup completion if this VC is under synchronization */
0342         if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) {
0343             complete(&cvc->issue_completion);
0344             cvc->issue_synchronize = false;
0345         }
0346     }
0347 
0348     spin_unlock(&cvc->vc.lock);
0349 
0350     /* start transaction for next CVD in the queue */
0351     cvd = list_first_entry_or_null(&pc->queue,
0352                        struct mtk_cqdma_vdesc, node);
0353     if (cvd)
0354         mtk_cqdma_start(pc, cvd);
0355 
0356     return ret;
0357 }
0358 
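/*
 * Completion handling is split between the hard IRQ and this tasklet:
 * mtk_cqdma_irq() clears the per-PC interrupt flag, disables the IRQ line
 * with disable_irq_nosync() and schedules the tasklet; the tasklet then
 * consumes the completed CVD from the PC queue under pc->lock, starts the
 * next CVD in the queue if any, frees child CVDs (the parent is freed through
 * desc_free by the user) and finally re-enables the IRQ line.
 */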
0359 static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
0360 {
0361     struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
0362     struct mtk_cqdma_vdesc *cvd = NULL;
0363     unsigned long flags;
0364 
0365     spin_lock_irqsave(&pc->lock, flags);
0366     /* consume the queue */
0367     cvd = mtk_cqdma_consume_work_queue(pc);
0368     spin_unlock_irqrestore(&pc->lock, flags);
0369 
0370     /* submit the next CVD */
0371     if (cvd) {
0372         dma_run_dependencies(&cvd->vd.tx);
0373 
0374         /*
0375          * free child CVD after completion.
0376          * the parent CVD would be freed with desc_free by user.
0377          */
0378         if (cvd->parent != cvd)
0379             kfree(cvd);
0380     }
0381 
0382     /* re-enable interrupt before leaving tasklet */
0383     enable_irq(pc->irq);
0384 }
0385 
0386 static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
0387 {
0388     struct mtk_cqdma_device *cqdma = devid;
0389     irqreturn_t ret = IRQ_NONE;
0390     bool schedule_tasklet = false;
0391     u32 i;
0392 
0393     /* clear interrupt flags for each PC */
0394     for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
0395         spin_lock(&cqdma->pc[i]->lock);
0396         if (mtk_dma_read(cqdma->pc[i],
0397                  MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) {
0398             /* clear interrupt */
0399             mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
0400                     MTK_CQDMA_INT_FLAG_BIT);
0401 
0402             schedule_tasklet = true;
0403             ret = IRQ_HANDLED;
0404         }
0405         spin_unlock(&cqdma->pc[i]->lock);
0406 
0407         if (schedule_tasklet) {
0408             /* disable interrupt */
0409             disable_irq_nosync(cqdma->pc[i]->irq);
0410 
0411             /* schedule the tasklet to handle the transactions */
0412             tasklet_schedule(&cqdma->pc[i]->tasklet);
0413         }
0414     }
0415 
0416     return ret;
0417 }
0418 
0419 static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
0420                             dma_cookie_t cookie)
0421 {
0422     struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
0423     struct virt_dma_desc *vd;
0424     unsigned long flags;
0425 
0426     spin_lock_irqsave(&cvc->pc->lock, flags);
0427     list_for_each_entry(vd, &cvc->pc->queue, node)
0428         if (vd->tx.cookie == cookie) {
0429             spin_unlock_irqrestore(&cvc->pc->lock, flags);
0430             return vd;
0431         }
0432     spin_unlock_irqrestore(&cvc->pc->lock, flags);
0433 
0434     list_for_each_entry(vd, &cvc->vc.desc_issued, node)
0435         if (vd->tx.cookie == cookie)
0436             return vd;
0437 
0438     return NULL;
0439 }
0440 
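/*
 * Residue is reported with segment granularity: as each child CVD completes,
 * mtk_cqdma_consume_work_queue() subtracts its length from the parent CVD's
 * residue, which mtk_cqdma_tx_status() then reports back to the caller.
 */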
0441 static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
0442                        dma_cookie_t cookie,
0443                        struct dma_tx_state *txstate)
0444 {
0445     struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
0446     struct mtk_cqdma_vdesc *cvd;
0447     struct virt_dma_desc *vd;
0448     enum dma_status ret;
0449     unsigned long flags;
0450     size_t bytes = 0;
0451 
0452     ret = dma_cookie_status(c, cookie, txstate);
0453     if (ret == DMA_COMPLETE || !txstate)
0454         return ret;
0455 
0456     spin_lock_irqsave(&cvc->vc.lock, flags);
0457     vd = mtk_cqdma_find_active_desc(c, cookie);
0458     spin_unlock_irqrestore(&cvc->vc.lock, flags);
0459 
0460     if (vd) {
0461         cvd = to_cqdma_vdesc(vd);
0462         bytes = cvd->residue;
0463     }
0464 
0465     dma_set_residue(txstate, bytes);
0466 
0467     return ret;
0468 }
0469 
0470 static void mtk_cqdma_issue_pending(struct dma_chan *c)
0471 {
0472     struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
0473     unsigned long pc_flags;
0474     unsigned long vc_flags;
0475 
0476     /* acquire PC's lock before VC's lock for lock dependency in tasklet */
0477     spin_lock_irqsave(&cvc->pc->lock, pc_flags);
0478     spin_lock_irqsave(&cvc->vc.lock, vc_flags);
0479 
0480     if (vchan_issue_pending(&cvc->vc))
0481         mtk_cqdma_issue_vchan_pending(cvc);
0482 
0483     spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
0484     spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);
0485 }
0486 
0487 static struct dma_async_tx_descriptor *
0488 mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
0489               dma_addr_t src, size_t len, unsigned long flags)
0490 {
0491     struct mtk_cqdma_vdesc **cvd;
0492     struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL;
0493     size_t i, tlen, nr_vd;
0494 
0495     /*
0496      * In the case that the transaction length is larger than what the
0497      * DMA engine supports, a single memcpy transaction needs
0498      * to be separated into several DMA transactions.
0499      * Each DMA transaction would be described by a CVD,
0500      * and the first one is referred to as the parent CVD,
0501      * while the others are child CVDs.
0502      * The parent CVD's tx descriptor is the only tx descriptor
0503      * returned to the DMA user, and it should not be completed
0504      * until all the child CVDs completed.
0505      */
0506     nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN);
0507     cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT);
0508     if (!cvd)
0509         return NULL;
0510 
0511     for (i = 0; i < nr_vd; ++i) {
0512         cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT);
0513         if (!cvd[i]) {
0514             for (; i > 0; --i)
0515                 kfree(cvd[i - 1]);
0516             return NULL;
0517         }
0518 
0519         /* setup dma channel */
0520         cvd[i]->ch = c;
0521 
0522         /* setup source, destination, and length */
0523         tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
0524         cvd[i]->len = tlen;
0525         cvd[i]->src = src;
0526         cvd[i]->dest = dest;
0527 
0528         /* setup tx descriptor */
0529         tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags);
0530         tx->next = NULL;
0531 
0532         if (!i) {
0533             cvd[0]->residue = len;
0534         } else {
0535             prev_tx->next = tx;
0536             cvd[i]->residue = tlen;
0537         }
0538 
0539         cvd[i]->parent = cvd[0];
0540 
0541         /* update the src, dest, len, prev_tx for the next CVD */
0542         src += tlen;
0543         dest += tlen;
0544         len -= tlen;
0545         prev_tx = tx;
0546     }
0547 
0548     return &cvd[0]->vd.tx;
0549 }
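
/*
 * Illustrative sketch (not part of this driver): a dmaengine client would
 * typically exercise the memcpy capability prepared above roughly as follows.
 * The channel name "cqdma", the dst_dma/src_dma/len variables and the error
 * handling are assumptions for the example only.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "cqdma");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		dma_sync_wait(chan, cookie);   (or use a completion callback)
 *	}
 *	dma_release_channel(chan);
 */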
0550 
0551 static void mtk_cqdma_free_inactive_desc(struct dma_chan *c)
0552 {
0553     struct virt_dma_chan *vc = to_virt_chan(c);
0554     unsigned long flags;
0555     LIST_HEAD(head);
0556 
0557     /*
0558      * set desc_allocated, desc_submitted,
0559      * and desc_issued as the candidates to be freed
0560      */
0561     spin_lock_irqsave(&vc->lock, flags);
0562     list_splice_tail_init(&vc->desc_allocated, &head);
0563     list_splice_tail_init(&vc->desc_submitted, &head);
0564     list_splice_tail_init(&vc->desc_issued, &head);
0565     spin_unlock_irqrestore(&vc->lock, flags);
0566 
0567     /* free descriptor lists */
0568     vchan_dma_desc_free_list(vc, &head);
0569 }
0570 
0571 static void mtk_cqdma_free_active_desc(struct dma_chan *c)
0572 {
0573     struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
0574     bool sync_needed = false;
0575     unsigned long pc_flags;
0576     unsigned long vc_flags;
0577 
0578     /* acquire PC's lock first due to lock dependency in dma ISR */
0579     spin_lock_irqsave(&cvc->pc->lock, pc_flags);
0580     spin_lock_irqsave(&cvc->vc.lock, vc_flags);
0581 
0582     /* synchronization is required if this VC is active */
0583     if (mtk_cqdma_is_vchan_active(cvc)) {
0584         cvc->issue_synchronize = true;
0585         sync_needed = true;
0586     }
0587 
0588     spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
0589     spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);
0590 
0591     /* waiting for the completion of this VC */
0592     if (sync_needed)
0593         wait_for_completion(&cvc->issue_completion);
0594 
0595     /* free all descriptors in list desc_completed */
0596     vchan_synchronize(&cvc->vc);
0597 
0598     WARN_ONCE(!list_empty(&cvc->vc.desc_completed),
0599           "Desc pending still in list desc_completed\n");
0600 }
0601 
0602 static int mtk_cqdma_terminate_all(struct dma_chan *c)
0603 {
0604     /* free descriptors not processed yet by hardware */
0605     mtk_cqdma_free_inactive_desc(c);
0606 
0607     /* free descriptors being processed by hardware */
0608     mtk_cqdma_free_active_desc(c);
0609 
0610     return 0;
0611 }
0612 
0613 static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
0614 {
0615     struct mtk_cqdma_device *cqdma = to_cqdma_dev(c);
0616     struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c);
0617     struct mtk_cqdma_pchan *pc = NULL;
0618     u32 i, min_refcnt = U32_MAX, refcnt;
0619     unsigned long flags;
0620 
0621     /* allocate PC with the minimum refcount */
0622     for (i = 0; i < cqdma->dma_channels; ++i) {
0623         refcnt = refcount_read(&cqdma->pc[i]->refcnt);
0624         if (refcnt < min_refcnt) {
0625             pc = cqdma->pc[i];
0626             min_refcnt = refcnt;
0627         }
0628     }
0629 
0630     if (!pc)
0631         return -ENOSPC;
0632 
0633     spin_lock_irqsave(&pc->lock, flags);
0634 
0635     if (!refcount_read(&pc->refcnt)) {
0636         /* allocate PC when the refcount is zero */
0637         mtk_cqdma_hard_reset(pc);
0638 
0639         /* enable interrupt for this PC */
0640         mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
0641 
0642         /*
0643          * refcount_inc() would complain about incrementing from 0 (treated
0644          * as a potential use-after-free), so explicitly set it to 1 instead.
0645          */
0646         refcount_set(&pc->refcnt, 1);
0647     } else {
0648         refcount_inc(&pc->refcnt);
0649     }
0650 
0651     spin_unlock_irqrestore(&pc->lock, flags);
0652 
0653     vc->pc = pc;
0654 
0655     return 0;
0656 }
0657 
0658 static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
0659 {
0660     struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
0661     unsigned long flags;
0662 
0663     /* free all descriptors in all lists on the VC */
0664     mtk_cqdma_terminate_all(c);
0665 
0666     spin_lock_irqsave(&cvc->pc->lock, flags);
0667 
0668     /* PC is not freed until there is no VC mapped to it */
0669     if (refcount_dec_and_test(&cvc->pc->refcnt)) {
0670         /* start the flush operation and stop the engine */
0671         mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
0672 
0673         /* wait for the completion of flush operation */
0674         if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
0675             dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
0676 
0677         /* clear the flush bit and interrupt flag */
0678         mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
0679         mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG,
0680                 MTK_CQDMA_INT_FLAG_BIT);
0681 
0682         /* disable interrupt for this PC */
0683         mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
0684     }
0685 
0686     spin_unlock_irqrestore(&cvc->pc->lock, flags);
0687 }
0688 
0689 static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
0690 {
0691     unsigned long flags;
0692     int err;
0693     u32 i;
0694 
0695     pm_runtime_enable(cqdma2dev(cqdma));
0696     pm_runtime_get_sync(cqdma2dev(cqdma));
0697 
0698     err = clk_prepare_enable(cqdma->clk);
0699 
0700     if (err) {
0701         pm_runtime_put_sync(cqdma2dev(cqdma));
0702         pm_runtime_disable(cqdma2dev(cqdma));
0703         return err;
0704     }
0705 
0706     /* reset all PCs */
0707     for (i = 0; i < cqdma->dma_channels; ++i) {
0708         spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
0709         if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
0710             dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
0711             spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
0712 
0713             clk_disable_unprepare(cqdma->clk);
0714             pm_runtime_put_sync(cqdma2dev(cqdma));
0715             pm_runtime_disable(cqdma2dev(cqdma));
0716             return -EINVAL;
0717         }
0718         spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
0719     }
0720 
0721     return 0;
0722 }
0723 
0724 static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma)
0725 {
0726     unsigned long flags;
0727     u32 i;
0728 
0729     /* reset all PCs */
0730     for (i = 0; i < cqdma->dma_channels; ++i) {
0731         spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
0732         if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0)
0733             dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
0734         spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
0735     }
0736 
0737     clk_disable_unprepare(cqdma->clk);
0738 
0739     pm_runtime_put_sync(cqdma2dev(cqdma));
0740     pm_runtime_disable(cqdma2dev(cqdma));
0741 }
0742 
0743 static const struct of_device_id mtk_cqdma_match[] = {
0744     { .compatible = "mediatek,mt6765-cqdma" },
0745     { /* sentinel */ }
0746 };
0747 MODULE_DEVICE_TABLE(of, mtk_cqdma_match);
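
/*
 * Hypothetical devicetree example.  The unit addresses, register ranges,
 * interrupt numbers and clock phandle below are placeholders for illustration
 * only; one reg region and one interrupt are expected per physical channel,
 * and the clock is looked up by the name "cqdma".
 *
 *	cqdma: dma-controller@10212000 {
 *		compatible = "mediatek,mt6765-cqdma";
 *		reg = <0x10212000 0x80>,
 *		      <0x10212080 0x80>,
 *		      <0x10212100 0x80>;
 *		interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_LOW>,
 *			     <GIC_SPI 140 IRQ_TYPE_LEVEL_LOW>,
 *			     <GIC_SPI 141 IRQ_TYPE_LEVEL_LOW>;
 *		clocks = <&infracfg 0>;
 *		clock-names = "cqdma";
 *		dma-channels = <3>;
 *		dma-requests = <32>;
 *		#dma-cells = <1>;
 *	};
 */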
0748 
0749 static int mtk_cqdma_probe(struct platform_device *pdev)
0750 {
0751     struct mtk_cqdma_device *cqdma;
0752     struct mtk_cqdma_vchan *vc;
0753     struct dma_device *dd;
0754     int err;
0755     u32 i;
0756 
0757     cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL);
0758     if (!cqdma)
0759         return -ENOMEM;
0760 
0761     dd = &cqdma->ddev;
0762 
0763     cqdma->clk = devm_clk_get(&pdev->dev, "cqdma");
0764     if (IS_ERR(cqdma->clk)) {
0765         dev_err(&pdev->dev, "No clock for %s\n",
0766             dev_name(&pdev->dev));
0767         return PTR_ERR(cqdma->clk);
0768     }
0769 
0770     dma_cap_set(DMA_MEMCPY, dd->cap_mask);
0771 
0772     dd->copy_align = MTK_CQDMA_ALIGN_SIZE;
0773     dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources;
0774     dd->device_free_chan_resources = mtk_cqdma_free_chan_resources;
0775     dd->device_tx_status = mtk_cqdma_tx_status;
0776     dd->device_issue_pending = mtk_cqdma_issue_pending;
0777     dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy;
0778     dd->device_terminate_all = mtk_cqdma_terminate_all;
0779     dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
0780     dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
0781     dd->directions = BIT(DMA_MEM_TO_MEM);
0782     dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
0783     dd->dev = &pdev->dev;
0784     INIT_LIST_HEAD(&dd->channels);
0785 
0786     if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
0787                               "dma-requests",
0788                               &cqdma->dma_requests)) {
0789         dev_info(&pdev->dev,
0790              "Using %u as missing dma-requests property\n",
0791              MTK_CQDMA_NR_VCHANS);
0792 
0793         cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
0794     }
0795 
0796     if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
0797                               "dma-channels",
0798                               &cqdma->dma_channels)) {
0799         dev_info(&pdev->dev,
0800              "Using %u as missing dma-channels property\n",
0801              MTK_CQDMA_NR_PCHANS);
0802 
0803         cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
0804     }
0805 
0806     cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
0807                  sizeof(*cqdma->pc), GFP_KERNEL);
0808     if (!cqdma->pc)
0809         return -ENOMEM;
0810 
0811     /* initialization for PCs */
0812     for (i = 0; i < cqdma->dma_channels; ++i) {
0813         cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
0814                         sizeof(**cqdma->pc), GFP_KERNEL);
0815         if (!cqdma->pc[i])
0816             return -ENOMEM;
0817 
0818         INIT_LIST_HEAD(&cqdma->pc[i]->queue);
0819         spin_lock_init(&cqdma->pc[i]->lock);
0820         refcount_set(&cqdma->pc[i]->refcnt, 0);
0821         cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
0822         if (IS_ERR(cqdma->pc[i]->base))
0823             return PTR_ERR(cqdma->pc[i]->base);
0824 
0825         /* allocate IRQ resource */
0826         err = platform_get_irq(pdev, i);
0827         if (err < 0)
0828             return err;
0829         cqdma->pc[i]->irq = err;
0830 
0831         err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
0832                        mtk_cqdma_irq, 0, dev_name(&pdev->dev),
0833                        cqdma);
0834         if (err) {
0835             dev_err(&pdev->dev,
0836                 "request_irq failed with err %d\n", err);
0837             return -EINVAL;
0838         }
0839     }
0840 
0841     /* allocate resource for VCs */
0842     cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests,
0843                  sizeof(*cqdma->vc), GFP_KERNEL);
0844     if (!cqdma->vc)
0845         return -ENOMEM;
0846 
0847     for (i = 0; i < cqdma->dma_requests; i++) {
0848         vc = &cqdma->vc[i];
0849         vc->vc.desc_free = mtk_cqdma_vdesc_free;
0850         vchan_init(&vc->vc, dd);
0851         init_completion(&vc->issue_completion);
0852     }
0853 
0854     err = dma_async_device_register(dd);
0855     if (err)
0856         return err;
0857 
0858     err = of_dma_controller_register(pdev->dev.of_node,
0859                      of_dma_xlate_by_chan_id, cqdma);
0860     if (err) {
0861         dev_err(&pdev->dev,
0862             "MediaTek CQDMA OF registration failed %d\n", err);
0863         goto err_unregister;
0864     }
0865 
0866     err = mtk_cqdma_hw_init(cqdma);
0867     if (err) {
0868         dev_err(&pdev->dev,
0869             "MediaTek CQDMA HW initialization failed %d\n", err);
0870         goto err_unregister;
0871     }
0872 
0873     platform_set_drvdata(pdev, cqdma);
0874 
0875     /* initialize tasklet for each PC */
0876     for (i = 0; i < cqdma->dma_channels; ++i)
0877         tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);
0878 
0879     dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n");
0880 
0881     return 0;
0882 
0883 err_unregister:
0884     dma_async_device_unregister(dd);
0885 
0886     return err;
0887 }
0888 
0889 static int mtk_cqdma_remove(struct platform_device *pdev)
0890 {
0891     struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
0892     struct mtk_cqdma_vchan *vc;
0893     unsigned long flags;
0894     int i;
0895 
0896     /* kill VC task */
0897     for (i = 0; i < cqdma->dma_requests; i++) {
0898         vc = &cqdma->vc[i];
0899 
0900         list_del(&vc->vc.chan.device_node);
0901         tasklet_kill(&vc->vc.task);
0902     }
0903 
0904     /* disable interrupt */
0905     for (i = 0; i < cqdma->dma_channels; i++) {
0906         spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
0907         mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
0908                 MTK_CQDMA_INT_EN_BIT);
0909         spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
0910 
0911         /* wait for any pending IRQ handlers to complete */
0912         synchronize_irq(cqdma->pc[i]->irq);
0913 
0914         tasklet_kill(&cqdma->pc[i]->tasklet);
0915     }
0916 
0917     /* disable hardware */
0918     mtk_cqdma_hw_deinit(cqdma);
0919 
0920     dma_async_device_unregister(&cqdma->ddev);
0921     of_dma_controller_free(pdev->dev.of_node);
0922 
0923     return 0;
0924 }
0925 
0926 static struct platform_driver mtk_cqdma_driver = {
0927     .probe = mtk_cqdma_probe,
0928     .remove = mtk_cqdma_remove,
0929     .driver = {
0930         .name           = KBUILD_MODNAME,
0931         .of_match_table = mtk_cqdma_match,
0932     },
0933 };
0934 module_platform_driver(mtk_cqdma_driver);
0935 
0936 MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver");
0937 MODULE_AUTHOR("Shun-Chih Yu <shun-chih.yu@mediatek.com>");
0938 MODULE_LICENSE("GPL v2");