// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for MediaTek Command-Queue DMA Controller
 *
 * Author: Shun-Chih Yu <shun-chih.yu@mediatek.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "../virt-dma.h"

/* Poll interval and timeout for engine-idle polling, in microseconds */
#define MTK_CQDMA_USEC_POLL		10
#define MTK_CQDMA_TIMEOUT_POLL		1000
#define MTK_CQDMA_DMA_BUSWIDTHS		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
#define MTK_CQDMA_ALIGN_SIZE		1

/* The default number of virtual channels */
#define MTK_CQDMA_NR_VCHANS		32

/* The default number of physical channels */
#define MTK_CQDMA_NR_PCHANS		3

/* Registers for underlying DMA manipulation */
#define MTK_CQDMA_INT_FLAG		0x0
#define MTK_CQDMA_INT_EN		0x4
#define MTK_CQDMA_EN			0x8
#define MTK_CQDMA_RESET			0xc
#define MTK_CQDMA_FLUSH			0x14
#define MTK_CQDMA_SRC			0x1c
#define MTK_CQDMA_DST			0x20
#define MTK_CQDMA_LEN1			0x24
#define MTK_CQDMA_LEN2			0x28
#define MTK_CQDMA_SRC2			0x60
#define MTK_CQDMA_DST2			0x64

/* Register settings */
#define MTK_CQDMA_EN_BIT		BIT(0)
#define MTK_CQDMA_INT_FLAG_BIT		BIT(0)
#define MTK_CQDMA_INT_EN_BIT		BIT(0)
#define MTK_CQDMA_FLUSH_BIT		BIT(0)

#define MTK_CQDMA_WARM_RST_BIT		BIT(0)
#define MTK_CQDMA_HARD_RST_BIT		BIT(1)

#define MTK_CQDMA_MAX_LEN		GENMASK(27, 0)
#define MTK_CQDMA_ADDR_LIMIT		GENMASK(31, 0)
#define MTK_CQDMA_ADDR2_SHIFT		(32)

/**
 * struct mtk_cqdma_vdesc - The struct holding info describing virtual
 *                          descriptor (CVD)
 * @vd:                     An instance for struct virt_dma_desc
 * @len:                    The total data size device wants to move
 * @residue:                The remaining data size device will move
 * @dest:                   The destination address device wants to move to
 * @src:                    The source address device wants to move from
 * @ch:                     The pointer to the corresponding dma channel
 * @node:                   The list_head struct to build link-list for VDs
 * @parent:                 The pointer to the parent CVD
 */
struct mtk_cqdma_vdesc {
	struct virt_dma_desc vd;
	size_t len;
	size_t residue;
	dma_addr_t dest;
	dma_addr_t src;
	struct dma_chan *ch;

	struct list_head node;
	struct mtk_cqdma_vdesc *parent;
};

/**
 * struct mtk_cqdma_pchan - The struct holding info describing physical
 *                          channel (PC)
 * @queue:                  Queue for the CVDs issued to this PC
 * @base:                   The mapped register I/O base of this PC
 * @irq:                    The IRQ this PC is using
 * @refcnt:                 Track how many VCs are using this PC
 * @tasklet:                Tasklet for this PC
 * @lock:                   Lock protecting the PC against concurrent VC access
 */
struct mtk_cqdma_pchan {
	struct list_head queue;
	void __iomem *base;
	u32 irq;

	refcount_t refcnt;

	struct tasklet_struct tasklet;

	/* lock to protect PC */
	spinlock_t lock;
};

/**
 * struct mtk_cqdma_vchan - The struct holding info describing virtual
 *                          channel (VC)
 * @vc:                     An instance for struct virt_dma_chan
 * @pc:                     The pointer to the underlying PC
 * @issue_completion:       The wait for all issued descriptors completed
 * @issue_synchronize:      Bool indicating channel synchronization starts
 */
struct mtk_cqdma_vchan {
	struct virt_dma_chan vc;
	struct mtk_cqdma_pchan *pc;
	struct completion issue_completion;
	bool issue_synchronize;
};

/**
 * struct mtk_cqdma_device - The struct holding info describing CQDMA
 *                           device
 * @ddev:                    An instance for struct dma_device
 * @clk:                     The clock the device is using
 * @dma_requests:            The number of VCs the device supports
 * @dma_channels:            The number of PCs the device supports
 * @vc:                      The pointer to all available VCs
 * @pc:                      The pointer to all the underlying PCs
 */
struct mtk_cqdma_device {
	struct dma_device ddev;
	struct clk *clk;

	u32 dma_requests;
	u32 dma_channels;
	struct mtk_cqdma_vchan *vc;
	struct mtk_cqdma_pchan **pc;
};

static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct mtk_cqdma_device, ddev);
}

static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct mtk_cqdma_vchan, vc.chan);
}

static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct mtk_cqdma_vdesc, vd);
}

static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
{
	return cqdma->ddev.dev;
}

static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg)
{
	return readl(pc->base + reg);
}

static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	writel_relaxed(val, pc->base + reg);
}

static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg,
			u32 mask, u32 set)
{
	u32 val;

	val = mtk_dma_read(pc, reg);
	val &= ~mask;
	val |= set;
	mtk_dma_write(pc, reg, val);
}

static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	mtk_dma_rmw(pc, reg, 0, val);
}

static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
	mtk_dma_rmw(pc, reg, val, 0);
}

static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd)
{
	kfree(to_cqdma_vdesc(vd));
}

static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic)
{
	u32 status = 0;

	if (!atomic)
		return readl_poll_timeout(pc->base + MTK_CQDMA_EN,
					  status,
					  !(status & MTK_CQDMA_EN_BIT),
					  MTK_CQDMA_USEC_POLL,
					  MTK_CQDMA_TIMEOUT_POLL);

	return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN,
					 status,
					 !(status & MTK_CQDMA_EN_BIT),
					 MTK_CQDMA_USEC_POLL,
					 MTK_CQDMA_TIMEOUT_POLL);
}

static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
{
	mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
	mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);

	return mtk_cqdma_poll_engine_done(pc, true);
}

static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
			    struct mtk_cqdma_vdesc *cvd)
{
	/* wait for the previous transaction done */
	if (mtk_cqdma_poll_engine_done(pc, true) < 0)
		dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n");

	/* warm reset the dma engine for the new transaction */
	mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT);
	if (mtk_cqdma_poll_engine_done(pc, true) < 0)
		dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n");

	/* setup the source */
	mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHIFT);
#else
	mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
#endif

	/* setup the destination */
	mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHIFT);
#else
	mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
#endif

	/* setup the length */
	mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len);

	/* start dma engine */
	mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT);
}

static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc)
{
	struct virt_dma_desc *vd, *vd2;
	struct mtk_cqdma_pchan *pc = cvc->pc;
	struct mtk_cqdma_vdesc *cvd;
	bool trigger_engine = false;

	lockdep_assert_held(&cvc->vc.lock);
	lockdep_assert_held(&pc->lock);

	list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) {
		/* need to trigger dma engine if PC's queue is empty */
		if (list_empty(&pc->queue))
			trigger_engine = true;

		cvd = to_cqdma_vdesc(vd);

		/* add VD into PC's queue */
		list_add_tail(&cvd->node, &pc->queue);

		/* start the dma engine if needed */
		if (trigger_engine)
			mtk_cqdma_start(pc, cvd);

		/* remove VD from list desc_issued */
		list_del(&vd->node);
	}
}

/*
 * return true if this VC is active, which means that there are
 * VDs of this VC still under processing in the PC's queue
 */
static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc)
{
	struct mtk_cqdma_vdesc *cvd;

	list_for_each_entry(cvd, &cvc->pc->queue, node)
		if (cvc == to_cqdma_vchan(cvd->ch))
			return true;

	return false;
}

/*
 * return the CVD that has just been completed and consumed from the PC's queue
 */
static struct mtk_cqdma_vdesc
*mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc)
{
	struct mtk_cqdma_vchan *cvc;
	struct mtk_cqdma_vdesc *cvd, *ret = NULL;

	/* consume a CVD from PC's queue */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (unlikely(!cvd || !cvd->parent))
		return NULL;

	cvc = to_cqdma_vchan(cvd->ch);
	ret = cvd;

	/* update residue of the parent CVD */
	cvd->parent->residue -= cvd->len;

	/* delete CVD from PC's queue */
	list_del(&cvd->node);

	spin_lock(&cvc->vc.lock);

	/* check whether all the child CVDs completed */
	if (!cvd->parent->residue) {
		/* add the parent VD into list desc_completed */
		vchan_cookie_complete(&cvd->parent->vd);

		/* setup completion if this VC is under synchronization */
		if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) {
			complete(&cvc->issue_completion);
			cvc->issue_synchronize = false;
		}
	}

	spin_unlock(&cvc->vc.lock);

	/* start transaction for the next CVD in the queue */
	cvd = list_first_entry_or_null(&pc->queue,
				       struct mtk_cqdma_vdesc, node);
	if (cvd)
		mtk_cqdma_start(pc, cvd);

	return ret;
}

static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
{
	struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
	struct mtk_cqdma_vdesc *cvd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pc->lock, flags);
	/* consume the queue */
	cvd = mtk_cqdma_consume_work_queue(pc);
	spin_unlock_irqrestore(&pc->lock, flags);

	/* submit the next CVD */
	if (cvd) {
		dma_run_dependencies(&cvd->vd.tx);

		/*
		 * free child CVD after completion.
		 * the parent CVD would be freed with desc_free by user.
		 */
		if (cvd->parent != cvd)
			kfree(cvd);
	}

	/* re-enable interrupt before leaving tasklet */
	enable_irq(pc->irq);
}

static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
{
	struct mtk_cqdma_device *cqdma = devid;
	irqreturn_t ret = IRQ_NONE;
	bool schedule_tasklet = false;
	u32 i;

	/* clear interrupt flags for each PC */
	for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
		spin_lock(&cqdma->pc[i]->lock);
		if (mtk_dma_read(cqdma->pc[i],
				 MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) {
			/* clear interrupt */
			mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
				    MTK_CQDMA_INT_FLAG_BIT);

			schedule_tasklet = true;
			ret = IRQ_HANDLED;
		}
		spin_unlock(&cqdma->pc[i]->lock);

		if (schedule_tasklet) {
			/* disable interrupt */
			disable_irq_nosync(cqdma->pc[i]->irq);

			/* switch the processing of PC's queue into tasklet */
			tasklet_schedule(&cqdma->pc[i]->tasklet);
		}
	}

	return ret;
}

static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
							dma_cookie_t cookie)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&cvc->pc->lock, flags);
	list_for_each_entry(vd, &cvc->pc->queue, node)
		if (vd->tx.cookie == cookie) {
			spin_unlock_irqrestore(&cvc->pc->lock, flags);
			return vd;
		}
	spin_unlock_irqrestore(&cvc->pc->lock, flags);

	list_for_each_entry(vd, &cvc->vc.desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}

static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	struct mtk_cqdma_vdesc *cvd;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&cvc->vc.lock, flags);
	vd = mtk_cqdma_find_active_desc(c, cookie);
	spin_unlock_irqrestore(&cvc->vc.lock, flags);

	if (vd) {
		cvd = to_cqdma_vdesc(vd);
		bytes = cvd->residue;
	}

	dma_set_residue(txstate, bytes);

	return ret;
}

static void mtk_cqdma_issue_pending(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	unsigned long pc_flags;
	unsigned long vc_flags;

	/* acquire PC's lock before VC's lock for lock dependency in tasklet */
	spin_lock_irqsave(&cvc->pc->lock, pc_flags);
	spin_lock_irqsave(&cvc->vc.lock, vc_flags);

	if (vchan_issue_pending(&cvc->vc))
		mtk_cqdma_issue_vchan_pending(cvc);

	spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
	spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);
}

static struct dma_async_tx_descriptor *
mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mtk_cqdma_vdesc **cvd;
	struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL;
	size_t i, tlen, nr_vd;

	/*
	 * In the case that the transaction length is larger than the
	 * DMA engine supports, a single memcpy transaction needs
	 * to be separated into several DMA transactions.
	 * Each DMA transaction would be described by a CVD,
	 * and the first one is referred to as the parent CVD,
	 * while the others are child CVDs.
	 * The parent CVD's tx descriptor is the only tx descriptor
	 * returned to the DMA user, and it should not be completed
	 * until all the child CVDs are completed.
	 */
	nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN);
	cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT);
	if (!cvd)
		return NULL;

	for (i = 0; i < nr_vd; ++i) {
		cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT);
		if (!cvd[i]) {
			for (; i > 0; --i)
				kfree(cvd[i - 1]);
			/* also free the pointer array itself */
			kfree(cvd);
			return NULL;
		}

		/* setup dma channel */
		cvd[i]->ch = c;

		/* setup source, destination, and length */
		tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
		cvd[i]->len = tlen;
		cvd[i]->src = src;
		cvd[i]->dest = dest;

		/* setup descriptor as the transaction */
		tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags);
		tx->next = NULL;

		if (!i) {
			cvd[0]->residue = len;
		} else {
			prev_tx->next = tx;
			cvd[i]->residue = tlen;
		}

		cvd[i]->parent = cvd[0];

		/* update the src, dest, len, prev_tx for the next iteration */
		src += tlen;
		dest += tlen;
		len -= tlen;
		prev_tx = tx;
	}

	return &cvd[0]->vd.tx;
}

static void mtk_cqdma_free_inactive_desc(struct dma_chan *c)
{
	struct virt_dma_chan *vc = to_virt_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	/*
	 * set desc_allocated, desc_submitted,
	 * and desc_issued as the candidates to be freed
	 */
	spin_lock_irqsave(&vc->lock, flags);
	list_splice_tail_init(&vc->desc_allocated, &head);
	list_splice_tail_init(&vc->desc_submitted, &head);
	list_splice_tail_init(&vc->desc_issued, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* free descriptor lists */
	vchan_dma_desc_free_list(vc, &head);
}

static void mtk_cqdma_free_active_desc(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	bool sync_needed = false;
	unsigned long pc_flags;
	unsigned long vc_flags;

	/* acquire PC's lock first due to lock dependency in dma ISR */
	spin_lock_irqsave(&cvc->pc->lock, pc_flags);
	spin_lock_irqsave(&cvc->vc.lock, vc_flags);

	/* synchronization is required if this VC is active */
	if (mtk_cqdma_is_vchan_active(cvc)) {
		cvc->issue_synchronize = true;
		sync_needed = true;
	}

	spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
	spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);

	/* waiting for the completion of this VC */
	if (sync_needed)
		wait_for_completion(&cvc->issue_completion);

	/* free all descriptors in all lists on the VC */
	vchan_synchronize(&cvc->vc);

	WARN_ONCE(!list_empty(&cvc->vc.desc_completed),
		  "Desc pending still in list desc_completed\n");
}

static int mtk_cqdma_terminate_all(struct dma_chan *c)
{
	/* free descriptors not processed yet by hardware */
	mtk_cqdma_free_inactive_desc(c);

	/* free descriptors being processed by hardware */
	mtk_cqdma_free_active_desc(c);

	return 0;
}

static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
{
	struct mtk_cqdma_device *cqdma = to_cqdma_dev(c);
	struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c);
	struct mtk_cqdma_pchan *pc = NULL;
	u32 i, min_refcnt = U32_MAX, refcnt;
	unsigned long flags;

	/* allocate the PC with the minimum refcount */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		refcnt = refcount_read(&cqdma->pc[i]->refcnt);
		if (refcnt < min_refcnt) {
			pc = cqdma->pc[i];
			min_refcnt = refcnt;
		}
	}

	if (!pc)
		return -ENOSPC;

	spin_lock_irqsave(&pc->lock, flags);

	if (!refcount_read(&pc->refcnt)) {
		/* allocate PC when the refcount is zero */
		mtk_cqdma_hard_reset(pc);

		/* enable interrupt for this PC */
		mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);

		/*
		 * refcount_inc would complain about incrementing from zero,
		 * so use refcount_set to initialize refcnt to 1 here
		 */
		refcount_set(&pc->refcnt, 1);
	} else {
		refcount_inc(&pc->refcnt);
	}

	spin_unlock_irqrestore(&pc->lock, flags);

	vc->pc = pc;

	return 0;
}

static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
{
	struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
	unsigned long flags;

	/* free all descriptors in all lists on the VC */
	mtk_cqdma_terminate_all(c);

	spin_lock_irqsave(&cvc->pc->lock, flags);

	/* PC is not freed until there is no VC mapped to it */
	if (refcount_dec_and_test(&cvc->pc->refcnt)) {
		/* start the flush operation and stop the engine */
		mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);

		/* wait for the completion of flush operation */
		if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
			dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");

		/* clear the flush bit and interrupt flag */
		mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
		mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG,
			    MTK_CQDMA_INT_FLAG_BIT);

		/* disable interrupt for this PC */
		mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
	}

	spin_unlock_irqrestore(&cvc->pc->lock, flags);
}

static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
{
	unsigned long flags;
	int err;
	u32 i;

	pm_runtime_enable(cqdma2dev(cqdma));
	pm_runtime_get_sync(cqdma2dev(cqdma));

	err = clk_prepare_enable(cqdma->clk);
	if (err) {
		pm_runtime_put_sync(cqdma2dev(cqdma));
		pm_runtime_disable(cqdma2dev(cqdma));
		return err;
	}

	/* reset all PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
			dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
			spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

			clk_disable_unprepare(cqdma->clk);
			pm_runtime_put_sync(cqdma2dev(cqdma));
			pm_runtime_disable(cqdma2dev(cqdma));
			return -EINVAL;
		}
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
	}

	return 0;
}

static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma)
{
	unsigned long flags;
	u32 i;

	/* reset all PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0)
			dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
	}

	clk_disable_unprepare(cqdma->clk);

	pm_runtime_put_sync(cqdma2dev(cqdma));
	pm_runtime_disable(cqdma2dev(cqdma));
}

static const struct of_device_id mtk_cqdma_match[] = {
	{ .compatible = "mediatek,mt6765-cqdma" },
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_cqdma_match);

static int mtk_cqdma_probe(struct platform_device *pdev)
{
	struct mtk_cqdma_device *cqdma;
	struct mtk_cqdma_vchan *vc;
	struct dma_device *dd;
	int err;
	u32 i;

	cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL);
	if (!cqdma)
		return -ENOMEM;

	dd = &cqdma->ddev;

	cqdma->clk = devm_clk_get(&pdev->dev, "cqdma");
	if (IS_ERR(cqdma->clk)) {
		dev_err(&pdev->dev, "No clock for %s\n",
			dev_name(&pdev->dev));
		return PTR_ERR(cqdma->clk);
	}

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	dd->copy_align = MTK_CQDMA_ALIGN_SIZE;
	dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources;
	dd->device_free_chan_resources = mtk_cqdma_free_chan_resources;
	dd->device_tx_status = mtk_cqdma_tx_status;
	dd->device_issue_pending = mtk_cqdma_issue_pending;
	dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy;
	dd->device_terminate_all = mtk_cqdma_terminate_all;
	dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
	dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &cqdma->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_CQDMA_NR_VCHANS);

		cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
	}

	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-channels",
						      &cqdma->dma_channels)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-channels property\n",
			 MTK_CQDMA_NR_PCHANS);

		cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
	}

	cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
				 sizeof(*cqdma->pc), GFP_KERNEL);
	if (!cqdma->pc)
		return -ENOMEM;

	/* initialization for PCs */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
					    sizeof(**cqdma->pc), GFP_KERNEL);
		if (!cqdma->pc[i])
			return -ENOMEM;

		INIT_LIST_HEAD(&cqdma->pc[i]->queue);
		spin_lock_init(&cqdma->pc[i]->lock);
		refcount_set(&cqdma->pc[i]->refcnt, 0);
		cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(cqdma->pc[i]->base))
			return PTR_ERR(cqdma->pc[i]->base);

		/* allocate IRQ resource */
		err = platform_get_irq(pdev, i);
		if (err < 0)
			return err;
		cqdma->pc[i]->irq = err;

		err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
				       mtk_cqdma_irq, 0, dev_name(&pdev->dev),
				       cqdma);
		if (err) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d\n", err);
			return -EINVAL;
		}
	}

	/* allocate resource for VCs */
	cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests,
				 sizeof(*cqdma->vc), GFP_KERNEL);
	if (!cqdma->vc)
		return -ENOMEM;

	for (i = 0; i < cqdma->dma_requests; i++) {
		vc = &cqdma->vc[i];
		vc->vc.desc_free = mtk_cqdma_vdesc_free;
		vchan_init(&vc->vc, dd);
		init_completion(&vc->issue_completion);
	}

	err = dma_async_device_register(dd);
	if (err)
		return err;

	err = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, cqdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek CQDMA OF registration failed %d\n", err);
		goto err_unregister;
	}

	err = mtk_cqdma_hw_init(cqdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek CQDMA HW initialization failed %d\n", err);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, cqdma);

	/* initialize tasklet for each PC */
	for (i = 0; i < cqdma->dma_channels; ++i)
		tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);

	dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return err;
}

static int mtk_cqdma_remove(struct platform_device *pdev)
{
	struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
	struct mtk_cqdma_vchan *vc;
	unsigned long flags;
	int i;

	/* kill VC task */
	for (i = 0; i < cqdma->dma_requests; i++) {
		vc = &cqdma->vc[i];

		list_del(&vc->vc.chan.device_node);
		tasklet_kill(&vc->vc.task);
	}

	/* disable interrupt */
	for (i = 0; i < cqdma->dma_channels; i++) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
			    MTK_CQDMA_INT_EN_BIT);
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

		/* wait for any pending IRQ handler to complete */
		synchronize_irq(cqdma->pc[i]->irq);

		tasklet_kill(&cqdma->pc[i]->tasklet);
	}

	/* disable hardware */
	mtk_cqdma_hw_deinit(cqdma);

	dma_async_device_unregister(&cqdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct platform_driver mtk_cqdma_driver = {
	.probe = mtk_cqdma_probe,
	.remove = mtk_cqdma_remove,
	.driver = {
		.name           = KBUILD_MODNAME,
		.of_match_table = mtk_cqdma_match,
	},
};
module_platform_driver(mtk_cqdma_driver);

MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver");
MODULE_AUTHOR("Shun-Chih Yu <shun-chih.yu@mediatek.com>");
MODULE_LICENSE("GPL v2");