// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for MediaTek High-Speed DMA Controller
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "../virt-dma.h"

#define MTK_HSDMA_USEC_POLL		20
#define MTK_HSDMA_TIMEOUT_POLL		200000
#define MTK_HSDMA_DMA_BUSWIDTHS		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
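
/* Default number of virtual channels, used when "dma-requests" is absent */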
#define MTK_HSDMA_NR_VCHANS		3
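
/* Only one physical channel (one TX/RX ring pair) is supported */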
#define MTK_HSDMA_NR_MAX_PCHANS		1
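
/*
 * Macros for the physical ring: MTK_DMA_SIZE is the number of descriptors
 * per ring and must be a power of two for the index wrap-around below;
 * MTK_HSDMA_MAX_LEN is the largest payload one descriptor can carry, and
 * MTK_HSDMA_DESC_PLEN()/MTK_HSDMA_DESC_PLEN_GET() pack and extract that
 * length in the descriptor control word.
 */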
#define MTK_DMA_SIZE			64
#define MTK_HSDMA_NEXT_DESP_IDX(x, y)	(((x) + 1) & ((y) - 1))
#define MTK_HSDMA_LAST_DESP_IDX(x, y)	(((x) - 1) & ((y) - 1))
#define MTK_HSDMA_MAX_LEN		0x3f80
#define MTK_HSDMA_ALIGN_SIZE		4
#define MTK_HSDMA_PLEN_MASK		0x3fff
#define MTK_HSDMA_DESC_PLEN(x)		(((x) & MTK_HSDMA_PLEN_MASK) << 16)
#define MTK_HSDMA_DESC_PLEN_GET(x)	(((x) >> 16) & MTK_HSDMA_PLEN_MASK)
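
/* Registers for the TX and RX descriptor rings */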
#define MTK_HSDMA_TX_BASE		0x0
#define MTK_HSDMA_TX_CNT		0x4
#define MTK_HSDMA_TX_CPU		0x8
#define MTK_HSDMA_TX_DMA		0xc
#define MTK_HSDMA_RX_BASE		0x100
#define MTK_HSDMA_RX_CNT		0x104
#define MTK_HSDMA_RX_CPU		0x108
#define MTK_HSDMA_RX_DMA		0x10c
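
/* Register and bits for global configuration */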
#define MTK_HSDMA_GLO			0x204
#define MTK_HSDMA_GLO_MULTI_DMA		BIT(10)
#define MTK_HSDMA_TX_WB_DDONE		BIT(6)
#define MTK_HSDMA_BURST_64BYTES		(0x2 << 4)
#define MTK_HSDMA_GLO_RX_BUSY		BIT(3)
#define MTK_HSDMA_GLO_RX_DMA		BIT(2)
#define MTK_HSDMA_GLO_TX_BUSY		BIT(1)
#define MTK_HSDMA_GLO_TX_DMA		BIT(0)
#define MTK_HSDMA_GLO_DMA		(MTK_HSDMA_GLO_TX_DMA |	\
					 MTK_HSDMA_GLO_RX_DMA)
#define MTK_HSDMA_GLO_BUSY		(MTK_HSDMA_GLO_RX_BUSY | \
					 MTK_HSDMA_GLO_TX_BUSY)
#define MTK_HSDMA_GLO_DEFAULT		(MTK_HSDMA_GLO_TX_DMA |	\
					 MTK_HSDMA_GLO_RX_DMA |	\
					 MTK_HSDMA_TX_WB_DDONE | \
					 MTK_HSDMA_BURST_64BYTES | \
					 MTK_HSDMA_GLO_MULTI_DMA)

/* Register and bits for hardware reset */
#define MTK_HSDMA_RESET			0x208
#define MTK_HSDMA_RST_TX		BIT(0)
#define MTK_HSDMA_RST_RX		BIT(16)

/* Registers and bits for interrupt control */
#define MTK_HSDMA_DLYINT		0x20c
#define MTK_HSDMA_RXDLY_INT_EN		BIT(15)

/* Interrupt fires once the pending packet count reaches this threshold */
#define MTK_HSDMA_RXMAX_PINT(x)		(((x) & 0x7f) << 8)

/* Interrupt fires once packets have been pending longer than this threshold */
#define MTK_HSDMA_RXMAX_PTIME(x)	((x) & 0x7f)
#define MTK_HSDMA_DLYINT_DEFAULT	(MTK_HSDMA_RXDLY_INT_EN | \
					 MTK_HSDMA_RXMAX_PINT(20) | \
					 MTK_HSDMA_RXMAX_PTIME(20))
#define MTK_HSDMA_INT_STATUS		0x220
#define MTK_HSDMA_INT_ENABLE		0x228
#define MTK_HSDMA_INT_RXDONE		BIT(16)

enum mtk_hsdma_vdesc_flag {
	MTK_HSDMA_VDESC_FINISHED = 0x01,
};

#define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED)
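
/**
 * struct mtk_hsdma_pdesc - descriptor the hardware works on (PD); kept in
 *			    little endian with 4-byte alignment
 * @desc1: transfer address; the source for a TX descriptor, the destination
 *	   for the paired RX descriptor
 * @desc2: control word carrying the payload length plus the SoC-specific
 *	   LS0 and DDONE bits
 * @desc3: not used by this driver
 * @desc4: not used by this driver
 */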
struct mtk_hsdma_pdesc {
	__le32 desc1;
	__le32 desc2;
	__le32 desc3;
	__le32 desc4;
} __packed __aligned(4);
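
/**
 * struct mtk_hsdma_vdesc - virtual descriptor (VD) tracked by virt-dma
 * @vd:	     virt_dma_desc instance embedded for the DMA engine core
 * @len:     number of bytes not yet queued to the hardware
 * @residue: number of bytes not yet completed by the hardware
 * @dest:    destination address of the next chunk to queue
 * @src:     source address of the next chunk to queue
 */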
struct mtk_hsdma_vdesc {
	struct virt_dma_desc vd;
	size_t len;
	size_t residue;
	dma_addr_t dest;
	dma_addr_t src;
};
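
/**
 * struct mtk_hsdma_cb - driver bookkeeping attached to each ring slot
 * @vd:   the virtual descriptor the slot currently serves
 * @flag: set to MTK_HSDMA_VDESC_FINISHED on the last slot of a VD so that
 *	  completion is reported when this slot is reclaimed
 */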
struct mtk_hsdma_cb {
	struct virt_dma_desc *vd;
	enum mtk_hsdma_vdesc_flag flag;
};
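
/**
 * struct mtk_hsdma_ring - paired TX/RX descriptor rings of a physical channel
 * @txd:      virtual address of the TX descriptor ring
 * @rxd:      virtual address of the RX descriptor ring
 * @cb:       per-slot bookkeeping for both rings
 * @tphys:    DMA address of the TX descriptor ring
 * @rphys:    DMA address of the RX descriptor ring
 * @cur_tptr: next TX slot the driver fills, mirrored to MTK_HSDMA_TX_CPU
 * @cur_rptr: last RX slot the driver reclaimed, mirrored to MTK_HSDMA_RX_CPU
 */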
struct mtk_hsdma_ring {
	struct mtk_hsdma_pdesc *txd;
	struct mtk_hsdma_pdesc *rxd;
	struct mtk_hsdma_cb *cb;
	dma_addr_t tphys;
	dma_addr_t rphys;
	u16 cur_tptr;
	u16 cur_rptr;
};
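
/**
 * struct mtk_hsdma_pchan - the physical channel backed by the hardware rings
 * @ring:    the TX/RX descriptor rings of this channel
 * @sz_ring: size in bytes of the coherent memory holding both rings
 * @nr_free: number of ring slots currently free for new descriptors
 */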
struct mtk_hsdma_pchan {
	struct mtk_hsdma_ring ring;
	size_t sz_ring;
	atomic_t nr_free;
};
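
/**
 * struct mtk_hsdma_vchan - the virtual channel handed out to clients
 * @vc:			 virt_dma_chan instance embedded for the core
 * @issue_completion:	 completed once every descriptor handed to the
 *			 hardware has finished
 * @issue_synchronize:	 true while a terminate_all caller is waiting on
 *			 @issue_completion
 * @desc_hw_processing: descriptors currently owned by the hardware
 */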
struct mtk_hsdma_vchan {
	struct virt_dma_chan vc;
	struct completion issue_completion;
	bool issue_synchronize;
	struct list_head desc_hw_processing;
};
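
/**
 * struct mtk_hsdma_soc - per-SoC layout of the descriptor control bits
 * @ddone: bit the hardware sets in desc2 once a descriptor is completed
 * @ls0:   last-segment bit the driver sets in desc2 of a TX descriptor
 */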
struct mtk_hsdma_soc {
	__le32 ddone;
	__le32 ls0;
};
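
/**
 * struct mtk_hsdma_device - the driver instance
 * @ddev:	  dma_device registered with the DMA engine core
 * @base:	  mapped register base of the controller
 * @clk:	  the "hsdma" functional clock
 * @irq:	  interrupt line used for RX-done notification
 * @dma_requests: number of virtual channels, taken from "dma-requests"
 * @vc:		  array of virtual channels
 * @pc:		  the single physical channel
 * @pc_refcnt:	  number of virtual channels currently using @pc
 * @lock:	  protects the physical ring against concurrent issuers
 * @soc:	  per-SoC descriptor bit layout
 */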
struct mtk_hsdma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	u32 irq;

	u32 dma_requests;
	struct mtk_hsdma_vchan *vc;
	struct mtk_hsdma_pchan *pc;
	refcount_t pc_refcnt;

	spinlock_t lock;

	const struct mtk_hsdma_soc *soc;
};

static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan)
{
	return container_of(chan->device, struct mtk_hsdma_device, ddev);
}

static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct mtk_hsdma_vchan, vc.chan);
}

static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct mtk_hsdma_vdesc, vd);
}

static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma)
{
	return hsdma->ddev.dev;
}

static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg)
{
	return readl(hsdma->base + reg);
}

static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	writel(val, hsdma->base + reg);
}

static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg,
			u32 mask, u32 set)
{
	u32 val;

	val = mtk_dma_read(hsdma, reg);
	val &= ~mask;
	val |= set;
	mtk_dma_write(hsdma, reg, val);
}

static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	mtk_dma_rmw(hsdma, reg, 0, val);
}

static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
{
	mtk_dma_rmw(hsdma, reg, val, 0);
}

static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_hsdma_vdesc, vd));
}

static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma)
{
	u32 status = 0;

	return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status,
				  !(status & MTK_HSDMA_GLO_BUSY),
				  MTK_HSDMA_USEC_POLL,
				  MTK_HSDMA_TIMEOUT_POLL);
}

static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
				 struct mtk_hsdma_pchan *pc)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
	int err;

	memset(pc, 0, sizeof(*pc));

	/*
	 * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for the TX
	 * ring and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for the RX ring.
	 */
	pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
	ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
				       &ring->tphys, GFP_NOWAIT);
	if (!ring->txd)
		return -ENOMEM;

	ring->rxd = &ring->txd[MTK_DMA_SIZE];
	ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
	ring->cur_tptr = 0;
	ring->cur_rptr = MTK_DMA_SIZE - 1;

	ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
	if (!ring->cb) {
		err = -ENOMEM;
		goto err_free_dma;
	}

	atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1);

	/* Disable HSDMA and wait for the hardware to go idle */
	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
	err = mtk_hsdma_busy_wait(hsdma);
	if (err)
		goto err_free_cb;

	/* Reset the TX and RX engines */
	mtk_dma_set(hsdma, MTK_HSDMA_RESET,
		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
	mtk_dma_clr(hsdma, MTK_HSDMA_RESET,
		    MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);

	/* Set up the initial ring base, size and pointers */
	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0);

	/* Enable HSDMA */
	mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);

	/* Set up the delayed interrupt */
	mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT);

	/* Enable the RX-done interrupt */
	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);

	return 0;

err_free_cb:
	kfree(ring->cb);

err_free_dma:
	dma_free_coherent(hsdma2dev(hsdma),
			  pc->sz_ring, ring->txd, ring->tphys);
	return err;
}

static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma,
				 struct mtk_hsdma_pchan *pc)
{
	struct mtk_hsdma_ring *ring = &pc->ring;

	/* Disable HSDMA and wait for the hardware to go idle */
	mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
	mtk_hsdma_busy_wait(hsdma);

	/* Return the ring registers to their reset values */
	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1);

	kfree(ring->cb);

	dma_free_coherent(hsdma2dev(hsdma),
			  pc->sz_ring, ring->txd, ring->tphys);
}
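
/*
 * Queue as much of a virtual descriptor as the ring allows: the transfer is
 * split into chunks of at most MTK_HSDMA_MAX_LEN bytes, each chunk taking one
 * TX descriptor (source) paired with the RX descriptor of the same index
 * (destination). Writing the updated index to MTK_HSDMA_TX_CPU hands the new
 * descriptors over to the hardware. Returns -ENOSPC when no slot is free.
 */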
static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
					 struct mtk_hsdma_pchan *pc,
					 struct mtk_hsdma_vdesc *hvd)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
	struct mtk_hsdma_pdesc *txd, *rxd;
	u16 reserved, prev, tlen, num_sgs;
	unsigned long flags;

	/* Protect the ring from concurrent issuers */
	spin_lock_irqsave(&hsdma->lock, flags);

	/*
	 * Reserve as many ring slots as the transfer needs, bounded by the
	 * number of slots currently free.
	 */
	num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
	reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));

	if (!reserved) {
		spin_unlock_irqrestore(&hsdma->lock, flags);
		return -ENOSPC;
	}

	atomic_sub(reserved, &pc->nr_free);

	while (reserved--) {
		/* Limit the chunk size to what one descriptor can carry */
		tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
		       MTK_HSDMA_MAX_LEN : hvd->len;

		/*
		 * Fill the TX descriptor with the source address and the RX
		 * descriptor at the same index with the destination address;
		 * the hardware copies from one to the other.
		 */
		txd = &ring->txd[ring->cur_tptr];
		WRITE_ONCE(txd->desc1, hvd->src);
		WRITE_ONCE(txd->desc2,
			   hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));

		rxd = &ring->rxd[ring->cur_tptr];
		WRITE_ONCE(rxd->desc1, hvd->dest);
		WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));

		/* Remember which VD this slot belongs to */
		ring->cb[ring->cur_tptr].vd = &hvd->vd;

		/* Move the TX ring pointer forward */
		ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
							 MTK_DMA_SIZE);

		/* Account for the part of the transfer queued so far */
		hvd->src += tlen;
		hvd->dest += tlen;
		hvd->len -= tlen;
	}

	/*
	 * Once the whole VD is queued, mark its last slot so completion can
	 * be reported when that slot is reclaimed.
	 */
	if (!hvd->len) {
		prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
		ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
	}

	/* Ensure all descriptor updates are visible before the doorbell */
	wmb();

	/*
	 * Updating MTK_HSDMA_TX_CPU hands the newly filled descriptors over
	 * to the hardware and kicks off the transfer.
	 */
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);

	spin_unlock_irqrestore(&hsdma->lock, flags);

	return 0;
}

static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
					  struct mtk_hsdma_vchan *hvc)
{
	struct virt_dma_desc *vd, *vd2;
	int err;

	lockdep_assert_held(&hvc->vc.lock);

	list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
		struct mtk_hsdma_vdesc *hvd;

		hvd = to_hsdma_vdesc(vd);

		/* Map the VD into the hardware ring */
		err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);

		/*
		 * Stop if the ring is full (-ENOSPC) or the VD could only be
		 * queued partially; the remainder is retried once slots are
		 * reclaimed in mtk_hsdma_free_rooms_in_ring().
		 */
		if (err == -ENOSPC || hvd->len > 0)
			break;

		/*
		 * The VD is fully queued: move it to desc_hw_processing so
		 * completion and terminate_all can track it while the
		 * hardware owns it.
		 */
		list_move_tail(&vd->node, &hvc->desc_hw_processing);
	}
}
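
/*
 * Reclaim completed slots from the RX ring: walk forward from cur_rptr while
 * the hardware has set the DDONE bit, update the owning VD's residue, report
 * completion on the slot flagged MTK_HSDMA_VDESC_FINISHED, and then try to
 * queue whatever is still pending on the virtual channels.
 */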
static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
{
	struct mtk_hsdma_vchan *hvc;
	struct mtk_hsdma_pdesc *rxd;
	struct mtk_hsdma_vdesc *hvd;
	struct mtk_hsdma_pchan *pc;
	struct mtk_hsdma_cb *cb;
	int i = MTK_DMA_SIZE;
	__le32 desc2;
	u32 status;
	u16 next;

	/* Read the interrupt status */
	status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS);
	if (unlikely(!(status & MTK_HSDMA_INT_RXDONE)))
		goto rx_done;

	pc = hsdma->pc;

	/*
	 * Walk at most MTK_DMA_SIZE slots, stopping at the first descriptor
	 * the hardware has not completed yet.
	 */
	while (i--) {
		next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
					       MTK_DMA_SIZE);
		rxd = &pc->ring.rxd[next];

		/*
		 * If the hardware has not set the DDONE bit, this descriptor
		 * is still being worked on and the walk stops here.
		 */
		desc2 = READ_ONCE(rxd->desc2);
		if (!(desc2 & hsdma->soc->ddone))
			break;

		cb = &pc->ring.cb[next];
		if (unlikely(!cb->vd)) {
			dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n");
			break;
		}

		/* Update the residue of the owning VD */
		hvd = to_hsdma_vdesc(cb->vd);
		hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2);

		/* Report completion once the last slot of the VD is done */
		if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
			hvc = to_hsdma_vchan(cb->vd->tx.chan);

			spin_lock(&hvc->vc.lock);

			/* Remove the VD from desc_hw_processing */
			list_del(&cb->vd->node);

			/* Move the VD onto desc_completed */
			vchan_cookie_complete(cb->vd);

			if (hvc->issue_synchronize &&
			    list_empty(&hvc->desc_hw_processing)) {
				complete(&hvc->issue_completion);
				hvc->issue_synchronize = false;
			}
			spin_unlock(&hvc->vc.lock);

			cb->flag = 0;
		}

		cb->vd = NULL;

		/*
		 * Clear the descriptor and advance cur_rptr so the slot can
		 * be reused for a later transfer.
		 */
		WRITE_ONCE(rxd->desc1, 0);
		WRITE_ONCE(rxd->desc2, 0);
		pc->ring.cur_rptr = next;

		/* Release the slot back to the free pool */
		atomic_inc(&pc->nr_free);
	}

	/* Ensure all ring updates are visible before moving the CPU pointer */
	wmb();

	/* Tell the hardware which slots have been reclaimed */
	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);

	/*
	 * Ack the pending RXDONE interrupt only once the whole ring has been
	 * drained; otherwise it stays asserted and the remaining completed
	 * descriptors are handled in a later pass.
	 */
	if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1)
		mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status);

	/* Push pending VDs on every VC now that slots have been freed */
	for (i = 0; i < hsdma->dma_requests; i++) {
		hvc = &hsdma->vc[i];
		spin_lock(&hvc->vc.lock);
		mtk_hsdma_issue_vchan_pending(hsdma, hvc);
		spin_unlock(&hvc->vc.lock);
	}

rx_done:
	/* Re-enable the interrupt that was masked in the interrupt handler */
	mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
}

static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
{
	struct mtk_hsdma_device *hsdma = devid;

	/*
	 * Mask the RXDONE interrupt while the ring is being reclaimed; it is
	 * re-enabled at the end of mtk_hsdma_free_rooms_in_ring().
	 */
	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);

	mtk_hsdma_free_rooms_in_ring(hsdma);

	return IRQ_HANDLED;
}

static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c,
							dma_cookie_t cookie)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &hvc->desc_hw_processing, node)
		if (vd->tx.cookie == cookie)
			return vd;

	list_for_each_entry(vd, &hvc->vc.desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}

static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	struct mtk_hsdma_vdesc *hvd;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&hvc->vc.lock, flags);
	vd = mtk_hsdma_find_active_desc(c, cookie);
	spin_unlock_irqrestore(&hvc->vc.lock, flags);

	if (vd) {
		hvd = to_hsdma_vdesc(vd);
		bytes = hvd->residue;
	}

	dma_set_residue(txstate, bytes);

	return ret;
}

static void mtk_hsdma_issue_pending(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	unsigned long flags;

	spin_lock_irqsave(&hvc->vc.lock, flags);

	if (vchan_issue_pending(&hvc->vc))
		mtk_hsdma_issue_vchan_pending(hsdma, hvc);

	spin_unlock_irqrestore(&hvc->vc.lock, flags);
}

static struct dma_async_tx_descriptor *
mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mtk_hsdma_vdesc *hvd;

	hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT);
	if (!hvd)
		return NULL;

	hvd->len = len;
	hvd->residue = len;
	hvd->src = src;
	hvd->dest = dest;

	return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags);
}

static int mtk_hsdma_free_inactive_desc(struct dma_chan *c)
{
	struct virt_dma_chan *vc = to_virt_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	list_splice_tail_init(&vc->desc_allocated, &head);
	list_splice_tail_init(&vc->desc_submitted, &head);
	list_splice_tail_init(&vc->desc_issued, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* The caller is not expected to submit new descriptors at this point */
	vchan_dma_desc_free_list(vc, &head);

	return 0;
}

static void mtk_hsdma_free_active_desc(struct dma_chan *c)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	bool sync_needed = false;

	/*
	 * Once issue_synchronize is set, the completion handler signals
	 * issue_completion when the last descriptor the hardware owns for
	 * this channel finishes, so the wait below is bounded.
	 */
	spin_lock(&hvc->vc.lock);
	if (!list_empty(&hvc->desc_hw_processing)) {
		hvc->issue_synchronize = true;
		sync_needed = true;
	}
	spin_unlock(&hvc->vc.lock);

	if (sync_needed)
		wait_for_completion(&hvc->issue_completion);

	/*
	 * At this point every descriptor the hardware owned should have been
	 * moved off desc_hw_processing and completed.
	 */
	WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
		  "Desc pending still in list desc_hw_processing\n");

	/* Synchronize with the completion tasklet so completed VDs are freed */
	vchan_synchronize(&hvc->vc);

	WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
		  "Desc pending still in list desc_completed\n");
}

static int mtk_hsdma_terminate_all(struct dma_chan *c)
{
	/*
	 * Free the descriptors not yet handed to the hardware: those still
	 * on the allocated, submitted and issued lists.
	 */
	mtk_hsdma_free_inactive_desc(c);

	/*
	 * The hardware provides no way to abort descriptors it already owns,
	 * so wait until they are all processed and then free them through
	 * mtk_hsdma_free_active_desc().
	 */
	mtk_hsdma_free_active_desc(c);

	return 0;
}

static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
	int err;

	/*
	 * The physical channel is shared by all virtual channels, so it is
	 * set up only when the first client arrives.
	 */
	if (!refcount_read(&hsdma->pc_refcnt)) {
		err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc);
		if (err)
			return err;
		/*
		 * refcount_inc would warn on an increment from zero, so the
		 * first user is recorded with refcount_set instead.
		 */
		refcount_set(&hsdma->pc_refcnt, 1);
	} else {
		refcount_inc(&hsdma->pc_refcnt);
	}

	return 0;
}

static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);

	/* Free all descriptors on every list of this VC */
	mtk_hsdma_terminate_all(c);

	/* The shared PC is released only once its last user is gone */
	if (!refcount_dec_and_test(&hsdma->pc_refcnt))
		return;

	mtk_hsdma_free_pchan(hsdma, hsdma->pc);
}

static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma)
{
	int err;

	pm_runtime_enable(hsdma2dev(hsdma));
	pm_runtime_get_sync(hsdma2dev(hsdma));

	err = clk_prepare_enable(hsdma->clk);
	if (err)
		return err;

	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
	mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT);

	return 0;
}

static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma)
{
	mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0);

	clk_disable_unprepare(hsdma->clk);

	pm_runtime_put_sync(hsdma2dev(hsdma));
	pm_runtime_disable(hsdma2dev(hsdma));

	return 0;
}

static const struct mtk_hsdma_soc mt7623_soc = {
	.ddone = BIT(31),
	.ls0 = BIT(30),
};

static const struct mtk_hsdma_soc mt7622_soc = {
	.ddone = BIT(15),
	.ls0 = BIT(14),
};

static const struct of_device_id mtk_hsdma_match[] = {
	{ .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc},
	{ .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc},
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_hsdma_match);

static int mtk_hsdma_probe(struct platform_device *pdev)
{
	struct mtk_hsdma_device *hsdma;
	struct mtk_hsdma_vchan *vc;
	struct dma_device *dd;
	struct resource *res;
	int i, err;

	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
	if (!hsdma)
		return -ENOMEM;

	dd = &hsdma->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hsdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hsdma->base))
		return PTR_ERR(hsdma->base);

	hsdma->soc = of_device_get_match_data(&pdev->dev);
	if (!hsdma->soc) {
		dev_err(&pdev->dev, "No device match found\n");
		return -ENODEV;
	}

	hsdma->clk = devm_clk_get(&pdev->dev, "hsdma");
	if (IS_ERR(hsdma->clk)) {
		dev_err(&pdev->dev, "No clock for %s\n",
			dev_name(&pdev->dev));
		return PTR_ERR(hsdma->clk);
	}

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;
	hsdma->irq = err;

	refcount_set(&hsdma->pc_refcnt, 0);
	spin_lock_init(&hsdma->lock);

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
	dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
	dd->device_tx_status = mtk_hsdma_tx_status;
	dd->device_issue_pending = mtk_hsdma_issue_pending;
	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
	dd->device_terminate_all = mtk_hsdma_terminate_all;
	dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
	dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	hsdma->dma_requests = MTK_HSDMA_NR_VCHANS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &hsdma->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_HSDMA_NR_VCHANS);
	}

	hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS,
				 sizeof(*hsdma->pc), GFP_KERNEL);
	if (!hsdma->pc)
		return -ENOMEM;

	hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests,
				 sizeof(*hsdma->vc), GFP_KERNEL);
	if (!hsdma->vc)
		return -ENOMEM;

	for (i = 0; i < hsdma->dma_requests; i++) {
		vc = &hsdma->vc[i];
		vc->vc.desc_free = mtk_hsdma_vdesc_free;
		vchan_init(&vc->vc, dd);
		init_completion(&vc->issue_completion);
		INIT_LIST_HEAD(&vc->desc_hw_processing);
	}

	err = dma_async_device_register(dd);
	if (err)
		return err;

	err = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, hsdma);
	if (err) {
		dev_err(&pdev->dev,
			"MediaTek HSDMA OF registration failed %d\n", err);
		goto err_unregister;
	}

	mtk_hsdma_hw_init(hsdma);

	err = devm_request_irq(&pdev->dev, hsdma->irq,
			       mtk_hsdma_irq, 0,
			       dev_name(&pdev->dev), hsdma);
	if (err) {
		dev_err(&pdev->dev,
			"request_irq failed with err %d\n", err);
		goto err_free;
	}

	platform_set_drvdata(pdev, hsdma);

	dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n");

	return 0;

err_free:
	mtk_hsdma_hw_deinit(hsdma);
	of_dma_controller_free(pdev->dev.of_node);
err_unregister:
	dma_async_device_unregister(dd);

	return err;
}

static int mtk_hsdma_remove(struct platform_device *pdev)
{
	struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
	struct mtk_hsdma_vchan *vc;
	int i;

	/* Kill the task of every VC */
	for (i = 0; i < hsdma->dma_requests; i++) {
		vc = &hsdma->vc[i];

		list_del(&vc->vc.chan.device_node);
		tasklet_kill(&vc->vc.task);
	}

	/* Disable DMA interrupts */
	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);

	/* Wait for any running interrupt handler to finish */
	synchronize_irq(hsdma->irq);

	/* Disable the hardware */
	mtk_hsdma_hw_deinit(hsdma);

	dma_async_device_unregister(&hsdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct platform_driver mtk_hsdma_driver = {
	.probe = mtk_hsdma_probe,
	.remove = mtk_hsdma_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = mtk_hsdma_match,
	},
};
module_platform_driver(mtk_hsdma_driver);

MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("GPL v2");