0001 // SPDX-License-Identifier: GPL-2.0
0002 // Copyright (c) 2017-2018 MediaTek Inc.
0003 
0004 /*
0005  * Driver for MediaTek High-Speed DMA Controller
0006  *
0007  * Author: Sean Wang <sean.wang@mediatek.com>
0008  *
0009  */
0010 
0011 #include <linux/bitops.h>
0012 #include <linux/clk.h>
0013 #include <linux/dmaengine.h>
0014 #include <linux/dma-mapping.h>
0015 #include <linux/err.h>
0016 #include <linux/iopoll.h>
0017 #include <linux/list.h>
0018 #include <linux/module.h>
0019 #include <linux/of.h>
0020 #include <linux/of_device.h>
0021 #include <linux/of_dma.h>
0022 #include <linux/platform_device.h>
0023 #include <linux/pm_runtime.h>
0024 #include <linux/refcount.h>
0025 #include <linux/slab.h>
0026 
0027 #include "../virt-dma.h"
0028 
0029 #define MTK_HSDMA_USEC_POLL     20
0030 #define MTK_HSDMA_TIMEOUT_POLL      200000
0031 #define MTK_HSDMA_DMA_BUSWIDTHS     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
0032 
0033 /* The default number of virtual channels */
0034 #define MTK_HSDMA_NR_VCHANS     3
0035 
0036 /* Only one physical channel supported */
0037 #define MTK_HSDMA_NR_MAX_PCHANS     1
0038 
0039 /* Macros for physical descriptor (PD) manipulation */
0040 /* The number of PDs, which must be a power of 2 */
0041 #define MTK_DMA_SIZE            64
0042 #define MTK_HSDMA_NEXT_DESP_IDX(x, y)   (((x) + 1) & ((y) - 1))
0043 #define MTK_HSDMA_LAST_DESP_IDX(x, y)   (((x) - 1) & ((y) - 1))
0044 #define MTK_HSDMA_MAX_LEN       0x3f80
0045 #define MTK_HSDMA_ALIGN_SIZE        4
0046 #define MTK_HSDMA_PLEN_MASK     0x3fff
0047 #define MTK_HSDMA_DESC_PLEN(x)      (((x) & MTK_HSDMA_PLEN_MASK) << 16)
0048 #define MTK_HSDMA_DESC_PLEN_GET(x)  (((x) >> 16) & MTK_HSDMA_PLEN_MASK)
0049 
0050 /* Registers for underlying ring manipulation */
0051 #define MTK_HSDMA_TX_BASE       0x0
0052 #define MTK_HSDMA_TX_CNT        0x4
0053 #define MTK_HSDMA_TX_CPU        0x8
0054 #define MTK_HSDMA_TX_DMA        0xc
0055 #define MTK_HSDMA_RX_BASE       0x100
0056 #define MTK_HSDMA_RX_CNT        0x104
0057 #define MTK_HSDMA_RX_CPU        0x108
0058 #define MTK_HSDMA_RX_DMA        0x10c
0059 
0060 /* Registers for global setup */
0061 #define MTK_HSDMA_GLO           0x204
0062 #define MTK_HSDMA_GLO_MULTI_DMA     BIT(10)
0063 #define MTK_HSDMA_TX_WB_DDONE       BIT(6)
0064 #define MTK_HSDMA_BURST_64BYTES     (0x2 << 4)
0065 #define MTK_HSDMA_GLO_RX_BUSY       BIT(3)
0066 #define MTK_HSDMA_GLO_RX_DMA        BIT(2)
0067 #define MTK_HSDMA_GLO_TX_BUSY       BIT(1)
0068 #define MTK_HSDMA_GLO_TX_DMA        BIT(0)
0069 #define MTK_HSDMA_GLO_DMA       (MTK_HSDMA_GLO_TX_DMA | \
0070                      MTK_HSDMA_GLO_RX_DMA)
0071 #define MTK_HSDMA_GLO_BUSY      (MTK_HSDMA_GLO_RX_BUSY | \
0072                      MTK_HSDMA_GLO_TX_BUSY)
0073 #define MTK_HSDMA_GLO_DEFAULT       (MTK_HSDMA_GLO_TX_DMA | \
0074                      MTK_HSDMA_GLO_RX_DMA | \
0075                      MTK_HSDMA_TX_WB_DDONE | \
0076                      MTK_HSDMA_BURST_64BYTES | \
0077                      MTK_HSDMA_GLO_MULTI_DMA)
0078 
0079 /* Registers for reset */
0080 #define MTK_HSDMA_RESET         0x208
0081 #define MTK_HSDMA_RST_TX        BIT(0)
0082 #define MTK_HSDMA_RST_RX        BIT(16)
0083 
0084 /* Registers for interrupt control */
0085 #define MTK_HSDMA_DLYINT        0x20c
0086 #define MTK_HSDMA_RXDLY_INT_EN      BIT(15)
0087 
0088 /* Interrupt fires when the number of pending PDs exceeds the specified threshold */
0089 #define MTK_HSDMA_RXMAX_PINT(x)     (((x) & 0x7f) << 8)
0090 
0091 /* Interrupt fires when the pending time exceeds the specified value, in units of 20 us */
0092 #define MTK_HSDMA_RXMAX_PTIME(x)    ((x) & 0x7f)
0093 #define MTK_HSDMA_DLYINT_DEFAULT    (MTK_HSDMA_RXDLY_INT_EN | \
0094                      MTK_HSDMA_RXMAX_PINT(20) | \
0095                      MTK_HSDMA_RXMAX_PTIME(20))
0096 #define MTK_HSDMA_INT_STATUS        0x220
0097 #define MTK_HSDMA_INT_ENABLE        0x228
0098 #define MTK_HSDMA_INT_RXDONE        BIT(16)
0099 
0100 enum mtk_hsdma_vdesc_flag {
0101     MTK_HSDMA_VDESC_FINISHED    = 0x01,
0102 };
0103 
0104 #define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED)
0105 
0106 /**
0107  * struct mtk_hsdma_pdesc - This is the struct holding info describing a physical
0108  *              descriptor (PD); it must be kept 4-byte aligned and
0109  *              laid out in little-endian order.
0110  * @desc1:          | The control fields telling the hardware how to
0111  * @desc2:          | handle the descriptor, such as the source and
0112  * @desc3:          | destination addresses and data length. The maximum
0113  * @desc4:          | data length each pdesc can handle is 0x3f80 bytes
0114  */
0115 struct mtk_hsdma_pdesc {
0116     __le32 desc1;
0117     __le32 desc2;
0118     __le32 desc3;
0119     __le32 desc4;
0120 } __packed __aligned(4);
0121 
0122 /**
0123  * struct mtk_hsdma_vdesc - This is the struct holding info describing a virtual
0124  *              descriptor (VD)
0125  * @vd:             An instance of struct virt_dma_desc
0126  * @len:            The total data size the device wants to move
0127  * @residue:            The remaining data size the device still has to move
0128  * @dest:           The destination address the device moves data to
0129  * @src:            The source address the device moves data from
0130  */
0131 struct mtk_hsdma_vdesc {
0132     struct virt_dma_desc vd;
0133     size_t len;
0134     size_t residue;
0135     dma_addr_t dest;
0136     dma_addr_t src;
0137 };
0138 
0139 /**
0140  * struct mtk_hsdma_cb - This is the struct holding the extra info the RX
0141  *           ring needs in order to know which VD a given PD is
0142  *           mapped to.
0143  * @vd:          Pointer to the relevant VD.
0144  * @flag:        Flag indicating what action should be taken when VD
0145  *           is completed.
0146  */
0147 struct mtk_hsdma_cb {
0148     struct virt_dma_desc *vd;
0149     enum mtk_hsdma_vdesc_flag flag;
0150 };
0151 
0152 /**
0153  * struct mtk_hsdma_ring - This struct holds info describing underlying ring
0154  *             space
0155  * @txd:           The descriptor TX ring which describes DMA source
0156  *             information
0157  * @rxd:           The descriptor RX ring which describes DMA
0158  *             destination information
0159  * @cb:            The extra information associated with entries in the RX ring
0160  * @tphys:         The physical addr of TX ring
0161  * @rphys:         The physical addr of RX ring
0162  * @cur_tptr:          Pointer to the next free descriptor used by the host
0163  * @cur_rptr:          Pointer to the last descriptor completed by the device
0164  */
0165 struct mtk_hsdma_ring {
0166     struct mtk_hsdma_pdesc *txd;
0167     struct mtk_hsdma_pdesc *rxd;
0168     struct mtk_hsdma_cb *cb;
0169     dma_addr_t tphys;
0170     dma_addr_t rphys;
0171     u16 cur_tptr;
0172     u16 cur_rptr;
0173 };
0174 
0175 /**
0176  * struct mtk_hsdma_pchan - This is the struct holding info describing physical
0177  *             channel (PC)
0178  * @ring:          An instance of the underlying ring
0179  * @sz_ring:           Total size allocated for the ring
0180  * @nr_free:           Total number of free rooms in the ring. It is
0181  *             accessed and updated frequently from both IRQ and
0182  *             user context to reflect whether the ring can
0183  *             accept requests from a VD.
0184  */
0185 struct mtk_hsdma_pchan {
0186     struct mtk_hsdma_ring ring;
0187     size_t sz_ring;
0188     atomic_t nr_free;
0189 };
0190 
0191 /**
0192  * struct mtk_hsdma_vchan - This is the struct holding info describing virtual
0193  *             channel (VC)
0194  * @vc:            An instance of struct virt_dma_chan
0195  * @issue_completion:      Completion used to wait for all issued descriptors to finish
0196  * @issue_synchronize:     Bool indicating that channel synchronization has started
0197  * @desc_hw_processing:    List of descriptors the hardware is processing,
0198  *             which is protected by vc.lock
0199  */
0200 struct mtk_hsdma_vchan {
0201     struct virt_dma_chan vc;
0202     struct completion issue_completion;
0203     bool issue_synchronize;
0204     struct list_head desc_hw_processing;
0205 };
0206 
0207 /**
0208  * struct mtk_hsdma_soc - This is the struct holding differences among SoCs
0209  * @ddone:        Bit mask for DDONE
0210  * @ls0:          Bit mask for LS0
0211  */
0212 struct mtk_hsdma_soc {
0213     __le32 ddone;
0214     __le32 ls0;
0215 };
0216 
0217 /**
0218  * struct mtk_hsdma_device - This is the struct holding info describing HSDMA
0219  *               device
0220  * @ddev:            An instance of struct dma_device
0221  * @base:            The mapped register I/O base
0222  * @clk:             The clock the device uses internally
0223  * @irq:             The IRQ the device is using
0224  * @dma_requests:        The number of VCs the device supports
0225  * @vc:              The pointer to all available VCs
0226  * @pc:              The pointer to the underlying PC
0227  * @pc_refcnt:           Tracks how many VCs are using the PC
0228  * @lock:            Lock protecting against multiple VCs accessing the PC
0229  * @soc:             The pointer to the area holding the differences among
0230  *               various platforms
0231  */
0232 struct mtk_hsdma_device {
0233     struct dma_device ddev;
0234     void __iomem *base;
0235     struct clk *clk;
0236     u32 irq;
0237 
0238     u32 dma_requests;
0239     struct mtk_hsdma_vchan *vc;
0240     struct mtk_hsdma_pchan *pc;
0241     refcount_t pc_refcnt;
0242 
0243     /* Lock used to protect against multiple VCs accessing the PC */
0244     spinlock_t lock;
0245 
0246     const struct mtk_hsdma_soc *soc;
0247 };
0248 
0249 static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan)
0250 {
0251     return container_of(chan->device, struct mtk_hsdma_device, ddev);
0252 }
0253 
0254 static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan)
0255 {
0256     return container_of(chan, struct mtk_hsdma_vchan, vc.chan);
0257 }
0258 
0259 static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd)
0260 {
0261     return container_of(vd, struct mtk_hsdma_vdesc, vd);
0262 }
0263 
0264 static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma)
0265 {
0266     return hsdma->ddev.dev;
0267 }
0268 
0269 static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg)
0270 {
0271     return readl(hsdma->base + reg);
0272 }
0273 
0274 static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
0275 {
0276     writel(val, hsdma->base + reg);
0277 }
0278 
0279 static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg,
0280             u32 mask, u32 set)
0281 {
0282     u32 val;
0283 
0284     val = mtk_dma_read(hsdma, reg);
0285     val &= ~mask;
0286     val |= set;
0287     mtk_dma_write(hsdma, reg, val);
0288 }
0289 
0290 static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
0291 {
0292     mtk_dma_rmw(hsdma, reg, 0, val);
0293 }
0294 
0295 static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
0296 {
0297     mtk_dma_rmw(hsdma, reg, val, 0);
0298 }
0299 
0300 static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd)
0301 {
0302     kfree(container_of(vd, struct mtk_hsdma_vdesc, vd));
0303 }
0304 
0305 static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma)
0306 {
0307     u32 status = 0;
0308 
0309     return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status,
0310                   !(status & MTK_HSDMA_GLO_BUSY),
0311                   MTK_HSDMA_USEC_POLL,
0312                   MTK_HSDMA_TIMEOUT_POLL);
0313 }
0314 
0315 static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
0316                  struct mtk_hsdma_pchan *pc)
0317 {
0318     struct mtk_hsdma_ring *ring = &pc->ring;
0319     int err;
0320 
0321     memset(pc, 0, sizeof(*pc));
0322 
0323     /*
0324      * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring
0325      * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
0326      */
0327     pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
0328     ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
0329                        &ring->tphys, GFP_NOWAIT);
0330     if (!ring->txd)
0331         return -ENOMEM;
0332 
0333     ring->rxd = &ring->txd[MTK_DMA_SIZE];
0334     ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
0335     ring->cur_tptr = 0;
0336     ring->cur_rptr = MTK_DMA_SIZE - 1;
0337 
0338     ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
0339     if (!ring->cb) {
0340         err = -ENOMEM;
0341         goto err_free_dma;
0342     }
0343 
0344     atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1);
0345 
0346     /* Disable HSDMA and wait for the completion */
0347     mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
0348     err = mtk_hsdma_busy_wait(hsdma);
0349     if (err)
0350         goto err_free_cb;
0351 
0352     /* Reset */
0353     mtk_dma_set(hsdma, MTK_HSDMA_RESET,
0354             MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
0355     mtk_dma_clr(hsdma, MTK_HSDMA_RESET,
0356             MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
0357 
0358     /* Setup HSDMA initial pointer in the ring */
0359     mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
0360     mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE);
0361     mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
0362     mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0);
0363     mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
0364     mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE);
0365     mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
0366     mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0);
0367 
0368     /* Enable HSDMA */
0369     mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
0370 
0371     /* Setup delayed interrupt */
0372     mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT);
0373 
0374     /* Enable interrupt */
0375     mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
0376 
0377     return 0;
0378 
0379 err_free_cb:
0380     kfree(ring->cb);
0381 
0382 err_free_dma:
0383     dma_free_coherent(hsdma2dev(hsdma),
0384               pc->sz_ring, ring->txd, ring->tphys);
0385     return err;
0386 }
0387 
0388 static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma,
0389                  struct mtk_hsdma_pchan *pc)
0390 {
0391     struct mtk_hsdma_ring *ring = &pc->ring;
0392 
0393     /* Disable HSDMA and then wait for the completion */
0394     mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
0395     mtk_hsdma_busy_wait(hsdma);
0396 
0397     /* Reset pointer in the ring */
0398     mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
0399     mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0);
0400     mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0);
0401     mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0);
0402     mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0);
0403     mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0);
0404     mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1);
0405 
0406     kfree(ring->cb);
0407 
0408     dma_free_coherent(hsdma2dev(hsdma),
0409               pc->sz_ring, ring->txd, ring->tphys);
0410 }
0411 
0412 static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
0413                      struct mtk_hsdma_pchan *pc,
0414                      struct mtk_hsdma_vdesc *hvd)
0415 {
0416     struct mtk_hsdma_ring *ring = &pc->ring;
0417     struct mtk_hsdma_pdesc *txd, *rxd;
0418     u16 reserved, prev, tlen, num_sgs;
0419     unsigned long flags;
0420 
0421     /* Protect against the PC being accessed by multiple VCs simultaneously */
0422     spin_lock_irqsave(&hsdma->lock, flags);
0423 
0424     /*
0425      * Reserve rooms; pc->nr_free tracks how many free rooms remain in
0426      * the ring and is updated from both user and IRQ context.
0427      */
0428     num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
0429     reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));
0430 
0431     if (!reserved) {
0432         spin_unlock_irqrestore(&hsdma->lock, flags);
0433         return -ENOSPC;
0434     }
0435 
0436     atomic_sub(reserved, &pc->nr_free);
0437 
0438     while (reserved--) {
0439         /* Limit the transfer size to what a single PD can handle */
0440         tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
0441                MTK_HSDMA_MAX_LEN : hvd->len;
0442 
0443         /*
0444          * Set up PDs using the remaining VD info, mapped onto the
0445          * reserved rooms. Since the RXD is memory shared between the
0446          * host and the device (allocated by dma_alloc_coherent), the
0447          * WRITE_ONCE helper ensures the data written actually reaches
0448          * RAM.
0449          */
0450         txd = &ring->txd[ring->cur_tptr];
0451         WRITE_ONCE(txd->desc1, hvd->src);
0452         WRITE_ONCE(txd->desc2,
0453                hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));
0454 
0455         rxd = &ring->rxd[ring->cur_tptr];
0456         WRITE_ONCE(rxd->desc1, hvd->dest);
0457         WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));
0458 
0459         /* Associate the PD with the VD it belongs to */
0460         ring->cb[ring->cur_tptr].vd = &hvd->vd;
0461 
0462         /* Advance the TX ring pointer */
0463         ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
0464                              MTK_DMA_SIZE);
0465 
0466         /* Update VD with remaining data */
0467         hvd->src  += tlen;
0468         hvd->dest += tlen;
0469         hvd->len  -= tlen;
0470     }
0471 
0472     /*
0473      * Tag the last PD of the VD with the flag; that PD is responsible
0474      * for completing the VD.
0475      */
0476     if (!hvd->len) {
0477         prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
0478         ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
0479     }
0480 
0481     /* Ensure all ring updates are visible before we go on */
0482     wmb();
0483 
0484     /*
0485      * Writing the TX ring pointer to the hardware lets the HSDMA start
0486      * processing the pending PDs.
0487      */
0488     mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
0489 
0490     spin_unlock_irqrestore(&hsdma->lock, flags);
0491 
0492     return 0;
0493 }
0494 
0495 static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
0496                       struct mtk_hsdma_vchan *hvc)
0497 {
0498     struct virt_dma_desc *vd, *vd2;
0499     int err;
0500 
0501     lockdep_assert_held(&hvc->vc.lock);
0502 
0503     list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
0504         struct mtk_hsdma_vdesc *hvd;
0505 
0506         hvd = to_hsdma_vdesc(vd);
0507 
0508         /* Map the VD onto the PC; all VCs share a single PC */
0509         err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);
0510 
0511         /*
0512          * Move the VD from desc_issued to desc_hw_processing when the
0513          * entire VD fits into the available PDs. Otherwise, the
0514          * incomplete VD stays in list desc_issued and processing
0515          * restarts as soon as possible once underlying ring space
0516          * gets freed.
0517          */
0518         if (err == -ENOSPC || hvd->len > 0)
0519             break;
0520 
0521         /*
0522          * The extra list desc_hw_processing is used because the
0523          * hardware can't provide enough information for us to know
0524          * which VDs are still being worked on in the underlying ring.
0525          * The additional list helps us implement terminate_all,
0526          * residue calculation and anything else that needs detailed
0527          * descriptor status from the hardware.
0528          */
0529         list_move_tail(&vd->node, &hvc->desc_hw_processing);
0530     }
0531 }
0532 
0533 static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
0534 {
0535     struct mtk_hsdma_vchan *hvc;
0536     struct mtk_hsdma_pdesc *rxd;
0537     struct mtk_hsdma_vdesc *hvd;
0538     struct mtk_hsdma_pchan *pc;
0539     struct mtk_hsdma_cb *cb;
0540     int i = MTK_DMA_SIZE;
0541     __le32 desc2;
0542     u32 status;
0543     u16 next;
0544 
0545     /* Read IRQ status */
0546     status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS);
0547     if (unlikely(!(status & MTK_HSDMA_INT_RXDONE)))
0548         goto rx_done;
0549 
0550     pc = hsdma->pc;
0551 
0552     /*
0553      * Use a fail-safe loop of at most MTK_DMA_SIZE iterations to
0554      * reclaim these finished descriptors: the ISR should never have
0555      * to handle more than MTK_DMA_SIZE PDs at one time, so we take
0556      * that as the loop limit instead of using a dangerous infinite
0557      * poll.
0558      */
0559     while (i--) {
0560         next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
0561                            MTK_DMA_SIZE);
0562         rxd = &pc->ring.rxd[next];
0563 
0564         /*
0565          * If the DDONE bit is not set, the data transfer for this PD
0566          * is still in progress.
0567          */
0568         desc2 = READ_ONCE(rxd->desc2);
0569         if (!(desc2 & hsdma->soc->ddone))
0570             break;
0571 
0572         cb = &pc->ring.cb[next];
0573         if (unlikely(!cb->vd)) {
0574             dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n");
0575             break;
0576         }
0577 
0578         /* Update the residue of the VD that this PD belongs to */
0579         hvd = to_hsdma_vdesc(cb->vd);
0580         hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2);
0581 
0582         /* Complete the VD only once its last PD has finished */
0583         if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
0584             hvc = to_hsdma_vchan(cb->vd->tx.chan);
0585 
0586             spin_lock(&hvc->vc.lock);
0587 
0588             /* Remove VD from list desc_hw_processing */
0589             list_del(&cb->vd->node);
0590 
0591             /* Add VD into list desc_completed */
0592             vchan_cookie_complete(cb->vd);
0593 
0594             if (hvc->issue_synchronize &&
0595                 list_empty(&hvc->desc_hw_processing)) {
0596                 complete(&hvc->issue_completion);
0597                 hvc->issue_synchronize = false;
0598             }
0599             spin_unlock(&hvc->vc.lock);
0600 
0601             cb->flag = 0;
0602         }
0603 
0604         cb->vd = NULL;
0605 
0606         /*
0607          * Recycle the RXD using the WRITE_ONCE helper to ensure the
0608          * data written actually reaches RAM.
0609          */
0610         WRITE_ONCE(rxd->desc1, 0);
0611         WRITE_ONCE(rxd->desc2, 0);
0612         pc->ring.cur_rptr = next;
0613 
0614         /* Release rooms */
0615         atomic_inc(&pc->nr_free);
0616     }
0617 
0618     /* Ensure all ring updates are visible before we go on */
0619     wmb();
0620 
0621     /* Update CPU pointer for those completed PDs */
0622     mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);
0623 
0624     /*
0625      * Acking the pending IRQ lets the hardware stop holding the IRQ
0626      * line in its trigger state once software has cleaned up all of
0627      * the finished physical descriptors.
0628      */
0629     if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1)
0630         mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status);
0631 
0632     /* Handle pending VDs in all VCs as soon as possible after freeing some rooms */
0633     for (i = 0; i < hsdma->dma_requests; i++) {
0634         hvc = &hsdma->vc[i];
0635         spin_lock(&hvc->vc.lock);
0636         mtk_hsdma_issue_vchan_pending(hsdma, hvc);
0637         spin_unlock(&hvc->vc.lock);
0638     }
0639 
0640 rx_done:
0641     /* All completed PDs are cleaned up, so enable interrupt again */
0642     mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
0643 }
0644 
0645 static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
0646 {
0647     struct mtk_hsdma_device *hsdma = devid;
0648 
0649     /*
0650      * Disable the interrupt until all completed PDs are cleaned up in
0651      * the mtk_hsdma_free_rooms_in_ring call.
0652      */
0653     mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
0654 
0655     mtk_hsdma_free_rooms_in_ring(hsdma);
0656 
0657     return IRQ_HANDLED;
0658 }
0659 
0660 static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c,
0661                             dma_cookie_t cookie)
0662 {
0663     struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
0664     struct virt_dma_desc *vd;
0665 
0666     list_for_each_entry(vd, &hvc->desc_hw_processing, node)
0667         if (vd->tx.cookie == cookie)
0668             return vd;
0669 
0670     list_for_each_entry(vd, &hvc->vc.desc_issued, node)
0671         if (vd->tx.cookie == cookie)
0672             return vd;
0673 
0674     return NULL;
0675 }
0676 
0677 static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
0678                        dma_cookie_t cookie,
0679                        struct dma_tx_state *txstate)
0680 {
0681     struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
0682     struct mtk_hsdma_vdesc *hvd;
0683     struct virt_dma_desc *vd;
0684     enum dma_status ret;
0685     unsigned long flags;
0686     size_t bytes = 0;
0687 
0688     ret = dma_cookie_status(c, cookie, txstate);
0689     if (ret == DMA_COMPLETE || !txstate)
0690         return ret;
0691 
0692     spin_lock_irqsave(&hvc->vc.lock, flags);
0693     vd = mtk_hsdma_find_active_desc(c, cookie);
0694     spin_unlock_irqrestore(&hvc->vc.lock, flags);
0695 
0696     if (vd) {
0697         hvd = to_hsdma_vdesc(vd);
0698         bytes = hvd->residue;
0699     }
0700 
0701     dma_set_residue(txstate, bytes);
0702 
0703     return ret;
0704 }
0705 
0706 static void mtk_hsdma_issue_pending(struct dma_chan *c)
0707 {
0708     struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
0709     struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
0710     unsigned long flags;
0711 
0712     spin_lock_irqsave(&hvc->vc.lock, flags);
0713 
0714     if (vchan_issue_pending(&hvc->vc))
0715         mtk_hsdma_issue_vchan_pending(hsdma, hvc);
0716 
0717     spin_unlock_irqrestore(&hvc->vc.lock, flags);
0718 }
0719 
0720 static struct dma_async_tx_descriptor *
0721 mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
0722               dma_addr_t src, size_t len, unsigned long flags)
0723 {
0724     struct mtk_hsdma_vdesc *hvd;
0725 
0726     hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT);
0727     if (!hvd)
0728         return NULL;
0729 
0730     hvd->len = len;
0731     hvd->residue = len;
0732     hvd->src = src;
0733     hvd->dest = dest;
0734 
0735     return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags);
0736 }
0737 
0738 static int mtk_hsdma_free_inactive_desc(struct dma_chan *c)
0739 {
0740     struct virt_dma_chan *vc = to_virt_chan(c);
0741     unsigned long flags;
0742     LIST_HEAD(head);
0743 
0744     spin_lock_irqsave(&vc->lock, flags);
0745     list_splice_tail_init(&vc->desc_allocated, &head);
0746     list_splice_tail_init(&vc->desc_submitted, &head);
0747     list_splice_tail_init(&vc->desc_issued, &head);
0748     spin_unlock_irqrestore(&vc->lock, flags);
0749 
0750     /* At this point, we don't expect users to put descriptors into the VC again */
0751     vchan_dma_desc_free_list(vc, &head);
0752 
0753     return 0;
0754 }
0755 
0756 static void mtk_hsdma_free_active_desc(struct dma_chan *c)
0757 {
0758     struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
0759     bool sync_needed = false;
0760 
0761     /*
0762      * Once issue_synchronize is set, the completion must be signalled
0763      * as soon as the hardware has consumed all of the channel's
0764      * descriptors in the ring.
0765      */
0766     spin_lock(&hvc->vc.lock);
0767     if (!list_empty(&hvc->desc_hw_processing)) {
0768         hvc->issue_synchronize = true;
0769         sync_needed = true;
0770     }
0771     spin_unlock(&hvc->vc.lock);
0772 
0773     if (sync_needed)
0774         wait_for_completion(&hvc->issue_completion);
0775     /*
0776      * At this point, we expect that all of the channel's remaining
0777      * descriptors in the ring have been processed.
0778      */
0779     WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
0780           "Desc pending still in list desc_hw_processing\n");
0781 
0782     /* Free all descriptors in list desc_completed */
0783     vchan_synchronize(&hvc->vc);
0784 
0785     WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
0786           "Desc pending still in list desc_completed\n");
0787 }
0788 
0789 static int mtk_hsdma_terminate_all(struct dma_chan *c)
0790 {
0791     /*
0792      * Free pending descriptors not processed yet by hardware that have
0793      * previously been submitted to the channel.
0794      */
0795     mtk_hsdma_free_inactive_desc(c);
0796 
0797     /*
0798      * However, the DMA engine doesn't provide any way to stop
0799      * descriptors that are currently being processed by the hardware.
0800      * The only option is to wait until they have all been processed,
0801      * via the mtk_hsdma_free_active_desc call.
0802      */
0803     mtk_hsdma_free_active_desc(c);
0804 
0805     return 0;
0806 }
0807 
0808 static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c)
0809 {
0810     struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
0811     int err;
0812 
0813     /*
0814      * Since the HSDMA has only one PC, the resources for the PC are
0815      * allocated when the first VC is created, and the other VCs run on
0816      * the same PC.
0817      */
0818     if (!refcount_read(&hsdma->pc_refcnt)) {
0819         err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc);
0820         if (err)
0821             return err;
0822         /*
0823          * refcount_inc would complain about incrementing from 0 (a
0824          * potential use-after-free), so explicitly set it to 1 initially.
0825          */
0826         refcount_set(&hsdma->pc_refcnt, 1);
0827     } else {
0828         refcount_inc(&hsdma->pc_refcnt);
0829     }
0830 
0831     return 0;
0832 }
0833 
0834 static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
0835 {
0836     struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
0837 
0838     /* Free all descriptors in all lists on the VC */
0839     mtk_hsdma_terminate_all(c);
0840 
0841     /* The resource for PC is not freed until all the VCs are destroyed */
0842     if (!refcount_dec_and_test(&hsdma->pc_refcnt))
0843         return;
0844 
0845     mtk_hsdma_free_pchan(hsdma, hsdma->pc);
0846 }
0847 
0848 static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma)
0849 {
0850     int err;
0851 
0852     pm_runtime_enable(hsdma2dev(hsdma));
0853     pm_runtime_get_sync(hsdma2dev(hsdma));
0854 
0855     err = clk_prepare_enable(hsdma->clk);
0856     if (err)
0857         return err;
0858 
0859     mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
0860     mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT);
0861 
0862     return 0;
0863 }
0864 
0865 static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma)
0866 {
0867     mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0);
0868 
0869     clk_disable_unprepare(hsdma->clk);
0870 
0871     pm_runtime_put_sync(hsdma2dev(hsdma));
0872     pm_runtime_disable(hsdma2dev(hsdma));
0873 
0874     return 0;
0875 }
0876 
0877 static const struct mtk_hsdma_soc mt7623_soc = {
0878     .ddone = BIT(31),
0879     .ls0 = BIT(30),
0880 };
0881 
0882 static const struct mtk_hsdma_soc mt7622_soc = {
0883     .ddone = BIT(15),
0884     .ls0 = BIT(14),
0885 };
0886 
0887 static const struct of_device_id mtk_hsdma_match[] = {
0888     { .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc},
0889     { .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc},
0890     { /* sentinel */ }
0891 };
0892 MODULE_DEVICE_TABLE(of, mtk_hsdma_match);
0893 
0894 static int mtk_hsdma_probe(struct platform_device *pdev)
0895 {
0896     struct mtk_hsdma_device *hsdma;
0897     struct mtk_hsdma_vchan *vc;
0898     struct dma_device *dd;
0899     struct resource *res;
0900     int i, err;
0901 
0902     hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
0903     if (!hsdma)
0904         return -ENOMEM;
0905 
0906     dd = &hsdma->ddev;
0907 
0908     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0909     hsdma->base = devm_ioremap_resource(&pdev->dev, res);
0910     if (IS_ERR(hsdma->base))
0911         return PTR_ERR(hsdma->base);
0912 
0913     hsdma->soc = of_device_get_match_data(&pdev->dev);
0914     if (!hsdma->soc) {
0915         dev_err(&pdev->dev, "No device match found\n");
0916         return -ENODEV;
0917     }
0918 
0919     hsdma->clk = devm_clk_get(&pdev->dev, "hsdma");
0920     if (IS_ERR(hsdma->clk)) {
0921         dev_err(&pdev->dev, "No clock for %s\n",
0922             dev_name(&pdev->dev));
0923         return PTR_ERR(hsdma->clk);
0924     }
0925 
0926     err = platform_get_irq(pdev, 0);
0927     if (err < 0)
0928         return err;
0929     hsdma->irq = err;
0930 
0931     refcount_set(&hsdma->pc_refcnt, 0);
0932     spin_lock_init(&hsdma->lock);
0933 
0934     dma_cap_set(DMA_MEMCPY, dd->cap_mask);
0935 
0936     dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
0937     dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
0938     dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
0939     dd->device_tx_status = mtk_hsdma_tx_status;
0940     dd->device_issue_pending = mtk_hsdma_issue_pending;
0941     dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
0942     dd->device_terminate_all = mtk_hsdma_terminate_all;
0943     dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
0944     dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
0945     dd->directions = BIT(DMA_MEM_TO_MEM);
0946     dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
0947     dd->dev = &pdev->dev;
0948     INIT_LIST_HEAD(&dd->channels);
0949 
0950     hsdma->dma_requests = MTK_HSDMA_NR_VCHANS;
0951     if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
0952                               "dma-requests",
0953                               &hsdma->dma_requests)) {
0954         dev_info(&pdev->dev,
0955              "Using %u as missing dma-requests property\n",
0956              MTK_HSDMA_NR_VCHANS);
0957     }
0958 
0959     hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS,
0960                  sizeof(*hsdma->pc), GFP_KERNEL);
0961     if (!hsdma->pc)
0962         return -ENOMEM;
0963 
0964     hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests,
0965                  sizeof(*hsdma->vc), GFP_KERNEL);
0966     if (!hsdma->vc)
0967         return -ENOMEM;
0968 
0969     for (i = 0; i < hsdma->dma_requests; i++) {
0970         vc = &hsdma->vc[i];
0971         vc->vc.desc_free = mtk_hsdma_vdesc_free;
0972         vchan_init(&vc->vc, dd);
0973         init_completion(&vc->issue_completion);
0974         INIT_LIST_HEAD(&vc->desc_hw_processing);
0975     }
0976 
0977     err = dma_async_device_register(dd);
0978     if (err)
0979         return err;
0980 
0981     err = of_dma_controller_register(pdev->dev.of_node,
0982                      of_dma_xlate_by_chan_id, hsdma);
0983     if (err) {
0984         dev_err(&pdev->dev,
0985             "MediaTek HSDMA OF registration failed %d\n", err);
0986         goto err_unregister;
0987     }
0988 
0989     mtk_hsdma_hw_init(hsdma);
0990 
0991     err = devm_request_irq(&pdev->dev, hsdma->irq,
0992                    mtk_hsdma_irq, 0,
0993                    dev_name(&pdev->dev), hsdma);
0994     if (err) {
0995         dev_err(&pdev->dev,
0996             "request_irq failed with err %d\n", err);
0997         goto err_free;
0998     }
0999 
1000     platform_set_drvdata(pdev, hsdma);
1001 
1002     dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n");
1003 
1004     return 0;
1005 
1006 err_free:
1007     mtk_hsdma_hw_deinit(hsdma);
1008     of_dma_controller_free(pdev->dev.of_node);
1009 err_unregister:
1010     dma_async_device_unregister(dd);
1011 
1012     return err;
1013 }
1014 
1015 static int mtk_hsdma_remove(struct platform_device *pdev)
1016 {
1017     struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
1018     struct mtk_hsdma_vchan *vc;
1019     int i;
1020 
1021     /* Kill VC task */
1022     for (i = 0; i < hsdma->dma_requests; i++) {
1023         vc = &hsdma->vc[i];
1024 
1025         list_del(&vc->vc.chan.device_node);
1026         tasklet_kill(&vc->vc.task);
1027     }
1028 
1029     /* Disable DMA interrupt */
1030     mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
1031 
1032     /* Wait for any pending IRQ handlers to complete */
1033     synchronize_irq(hsdma->irq);
1034 
1035     /* Disable hardware */
1036     mtk_hsdma_hw_deinit(hsdma);
1037 
1038     dma_async_device_unregister(&hsdma->ddev);
1039     of_dma_controller_free(pdev->dev.of_node);
1040 
1041     return 0;
1042 }
1043 
1044 static struct platform_driver mtk_hsdma_driver = {
1045     .probe      = mtk_hsdma_probe,
1046     .remove     = mtk_hsdma_remove,
1047     .driver = {
1048         .name       = KBUILD_MODNAME,
1049         .of_match_table = mtk_hsdma_match,
1050     },
1051 };
1052 module_platform_driver(mtk_hsdma_driver);
1053 
1054 MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");
1055 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
1056 MODULE_LICENSE("GPL v2");