// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for NVIDIA Tegra GPC DMA controller.
 *
 * Copyright (c) 2014-2022, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <dt-bindings/memory/tegra186-mc.h>
#include "virt-dma.h"

/* CSR register */
#define TEGRA_GPCDMA_CHAN_CSR           0x00
#define TEGRA_GPCDMA_CSR_ENB            BIT(31)
#define TEGRA_GPCDMA_CSR_IE_EOC         BIT(30)
#define TEGRA_GPCDMA_CSR_ONCE           BIT(27)

#define TEGRA_GPCDMA_CSR_FC_MODE        GENMASK(25, 24)
#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO    \
        FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO   \
        FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO   \
        FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO  \
        FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)

#define TEGRA_GPCDMA_CSR_DMA            GENMASK(23, 21)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC   \
        FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC      \
        FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC   \
        FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC      \
        FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM        \
        FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT      \
        FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)

#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK       GENMASK(20, 16)
#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED     \
        FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
#define TEGRA_GPCDMA_CSR_IRQ_MASK       BIT(15)
#define TEGRA_GPCDMA_CSR_WEIGHT         GENMASK(13, 10)

/* STATUS register */
#define TEGRA_GPCDMA_CHAN_STATUS        0x004
#define TEGRA_GPCDMA_STATUS_BUSY        BIT(31)
#define TEGRA_GPCDMA_STATUS_ISE_EOC     BIT(30)
#define TEGRA_GPCDMA_STATUS_PING_PONG       BIT(28)
#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY    BIT(27)
#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE   BIT(26)
#define TEGRA_GPCDMA_STATUS_CHANNEL_RX      BIT(25)
#define TEGRA_GPCDMA_STATUS_CHANNEL_TX      BIT(24)
#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA    BIT(23)
#define TEGRA_GPCDMA_STATUS_IRQ_STA     BIT(21)
#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA    BIT(20)

#define TEGRA_GPCDMA_CHAN_CSRE          0x008
#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE        BIT(31)

/* Source address */
#define TEGRA_GPCDMA_CHAN_SRC_PTR       0x00C

/* Destination address */
#define TEGRA_GPCDMA_CHAN_DST_PTR       0x010

/* High address pointer */
#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR     0x014
#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR      GENMASK(7, 0)
#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR      GENMASK(23, 16)

/* MC sequence register */
#define TEGRA_GPCDMA_CHAN_MCSEQ         0x18
#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP        BIT(31)
#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT        GENMASK(30, 25)
#define TEGRA_GPCDMA_MCSEQ_BURST        GENMASK(24, 23)
#define TEGRA_GPCDMA_MCSEQ_BURST_2      \
        FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
#define TEGRA_GPCDMA_MCSEQ_BURST_16     \
        FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
#define TEGRA_GPCDMA_MCSEQ_WRAP1        GENMASK(22, 20)
#define TEGRA_GPCDMA_MCSEQ_WRAP0        GENMASK(19, 17)
#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE        0

#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK  GENMASK(13, 7)
#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK  GENMASK(6, 0)

/* MMIO sequence register */
#define TEGRA_GPCDMA_CHAN_MMIOSEQ           0x01c
#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF        BIT(31)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH      GENMASK(30, 28)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8    \
        FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16   \
        FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32   \
        FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP      BIT(27)
#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT    23
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN      2U
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX      32U
#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs)  \
        (GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
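/*
 * Note: for a power-of-two burst size bs in [2, 32], the macro above
 * encodes (bs - 1) into the BURST field, e.g. (values follow directly
 * from the macro):
 *
 *   bs = 2  -> fls(2) = 2  -> GENMASK(0, 0) = 0x01
 *   bs = 8  -> fls(8) = 4  -> GENMASK(2, 0) = 0x07
 *   bs = 32 -> fls(32) = 6 -> GENMASK(4, 0) = 0x1f
 */
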
#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID      GENMASK(22, 19)
#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD      GENMASK(18, 16)
#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT      GENMASK(8, 7)

/* Channel WCOUNT */
#define TEGRA_GPCDMA_CHAN_WCOUNT        0x20

/* Transfer count */
#define TEGRA_GPCDMA_CHAN_XFER_COUNT        0x24

/* DMA byte count status */
#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS   0x28

/* Error Status Register */
#define TEGRA_GPCDMA_CHAN_ERR_STATUS        0x30
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT    8
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF
#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) (           \
        ((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) &   \
        TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
#define TEGRA_DMA_BM_FIFO_FULL_ERR      0xF
#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR      0xE
#define TEGRA_DMA_PERIPH_ID_ERR         0xD
#define TEGRA_DMA_STREAM_ID_ERR         0xC
#define TEGRA_DMA_MC_SLAVE_ERR          0xB
#define TEGRA_DMA_MMIO_SLAVE_ERR        0xA
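
/*
 * Illustrative decoding using the macros above: an ERR_STATUS value of
 * 0x0B00 has bits [11:8] set to 0xB, so TEGRA_GPCDMA_CHAN_ERR_TYPE()
 * reports TEGRA_DMA_MC_SLAVE_ERR (a memory-controller slave error).
 */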

/* Fixed Pattern */
#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN     0x34

#define TEGRA_GPCDMA_CHAN_TZ            0x38
#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1    BIT(0)
#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1      BIT(1)

#define TEGRA_GPCDMA_CHAN_SPARE         0x3c
#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC    BIT(16)

/*
 * If any burst is in flight while the DMA is paused, this is the time
 * needed for the in-flight burst to complete and for the DMA status
 * register to be updated.
 */
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME    10
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT   5000 /* 5 msec */

/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET    0x20000

struct tegra_dma;
struct tegra_dma_channel;

/*
 * struct tegra_dma_chip_data - Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size.
 * @max_dma_count: Maximum DMA transfer count supported by the controller.
 * @hw_support_pause: DMA engine supports HW pause of the channel.
 * @terminate: Chip specific callback used to terminate a transfer.
 */
struct tegra_dma_chip_data {
    bool hw_support_pause;
    unsigned int nr_channels;
    unsigned int channel_reg_size;
    unsigned int max_dma_count;
    int (*terminate)(struct tegra_dma_channel *tdc);
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
    u32 csr;
    u32 src_ptr;
    u32 dst_ptr;
    u32 high_addr_ptr;
    u32 mc_seq;
    u32 mmio_seq;
    u32 wcount;
    u32 fixed_pattern;
};

/*
 * tegra_dma_sg_req: DMA request details to configure hardware. This
 * contains the details for one sub-transfer used to configure the DMA hw.
 * The client's request for a data transfer can be broken into multiple
 * sub-transfers as per the requester details and hw support. These
 * sub-transfers are added as an array in the Tegra DMA desc, which
 * manages the transfer details.
 */
struct tegra_dma_sg_req {
    unsigned int len;
    struct tegra_dma_channel_regs ch_regs;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor, which uses virt_dma_desc to
 * manage the client request and keep track of transfer status,
 * callbacks, request counts, etc.
 */
struct tegra_dma_desc {
    bool cyclic;
    unsigned int bytes_req;
    unsigned int bytes_xfer;
    unsigned int sg_idx;
    unsigned int sg_count;
    struct virt_dma_desc vd;
    struct tegra_dma_channel *tdc;
    struct tegra_dma_sg_req sg_req[];
};

/*
 * tegra_dma_channel: Channel specific information
 */
struct tegra_dma_channel {
    bool config_init;
    char name[30];
    enum dma_transfer_direction sid_dir;
    int id;
    int irq;
    int slave_id;
    struct tegra_dma *tdma;
    struct virt_dma_chan vc;
    struct tegra_dma_desc *dma_desc;
    struct dma_slave_config dma_sconfig;
    unsigned int stream_id;
    unsigned long chan_base_offset;
};

/*
 * tegra_dma: Tegra DMA specific information
 */
struct tegra_dma {
    const struct tegra_dma_chip_data *chip_data;
    unsigned long sid_m2d_reserved;
    unsigned long sid_d2m_reserved;
    void __iomem *base_addr;
    struct device *dev;
    struct dma_device dma_dev;
    struct reset_control *rst;
    struct tegra_dma_channel channels[];
};

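/*
 * Per-channel register access helpers. A channel register lives at:
 *
 *   base_addr + TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET
 *             + (channel id * channel_reg_size) + reg
 *
 * chan_base_offset is precomputed per channel in probe, so the helpers
 * below only add the register offset.
 */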
static inline void tdc_write(struct tegra_dma_channel *tdc,
                 u32 reg, u32 val)
{
    writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
    return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
    return container_of(dc, struct tegra_dma_channel, vc.chan);
}

static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
{
    return container_of(vd, struct tegra_dma_desc, vd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
    return tdc->vc.chan.device->dev;
}

static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
{
    dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
        tdc->id, tdc->name);
    dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
    );
    dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
    );
    dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
        tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
}

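/*
 * A slave (requester) id may be owned by at most one channel per
 * direction at a time. Reservations are tracked in the
 * sid_m2d_reserved/sid_d2m_reserved bitmaps so that, e.g., two
 * channels cannot both run MEM_TO_DEV transfers for the same id.
 */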
static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
                 enum dma_transfer_direction direction)
{
    struct tegra_dma *tdma = tdc->tdma;
    int sid = tdc->slave_id;

    if (!is_slave_direction(direction))
        return 0;

    switch (direction) {
    case DMA_MEM_TO_DEV:
        if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
            dev_err(tdma->dev, "slave id already in use\n");
            return -EINVAL;
        }
        break;
    case DMA_DEV_TO_MEM:
        if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
            dev_err(tdma->dev, "slave id already in use\n");
            return -EINVAL;
        }
        break;
    default:
        break;
    }

    tdc->sid_dir = direction;

    return 0;
}

static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
{
    struct tegra_dma *tdma = tdc->tdma;
    int sid = tdc->slave_id;

    switch (tdc->sid_dir) {
    case DMA_MEM_TO_DEV:
        clear_bit(sid, &tdma->sid_m2d_reserved);
        break;
    case DMA_DEV_TO_MEM:
        clear_bit(sid, &tdma->sid_d2m_reserved);
        break;
    default:
        break;
    }

    tdc->sid_dir = DMA_TRANS_NONE;
}

static void tegra_dma_desc_free(struct virt_dma_desc *vd)
{
    kfree(container_of(vd, struct tegra_dma_desc, vd));
}

static int tegra_dma_slave_config(struct dma_chan *dc,
                  struct dma_slave_config *sconfig)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

    memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
    tdc->config_init = true;

    return 0;
}

static int tegra_dma_pause(struct tegra_dma_channel *tdc)
{
    int ret;
    u32 val;

    val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
    val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);

    /* Wait until busy bit is de-asserted */
    ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
            tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
            val,
            !(val & TEGRA_GPCDMA_STATUS_BUSY),
            TEGRA_GPCDMA_BURST_COMPLETE_TIME,
            TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);

    if (ret) {
        dev_err(tdc2dev(tdc), "DMA pause timed out\n");
        tegra_dma_dump_chan_regs(tdc);
    }

    return ret;
}

static int tegra_dma_device_pause(struct dma_chan *dc)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    unsigned long flags;
    int ret;

    if (!tdc->tdma->chip_data->hw_support_pause)
        return -ENOSYS;

    spin_lock_irqsave(&tdc->vc.lock, flags);
    ret = tegra_dma_pause(tdc);
    spin_unlock_irqrestore(&tdc->vc.lock, flags);

    return ret;
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
    u32 val;

    val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
    val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
}

static int tegra_dma_device_resume(struct dma_chan *dc)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    unsigned long flags;

    if (!tdc->tdma->chip_data->hw_support_pause)
        return -ENOSYS;

    spin_lock_irqsave(&tdc->vc.lock, flags);
    tegra_dma_resume(tdc);
    spin_unlock_irqrestore(&tdc->vc.lock, flags);

    return 0;
}

static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
{
    /*
     * Return 0 irrespective of PAUSE status.
     * This is useful to recover channels that can exit out of flush
     * state when the channel is disabled.
     */
    tegra_dma_pause(tdc);
    return 0;
}

static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
    u32 csr, status;

    csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);

    /* Disable interrupts */
    csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;

    /* Disable DMA */
    csr &= ~TEGRA_GPCDMA_CSR_ENB;
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

    /* Clear interrupt status if it is there */
    status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
    if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
        dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
        tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
    }
}

static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
{
    struct tegra_dma_desc *dma_desc = tdc->dma_desc;
    struct tegra_dma_channel_regs *ch_regs;
    int ret;
    u32 val;

    dma_desc->sg_idx++;

    /* Reset the sg index for cyclic transfers */
    if (dma_desc->sg_idx == dma_desc->sg_count)
        dma_desc->sg_idx = 0;

    /*
     * Wait for the channel to go busy with the current transfer,
     * then program the next sub-transfer right away.
     */
    ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
            tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
            val,
            (val & TEGRA_GPCDMA_STATUS_BUSY), 0,
            TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
    if (ret)
        return;

    ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

    tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);

    /* Start DMA */
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
          ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_start(struct tegra_dma_channel *tdc)
{
    struct tegra_dma_desc *dma_desc = tdc->dma_desc;
    struct tegra_dma_channel_regs *ch_regs;
    struct virt_dma_desc *vdesc;

    if (!dma_desc) {
        vdesc = vchan_next_desc(&tdc->vc);
        if (!vdesc)
            return;

        dma_desc = vd_to_tegra_dma_desc(vdesc);
        list_del(&vdesc->node);
        dma_desc->tdc = tdc;
        tdc->dma_desc = dma_desc;

        tegra_dma_resume(tdc);
    }

    ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

    tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);

    /* Start DMA */
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
          ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
{
    vchan_cookie_complete(&tdc->dma_desc->vd);

    tegra_dma_sid_free(tdc);
    tdc->dma_desc = NULL;
}

static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
                    unsigned int err_status)
{
    switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
    case TEGRA_DMA_BM_FIFO_FULL_ERR:
        dev_err(tdc->tdma->dev,
            "GPCDMA CH%d bm fifo full\n", tdc->id);
        break;

    case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
        dev_err(tdc->tdma->dev,
            "GPCDMA CH%d peripheral fifo full\n", tdc->id);
        break;

    case TEGRA_DMA_PERIPH_ID_ERR:
        dev_err(tdc->tdma->dev,
            "GPCDMA CH%d illegal peripheral id\n", tdc->id);
        break;

    case TEGRA_DMA_STREAM_ID_ERR:
        dev_err(tdc->tdma->dev,
            "GPCDMA CH%d illegal stream id\n", tdc->id);
        break;

    case TEGRA_DMA_MC_SLAVE_ERR:
        dev_err(tdc->tdma->dev,
            "GPCDMA CH%d mc slave error\n", tdc->id);
        break;

    case TEGRA_DMA_MMIO_SLAVE_ERR:
        dev_err(tdc->tdma->dev,
            "GPCDMA CH%d mmio slave error\n", tdc->id);
        break;

    default:
        dev_err(tdc->tdma->dev,
            "GPCDMA CH%d security violation %x\n", tdc->id,
            err_status);
    }
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
    struct tegra_dma_channel *tdc = dev_id;
    struct tegra_dma_desc *dma_desc = tdc->dma_desc;
    struct tegra_dma_sg_req *sg_req;
    u32 status;

    /* Check channel error status register */
    status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
    if (status) {
        tegra_dma_chan_decode_error(tdc, status);
        tegra_dma_dump_chan_regs(tdc);
        tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
    }

    spin_lock(&tdc->vc.lock);
    status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
    if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
        goto irq_done;

    tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
          TEGRA_GPCDMA_STATUS_ISE_EOC);

    if (!dma_desc)
        goto irq_done;

    sg_req = dma_desc->sg_req;
    dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;

    if (dma_desc->cyclic) {
        vchan_cyclic_callback(&dma_desc->vd);
        tegra_dma_configure_next_sg(tdc);
    } else {
        dma_desc->sg_idx++;
        if (dma_desc->sg_idx == dma_desc->sg_count)
            tegra_dma_xfer_complete(tdc);
        else
            tegra_dma_start(tdc);
    }

irq_done:
    spin_unlock(&tdc->vc.lock);
    return IRQ_HANDLED;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    unsigned long flags;

    if (tdc->dma_desc)
        return;

    spin_lock_irqsave(&tdc->vc.lock, flags);
    if (vchan_issue_pending(&tdc->vc))
        tegra_dma_start(tdc);

    /*
     * For cyclic DMA transfers, program the second transfer
     * parameters as soon as the first DMA transfer is started,
     * in order for the DMA controller to trigger the second
     * transfer with the correct parameters.
     */
    if (tdc->dma_desc && tdc->dma_desc->cyclic)
        tegra_dma_configure_next_sg(tdc);

    spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
{
    int ret;
    u32 status, csr;

    /*
     * Change the client associated with the DMA channel
     * to stop the DMA engine from starting any more bursts for
     * the given client and wait for in-flight bursts to complete.
     */
    csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
    csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
    csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
    tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

    /* Wait for in-flight data transfers to finish */
    udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);

    /*
     * If the TX/RX path is still active, wait until it becomes
     * inactive.
     */
    ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
                tdc->chan_base_offset +
                TEGRA_GPCDMA_CHAN_STATUS,
                status,
                !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
                TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
                5,
                TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
    if (ret) {
        dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
        tegra_dma_dump_chan_regs(tdc);
    }

    return ret;
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    unsigned long flags;
    LIST_HEAD(head);
    int err;

    spin_lock_irqsave(&tdc->vc.lock, flags);

    if (tdc->dma_desc) {
        err = tdc->tdma->chip_data->terminate(tdc);
        if (err) {
            spin_unlock_irqrestore(&tdc->vc.lock, flags);
            return err;
        }

        tegra_dma_disable(tdc);
        tdc->dma_desc = NULL;
    }

    tegra_dma_sid_free(tdc);
    vchan_get_all_descriptors(&tdc->vc, &head);
    spin_unlock_irqrestore(&tdc->vc.lock, flags);

    vchan_dma_desc_free_list(&tdc->vc, &head);

    return 0;
}

static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
{
    struct tegra_dma_desc *dma_desc = tdc->dma_desc;
    struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
    unsigned int bytes_xfer, residual;
    u32 wcount, status;

    wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);

    /*
     * Set wcount = 0 if EOC bit is set. The transfer would have
     * already completed and the CHAN_XFER_COUNT could have updated
     * for the next transfer, specifically in case of cyclic transfers.
     */
    status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
    if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
        wcount = 0;

    bytes_xfer = dma_desc->bytes_xfer +
             sg_req[dma_desc->sg_idx].len - (wcount * 4);

    residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);

    return residual;
}
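
/*
 * Illustrative numbers for the residual math above: for a single
 * 64-byte sg request with bytes_xfer = 0 and 8 words (32 bytes) still
 * outstanding in CHAN_XFER_COUNT:
 *
 *   bytes_xfer = 0 + 64 - (8 * 4) = 32
 *   residual   = 64 - (32 % 64)   = 32 bytes left
 */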

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
                       dma_cookie_t cookie,
                       struct dma_tx_state *txstate)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    struct tegra_dma_desc *dma_desc;
    struct virt_dma_desc *vd;
    unsigned int residual;
    unsigned long flags;
    enum dma_status ret;

    ret = dma_cookie_status(dc, cookie, txstate);
    if (ret == DMA_COMPLETE)
        return ret;

    spin_lock_irqsave(&tdc->vc.lock, flags);
    vd = vchan_find_desc(&tdc->vc, cookie);
    if (vd) {
        dma_desc = vd_to_tegra_dma_desc(vd);
        residual = dma_desc->bytes_req;
        dma_set_residue(txstate, residual);
    } else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
        residual = tegra_dma_get_residual(tdc);
        dma_set_residue(txstate, residual);
    } else {
        dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
    }
    spin_unlock_irqrestore(&tdc->vc.lock, flags);

    return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
                enum dma_slave_buswidth slave_bw)
{
    switch (slave_bw) {
    case DMA_SLAVE_BUSWIDTH_1_BYTE:
        return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
    case DMA_SLAVE_BUSWIDTH_2_BYTES:
        return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
    case DMA_SLAVE_BUSWIDTH_4_BYTES:
        return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
    default:
        dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
        return -EINVAL;
    }
}

static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
                   u32 burst_size, enum dma_slave_buswidth slave_bw,
                   int len)
{
    unsigned int burst_mmio_width, burst_byte;

    /*
     * burst_size from the client is in terms of the bus_width.
     * Convert that into words. If burst_size is not specified by
     * the client, use len to calculate the optimum burst size.
     */
    burst_byte = burst_size ? burst_size * slave_bw : len;
    burst_mmio_width = burst_byte / 4;

    if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
        return 0;

    burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);

    return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
}
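
/*
 * Illustrative example for get_burst_size(): a client burst of 8 with
 * a 4-byte bus width gives burst_byte = 32, i.e. 8 MMIO words, which
 * lies within [BURST_MIN, BURST_MAX] and encodes (via fls) to field
 * value 0x7 in the MMIOSEQ burst bits.
 */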

static int get_transfer_param(struct tegra_dma_channel *tdc,
                  enum dma_transfer_direction direction,
                  u32 *apb_addr,
                  u32 *mmio_seq,
                  u32 *csr,
                  unsigned int *burst_size,
                  enum dma_slave_buswidth *slave_bw)
{
    switch (direction) {
    case DMA_MEM_TO_DEV:
        *apb_addr = tdc->dma_sconfig.dst_addr;
        *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
        *burst_size = tdc->dma_sconfig.dst_maxburst;
        *slave_bw = tdc->dma_sconfig.dst_addr_width;
        *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
        return 0;
    case DMA_DEV_TO_MEM:
        *apb_addr = tdc->dma_sconfig.src_addr;
        *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
        *burst_size = tdc->dma_sconfig.src_maxburst;
        *slave_bw = tdc->dma_sconfig.src_addr_width;
        *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
        return 0;
    default:
        dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
    }

    return -EINVAL;
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
              size_t len, unsigned long flags)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
    struct tegra_dma_sg_req *sg_req;
    struct tegra_dma_desc *dma_desc;
    u32 csr, mc_seq;

    if ((len & 3) || (dest & 3) || len > max_dma_count) {
        dev_err(tdc2dev(tdc),
            "DMA length/memory address is not supported\n");
        return NULL;
    }

    /* Set DMA mode to fixed pattern */
    csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
    /* Enable once (non-continuous) mode */
    csr |= TEGRA_GPCDMA_CSR_ONCE;
    /* Enable IRQ mask */
    csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
    /* Enable the DMA interrupt */
    if (flags & DMA_PREP_INTERRUPT)
        csr |= TEGRA_GPCDMA_CSR_IE_EOC;
    /* Configure default priority weight for the channel */
    csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

    mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
    /* Retain stream-id and clear the rest */
    mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

    /* Set the address wrapping */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

    /* Program outstanding MC requests */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
    /* Set burst size */
    mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

    dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
    if (!dma_desc)
        return NULL;

    dma_desc->bytes_req = len;
    dma_desc->sg_count = 1;
    sg_req = dma_desc->sg_req;

    sg_req[0].ch_regs.src_ptr = 0;
    sg_req[0].ch_regs.dst_ptr = dest;
    sg_req[0].ch_regs.high_addr_ptr =
            FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
    sg_req[0].ch_regs.fixed_pattern = value;
    /*
     * The word count register takes N for a transfer of (N + 1)
     * words, e.g. len = 64 bytes -> wcount = 15 -> 16 words.
     */
    sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
    sg_req[0].ch_regs.csr = csr;
    sg_req[0].ch_regs.mmio_seq = 0;
    sg_req[0].ch_regs.mc_seq = mc_seq;
    sg_req[0].len = len;

    dma_desc->cyclic = false;
    return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
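
/*
 * Illustrative client-side usage of the memset op through the generic
 * dmaengine API (hypothetical channel name; error handling trimmed):
 *
 *   struct dma_chan *chan = dma_request_chan(dev, "memset");
 *   struct dma_async_tx_descriptor *tx;
 *
 *   tx = dmaengine_prep_dma_memset(chan, dma_dest, 0xA5, SZ_4K,
 *                                  DMA_PREP_INTERRUPT);
 *   dmaengine_submit(tx);
 *   dma_async_issue_pending(chan);
 */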

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
              dma_addr_t src, size_t len, unsigned long flags)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    struct tegra_dma_sg_req *sg_req;
    struct tegra_dma_desc *dma_desc;
    unsigned int max_dma_count;
    u32 csr, mc_seq;

    max_dma_count = tdc->tdma->chip_data->max_dma_count;
    if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
        dev_err(tdc2dev(tdc),
            "DMA length/memory address is not supported\n");
        return NULL;
    }

    /* Set DMA mode to memory to memory transfer */
    csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
    /* Enable once (non-continuous) mode */
    csr |= TEGRA_GPCDMA_CSR_ONCE;
    /* Enable IRQ mask */
    csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
    /* Enable the DMA interrupt */
    if (flags & DMA_PREP_INTERRUPT)
        csr |= TEGRA_GPCDMA_CSR_IE_EOC;
    /* Configure default priority weight for the channel */
    csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

    mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
    /* Retain stream-ids and clear the rest */
    mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
          (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

    /* Set the address wrapping */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

    /* Program outstanding MC requests */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
    /* Set burst size */
    mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

    dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
    if (!dma_desc)
        return NULL;

    dma_desc->bytes_req = len;
    dma_desc->sg_count = 1;
    sg_req = dma_desc->sg_req;

    sg_req[0].ch_regs.src_ptr = src;
    sg_req[0].ch_regs.dst_ptr = dest;
    sg_req[0].ch_regs.high_addr_ptr =
        FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
    sg_req[0].ch_regs.high_addr_ptr |=
        FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
    /*
     * The word count register takes N for a transfer of (N + 1)
     * words.
     */
    sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
    sg_req[0].ch_regs.csr = csr;
    sg_req[0].ch_regs.mmio_seq = 0;
    sg_req[0].ch_regs.mc_seq = mc_seq;
    sg_req[0].len = len;

    dma_desc->cyclic = false;
    return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
            unsigned int sg_len, enum dma_transfer_direction direction,
            unsigned long flags, void *context)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
    enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
    u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
    struct tegra_dma_sg_req *sg_req;
    struct tegra_dma_desc *dma_desc;
    struct scatterlist *sg;
    u32 burst_size;
    unsigned int i;
    int ret;

    if (!tdc->config_init) {
        dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
        return NULL;
    }
    if (sg_len < 1) {
        dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
        return NULL;
    }

    ret = tegra_dma_sid_reserve(tdc, direction);
    if (ret)
        return NULL;

    ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
                 &burst_size, &slave_bw);
    if (ret < 0)
        return NULL;

    /* Enable once (non-continuous) mode */
    csr |= TEGRA_GPCDMA_CSR_ONCE;
    /* Program the slave id in requestor select */
    csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
    /* Enable IRQ mask */
    csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
    /* Configure default priority weight for the channel */
    csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

    /* Enable the DMA interrupt */
    if (flags & DMA_PREP_INTERRUPT)
        csr |= TEGRA_GPCDMA_CSR_IE_EOC;

    mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
    /* Retain stream-id and clear the rest */
    mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

    /* Set the address wrapping on both MC and MMIO side */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
    mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

    /* Program 2 MC outstanding requests by default. */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);

    /* Set MC burst size depending on MMIO burst size */
    if (burst_size == 64)
        mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
    else
        mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

    dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
    if (!dma_desc)
        return NULL;

    dma_desc->sg_count = sg_len;
    sg_req = dma_desc->sg_req;

    /* Make transfer requests */
    for_each_sg(sgl, sg, sg_len, i) {
        u32 len;
        dma_addr_t mem;

        mem = sg_dma_address(sg);
        len = sg_dma_len(sg);

        if ((len & 3) || (mem & 3) || len > max_dma_count) {
            dev_err(tdc2dev(tdc),
                "DMA length/memory address is not supported\n");
            kfree(dma_desc);
            return NULL;
        }

        mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
        dma_desc->bytes_req += len;

        if (direction == DMA_MEM_TO_DEV) {
            sg_req[i].ch_regs.src_ptr = mem;
            sg_req[i].ch_regs.dst_ptr = apb_ptr;
            sg_req[i].ch_regs.high_addr_ptr =
                FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
        } else if (direction == DMA_DEV_TO_MEM) {
            sg_req[i].ch_regs.src_ptr = apb_ptr;
            sg_req[i].ch_regs.dst_ptr = mem;
            sg_req[i].ch_regs.high_addr_ptr =
                FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
        }

        /*
         * The word count register takes input in words. Writing a
         * value of N means a request of (N + 1) words.
         */
        sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
        sg_req[i].ch_regs.csr = csr;
        sg_req[i].ch_regs.mmio_seq = mmio_seq;
        sg_req[i].ch_regs.mc_seq = mc_seq;
        sg_req[i].len = len;
    }

    dma_desc->cyclic = false;
    return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
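
/*
 * Illustrative slave setup consumed by get_transfer_param() above
 * (hypothetical FIFO address; error handling trimmed):
 *
 *   struct dma_slave_config cfg = {
 *           .dst_addr       = 0x3100000,    // peripheral FIFO (example)
 *           .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *           .dst_maxburst   = 8,
 *   };
 *
 *   dmaengine_slave_config(chan, &cfg);
 *   tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *                                DMA_PREP_INTERRUPT);
 */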

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
              size_t period_len, enum dma_transfer_direction direction,
              unsigned long flags)
{
    enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
    u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
    unsigned int max_dma_count, len, period_count, i;
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    struct tegra_dma_desc *dma_desc;
    struct tegra_dma_sg_req *sg_req;
    dma_addr_t mem = buf_addr;
    int ret;

    if (!buf_len || !period_len) {
        dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
        return NULL;
    }

    if (!tdc->config_init) {
        dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
        return NULL;
    }

    ret = tegra_dma_sid_reserve(tdc, direction);
    if (ret)
        return NULL;

    /*
     * We only support cyclic transfers when buf_len is a multiple of
     * period_len.
     */
    if (buf_len % period_len) {
        dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
        return NULL;
    }

    len = period_len;
    max_dma_count = tdc->tdma->chip_data->max_dma_count;
    if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
        dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
        return NULL;
    }

    ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
                 &burst_size, &slave_bw);
    if (ret < 0)
        return NULL;

    /* Clear once mode to select continuous (cyclic) transfer */
    csr &= ~TEGRA_GPCDMA_CSR_ONCE;
    /* Program the slave id in requestor select */
    csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
    /* Enable IRQ mask */
    csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
    /* Configure default priority weight for the channel */
    csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

    /* Enable the DMA interrupt */
    if (flags & DMA_PREP_INTERRUPT)
        csr |= TEGRA_GPCDMA_CSR_IE_EOC;

    mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

    mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
    /* Retain stream-id and clear the rest */
    mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

    /* Set the address wrapping on both MC and MMIO side */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
                 TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

    /* Program 2 MC outstanding requests by default. */
    mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
    /* Set MC burst size depending on MMIO burst size */
    if (burst_size == 64)
        mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
    else
        mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

    period_count = buf_len / period_len;
    dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
               GFP_NOWAIT);
    if (!dma_desc)
        return NULL;

    dma_desc->bytes_req = buf_len;
    dma_desc->sg_count = period_count;
    sg_req = dma_desc->sg_req;

    /* Split the transfer into period-size sub-transfers */
    for (i = 0; i < period_count; i++) {
        mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
        if (direction == DMA_MEM_TO_DEV) {
            sg_req[i].ch_regs.src_ptr = mem;
            sg_req[i].ch_regs.dst_ptr = apb_ptr;
            sg_req[i].ch_regs.high_addr_ptr =
                FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
        } else if (direction == DMA_DEV_TO_MEM) {
            sg_req[i].ch_regs.src_ptr = apb_ptr;
            sg_req[i].ch_regs.dst_ptr = mem;
            sg_req[i].ch_regs.high_addr_ptr =
                FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
        }
        /*
         * The word count register takes input in words. Writing a
         * value of N means a request of (N + 1) words.
         */
        sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
        sg_req[i].ch_regs.csr = csr;
        sg_req[i].ch_regs.mmio_seq = mmio_seq;
        sg_req[i].ch_regs.mc_seq = mc_seq;
        sg_req[i].len = len;

        mem += len;
    }

    dma_desc->cyclic = true;

    return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
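
/*
 * Illustrative cyclic usage (e.g. an audio-style ring buffer; names
 * hypothetical): a 64 KiB buffer split into eight 8 KiB periods, with
 * the period callback firing on each EOC interrupt:
 *
 *   tx = dmaengine_prep_dma_cyclic(chan, buf_dma, SZ_64K, SZ_8K,
 *                                  DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *   tx->callback = period_done;
 *   dmaengine_submit(tx);
 *   dma_async_issue_pending(chan);
 */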

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    int ret;

    ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
    if (ret) {
        dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
        return ret;
    }

    dma_cookie_init(&tdc->vc.chan);
    tdc->config_init = false;
    return 0;
}

static void tegra_dma_chan_synchronize(struct dma_chan *dc)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

    synchronize_irq(tdc->irq);
    vchan_synchronize(&tdc->vc);
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
    struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

    dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

    tegra_dma_terminate_all(dc);
    synchronize_irq(tdc->irq);

    tasklet_kill(&tdc->vc.task);
    tdc->config_init = false;
    tdc->slave_id = -1;
    tdc->sid_dir = DMA_TRANS_NONE;
    free_irq(tdc->irq, tdc);

    vchan_free_chan_resources(&tdc->vc);
}

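/*
 * Channel requests come in through a single-cell DT specifier; the
 * cell carries the requester (slave) id that ends up in
 * CSR_REQ_SEL_MASK. A client node would reference the controller as,
 * e.g. (illustrative): dmas = <&gpcdma 24>; dma-names = "rx";
 */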
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
                       struct of_dma *ofdma)
{
    struct tegra_dma *tdma = ofdma->of_dma_data;
    struct tegra_dma_channel *tdc;
    struct dma_chan *chan;

    chan = dma_get_any_slave_channel(&tdma->dma_dev);
    if (!chan)
        return NULL;

    tdc = to_tegra_dma_chan(chan);
    tdc->slave_id = dma_spec->args[0];

    return chan;
}

static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
    .nr_channels = 31,
    .channel_reg_size = SZ_64K,
    .max_dma_count = SZ_1G,
    .hw_support_pause = false,
    .terminate = tegra_dma_stop_client,
};

static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
    .nr_channels = 31,
    .channel_reg_size = SZ_64K,
    .max_dma_count = SZ_1G,
    .hw_support_pause = true,
    .terminate = tegra_dma_pause,
};

static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
    .nr_channels = 31,
    .channel_reg_size = SZ_64K,
    .max_dma_count = SZ_1G,
    .hw_support_pause = true,
    .terminate = tegra_dma_pause_noerr,
};

static const struct of_device_id tegra_dma_of_match[] = {
    {
        .compatible = "nvidia,tegra186-gpcdma",
        .data = &tegra186_dma_chip_data,
    }, {
        .compatible = "nvidia,tegra194-gpcdma",
        .data = &tegra194_dma_chip_data,
    }, {
        .compatible = "nvidia,tegra234-gpcdma",
        .data = &tegra234_dma_chip_data,
    }, {
    },
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
{
    unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);

    reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
    reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

    reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
    reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);

    tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
    return 0;
}

static int tegra_dma_probe(struct platform_device *pdev)
{
    const struct tegra_dma_chip_data *cdata = NULL;
    struct iommu_fwspec *iommu_spec;
    unsigned int stream_id, i;
    struct tegra_dma *tdma;
    int ret;

    cdata = of_device_get_match_data(&pdev->dev);

    tdma = devm_kzalloc(&pdev->dev,
                struct_size(tdma, channels, cdata->nr_channels),
                GFP_KERNEL);
    if (!tdma)
        return -ENOMEM;

    tdma->dev = &pdev->dev;
    tdma->chip_data = cdata;
    platform_set_drvdata(pdev, tdma);

    tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(tdma->base_addr))
        return PTR_ERR(tdma->base_addr);

    tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
    if (IS_ERR(tdma->rst)) {
        return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
                  "Missing controller reset\n");
    }
    reset_control_reset(tdma->rst);

    tdma->dma_dev.dev = &pdev->dev;

    iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
    if (!iommu_spec) {
        dev_err(&pdev->dev, "Missing iommu stream-id\n");
        return -EINVAL;
    }
    stream_id = iommu_spec->ids[0] & 0xffff;

    INIT_LIST_HEAD(&tdma->dma_dev.channels);
    for (i = 0; i < cdata->nr_channels; i++) {
        struct tegra_dma_channel *tdc = &tdma->channels[i];

        tdc->irq = platform_get_irq(pdev, i);
        if (tdc->irq < 0)
            return tdc->irq;

        tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET +
                    i * cdata->channel_reg_size;
        snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
        tdc->tdma = tdma;
        tdc->id = i;
        tdc->slave_id = -1;

        vchan_init(&tdc->vc, &tdma->dma_dev);
        tdc->vc.desc_free = tegra_dma_desc_free;

        /* Program stream-id for this channel */
        tegra_dma_program_sid(tdc, stream_id);
        tdc->stream_id = stream_id;
    }

    dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
    dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
    dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
    dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
    dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

    /*
     * Only word aligned transfers are supported. Set the copy/fill
     * alignment shift.
     */
    tdma->dma_dev.copy_align = 2;
    tdma->dma_dev.fill_align = 2;
    tdma->dma_dev.device_alloc_chan_resources =
                    tegra_dma_alloc_chan_resources;
    tdma->dma_dev.device_free_chan_resources =
                    tegra_dma_free_chan_resources;
    tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
    tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
    tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
    tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
    tdma->dma_dev.device_config = tegra_dma_slave_config;
    tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
    tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
    tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
    tdma->dma_dev.device_pause = tegra_dma_device_pause;
    tdma->dma_dev.device_resume = tegra_dma_device_resume;
    tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
    tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

    ret = dma_async_device_register(&tdma->dma_dev);
    if (ret < 0) {
        dev_err_probe(&pdev->dev, ret,
                  "GPC DMA driver registration failed\n");
        return ret;
    }

    ret = of_dma_controller_register(pdev->dev.of_node,
                     tegra_dma_of_xlate, tdma);
    if (ret < 0) {
        dev_err_probe(&pdev->dev, ret,
                  "GPC DMA OF registration failed\n");

        dma_async_device_unregister(&tdma->dma_dev);
        return ret;
    }

    dev_info(&pdev->dev, "GPC DMA driver registered %d channels\n",
         cdata->nr_channels);

    return 0;
}
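
/*
 * Minimal illustrative device-tree node matching what probe consumes
 * (one register window, per-channel interrupts, a "gpcdma" reset and
 * an iommu stream-id; addresses and phandles are placeholders):
 *
 *   gpcdma: dma-controller@2600000 {
 *           compatible = "nvidia,tegra186-gpcdma";
 *           reg = <0x2600000 0x210000>;
 *           interrupts = <...>;                 // one per channel
 *           resets = <&bpmp TEGRA186_RESET_GPCDMA>;
 *           reset-names = "gpcdma";
 *           iommus = <&smmu TEGRA186_SID_GPCDMA_0>;
 *           #dma-cells = <1>;
 *   };
 */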

static int tegra_dma_remove(struct platform_device *pdev)
{
    struct tegra_dma *tdma = platform_get_drvdata(pdev);

    of_dma_controller_free(pdev->dev.of_node);
    dma_async_device_unregister(&tdma->dma_dev);

    return 0;
}

static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
{
    struct tegra_dma *tdma = dev_get_drvdata(dev);
    unsigned int i;

    for (i = 0; i < tdma->chip_data->nr_channels; i++) {
        struct tegra_dma_channel *tdc = &tdma->channels[i];

        if (tdc->dma_desc) {
            dev_err(tdma->dev, "channel %u busy\n", i);
            return -EBUSY;
        }
    }

    return 0;
}

static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
{
    struct tegra_dma *tdma = dev_get_drvdata(dev);
    unsigned int i;

    reset_control_reset(tdma->rst);

    for (i = 0; i < tdma->chip_data->nr_channels; i++) {
        struct tegra_dma_channel *tdc = &tdma->channels[i];

        tegra_dma_program_sid(tdc, tdc->stream_id);
    }

    return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dma_driver = {
    .driver = {
        .name   = "tegra-gpcdma",
        .pm = &tegra_dma_dev_pm_ops,
        .of_match_table = tegra_dma_of_match,
    },
    .probe      = tegra_dma_probe,
    .remove     = tegra_dma_remove,
};

module_platform_driver(tegra_dma_driver);

MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
MODULE_LICENSE("GPL");