0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * DMA driver for Nvidia's Tegra20 APB DMA controller.
0004  *
0005  * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
0006  */
0007 
0008 #include <linux/bitops.h>
0009 #include <linux/clk.h>
0010 #include <linux/delay.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/dma-mapping.h>
0013 #include <linux/err.h>
0014 #include <linux/init.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/io.h>
0017 #include <linux/mm.h>
0018 #include <linux/module.h>
0019 #include <linux/of.h>
0020 #include <linux/of_device.h>
0021 #include <linux/of_dma.h>
0022 #include <linux/platform_device.h>
0023 #include <linux/pm.h>
0024 #include <linux/pm_runtime.h>
0025 #include <linux/reset.h>
0026 #include <linux/slab.h>
0027 #include <linux/wait.h>
0028 
0029 #include "dmaengine.h"
0030 
0031 #define CREATE_TRACE_POINTS
0032 #include <trace/events/tegra_apb_dma.h>
0033 
0034 #define TEGRA_APBDMA_GENERAL            0x0
0035 #define TEGRA_APBDMA_GENERAL_ENABLE     BIT(31)
0036 
0037 #define TEGRA_APBDMA_CONTROL            0x010
0038 #define TEGRA_APBDMA_IRQ_MASK           0x01c
0039 #define TEGRA_APBDMA_IRQ_MASK_SET       0x020
0040 
0041 /* CSR register */
0042 #define TEGRA_APBDMA_CHAN_CSR           0x00
0043 #define TEGRA_APBDMA_CSR_ENB            BIT(31)
0044 #define TEGRA_APBDMA_CSR_IE_EOC         BIT(30)
0045 #define TEGRA_APBDMA_CSR_HOLD           BIT(29)
0046 #define TEGRA_APBDMA_CSR_DIR            BIT(28)
0047 #define TEGRA_APBDMA_CSR_ONCE           BIT(27)
0048 #define TEGRA_APBDMA_CSR_FLOW           BIT(21)
0049 #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT      16
0050 #define TEGRA_APBDMA_CSR_REQ_SEL_MASK       0x1F
0051 #define TEGRA_APBDMA_CSR_WCOUNT_MASK        0xFFFC
0052 
0053 /* STATUS register */
0054 #define TEGRA_APBDMA_CHAN_STATUS        0x004
0055 #define TEGRA_APBDMA_STATUS_BUSY        BIT(31)
0056 #define TEGRA_APBDMA_STATUS_ISE_EOC     BIT(30)
0057 #define TEGRA_APBDMA_STATUS_HALT        BIT(29)
0058 #define TEGRA_APBDMA_STATUS_PING_PONG       BIT(28)
0059 #define TEGRA_APBDMA_STATUS_COUNT_SHIFT     2
0060 #define TEGRA_APBDMA_STATUS_COUNT_MASK      0xFFFC
0061 
0062 #define TEGRA_APBDMA_CHAN_CSRE          0x00C
0063 #define TEGRA_APBDMA_CHAN_CSRE_PAUSE        BIT(31)
0064 
0065 /* AHB memory address */
0066 #define TEGRA_APBDMA_CHAN_AHBPTR        0x010
0067 
0068 /* AHB sequence register */
0069 #define TEGRA_APBDMA_CHAN_AHBSEQ        0x14
0070 #define TEGRA_APBDMA_AHBSEQ_INTR_ENB        BIT(31)
0071 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8     (0 << 28)
0072 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16    (1 << 28)
0073 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32    (2 << 28)
0074 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64    (3 << 28)
0075 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128   (4 << 28)
0076 #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP       BIT(27)
0077 #define TEGRA_APBDMA_AHBSEQ_BURST_1     (4 << 24)
0078 #define TEGRA_APBDMA_AHBSEQ_BURST_4     (5 << 24)
0079 #define TEGRA_APBDMA_AHBSEQ_BURST_8     (6 << 24)
0080 #define TEGRA_APBDMA_AHBSEQ_DBL_BUF     BIT(19)
0081 #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT      16
0082 #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE       0
0083 
0084 /* APB address */
0085 #define TEGRA_APBDMA_CHAN_APBPTR        0x018
0086 
0087 /* APB sequence register */
0088 #define TEGRA_APBDMA_CHAN_APBSEQ        0x01c
0089 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8     (0 << 28)
0090 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16    (1 << 28)
0091 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32    (2 << 28)
0092 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64    (3 << 28)
0093 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128   (4 << 28)
0094 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP       BIT(27)
0095 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1     (1 << 16)
0096 
0097 /* Tegra148 specific registers */
0098 #define TEGRA_APBDMA_CHAN_WCOUNT        0x20
0099 
0100 #define TEGRA_APBDMA_CHAN_WORD_TRANSFER     0x24
0101 
0102 /*
0103  * If any burst is in flight and the DMA is paused, this is the time (in
0104  * microseconds) to complete the in-flight burst and update the DMA status register.
0105  */
0106 #define TEGRA_APBDMA_BURST_COMPLETE_TIME    20
0107 
0108 /* Channel base address offset from APBDMA base address */
0109 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET    0x1000
0110 
0111 #define TEGRA_APBDMA_SLAVE_ID_INVALID   (TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)
0112 
0113 struct tegra_dma;
0114 
0115 /*
0116  * tegra_dma_chip_data: Tegra chip-specific DMA data
0117  * @nr_channels: Number of channels available in the controller.
0118  * @channel_reg_size: Channel register size/stride.
0119  * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
0120  * @support_channel_pause: Support channel-wise pausing of DMA.
0121  * @support_separate_wcount_reg: Support separate word count register.
0122  */
0123 struct tegra_dma_chip_data {
0124     unsigned int nr_channels;
0125     unsigned int channel_reg_size;
0126     unsigned int max_dma_count;
0127     bool support_channel_pause;
0128     bool support_separate_wcount_reg;
0129 };
0130 
0131 /* DMA channel registers */
0132 struct tegra_dma_channel_regs {
0133     u32 csr;
0134     u32 ahb_ptr;
0135     u32 apb_ptr;
0136     u32 ahb_seq;
0137     u32 apb_seq;
0138     u32 wcount;
0139 };
0140 
0141 /*
0142  * tegra_dma_sg_req: DMA request details used to configure the hardware. This
0143  * contains the details of one transfer used to program the DMA hw.
0144  * The client's data transfer request can be broken into multiple
0145  * sub-transfers as per the requester details and hardware support.
0146  * Each sub-transfer is added to the list of transfers and points to the
0147  * Tegra DMA descriptor that manages the transfer details.
0148  */
0149 struct tegra_dma_sg_req {
0150     struct tegra_dma_channel_regs   ch_regs;
0151     unsigned int            req_len;
0152     bool                configured;
0153     bool                last_sg;
0154     struct list_head        node;
0155     struct tegra_dma_desc       *dma_desc;
0156     unsigned int            words_xferred;
0157 };
0158 
0159 /*
0160  * tegra_dma_desc: Tegra DMA descriptor that manages the client requests.
0161  * This descriptor keeps track of transfer status, callbacks, request
0162  * counts, etc.
0163  */
0164 struct tegra_dma_desc {
0165     struct dma_async_tx_descriptor  txd;
0166     unsigned int            bytes_requested;
0167     unsigned int            bytes_transferred;
0168     enum dma_status         dma_status;
0169     struct list_head        node;
0170     struct list_head        tx_list;
0171     struct list_head        cb_node;
0172     unsigned int            cb_count;
0173 };
0174 
0175 struct tegra_dma_channel;
0176 
0177 typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
0178                 bool to_terminate);
0179 
0180 /* tegra_dma_channel: Channel specific information */
0181 struct tegra_dma_channel {
0182     struct dma_chan     dma_chan;
0183     char            name[12];
0184     bool            config_init;
0185     unsigned int        id;
0186     void __iomem        *chan_addr;
0187     spinlock_t      lock;
0188     bool            busy;
0189     struct tegra_dma    *tdma;
0190     bool            cyclic;
0191 
0192     /* Different lists for managing the requests */
0193     struct list_head    free_sg_req;
0194     struct list_head    pending_sg_req;
0195     struct list_head    free_dma_desc;
0196     struct list_head    cb_desc;
0197 
0198     /* ISR handler and tasklet for bottom half of isr handling */
0199     dma_isr_handler     isr_handler;
0200     struct tasklet_struct   tasklet;
0201 
0202     /* Channel-slave specific configuration */
0203     unsigned int slave_id;
0204     struct dma_slave_config dma_sconfig;
0205     struct tegra_dma_channel_regs channel_reg;
0206 
0207     struct wait_queue_head wq;
0208 };
0209 
0210 /* tegra_dma: Tegra DMA specific information */
0211 struct tegra_dma {
0212     struct dma_device       dma_dev;
0213     struct device           *dev;
0214     struct clk          *dma_clk;
0215     struct reset_control        *rst;
0216     spinlock_t          global_lock;
0217     void __iomem            *base_addr;
0218     const struct tegra_dma_chip_data *chip_data;
0219 
0220     /*
0221      * Counter for managing global pausing of the DMA controller.
0222      * Only applicable for devices that don't support individual
0223      * channel pausing.
0224      */
0225     u32             global_pause_count;
0226 
0227     /* Last member of the structure */
0228     struct tegra_dma_channel channels[];
0229 };
0230 
0231 static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
0232 {
0233     writel(val, tdma->base_addr + reg);
0234 }
0235 
0236 static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
0237 {
0238     return readl(tdma->base_addr + reg);
0239 }
0240 
0241 static inline void tdc_write(struct tegra_dma_channel *tdc,
0242                  u32 reg, u32 val)
0243 {
0244     writel(val, tdc->chan_addr + reg);
0245 }
0246 
0247 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
0248 {
0249     return readl(tdc->chan_addr + reg);
0250 }
0251 
0252 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
0253 {
0254     return container_of(dc, struct tegra_dma_channel, dma_chan);
0255 }
0256 
0257 static inline struct tegra_dma_desc *
0258 txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
0259 {
0260     return container_of(td, struct tegra_dma_desc, txd);
0261 }
0262 
0263 static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
0264 {
0265     return &tdc->dma_chan.dev->device;
0266 }
0267 
0268 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
0269 
0270 /* Get a DMA desc from the free list; if none is there, allocate it. */
0271 static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
0272 {
0273     struct tegra_dma_desc *dma_desc;
0274     unsigned long flags;
0275 
0276     spin_lock_irqsave(&tdc->lock, flags);
0277 
0278     /* Do not reuse a desc that is still waiting for an ack */
0279     list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
0280         if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
0281             list_del(&dma_desc->node);
0282             spin_unlock_irqrestore(&tdc->lock, flags);
0283             dma_desc->txd.flags = 0;
0284             return dma_desc;
0285         }
0286     }
0287 
0288     spin_unlock_irqrestore(&tdc->lock, flags);
0289 
0290     /* Allocate DMA desc */
0291     dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
0292     if (!dma_desc)
0293         return NULL;
0294 
0295     dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
0296     dma_desc->txd.tx_submit = tegra_dma_tx_submit;
0297     dma_desc->txd.flags = 0;
0298 
0299     return dma_desc;
0300 }
0301 
0302 static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
0303                    struct tegra_dma_desc *dma_desc)
0304 {
0305     unsigned long flags;
0306 
0307     spin_lock_irqsave(&tdc->lock, flags);
0308     if (!list_empty(&dma_desc->tx_list))
0309         list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
0310     list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
0311     spin_unlock_irqrestore(&tdc->lock, flags);
0312 }
0313 
0314 static struct tegra_dma_sg_req *
0315 tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
0316 {
0317     struct tegra_dma_sg_req *sg_req;
0318     unsigned long flags;
0319 
0320     spin_lock_irqsave(&tdc->lock, flags);
0321     if (!list_empty(&tdc->free_sg_req)) {
0322         sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
0323                       node);
0324         list_del(&sg_req->node);
0325         spin_unlock_irqrestore(&tdc->lock, flags);
0326         return sg_req;
0327     }
0328     spin_unlock_irqrestore(&tdc->lock, flags);
0329 
0330     sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);
0331 
0332     return sg_req;
0333 }
0334 
0335 static int tegra_dma_slave_config(struct dma_chan *dc,
0336                   struct dma_slave_config *sconfig)
0337 {
0338     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
0339 
0340     if (!list_empty(&tdc->pending_sg_req)) {
0341         dev_err(tdc2dev(tdc), "Configuration not allowed\n");
0342         return -EBUSY;
0343     }
0344 
0345     memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
0346     tdc->config_init = true;
0347 
0348     return 0;
0349 }
0350 
0351 static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
0352                    bool wait_for_burst_complete)
0353 {
0354     struct tegra_dma *tdma = tdc->tdma;
0355 
0356     spin_lock(&tdma->global_lock);
0357 
0358     if (tdc->tdma->global_pause_count == 0) {
0359         tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
0360         if (wait_for_burst_complete)
0361             udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
0362     }
0363 
0364     tdc->tdma->global_pause_count++;
0365 
0366     spin_unlock(&tdma->global_lock);
0367 }
0368 
0369 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
0370 {
0371     struct tegra_dma *tdma = tdc->tdma;
0372 
0373     spin_lock(&tdma->global_lock);
0374 
0375     if (WARN_ON(tdc->tdma->global_pause_count == 0))
0376         goto out;
0377 
0378     if (--tdc->tdma->global_pause_count == 0)
0379         tdma_write(tdma, TEGRA_APBDMA_GENERAL,
0380                TEGRA_APBDMA_GENERAL_ENABLE);
0381 
0382 out:
0383     spin_unlock(&tdma->global_lock);
0384 }
0385 
0386 static void tegra_dma_pause(struct tegra_dma_channel *tdc,
0387                 bool wait_for_burst_complete)
0388 {
0389     struct tegra_dma *tdma = tdc->tdma;
0390 
0391     if (tdma->chip_data->support_channel_pause) {
0392         tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
0393               TEGRA_APBDMA_CHAN_CSRE_PAUSE);
0394         if (wait_for_burst_complete)
0395             udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
0396     } else {
0397         tegra_dma_global_pause(tdc, wait_for_burst_complete);
0398     }
0399 }
0400 
0401 static void tegra_dma_resume(struct tegra_dma_channel *tdc)
0402 {
0403     struct tegra_dma *tdma = tdc->tdma;
0404 
0405     if (tdma->chip_data->support_channel_pause)
0406         tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
0407     else
0408         tegra_dma_global_resume(tdc);
0409 }
0410 
0411 static void tegra_dma_stop(struct tegra_dma_channel *tdc)
0412 {
0413     u32 csr, status;
0414 
0415     /* Disable interrupts */
0416     csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
0417     csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
0418     tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
0419 
0420     /* Disable DMA */
0421     csr &= ~TEGRA_APBDMA_CSR_ENB;
0422     tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
0423 
0424     /* Clear interrupt status if it is there */
0425     status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
0426     if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
0427         dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
0428         tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
0429     }
0430     tdc->busy = false;
0431 }
0432 
0433 static void tegra_dma_start(struct tegra_dma_channel *tdc,
0434                 struct tegra_dma_sg_req *sg_req)
0435 {
0436     struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
0437 
0438     tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
0439     tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
0440     tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
0441     tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
0442     tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
0443     if (tdc->tdma->chip_data->support_separate_wcount_reg)
0444         tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
0445 
0446     /* Start DMA */
0447     tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
0448           ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
0449 }
0450 
0451 static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
0452                      struct tegra_dma_sg_req *nsg_req)
0453 {
0454     unsigned long status;
0455 
0456     /*
0457      * The DMA controller reloads the new configuration for the next
0458      * transfer after the last burst of the current transfer completes.
0459      * If there is no EOC interrupt status, this ensures that the last
0460      * burst has not completed yet. The last burst may be in flight and
0461      * may complete, but because the DMA is paused it will neither
0462      * generate an interrupt nor reload the new configuration.
0463      *
0464      * If the EOC interrupt status is already set, then the interrupt
0465      * handler needs to load the new configuration.
0466      */
0467     tegra_dma_pause(tdc, false);
0468     status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
0469 
0470     /*
0471      * If an interrupt is pending then do nothing, as the ISR will handle
0472      * the programming of the new request.
0473      */
0474     if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
0475         dev_err(tdc2dev(tdc),
0476             "Skipping new configuration as interrupt is pending\n");
0477         tegra_dma_resume(tdc);
0478         return;
0479     }
0480 
0481     /* Safe to program new configuration */
0482     tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
0483     tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
0484     if (tdc->tdma->chip_data->support_separate_wcount_reg)
0485         tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
0486               nsg_req->ch_regs.wcount);
0487     tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
0488           nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
0489     nsg_req->configured = true;
0490     nsg_req->words_xferred = 0;
0491 
0492     tegra_dma_resume(tdc);
0493 }
0494 
0495 static void tdc_start_head_req(struct tegra_dma_channel *tdc)
0496 {
0497     struct tegra_dma_sg_req *sg_req;
0498 
0499     sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
0500     tegra_dma_start(tdc, sg_req);
0501     sg_req->configured = true;
0502     sg_req->words_xferred = 0;
0503     tdc->busy = true;
0504 }
0505 
0506 static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
0507 {
0508     struct tegra_dma_sg_req *hsgreq, *hnsgreq;
0509 
0510     hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
0511     if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
0512         hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
0513                        node);
0514         tegra_dma_configure_for_next(tdc, hnsgreq);
0515     }
0516 }
0517 
0518 static inline unsigned int
0519 get_current_xferred_count(struct tegra_dma_channel *tdc,
0520               struct tegra_dma_sg_req *sg_req,
0521               unsigned long status)
0522 {
0523     return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
0524 }
0525 
0526 static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
0527 {
0528     struct tegra_dma_desc *dma_desc;
0529     struct tegra_dma_sg_req *sgreq;
0530 
0531     while (!list_empty(&tdc->pending_sg_req)) {
0532         sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
0533                      node);
0534         list_move_tail(&sgreq->node, &tdc->free_sg_req);
0535         if (sgreq->last_sg) {
0536             dma_desc = sgreq->dma_desc;
0537             dma_desc->dma_status = DMA_ERROR;
0538             list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
0539 
0540             /* Add to the cb list if it is not already there. */
0541             if (!dma_desc->cb_count)
0542                 list_add_tail(&dma_desc->cb_node,
0543                           &tdc->cb_desc);
0544             dma_desc->cb_count++;
0545         }
0546     }
0547     tdc->isr_handler = NULL;
0548 }
0549 
0550 static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
0551                        bool to_terminate)
0552 {
0553     struct tegra_dma_sg_req *hsgreq;
0554 
0555     /*
0556      * Check that the head request on the list is in flight.
0557      * If it is not in flight then abort the transfer, as
0558      * looping of the transfer cannot continue.
0559      */
0560     hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
0561     if (!hsgreq->configured) {
0562         tegra_dma_stop(tdc);
0563         pm_runtime_put(tdc->tdma->dev);
0564         dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
0565         tegra_dma_abort_all(tdc);
0566         return false;
0567     }
0568 
0569     /* Configure next request */
0570     if (!to_terminate)
0571         tdc_configure_next_head_desc(tdc);
0572 
0573     return true;
0574 }
0575 
0576 static void handle_once_dma_done(struct tegra_dma_channel *tdc,
0577                  bool to_terminate)
0578 {
0579     struct tegra_dma_desc *dma_desc;
0580     struct tegra_dma_sg_req *sgreq;
0581 
0582     tdc->busy = false;
0583     sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
0584     dma_desc = sgreq->dma_desc;
0585     dma_desc->bytes_transferred += sgreq->req_len;
0586 
0587     list_del(&sgreq->node);
0588     if (sgreq->last_sg) {
0589         dma_desc->dma_status = DMA_COMPLETE;
0590         dma_cookie_complete(&dma_desc->txd);
0591         if (!dma_desc->cb_count)
0592             list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
0593         dma_desc->cb_count++;
0594         list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
0595     }
0596     list_add_tail(&sgreq->node, &tdc->free_sg_req);
0597 
0598     /* Do not start DMA if it is going to be terminated */
0599     if (to_terminate)
0600         return;
0601 
0602     if (list_empty(&tdc->pending_sg_req)) {
0603         pm_runtime_put(tdc->tdma->dev);
0604         return;
0605     }
0606 
0607     tdc_start_head_req(tdc);
0608 }
0609 
0610 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
0611                         bool to_terminate)
0612 {
0613     struct tegra_dma_desc *dma_desc;
0614     struct tegra_dma_sg_req *sgreq;
0615     bool st;
0616 
0617     sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
0618     dma_desc = sgreq->dma_desc;
0619     /* If we DMA for long enough, the transfer count will wrap */
0620     dma_desc->bytes_transferred =
0621         (dma_desc->bytes_transferred + sgreq->req_len) %
0622         dma_desc->bytes_requested;
0623 
0624     /* Callback needs to be called */
0625     if (!dma_desc->cb_count)
0626         list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
0627     dma_desc->cb_count++;
0628 
0629     sgreq->words_xferred = 0;
0630 
0631     /* If not last req then put at end of pending list */
0632     if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
0633         list_move_tail(&sgreq->node, &tdc->pending_sg_req);
0634         sgreq->configured = false;
0635         st = handle_continuous_head_request(tdc, to_terminate);
0636         if (!st)
0637             dma_desc->dma_status = DMA_ERROR;
0638     }
0639 }
0640 
0641 static void tegra_dma_tasklet(struct tasklet_struct *t)
0642 {
0643     struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
0644     struct dmaengine_desc_callback cb;
0645     struct tegra_dma_desc *dma_desc;
0646     unsigned int cb_count;
0647     unsigned long flags;
0648 
0649     spin_lock_irqsave(&tdc->lock, flags);
0650     while (!list_empty(&tdc->cb_desc)) {
0651         dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
0652                         cb_node);
0653         list_del(&dma_desc->cb_node);
0654         dmaengine_desc_get_callback(&dma_desc->txd, &cb);
0655         cb_count = dma_desc->cb_count;
0656         dma_desc->cb_count = 0;
0657         trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
0658                         cb.callback);
0659         spin_unlock_irqrestore(&tdc->lock, flags);
0660         while (cb_count--)
0661             dmaengine_desc_callback_invoke(&cb, NULL);
0662         spin_lock_irqsave(&tdc->lock, flags);
0663     }
0664     spin_unlock_irqrestore(&tdc->lock, flags);
0665 }
0666 
0667 static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
0668 {
0669     struct tegra_dma_channel *tdc = dev_id;
0670     u32 status;
0671 
0672     spin_lock(&tdc->lock);
0673 
0674     trace_tegra_dma_isr(&tdc->dma_chan, irq);
0675     status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
0676     if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
0677         tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
0678         tdc->isr_handler(tdc, false);
0679         tasklet_schedule(&tdc->tasklet);
0680         wake_up_all(&tdc->wq);
0681         spin_unlock(&tdc->lock);
0682         return IRQ_HANDLED;
0683     }
0684 
0685     spin_unlock(&tdc->lock);
0686     dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
0687          status);
0688 
0689     return IRQ_NONE;
0690 }
0691 
0692 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
0693 {
0694     struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
0695     struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
0696     unsigned long flags;
0697     dma_cookie_t cookie;
0698 
0699     spin_lock_irqsave(&tdc->lock, flags);
0700     dma_desc->dma_status = DMA_IN_PROGRESS;
0701     cookie = dma_cookie_assign(&dma_desc->txd);
0702     list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
0703     spin_unlock_irqrestore(&tdc->lock, flags);
0704 
0705     return cookie;
0706 }
0707 
0708 static void tegra_dma_issue_pending(struct dma_chan *dc)
0709 {
0710     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
0711     unsigned long flags;
0712     int err;
0713 
0714     spin_lock_irqsave(&tdc->lock, flags);
0715     if (list_empty(&tdc->pending_sg_req)) {
0716         dev_err(tdc2dev(tdc), "No DMA request\n");
0717         goto end;
0718     }
0719     if (!tdc->busy) {
0720         err = pm_runtime_resume_and_get(tdc->tdma->dev);
0721         if (err < 0) {
0722             dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
0723             goto end;
0724         }
0725 
0726         tdc_start_head_req(tdc);
0727 
0728         /* Continuous single mode: Configure next req */
0729         if (tdc->cyclic) {
0730             /*
0731              * Wait for one burst time to configure the DMA for
0732              * the next transfer.
0733              */
0734             udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
0735             tdc_configure_next_head_desc(tdc);
0736         }
0737     }
0738 end:
0739     spin_unlock_irqrestore(&tdc->lock, flags);
0740 }
0741 
0742 static int tegra_dma_terminate_all(struct dma_chan *dc)
0743 {
0744     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
0745     struct tegra_dma_desc *dma_desc;
0746     struct tegra_dma_sg_req *sgreq;
0747     unsigned long flags;
0748     u32 status, wcount;
0749     bool was_busy;
0750 
0751     spin_lock_irqsave(&tdc->lock, flags);
0752 
0753     if (!tdc->busy)
0754         goto skip_dma_stop;
0755 
0756     /* Pause DMA before checking the queue status */
0757     tegra_dma_pause(tdc, true);
0758 
0759     status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
0760     if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
0761         dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
0762         tdc->isr_handler(tdc, true);
0763         status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
0764     }
0765     if (tdc->tdma->chip_data->support_separate_wcount_reg)
0766         wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
0767     else
0768         wcount = status;
0769 
0770     was_busy = tdc->busy;
0771     tegra_dma_stop(tdc);
0772 
0773     if (!list_empty(&tdc->pending_sg_req) && was_busy) {
0774         sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
0775                      node);
0776         sgreq->dma_desc->bytes_transferred +=
0777                 get_current_xferred_count(tdc, sgreq, wcount);
0778     }
0779     tegra_dma_resume(tdc);
0780 
0781     pm_runtime_put(tdc->tdma->dev);
0782     wake_up_all(&tdc->wq);
0783 
0784 skip_dma_stop:
0785     tegra_dma_abort_all(tdc);
0786 
0787     while (!list_empty(&tdc->cb_desc)) {
0788         dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
0789                         cb_node);
0790         list_del(&dma_desc->cb_node);
0791         dma_desc->cb_count = 0;
0792     }
0793     spin_unlock_irqrestore(&tdc->lock, flags);
0794 
0795     return 0;
0796 }
0797 
0798 static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
0799 {
0800     unsigned long flags;
0801     u32 status;
0802 
0803     spin_lock_irqsave(&tdc->lock, flags);
0804     status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
0805     spin_unlock_irqrestore(&tdc->lock, flags);
0806 
0807     return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
0808 }
0809 
0810 static void tegra_dma_synchronize(struct dma_chan *dc)
0811 {
0812     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
0813     int err;
0814 
0815     err = pm_runtime_resume_and_get(tdc->tdma->dev);
0816     if (err < 0) {
0817         dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
0818         return;
0819     }
0820 
0821     /*
0822      * The CPU handling the interrupt could be busy in an
0823      * uninterruptible state; in this case a sibling CPU
0824      * should wait until the interrupt is handled.
0825      */
0826     wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
0827 
0828     tasklet_kill(&tdc->tasklet);
0829 
0830     pm_runtime_put(tdc->tdma->dev);
0831 }
0832 
0833 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
0834                            struct tegra_dma_sg_req *sg_req)
0835 {
0836     u32 status, wcount = 0;
0837 
0838     if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
0839         return 0;
0840 
0841     if (tdc->tdma->chip_data->support_separate_wcount_reg)
0842         wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
0843 
0844     status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
0845 
0846     if (!tdc->tdma->chip_data->support_separate_wcount_reg)
0847         wcount = status;
0848 
0849     if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
0850         return sg_req->req_len;
0851 
0852     wcount = get_current_xferred_count(tdc, sg_req, wcount);
0853 
0854     if (!wcount) {
0855         /*
0856          * If wcount wasn't ever polled for this SG before, then
0857          * simply assume that transfer hasn't started yet.
0858          *
0859          * Otherwise it's the end of the transfer.
0860          *
0861          * The alternative would be to poll the status register
0862          * until the EOC bit is set or wcount goes up. That is
0863          * because the EOC bit is set only after the last burst
0864          * completes and the counter is less than the actual
0865          * transfer size by 4 bytes. The counter value wraps around
0866          * in cyclic mode before EOC is set(!), so we can't easily
0867          * distinguish the start of a transfer from its end.
0868          */
0869         if (sg_req->words_xferred)
0870             wcount = sg_req->req_len - 4;
0871 
0872     } else if (wcount < sg_req->words_xferred) {
0873         /*
0874          * This case will never happen for a non-cyclic transfer.
0875          *
0876          * For a cyclic transfer, although it is possible for the
0877          * next transfer to have already started (resetting the word
0878          * count), this case should still not happen because we should
0879          * have detected that the EOC bit is set and hence the transfer
0880          * was completed.
0881          */
0882         WARN_ON_ONCE(1);
0883 
0884         wcount = sg_req->req_len - 4;
0885     } else {
0886         sg_req->words_xferred = wcount;
0887     }
0888 
0889     return wcount;
0890 }
0891 
0892 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
0893                        dma_cookie_t cookie,
0894                        struct dma_tx_state *txstate)
0895 {
0896     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
0897     struct tegra_dma_desc *dma_desc;
0898     struct tegra_dma_sg_req *sg_req;
0899     enum dma_status ret;
0900     unsigned long flags;
0901     unsigned int residual;
0902     unsigned int bytes = 0;
0903 
0904     ret = dma_cookie_status(dc, cookie, txstate);
0905     if (ret == DMA_COMPLETE)
0906         return ret;
0907 
0908     spin_lock_irqsave(&tdc->lock, flags);
0909 
0910     /* Check on wait_ack desc status */
0911     list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
0912         if (dma_desc->txd.cookie == cookie) {
0913             ret = dma_desc->dma_status;
0914             goto found;
0915         }
0916     }
0917 
0918     /* Check in pending list */
0919     list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
0920         dma_desc = sg_req->dma_desc;
0921         if (dma_desc->txd.cookie == cookie) {
0922             bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
0923             ret = dma_desc->dma_status;
0924             goto found;
0925         }
0926     }
0927 
0928     dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
0929     dma_desc = NULL;
0930 
0931 found:
0932     if (dma_desc && txstate) {
0933         residual = dma_desc->bytes_requested -
0934                ((dma_desc->bytes_transferred + bytes) %
0935                 dma_desc->bytes_requested);
0936         dma_set_residue(txstate, residual);
0937     }
0938 
0939     trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
0940     spin_unlock_irqrestore(&tdc->lock, flags);
0941 
0942     return ret;
0943 }
0944 
0945 static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
0946                      enum dma_slave_buswidth slave_bw)
0947 {
0948     switch (slave_bw) {
0949     case DMA_SLAVE_BUSWIDTH_1_BYTE:
0950         return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
0951     case DMA_SLAVE_BUSWIDTH_2_BYTES:
0952         return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
0953     case DMA_SLAVE_BUSWIDTH_4_BYTES:
0954         return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
0955     case DMA_SLAVE_BUSWIDTH_8_BYTES:
0956         return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
0957     default:
0958         dev_warn(tdc2dev(tdc),
0959              "slave bw is not supported, using 32bits\n");
0960         return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
0961     }
0962 }
0963 
0964 static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
0965                       u32 burst_size,
0966                       enum dma_slave_buswidth slave_bw,
0967                       u32 len)
0968 {
0969     unsigned int burst_byte, burst_ahb_width;
0970 
0971     /*
0972      * The burst_size from the client is in terms of the bus_width.
0973      * Convert it into the AHB memory width, which is 4 bytes.
0974      */
0975     burst_byte = burst_size * slave_bw;
0976     burst_ahb_width = burst_byte / 4;
0977 
0978     /* If burst size is 0 then calculate the burst size based on length */
0979     if (!burst_ahb_width) {
0980         if (len & 0xF)
0981             return TEGRA_APBDMA_AHBSEQ_BURST_1;
0982         else if ((len >> 4) & 0x1)
0983             return TEGRA_APBDMA_AHBSEQ_BURST_4;
0984         else
0985             return TEGRA_APBDMA_AHBSEQ_BURST_8;
0986     }
0987     if (burst_ahb_width < 4)
0988         return TEGRA_APBDMA_AHBSEQ_BURST_1;
0989     else if (burst_ahb_width < 8)
0990         return TEGRA_APBDMA_AHBSEQ_BURST_4;
0991     else
0992         return TEGRA_APBDMA_AHBSEQ_BURST_8;
0993 }
0994 
0995 static int get_transfer_param(struct tegra_dma_channel *tdc,
0996                   enum dma_transfer_direction direction,
0997                   u32 *apb_addr,
0998                   u32 *apb_seq,
0999                   u32 *csr,
1000                   unsigned int *burst_size,
1001                   enum dma_slave_buswidth *slave_bw)
1002 {
1003     switch (direction) {
1004     case DMA_MEM_TO_DEV:
1005         *apb_addr = tdc->dma_sconfig.dst_addr;
1006         *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
1007         *burst_size = tdc->dma_sconfig.dst_maxburst;
1008         *slave_bw = tdc->dma_sconfig.dst_addr_width;
1009         *csr = TEGRA_APBDMA_CSR_DIR;
1010         return 0;
1011 
1012     case DMA_DEV_TO_MEM:
1013         *apb_addr = tdc->dma_sconfig.src_addr;
1014         *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
1015         *burst_size = tdc->dma_sconfig.src_maxburst;
1016         *slave_bw = tdc->dma_sconfig.src_addr_width;
1017         *csr = 0;
1018         return 0;
1019 
1020     default:
1021         dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
1022         break;
1023     }
1024 
1025     return -EINVAL;
1026 }
1027 
1028 static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
1029                   struct tegra_dma_channel_regs *ch_regs,
1030                   u32 len)
1031 {
1032     u32 len_field = (len - 4) & 0xFFFC;
1033 
1034     if (tdc->tdma->chip_data->support_separate_wcount_reg)
1035         ch_regs->wcount = len_field;
1036     else
1037         ch_regs->csr |= len_field;
1038 }
1039 
1040 static struct dma_async_tx_descriptor *
1041 tegra_dma_prep_slave_sg(struct dma_chan *dc,
1042             struct scatterlist *sgl,
1043             unsigned int sg_len,
1044             enum dma_transfer_direction direction,
1045             unsigned long flags,
1046             void *context)
1047 {
1048     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1049     struct tegra_dma_sg_req *sg_req = NULL;
1050     u32 csr, ahb_seq, apb_ptr, apb_seq;
1051     enum dma_slave_buswidth slave_bw;
1052     struct tegra_dma_desc *dma_desc;
1053     struct list_head req_list;
1054     struct scatterlist *sg;
1055     unsigned int burst_size;
1056     unsigned int i;
1057 
1058     if (!tdc->config_init) {
1059         dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
1060         return NULL;
1061     }
1062     if (sg_len < 1) {
1063         dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
1064         return NULL;
1065     }
1066 
1067     if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1068                    &burst_size, &slave_bw) < 0)
1069         return NULL;
1070 
1071     INIT_LIST_HEAD(&req_list);
1072 
1073     ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
1074     ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
1075                     TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1076     ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1077 
1078     csr |= TEGRA_APBDMA_CSR_ONCE;
1079 
1080     if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1081         csr |= TEGRA_APBDMA_CSR_FLOW;
1082         csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1083     }
1084 
1085     if (flags & DMA_PREP_INTERRUPT) {
1086         csr |= TEGRA_APBDMA_CSR_IE_EOC;
1087     } else {
1088         WARN_ON_ONCE(1);
1089         return NULL;
1090     }
1091 
1092     apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1093 
1094     dma_desc = tegra_dma_desc_get(tdc);
1095     if (!dma_desc) {
1096         dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
1097         return NULL;
1098     }
1099     INIT_LIST_HEAD(&dma_desc->tx_list);
1100     INIT_LIST_HEAD(&dma_desc->cb_node);
1101     dma_desc->cb_count = 0;
1102     dma_desc->bytes_requested = 0;
1103     dma_desc->bytes_transferred = 0;
1104     dma_desc->dma_status = DMA_IN_PROGRESS;
1105 
1106     /* Make transfer requests */
1107     for_each_sg(sgl, sg, sg_len, i) {
1108         u32 len, mem;
1109 
1110         mem = sg_dma_address(sg);
1111         len = sg_dma_len(sg);
1112 
1113         if ((len & 3) || (mem & 3) ||
1114             len > tdc->tdma->chip_data->max_dma_count) {
1115             dev_err(tdc2dev(tdc),
1116                 "DMA length/memory address is not supported\n");
1117             tegra_dma_desc_put(tdc, dma_desc);
1118             return NULL;
1119         }
1120 
1121         sg_req = tegra_dma_sg_req_get(tdc);
1122         if (!sg_req) {
1123             dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1124             tegra_dma_desc_put(tdc, dma_desc);
1125             return NULL;
1126         }
1127 
1128         ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1129         dma_desc->bytes_requested += len;
1130 
1131         sg_req->ch_regs.apb_ptr = apb_ptr;
1132         sg_req->ch_regs.ahb_ptr = mem;
1133         sg_req->ch_regs.csr = csr;
1134         tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1135         sg_req->ch_regs.apb_seq = apb_seq;
1136         sg_req->ch_regs.ahb_seq = ahb_seq;
1137         sg_req->configured = false;
1138         sg_req->last_sg = false;
1139         sg_req->dma_desc = dma_desc;
1140         sg_req->req_len = len;
1141 
1142         list_add_tail(&sg_req->node, &dma_desc->tx_list);
1143     }
1144     sg_req->last_sg = true;
1145     if (flags & DMA_CTRL_ACK)
1146         dma_desc->txd.flags = DMA_CTRL_ACK;
1147 
1148     /*
1149      * Make sure that the mode does not conflict with the currently
1150      * configured mode.
1151      */
1152     if (!tdc->isr_handler) {
1153         tdc->isr_handler = handle_once_dma_done;
1154         tdc->cyclic = false;
1155     } else {
1156         if (tdc->cyclic) {
1157             dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
1158             tegra_dma_desc_put(tdc, dma_desc);
1159             return NULL;
1160         }
1161     }
1162 
1163     return &dma_desc->txd;
1164 }
1165 
1166 static struct dma_async_tx_descriptor *
1167 tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
1168               size_t buf_len,
1169               size_t period_len,
1170               enum dma_transfer_direction direction,
1171               unsigned long flags)
1172 {
1173     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1174     struct tegra_dma_sg_req *sg_req = NULL;
1175     u32 csr, ahb_seq, apb_ptr, apb_seq;
1176     enum dma_slave_buswidth slave_bw;
1177     struct tegra_dma_desc *dma_desc;
1178     dma_addr_t mem = buf_addr;
1179     unsigned int burst_size;
1180     size_t len, remain_len;
1181 
1182     if (!buf_len || !period_len) {
1183         dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1184         return NULL;
1185     }
1186 
1187     if (!tdc->config_init) {
1188         dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1189         return NULL;
1190     }
1191 
1192     /*
1193      * We allow more requests to be queued as long as the DMA has
1194      * not started. The driver will loop over all requests.
1195      * Once the DMA has started, new requests can be queued only after
1196      * terminating the DMA.
1197      */
1198     if (tdc->busy) {
1199         dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
1200         return NULL;
1201     }
1202 
1203     /*
1204      * We only support cyclic transfers when buf_len is a multiple of
1205      * period_len.
1206      */
1207     if (buf_len % period_len) {
1208         dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1209         return NULL;
1210     }
1211 
1212     len = period_len;
1213     if ((len & 3) || (buf_addr & 3) ||
1214         len > tdc->tdma->chip_data->max_dma_count) {
1215         dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1216         return NULL;
1217     }
1218 
1219     if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1220                    &burst_size, &slave_bw) < 0)
1221         return NULL;
1222 
1223     ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
1224     ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
1225                     TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1226     ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1227 
1228     if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1229         csr |= TEGRA_APBDMA_CSR_FLOW;
1230         csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1231     }
1232 
1233     if (flags & DMA_PREP_INTERRUPT) {
1234         csr |= TEGRA_APBDMA_CSR_IE_EOC;
1235     } else {
1236         WARN_ON_ONCE(1);
1237         return NULL;
1238     }
1239 
1240     apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1241 
1242     dma_desc = tegra_dma_desc_get(tdc);
1243     if (!dma_desc) {
1244         dev_err(tdc2dev(tdc), "not enough descriptors available\n");
1245         return NULL;
1246     }
1247 
1248     INIT_LIST_HEAD(&dma_desc->tx_list);
1249     INIT_LIST_HEAD(&dma_desc->cb_node);
1250     dma_desc->cb_count = 0;
1251 
1252     dma_desc->bytes_transferred = 0;
1253     dma_desc->bytes_requested = buf_len;
1254     remain_len = buf_len;
1255 
1256     /* Split the transfer into chunks equal to the period size */
1257     while (remain_len) {
1258         sg_req = tegra_dma_sg_req_get(tdc);
1259         if (!sg_req) {
1260             dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1261             tegra_dma_desc_put(tdc, dma_desc);
1262             return NULL;
1263         }
1264 
1265         ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1266         sg_req->ch_regs.apb_ptr = apb_ptr;
1267         sg_req->ch_regs.ahb_ptr = mem;
1268         sg_req->ch_regs.csr = csr;
1269         tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1270         sg_req->ch_regs.apb_seq = apb_seq;
1271         sg_req->ch_regs.ahb_seq = ahb_seq;
1272         sg_req->configured = false;
1273         sg_req->last_sg = false;
1274         sg_req->dma_desc = dma_desc;
1275         sg_req->req_len = len;
1276 
1277         list_add_tail(&sg_req->node, &dma_desc->tx_list);
1278         remain_len -= len;
1279         mem += len;
1280     }
1281     sg_req->last_sg = true;
1282     if (flags & DMA_CTRL_ACK)
1283         dma_desc->txd.flags = DMA_CTRL_ACK;
1284 
1285     /*
1286      * Make sure that the mode does not conflict with the currently
1287      * configured mode.
1288      */
1289     if (!tdc->isr_handler) {
1290         tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
1291         tdc->cyclic = true;
1292     } else {
1293         if (!tdc->cyclic) {
1294             dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
1295             tegra_dma_desc_put(tdc, dma_desc);
1296             return NULL;
1297         }
1298     }
1299 
1300     return &dma_desc->txd;
1301 }
1302 
1303 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1304 {
1305     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1306 
1307     dma_cookie_init(&tdc->dma_chan);
1308 
1309     return 0;
1310 }
1311 
1312 static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1313 {
1314     struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1315     struct tegra_dma_desc *dma_desc;
1316     struct tegra_dma_sg_req *sg_req;
1317     struct list_head dma_desc_list;
1318     struct list_head sg_req_list;
1319 
1320     INIT_LIST_HEAD(&dma_desc_list);
1321     INIT_LIST_HEAD(&sg_req_list);
1322 
1323     dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1324 
1325     tegra_dma_terminate_all(dc);
1326     tasklet_kill(&tdc->tasklet);
1327 
1328     list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1329     list_splice_init(&tdc->free_sg_req, &sg_req_list);
1330     list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1331     INIT_LIST_HEAD(&tdc->cb_desc);
1332     tdc->config_init = false;
1333     tdc->isr_handler = NULL;
1334 
1335     while (!list_empty(&dma_desc_list)) {
1336         dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
1337                         node);
1338         list_del(&dma_desc->node);
1339         kfree(dma_desc);
1340     }
1341 
1342     while (!list_empty(&sg_req_list)) {
1343         sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
1344         list_del(&sg_req->node);
1345         kfree(sg_req);
1346     }
1347 
1348     tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1349 }
1350 
1351 static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
1352                        struct of_dma *ofdma)
1353 {
1354     struct tegra_dma *tdma = ofdma->of_dma_data;
1355     struct tegra_dma_channel *tdc;
1356     struct dma_chan *chan;
1357 
1358     if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
1359         dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
1360         return NULL;
1361     }
1362 
1363     chan = dma_get_any_slave_channel(&tdma->dma_dev);
1364     if (!chan)
1365         return NULL;
1366 
1367     tdc = to_tegra_dma_chan(chan);
1368     tdc->slave_id = dma_spec->args[0];
1369 
1370     return chan;
1371 }
1372 
1373 /* Tegra20 specific DMA controller information */
1374 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
1375     .nr_channels        = 16,
1376     .channel_reg_size   = 0x20,
1377     .max_dma_count      = 1024UL * 64,
1378     .support_channel_pause  = false,
1379     .support_separate_wcount_reg = false,
1380 };
1381 
1382 /* Tegra30 specific DMA controller information */
1383 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
1384     .nr_channels        = 32,
1385     .channel_reg_size   = 0x20,
1386     .max_dma_count      = 1024UL * 64,
1387     .support_channel_pause  = false,
1388     .support_separate_wcount_reg = false,
1389 };
1390 
1391 /* Tegra114 specific DMA controller information */
1392 static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
1393     .nr_channels        = 32,
1394     .channel_reg_size   = 0x20,
1395     .max_dma_count      = 1024UL * 64,
1396     .support_channel_pause  = true,
1397     .support_separate_wcount_reg = false,
1398 };
1399 
1400 /* Tegra148 specific DMA controller information */
1401 static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
1402     .nr_channels        = 32,
1403     .channel_reg_size   = 0x40,
1404     .max_dma_count      = 1024UL * 64,
1405     .support_channel_pause  = true,
1406     .support_separate_wcount_reg = true,
1407 };
1408 
1409 static int tegra_dma_init_hw(struct tegra_dma *tdma)
1410 {
1411     int err;
1412 
1413     err = reset_control_assert(tdma->rst);
1414     if (err) {
1415         dev_err(tdma->dev, "failed to assert reset: %d\n", err);
1416         return err;
1417     }
1418 
1419     err = clk_enable(tdma->dma_clk);
1420     if (err) {
1421         dev_err(tdma->dev, "failed to enable clk: %d\n", err);
1422         return err;
1423     }
1424 
1425     /* reset DMA controller */
1426     udelay(2);
1427     reset_control_deassert(tdma->rst);
1428 
1429     /* enable global DMA registers */
1430     tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
1431     tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1432     tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);
1433 
1434     clk_disable(tdma->dma_clk);
1435 
1436     return 0;
1437 }
1438 
1439 static int tegra_dma_probe(struct platform_device *pdev)
1440 {
1441     const struct tegra_dma_chip_data *cdata;
1442     struct tegra_dma *tdma;
1443     unsigned int i;
1444     size_t size;
1445     int ret;
1446 
1447     cdata = of_device_get_match_data(&pdev->dev);
1448     size = struct_size(tdma, channels, cdata->nr_channels);
1449 
1450     tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1451     if (!tdma)
1452         return -ENOMEM;
1453 
1454     tdma->dev = &pdev->dev;
1455     tdma->chip_data = cdata;
1456     platform_set_drvdata(pdev, tdma);
1457 
1458     tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
1459     if (IS_ERR(tdma->base_addr))
1460         return PTR_ERR(tdma->base_addr);
1461 
1462     tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
1463     if (IS_ERR(tdma->dma_clk)) {
1464         dev_err(&pdev->dev, "Error: Missing controller clock\n");
1465         return PTR_ERR(tdma->dma_clk);
1466     }
1467 
1468     tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
1469     if (IS_ERR(tdma->rst)) {
1470         dev_err(&pdev->dev, "Error: Missing reset\n");
1471         return PTR_ERR(tdma->rst);
1472     }
1473 
1474     spin_lock_init(&tdma->global_lock);
1475 
1476     ret = clk_prepare(tdma->dma_clk);
1477     if (ret)
1478         return ret;
1479 
1480     ret = tegra_dma_init_hw(tdma);
1481     if (ret)
1482         goto err_clk_unprepare;
1483 
1484     pm_runtime_irq_safe(&pdev->dev);
1485     pm_runtime_enable(&pdev->dev);
1486 
1487     INIT_LIST_HEAD(&tdma->dma_dev.channels);
1488     for (i = 0; i < cdata->nr_channels; i++) {
1489         struct tegra_dma_channel *tdc = &tdma->channels[i];
1490         int irq;
1491 
1492         tdc->chan_addr = tdma->base_addr +
1493                  TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
1494                  (i * cdata->channel_reg_size);
1495 
1496         irq = platform_get_irq(pdev, i);
1497         if (irq < 0) {
1498             ret = irq;
1499             goto err_pm_disable;
1500         }
1501 
1502         snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
1503         ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
1504                        tdc->name, tdc);
1505         if (ret) {
1506             dev_err(&pdev->dev,
1507                 "request_irq failed with err %d channel %d\n",
1508                 ret, i);
1509             goto err_pm_disable;
1510         }
1511 
1512         tdc->dma_chan.device = &tdma->dma_dev;
1513         dma_cookie_init(&tdc->dma_chan);
1514         list_add_tail(&tdc->dma_chan.device_node,
1515                   &tdma->dma_dev.channels);
1516         tdc->tdma = tdma;
1517         tdc->id = i;
1518         tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1519 
1520         tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
1521         spin_lock_init(&tdc->lock);
1522         init_waitqueue_head(&tdc->wq);
1523 
1524         INIT_LIST_HEAD(&tdc->pending_sg_req);
1525         INIT_LIST_HEAD(&tdc->free_sg_req);
1526         INIT_LIST_HEAD(&tdc->free_dma_desc);
1527         INIT_LIST_HEAD(&tdc->cb_desc);
1528     }
1529 
1530     dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
1531     dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
1532     dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
1533 
1534     tdma->global_pause_count = 0;
1535     tdma->dma_dev.dev = &pdev->dev;
1536     tdma->dma_dev.device_alloc_chan_resources =
1537                     tegra_dma_alloc_chan_resources;
1538     tdma->dma_dev.device_free_chan_resources =
1539                     tegra_dma_free_chan_resources;
1540     tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1541     tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1542     tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1543         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1544         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1545         BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1546     tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1547         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1548         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1549         BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1550     tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1551     tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1552     tdma->dma_dev.device_config = tegra_dma_slave_config;
1553     tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
1554     tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
1555     tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1556     tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1557 
1558     ret = dma_async_device_register(&tdma->dma_dev);
1559     if (ret < 0) {
1560         dev_err(&pdev->dev,
1561             "Tegra20 APB DMA driver registration failed %d\n", ret);
1562         goto err_pm_disable;
1563     }
1564 
1565     ret = of_dma_controller_register(pdev->dev.of_node,
1566                      tegra_dma_of_xlate, tdma);
1567     if (ret < 0) {
1568         dev_err(&pdev->dev,
1569             "Tegra20 APB DMA OF registration failed %d\n", ret);
1570         goto err_unregister_dma_dev;
1571     }
1572 
1573     dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
1574          cdata->nr_channels);
1575 
1576     return 0;
1577 
1578 err_unregister_dma_dev:
1579     dma_async_device_unregister(&tdma->dma_dev);
1580 
1581 err_pm_disable:
1582     pm_runtime_disable(&pdev->dev);
1583 
1584 err_clk_unprepare:
1585     clk_unprepare(tdma->dma_clk);
1586 
1587     return ret;
1588 }
1589 
1590 static int tegra_dma_remove(struct platform_device *pdev)
1591 {
1592     struct tegra_dma *tdma = platform_get_drvdata(pdev);
1593 
1594     of_dma_controller_free(pdev->dev.of_node);
1595     dma_async_device_unregister(&tdma->dma_dev);
1596     pm_runtime_disable(&pdev->dev);
1597     clk_unprepare(tdma->dma_clk);
1598 
1599     return 0;
1600 }
1601 
1602 static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
1603 {
1604     struct tegra_dma *tdma = dev_get_drvdata(dev);
1605 
1606     clk_disable(tdma->dma_clk);
1607 
1608     return 0;
1609 }
1610 
1611 static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
1612 {
1613     struct tegra_dma *tdma = dev_get_drvdata(dev);
1614 
1615     return clk_enable(tdma->dma_clk);
1616 }
1617 
1618 static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
1619 {
1620     struct tegra_dma *tdma = dev_get_drvdata(dev);
1621     unsigned long flags;
1622     unsigned int i;
1623     bool busy;
1624 
1625     for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1626         struct tegra_dma_channel *tdc = &tdma->channels[i];
1627 
1628         tasklet_kill(&tdc->tasklet);
1629 
1630         spin_lock_irqsave(&tdc->lock, flags);
1631         busy = tdc->busy;
1632         spin_unlock_irqrestore(&tdc->lock, flags);
1633 
1634         if (busy) {
1635             dev_err(tdma->dev, "channel %u busy\n", i);
1636             return -EBUSY;
1637         }
1638     }
1639 
1640     return pm_runtime_force_suspend(dev);
1641 }
1642 
1643 static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
1644 {
1645     struct tegra_dma *tdma = dev_get_drvdata(dev);
1646     int err;
1647 
1648     err = tegra_dma_init_hw(tdma);
1649     if (err)
1650         return err;
1651 
1652     return pm_runtime_force_resume(dev);
1653 }
1654 
1655 static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
1656     SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
1657                NULL)
1658     SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
1659 };
1660 
1661 static const struct of_device_id tegra_dma_of_match[] = {
1662     {
1663         .compatible = "nvidia,tegra148-apbdma",
1664         .data = &tegra148_dma_chip_data,
1665     }, {
1666         .compatible = "nvidia,tegra114-apbdma",
1667         .data = &tegra114_dma_chip_data,
1668     }, {
1669         .compatible = "nvidia,tegra30-apbdma",
1670         .data = &tegra30_dma_chip_data,
1671     }, {
1672         .compatible = "nvidia,tegra20-apbdma",
1673         .data = &tegra20_dma_chip_data,
1674     }, {
1675     },
1676 };
1677 MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
1678 
1679 static struct platform_driver tegra_dmac_driver = {
1680     .driver = {
1681         .name   = "tegra-apbdma",
1682         .pm = &tegra_dma_dev_pm_ops,
1683         .of_match_table = tegra_dma_of_match,
1684     },
1685     .probe      = tegra_dma_probe,
1686     .remove     = tegra_dma_remove,
1687 };
1688 
1689 module_platform_driver(tegra_dmac_driver);
1690 
1691 MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
1692 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1693 MODULE_LICENSE("GPL v2");