Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Core driver for the Synopsys DesignWare DMA Controller
0004  *
0005  * Copyright (C) 2007-2008 Atmel Corporation
0006  * Copyright (C) 2010-2011 ST Microelectronics
0007  * Copyright (C) 2013 Intel Corporation
0008  */
0009 
0010 #include <linux/bitops.h>
0011 #include <linux/delay.h>
0012 #include <linux/dmaengine.h>
0013 #include <linux/dma-mapping.h>
0014 #include <linux/dmapool.h>
0015 #include <linux/err.h>
0016 #include <linux/init.h>
0017 #include <linux/interrupt.h>
0018 #include <linux/io.h>
0019 #include <linux/mm.h>
0020 #include <linux/module.h>
0021 #include <linux/slab.h>
0022 #include <linux/pm_runtime.h>
0023 
0024 #include "../dmaengine.h"
0025 #include "internal.h"
0026 
0027 /*
0028  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
0029  * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
0030  * of which use ARM any more).  See the "Databook" from Synopsys for
0031  * information beyond what licensees probably provide.
0032  */
0033 
/*
 * The set of bus widths supported by the DMA controller: undefined plus
 * 8-, 16- and 32-bit slave transfers.
 */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
0040 
0041 /*----------------------------------------------------------------------*/
0042 
/* Map a dmaengine channel to the struct device used for diagnostics */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
0047 
/*
 * First descriptor on the active list. Callers must know the list is
 * non-empty; on an empty list this would return the list head itself.
 */
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}
0052 
0053 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
0054 {
0055     struct dw_desc      *desc = txd_to_dw_desc(tx);
0056     struct dw_dma_chan  *dwc = to_dw_dma_chan(tx->chan);
0057     dma_cookie_t        cookie;
0058     unsigned long       flags;
0059 
0060     spin_lock_irqsave(&dwc->lock, flags);
0061     cookie = dma_cookie_assign(tx);
0062 
0063     /*
0064      * REVISIT: We should attempt to chain as many descriptors as
0065      * possible, perhaps even appending to those already submitted
0066      * for DMA. But this is hard to do in a race-free manner.
0067      */
0068 
0069     list_add_tail(&desc->desc_node, &dwc->queue);
0070     spin_unlock_irqrestore(&dwc->lock, flags);
0071     dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
0072          __func__, desc->txd.cookie);
0073 
0074     return cookie;
0075 }
0076 
0077 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
0078 {
0079     struct dw_dma *dw = to_dw_dma(dwc->chan.device);
0080     struct dw_desc *desc;
0081     dma_addr_t phys;
0082 
0083     desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
0084     if (!desc)
0085         return NULL;
0086 
0087     dwc->descs_allocated++;
0088     INIT_LIST_HEAD(&desc->tx_list);
0089     dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
0090     desc->txd.tx_submit = dwc_tx_submit;
0091     desc->txd.flags = DMA_CTRL_ACK;
0092     desc->txd.phys = phys;
0093     return desc;
0094 }
0095 
0096 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
0097 {
0098     struct dw_dma *dw = to_dw_dma(dwc->chan.device);
0099     struct dw_desc *child, *_next;
0100 
0101     if (unlikely(!desc))
0102         return;
0103 
0104     list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
0105         list_del(&child->desc_node);
0106         dma_pool_free(dw->desc_pool, child, child->txd.phys);
0107         dwc->descs_allocated--;
0108     }
0109 
0110     dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
0111     dwc->descs_allocated--;
0112 }
0113 
/*
 * Program the channel's static configuration via the controller-specific
 * hook, then unmask its transfer-complete and error interrupts.
 */
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);
}
0124 
0125 /*----------------------------------------------------------------------*/
0126 
/* Dump the channel's hardware registers when reporting a BUG condition */
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}
0137 
/*
 * Clear the channel enable bit and busy-wait until the hardware reports
 * the channel disabled (CH_EN reads back clear).
 */
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}
0144 
0145 /*----------------------------------------------------------------------*/
0146 
/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	/* Program the block's addresses and control words, then enable */
	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
0169 
/*
 * Start transfer of @first on the channel.
 *
 * For controllers without hardware LLP support (dwc->nollp) multi-block
 * transfers are emulated in software: only the first block is programmed
 * here, and the transfer-complete interrupt path submits the following
 * blocks one by one (see dwc_do_single_block()/dwc_scan_descriptors()).
 *
 * Called with dwc->lock held and bh disabled.
 */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		/* Soft LLP bookkeeping: whole length outstanding, start at head */
		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	/* Hardware LLP: point the channel at the first LLI and enable it */
	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
0215 
0216 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
0217 {
0218     struct dw_desc *desc;
0219 
0220     if (list_empty(&dwc->queue))
0221         return;
0222 
0223     list_move(dwc->queue.next, &dwc->active_list);
0224     desc = dwc_first_active(dwc);
0225     dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
0226     dwc_dostart(dwc, desc);
0227 }
0228 
0229 /*----------------------------------------------------------------------*/
0230 
/*
 * Complete @desc: mark its cookie complete, ack the descriptor and its
 * children and return them to the pool. When @callback_required, the
 * client callback captured from the txd is invoked after the channel
 * lock has been dropped.
 */
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;
	struct dmaengine_desc_callback	cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	/* Capture the callback now; an empty cb means "don't call" */
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}
0258 
/*
 * Complete every descriptor on the active list. Called when the raw XFER
 * status says all submitted work has finished; the first queued
 * descriptor (if any) is restarted before the completion callbacks run
 * outside the lock.
 */
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
0286 
0287 /* Returns how many bytes were already received from source */
0288 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
0289 {
0290     struct dw_dma *dw = to_dw_dma(dwc->chan.device);
0291     u32 ctlhi = channel_readl(dwc, CTL_HI);
0292     u32 ctllo = channel_readl(dwc, CTL_LO);
0293 
0294     return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
0295 }
0296 
/*
 * Reconcile the active list with hardware state and complete finished
 * descriptors. Three outcomes:
 *  - raw XFER status set: everything submitted is done — or, in soft LLP
 *    mode, the next block of the current descriptor must be submitted;
 *  - otherwise walk the active list comparing each LLI against the
 *    channel's current LLP to find the in-flight descriptor, updating
 *    residue along the way and completing everything before it;
 *  - if nothing matches, the channel is wedged: reset and restart it.
 */
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	/* In soft LLP mode progress happens only via the interrupt path */
	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		/* Lock is dropped for the callback, then reacquired */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
0409 
/* Dump one hardware LLI, used when reporting a bad descriptor */
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}
0419 
/*
 * Handle a channel error interrupt: the descriptor at the head of the
 * active list is assumed bad. Drop it, restart the channel with the next
 * descriptor and report the bad one as (pretend-)complete so the client
 * still makes progress.
 */
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	/*
	 * Refill the active list from the queue (inserts at its tail).
	 * NOTE(review): if the queue is empty, queue.next is the list head
	 * itself and this moves the sentinel into the active list — looks
	 * suspicious; confirm against list_move() semantics.
	 */
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
0462 
0463 static void dw_dma_tasklet(struct tasklet_struct *t)
0464 {
0465     struct dw_dma *dw = from_tasklet(dw, t, tasklet);
0466     struct dw_dma_chan *dwc;
0467     u32 status_xfer;
0468     u32 status_err;
0469     unsigned int i;
0470 
0471     status_xfer = dma_readl(dw, RAW.XFER);
0472     status_err = dma_readl(dw, RAW.ERROR);
0473 
0474     dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
0475 
0476     for (i = 0; i < dw->dma.chancnt; i++) {
0477         dwc = &dw->chan[i];
0478         if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
0479             dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
0480         else if (status_err & (1 << i))
0481             dwc_handle_error(dw, dwc);
0482         else if (status_xfer & (1 << i))
0483             dwc_scan_descriptors(dw, dwc);
0484     }
0485 
0486     /* Re-enable interrupts */
0487     channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
0488     channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
0489 }
0490 
/*
 * Hard IRQ handler: verify the interrupt is ours, mask the per-channel
 * interrupt sources and defer the real work to the tasklet.
 */
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/*
		 * Try to recover: mask every source for all 8 possible
		 * channels ((1 << 8) - 1 = 0xff — presumably the hardware
		 * maximum; confirm against the controller definition).
		 */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
0533 
0534 /*----------------------------------------------------------------------*/
0535 
/*
 * dwc_prep_dma_memcpy - prepare a memory-to-memory transfer
 * @chan:  DMA channel
 * @dest:  destination bus address
 * @src:   source bus address
 * @len:   number of bytes to copy
 * @flags: dmaengine tx flags (DMA_PREP_INTERRUPT honoured)
 *
 * Splits @len into hardware blocks via the controller's ->bytes2block()
 * hook and chains them through a linked list of LLIs. Returns the first
 * descriptor of the chain, or NULL if @len is zero or a descriptor
 * cannot be allocated.
 */
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo, ctlhi;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	/* Widest width that both addresses and the length are aligned to */
	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = dw->prepare_ctllo(dwc)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, ctlhi);
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			/* Chain the new LLI after the previous one */
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	/* Terminate the chain and disable LLP fetch on the last block */
	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
0612 
0613 static struct dma_async_tx_descriptor *
0614 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
0615         unsigned int sg_len, enum dma_transfer_direction direction,
0616         unsigned long flags, void *context)
0617 {
0618     struct dw_dma_chan  *dwc = to_dw_dma_chan(chan);
0619     struct dw_dma       *dw = to_dw_dma(chan->device);
0620     struct dma_slave_config *sconfig = &dwc->dma_sconfig;
0621     struct dw_desc      *prev;
0622     struct dw_desc      *first;
0623     u32         ctllo, ctlhi;
0624     u8          m_master = dwc->dws.m_master;
0625     u8          lms = DWC_LLP_LMS(m_master);
0626     dma_addr_t      reg;
0627     unsigned int        reg_width;
0628     unsigned int        mem_width;
0629     unsigned int        data_width = dw->pdata->data_width[m_master];
0630     unsigned int        i;
0631     struct scatterlist  *sg;
0632     size_t          total_len = 0;
0633 
0634     dev_vdbg(chan2dev(chan), "%s\n", __func__);
0635 
0636     if (unlikely(!is_slave_direction(direction) || !sg_len))
0637         return NULL;
0638 
0639     dwc->direction = direction;
0640 
0641     prev = first = NULL;
0642 
0643     switch (direction) {
0644     case DMA_MEM_TO_DEV:
0645         reg_width = __ffs(sconfig->dst_addr_width);
0646         reg = sconfig->dst_addr;
0647         ctllo = dw->prepare_ctllo(dwc)
0648                 | DWC_CTLL_DST_WIDTH(reg_width)
0649                 | DWC_CTLL_DST_FIX
0650                 | DWC_CTLL_SRC_INC;
0651 
0652         ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
0653             DWC_CTLL_FC(DW_DMA_FC_D_M2P);
0654 
0655         for_each_sg(sgl, sg, sg_len, i) {
0656             struct dw_desc  *desc;
0657             u32     len, mem;
0658             size_t      dlen;
0659 
0660             mem = sg_dma_address(sg);
0661             len = sg_dma_len(sg);
0662 
0663             mem_width = __ffs(data_width | mem | len);
0664 
0665 slave_sg_todev_fill_desc:
0666             desc = dwc_desc_get(dwc);
0667             if (!desc)
0668                 goto err_desc_get;
0669 
0670             ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
0671 
0672             lli_write(desc, sar, mem);
0673             lli_write(desc, dar, reg);
0674             lli_write(desc, ctlhi, ctlhi);
0675             lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
0676             desc->len = dlen;
0677 
0678             if (!first) {
0679                 first = desc;
0680             } else {
0681                 lli_write(prev, llp, desc->txd.phys | lms);
0682                 list_add_tail(&desc->desc_node, &first->tx_list);
0683             }
0684             prev = desc;
0685 
0686             mem += dlen;
0687             len -= dlen;
0688             total_len += dlen;
0689 
0690             if (len)
0691                 goto slave_sg_todev_fill_desc;
0692         }
0693         break;
0694     case DMA_DEV_TO_MEM:
0695         reg_width = __ffs(sconfig->src_addr_width);
0696         reg = sconfig->src_addr;
0697         ctllo = dw->prepare_ctllo(dwc)
0698                 | DWC_CTLL_SRC_WIDTH(reg_width)
0699                 | DWC_CTLL_DST_INC
0700                 | DWC_CTLL_SRC_FIX;
0701 
0702         ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
0703             DWC_CTLL_FC(DW_DMA_FC_D_P2M);
0704 
0705         for_each_sg(sgl, sg, sg_len, i) {
0706             struct dw_desc  *desc;
0707             u32     len, mem;
0708             size_t      dlen;
0709 
0710             mem = sg_dma_address(sg);
0711             len = sg_dma_len(sg);
0712 
0713 slave_sg_fromdev_fill_desc:
0714             desc = dwc_desc_get(dwc);
0715             if (!desc)
0716                 goto err_desc_get;
0717 
0718             ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
0719 
0720             lli_write(desc, sar, reg);
0721             lli_write(desc, dar, mem);
0722             lli_write(desc, ctlhi, ctlhi);
0723             mem_width = __ffs(data_width | mem);
0724             lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
0725             desc->len = dlen;
0726 
0727             if (!first) {
0728                 first = desc;
0729             } else {
0730                 lli_write(prev, llp, desc->txd.phys | lms);
0731                 list_add_tail(&desc->desc_node, &first->tx_list);
0732             }
0733             prev = desc;
0734 
0735             mem += dlen;
0736             len -= dlen;
0737             total_len += dlen;
0738 
0739             if (len)
0740                 goto slave_sg_fromdev_fill_desc;
0741         }
0742         break;
0743     default:
0744         return NULL;
0745     }
0746 
0747     if (flags & DMA_PREP_INTERRUPT)
0748         /* Trigger interrupt after last block */
0749         lli_set(prev, ctllo, DWC_CTLL_INT_EN);
0750 
0751     prev->lli.llp = 0;
0752     lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
0753     first->total_len = total_len;
0754 
0755     return &first->txd;
0756 
0757 err_desc_get:
0758     dev_err(chan2dev(chan),
0759         "not enough descriptors available. Direction %d\n", direction);
0760     dwc_desc_put(dwc, first);
0761     return NULL;
0762 }
0763 
0764 bool dw_dma_filter(struct dma_chan *chan, void *param)
0765 {
0766     struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
0767     struct dw_dma_slave *dws = param;
0768 
0769     if (dws->dma_dev != chan->device->dev)
0770         return false;
0771 
0772     /* permit channels in accordance with the channels mask */
0773     if (dws->channels && !(dws->channels & dwc->mask))
0774         return false;
0775 
0776     /* We have to copy data since dws can be temporary storage */
0777     memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
0778 
0779     return true;
0780 }
0781 EXPORT_SYMBOL_GPL(dw_dma_filter);
0782 
0783 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
0784 {
0785     struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
0786     struct dw_dma *dw = to_dw_dma(chan->device);
0787 
0788     memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
0789 
0790     dwc->dma_sconfig.src_maxburst =
0791         clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
0792     dwc->dma_sconfig.dst_maxburst =
0793         clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
0794 
0795     dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
0796     dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
0797 
0798     return 0;
0799 }
0800 
/*
 * Suspend the channel via the controller-specific hook, wait (bounded)
 * for the FIFO to report empty, then mark the channel paused.
 */
static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int		count = 20; /* timeout iterations */

	dw->suspend_chan(dwc, drain);

	/* Bounded busy-wait: at most 20 * 2us before giving up */
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
0813 
0814 static int dwc_pause(struct dma_chan *chan)
0815 {
0816     struct dw_dma_chan  *dwc = to_dw_dma_chan(chan);
0817     unsigned long       flags;
0818 
0819     spin_lock_irqsave(&dwc->lock, flags);
0820     dwc_chan_pause(dwc, false);
0821     spin_unlock_irqrestore(&dwc->lock, flags);
0822 
0823     return 0;
0824 }
0825 
/* Resume a suspended channel via the controller hook and clear the flag */
static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->resume_chan(dwc, drain);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
0834 
0835 static int dwc_resume(struct dma_chan *chan)
0836 {
0837     struct dw_dma_chan  *dwc = to_dw_dma_chan(chan);
0838     unsigned long       flags;
0839 
0840     spin_lock_irqsave(&dwc->lock, flags);
0841 
0842     if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
0843         dwc_chan_resume(dwc, false);
0844 
0845     spin_unlock_irqrestore(&dwc->lock, flags);
0846 
0847     return 0;
0848 }
0849 
/*
 * dmaengine ->device_terminate_all hook: abort the current transfer
 * (pause with drain, disable, resume), then flush every active and
 * queued descriptor, completing them without client callbacks.
 */
static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	/* Pause with drain so the FIFO empties before the channel dies */
	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc, true);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}
0880 
0881 static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
0882 {
0883     struct dw_desc *desc;
0884 
0885     list_for_each_entry(desc, &dwc->active_list, desc_node)
0886         if (desc->txd.cookie == c)
0887             return desc;
0888 
0889     return NULL;
0890 }
0891 
0892 static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
0893 {
0894     struct dw_desc *desc;
0895     unsigned long flags;
0896     u32 residue;
0897 
0898     spin_lock_irqsave(&dwc->lock, flags);
0899 
0900     desc = dwc_find_desc(dwc, cookie);
0901     if (desc) {
0902         if (desc == dwc_first_active(dwc)) {
0903             residue = desc->residue;
0904             if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
0905                 residue -= dwc_get_sent(dwc);
0906         } else {
0907             residue = desc->total_len;
0908         }
0909     } else {
0910         residue = 0;
0911     }
0912 
0913     spin_unlock_irqrestore(&dwc->lock, flags);
0914     return residue;
0915 }
0916 
0917 static enum dma_status
0918 dwc_tx_status(struct dma_chan *chan,
0919           dma_cookie_t cookie,
0920           struct dma_tx_state *txstate)
0921 {
0922     struct dw_dma_chan  *dwc = to_dw_dma_chan(chan);
0923     enum dma_status     ret;
0924 
0925     ret = dma_cookie_status(chan, cookie, txstate);
0926     if (ret == DMA_COMPLETE)
0927         return ret;
0928 
0929     dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
0930 
0931     ret = dma_cookie_status(chan, cookie, txstate);
0932     if (ret == DMA_COMPLETE)
0933         return ret;
0934 
0935     dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
0936 
0937     if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
0938         return DMA_PAUSED;
0939 
0940     return ret;
0941 }
0942 
0943 static void dwc_issue_pending(struct dma_chan *chan)
0944 {
0945     struct dw_dma_chan  *dwc = to_dw_dma_chan(chan);
0946     unsigned long       flags;
0947 
0948     spin_lock_irqsave(&dwc->lock, flags);
0949     if (list_empty(&dwc->active_list))
0950         dwc_dostart_first_queued(dwc);
0951     spin_unlock_irqrestore(&dwc->lock, flags);
0952 }
0953 
0954 /*----------------------------------------------------------------------*/
0955 
/*
 * Disable the whole controller: clear the global DMA-enable bit, mask
 * every per-channel interrupt source, and busy-wait until the hardware
 * acknowledges that it is off.
 */
void do_dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	/* Mask all interrupt sources on every channel */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	/* Spin until the controller reports itself disabled */
	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}
0969 
/* Globally enable the DMA controller */
void do_dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
0974 
/*
 * dmaengine ->device_alloc_chan_resources() callback.
 *
 * Verify that the channel is idle, initialize its cookie counter,
 * validate any legacy chan->private slave data, and power the
 * controller on when this is the first channel taken into use.
 * Returns 0 on success or a negative errno.
 */
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		do_dw_dma_on(dw);
	/* Record this channel in the per-controller usage bitmask */
	dw->in_use |= dwc->mask;

	return 0;
}
1011 
/*
 * dmaengine ->device_free_chan_resources() callback.
 *
 * The channel must already be idle with no descriptors outstanding.
 * Clears the slave configuration, masks the channel's interrupts, and
 * powers the controller off when the last user channel is released.
 */
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		do_dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
1045 
1046 static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
1047 {
1048     struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1049 
1050     caps->max_burst = dwc->max_burst;
1051 
1052     /*
1053      * It might be crucial for some devices to have the hardware
1054      * accelerated multi-block transfers supported, aka LLPs in DW DMAC
1055      * notation. So if LLPs are supported then max_sg_burst is set to
1056      * zero which means unlimited number of SG entries can be handled in a
1057      * single DMA transaction, otherwise it's just one SG entry.
1058      */
1059     if (dwc->nollp)
1060         caps->max_sg_burst = 1;
1061     else
1062         caps->max_sg_burst = 0;
1063 }
1064 
/*
 * Probe and register one DW DMAC instance described by @chip.
 *
 * Hardware parameters are either auto-detected from the DW_PARAMS
 * register (when no platform data is supplied and the controller was
 * synthesized with parameter storage enabled) or copied from
 * chip->pdata.  Per-channel structures, the descriptor DMA pool, the
 * shared IRQ and the tasklet are set up before the dmaengine device is
 * registered.  Returns 0 on success or a negative errno, with all
 * acquired resources released on failure.
 */
int do_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_platform_data *pdata;
	bool			autocfg = false;
	unsigned int		dw_params;
	unsigned int		i;
	int			err;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;

	/* Keep the device powered for the duration of the probe */
	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		/* Without parameter storage there is nothing to autodetect */
		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw->disable(dw);

	/* Device and instance ID for IRQ and DMA pool */
	dw->set_device_name(dw, chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_setup(&dw->tasklet, dw_dma_tasklet);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (err)
		goto err_pdata;

	/* Initialize every channel and hook it into the dmaengine device */
	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			/* DWC_PARAMS registers are laid out in reverse channel order */
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;

			/*
			 * According to the DW DMA databook the true scatter-
			 * gether LLPs aren't available if either multi-block
			 * config is disabled (CHx_MULTI_BLK_EN == 0) or the
			 * LLP register is hard-coded to zeros
			 * (CHx_HC_LLP == 1).
			 */
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 ||
				(dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1;
			dwc->max_burst =
				(0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7));
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
			dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST;
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_caps = dwc_caps;
	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.min_burst = DW_DMA_MIN_BURST;
	dw->dma.max_burst = DW_DMA_MAX_BURST;
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * For now there is no hardware with non uniform maximum block size
	 * across all of the device channels, so we set the maximum segment
	 * size as the block size found for the very first channel.
	 */
	dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}
1275 
/*
 * Tear down one DW DMAC instance: quiesce the hardware, unregister the
 * dmaengine device, release the IRQ and tasklet, and detach every
 * channel.  Inverse of do_dma_probe().
 */
int do_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma		*dw = chip->dw;
	struct dw_dma_chan	*dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	/* Shut the controller down before dismantling the channels */
	do_dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}
1298 
1299 int do_dw_dma_disable(struct dw_dma_chip *chip)
1300 {
1301     struct dw_dma *dw = chip->dw;
1302 
1303     dw->disable(dw);
1304     return 0;
1305 }
1306 EXPORT_SYMBOL_GPL(do_dw_dma_disable);
1307 
1308 int do_dw_dma_enable(struct dw_dma_chip *chip)
1309 {
1310     struct dw_dma *dw = chip->dw;
1311 
1312     dw->enable(dw);
1313     return 0;
1314 }
1315 EXPORT_SYMBOL_GPL(do_dw_dma_enable);
1316 
1317 MODULE_LICENSE("GPL v2");
1318 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1319 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1320 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");