// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

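/*
 * Memory model (summary): a dw_edma_desc describes one transfer and owns a
 * list of dw_edma_chunk elements; each chunk maps onto one HW linked list
 * (bounded by the channel's ll_max) and owns a list of dw_edma_burst
 * elements, one per contiguous block to be moved. Chunks alternate the
 * change bit (CB) so the HW can tell a freshly written list from a stale
 * one.
 */
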
static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chip *chip = desc->chan->dw->chip;
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/*
	 * Toggle the change bit (CB) in each chunk; this is the mechanism
	 * that tells the eDMA HW block a new linked list is ready to be
	 * consumed:
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/*
	 * Remove the list head: at loop exit, child points at the burst
	 * that embeds the list head, i.e. chunk->burst itself
	 */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/*
	 * Remove the list head: at loop exit, child points at the chunk
	 * that embeds the list head, i.e. desc->chunk itself
	 */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

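/*
 * Pop the oldest chunk of the current descriptor, hand it to the v0 core
 * (the doorbell is only asserted on the first call, while xfer_sz is still
 * zero), account its size for residue reporting and free it; by then the
 * v0 core is expected to have serialized the bursts into the chunk's
 * ll_region.
 */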
static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}
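
/*
 * Example client usage (a sketch, not part of this driver; dchan and the
 * bus addresses below are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.src_addr = local_buf_bus_addr,
 *		.dst_addr = remote_dev_bus_addr,
 *	};
 *	dmaengine_slave_config(dchan, &cfg);	// lands in
 *						// dw_edma_device_config()
 */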

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state; it probably
		 * missed or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}
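
/*
 * Residue bookkeeping: alloc_sz accumulates the bytes prepared for a
 * descriptor and xfer_sz the bytes already handed to the HW, so the
 * residue reported above is descriptor-granular, matching the
 * DMA_RESIDUE_GRANULARITY_DESCRIPTOR capability advertised in
 * dw_edma_channel_setup().
 */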

static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	/*
	 * Local Root Port/End-point              Remote End-point
	 * +-----------------------+ PCIe bus +----------------------+
	 * |                       |    +-+   |                      |
	 * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM    |
	 * |                       |    | |   |                      |
	 * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV    |
	 * |                       |    +-+   |                      |
	 * +-----------------------+          +----------------------+
	 *
	 * 1. Normal logic:
	 * If the eDMA is embedded into the DW PCIe RP/EP and controlled
	 * from the CPU/Application side, the Rx channel (EDMA_DIR_READ)
	 * is used for the device read operations (DEV_TO_MEM) and the Tx
	 * channel (EDMA_DIR_WRITE) for the write operations (MEM_TO_DEV).
	 *
	 * 2. Inverted logic:
	 * If the eDMA is embedded into a Remote PCIe EP and is controlled
	 * by the MWr/MRd TLPs sent from the CPU's PCIe host controller,
	 * the Tx channel (EDMA_DIR_WRITE) is used for the device read
	 * operations (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) for
	 * the write operations (MEM_TO_DEV).
	 *
	 * It is the client driver's responsibility to choose a proper
	 * channel for the DMA transfers.
	 */
	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
			return NULL;
	} else {
		if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
			return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf)
			return NULL;
		if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (xfer->xfer.il->numf > 0)
			cnt = xfer->xfer.il->numf;
		else
			cnt = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (dir == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption in other
				 * drivers/IPs, the peripheral memory here
				 * isn't a FIFO but linear memory, which is
				 * why the source and destination addresses
				 * are advanced by the same amount (the data
				 * length)
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption in other
				 * drivers/IPs, the peripheral memory here
				 * isn't a FIFO but linear memory, which is
				 * why the source and destination addresses
				 * are advanced by the same amount (the data
				 * length)
				 */
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
			   xfer->xfer.il->frame_size > 0) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i];

			if (il->src_sgl) {
				src_addr += burst->sz;
				src_addr += dmaengine_get_src_icg(il, dc);
			}

			if (il->dst_sgl) {
				dst_addr += burst->sz;
				dst_addr += dmaengine_get_dst_icg(il, dc);
			}
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}
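
/*
 * Typical submission flow from a client (a sketch; dchan, sgl and len are
 * assumed to exist and error handling is omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_slave_sg(dchan, sgl, len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(dchan);	// kicks dw_edma_device_issue_pending()
 */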

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
						  EDMA_DIR_WRITE :
						  EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
						   EDMA_DIR_WRITE :
						   EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}
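
/*
 * With a single IRQ line shared by all channels, the common handler above
 * demuxes both directions itself by checking the write and then the read
 * done/abort status.
 */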

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->dw->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->dw->chip->dev);
}

static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 alloc, off_alloc;
	u32 i, j, cnt;
	int err = 0;
	u32 pos;

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->dw = dw;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (write)
			chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 write ? "write" : "read", j, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		if (write) {
			dt_region->paddr = chip->dt_region_wr[j].paddr;
			dt_region->vaddr = chip->dt_region_wr[j].vaddr;
			dt_region->sz = chip->dt_region_wr[j].sz;
		} else {
			dt_region->paddr = chip->dt_region_rd[j].paddr;
			dt_region->vaddr = chip->dt_region_rd[j].vaddr;
			dt_region->sz = chip->dt_region_rd[j].sz;
		}

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}
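
/*
 * Worked example (illustrative numbers only): with 4 IRQ vectors,
 * wr_ch_cnt = 4 and rd_ch_cnt = 4, the allocation loop in
 * dw_edma_irq_request() below ends up with wr_alloc = 2 and rd_alloc = 2,
 * so write channel j is served by vector (j % 2) and read channel j by
 * vector 2 + (j % 2), matching the pos computation in
 * dw_edma_channel_setup().
 */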

static int dw_edma_irq_request(struct dw_edma *dw,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = dw->chip->dev;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
		return -EINVAL;

	dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	if (chip->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = chip->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);

		dw->nr_irqs = 1;
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = chip->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = chip->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev || !chip->ops)
		return -EINVAL;

	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->chip = chip;

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, so that we start from known initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(dw);

	chip->dw = dw;

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);
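
/*
 * A controller (glue) driver, e.g. dw-edma-pcie, is expected to fill in a
 * dw_edma_chip and call dw_edma_probe(). A minimal sketch, assuming one
 * MSI vector and a locally controlled eDMA (every name below that isn't a
 * dw_edma_chip member is hypothetical):
 *
 *	chip->dev = &pdev->dev;
 *	chip->id = 0;
 *	chip->nr_irqs = 1;
 *	chip->ops = &my_edma_ops;		// must provide .irq_vector()
 *	chip->flags = DW_EDMA_CHIP_LOCAL;
 *	chip->ll_wr_cnt = 1;
 *	chip->ll_rd_cnt = 1;
 *	// ...plus the register base and the ll/dt region descriptions
 *	err = dw_edma_probe(chip);
 */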

int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	dma_async_device_unregister(&dw->rd_edma);
	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off(dw);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");