0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Renesas USB DMA Controller Driver
0004  *
0005  * Copyright (C) 2015 Renesas Electronics Corporation
0006  *
0007  * based on rcar-dmac.c
0008  * Copyright (C) 2014 Renesas Electronics Inc.
0009  * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
0010  */
0011 
0012 #include <linux/delay.h>
0013 #include <linux/dma-mapping.h>
0014 #include <linux/dmaengine.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/list.h>
0017 #include <linux/module.h>
0018 #include <linux/of.h>
0019 #include <linux/of_dma.h>
0020 #include <linux/of_platform.h>
0021 #include <linux/platform_device.h>
0022 #include <linux/pm_runtime.h>
0023 #include <linux/slab.h>
0024 #include <linux/spinlock.h>
0025 
0026 #include "../dmaengine.h"
0027 #include "../virt-dma.h"
0028 
0029 /*
0030  * struct usb_dmac_sg - Descriptor for a hardware transfer
0031  * @mem_addr: memory address
0032  * @size: transfer size in bytes
0033  */
0034 struct usb_dmac_sg {
0035     dma_addr_t mem_addr;
0036     u32 size;
0037 };
0038 
0039 /*
0040  * struct usb_dmac_desc - USB DMA Transfer Descriptor
0041  * @vd: base virtual channel DMA transaction descriptor
0042  * @direction: direction of the DMA transfer
0043  * @sg_allocated_len: number of allocated sg entries
0044  * @sg_len: number of sg entries used by this transfer
0045  * @sg_index: index of the sg entry currently being transferred
0046  * @residue: residue after the DMAC completed a transfer
0047  * @node: node for desc_got and desc_freed
0048  * @done_cookie: cookie after the DMAC completed a transfer
0049  * @sg: information for the transfer
0050  */
0051 struct usb_dmac_desc {
0052     struct virt_dma_desc vd;
0053     enum dma_transfer_direction direction;
0054     unsigned int sg_allocated_len;
0055     unsigned int sg_len;
0056     unsigned int sg_index;
0057     u32 residue;
0058     struct list_head node;
0059     dma_cookie_t done_cookie;
0060     struct usb_dmac_sg sg[];
0061 };
0062 
0063 #define to_usb_dmac_desc(vd)    container_of(vd, struct usb_dmac_desc, vd)
0064 
0065 /*
0066  * struct usb_dmac_chan - USB DMA Controller Channel
0067  * @vc: base virtual DMA channel object
0068  * @iomem: channel I/O memory base
0069  * @index: index of this channel in the controller
0070  * @irq: irq number of this channel
0071  * @desc: the current descriptor
0072  * @descs_allocated: number of descriptors allocated
0073  * @desc_got: descriptors currently in use for queued transfers
0074  * @desc_freed: descriptors returned to the free pool after a completed transfer
0075  */
0076 struct usb_dmac_chan {
0077     struct virt_dma_chan vc;
0078     void __iomem *iomem;
0079     unsigned int index;
0080     int irq;
0081     struct usb_dmac_desc *desc;
0082     int descs_allocated;
0083     struct list_head desc_got;
0084     struct list_head desc_freed;
0085 };
0086 
0087 #define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan)
0088 
0089 /*
0090  * struct usb_dmac - USB DMA Controller
0091  * @engine: base DMA engine object
0092  * @dev: the hardware device
0093  * @iomem: remapped I/O memory base
0094  * @n_channels: number of available channels
0095  * @channels: array of DMAC channels
0096  */
0097 struct usb_dmac {
0098     struct dma_device engine;
0099     struct device *dev;
0100     void __iomem *iomem;
0101 
0102     unsigned int n_channels;
0103     struct usb_dmac_chan *channels;
0104 };
0105 
0106 #define to_usb_dmac(d)      container_of(d, struct usb_dmac, engine)
0107 
0108 /* -----------------------------------------------------------------------------
0109  * Registers
0110  */
0111 
0112 #define USB_DMAC_CHAN_OFFSET(i)     (0x20 + 0x20 * (i))
0113 
0114 #define USB_DMASWR          0x0008
0115 #define USB_DMASWR_SWR          (1 << 0)
0116 #define USB_DMAOR           0x0060
0117 #define USB_DMAOR_AE            (1 << 1)
0118 #define USB_DMAOR_DME           (1 << 0)
0119 
0120 #define USB_DMASAR          0x0000
0121 #define USB_DMADAR          0x0004
0122 #define USB_DMATCR          0x0008
0123 #define USB_DMATCR_MASK         0x00ffffff
0124 #define USB_DMACHCR         0x0014
0125 #define USB_DMACHCR_FTE         (1 << 24)
0126 #define USB_DMACHCR_NULLE       (1 << 16)
0127 #define USB_DMACHCR_NULL        (1 << 12)
0128 #define USB_DMACHCR_TS_8B       ((0 << 7) | (0 << 6))
0129 #define USB_DMACHCR_TS_16B      ((0 << 7) | (1 << 6))
0130 #define USB_DMACHCR_TS_32B      ((1 << 7) | (0 << 6))
0131 #define USB_DMACHCR_IE          (1 << 5)
0132 #define USB_DMACHCR_SP          (1 << 2)
0133 #define USB_DMACHCR_TE          (1 << 1)
0134 #define USB_DMACHCR_DE          (1 << 0)
0135 #define USB_DMATEND         0x0018
0136 
0137 /* Hardcode the xfer_shift to 5 (32 bytes) */
0138 #define USB_DMAC_XFER_SHIFT 5
0139 #define USB_DMAC_XFER_SIZE  (1 << USB_DMAC_XFER_SHIFT)
0140 #define USB_DMAC_CHCR_TS    USB_DMACHCR_TS_32B
0141 #define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES
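/*
 * For illustration: with the 32-byte transfer unit fixed above, a request
 * of `size` bytes is programmed as DIV_ROUND_UP(size, 32) units in
 * USB_DMATCR (e.g. 100 bytes -> 4 units), while the valid bytes of the
 * final partial unit are described via USB_DMATEND (see
 * usb_dmac_calc_tend() below).
 */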
0142 
0143 /* for descriptors */
0144 #define USB_DMAC_INITIAL_NR_DESC    16
0145 #define USB_DMAC_INITIAL_NR_SG      8
0146 
0147 /* -----------------------------------------------------------------------------
0148  * Device access
0149  */
0150 
0151 static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
0152 {
0153     writel(data, dmac->iomem + reg);
0154 }
0155 
0156 static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
0157 {
0158     return readl(dmac->iomem + reg);
0159 }
0160 
0161 static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
0162 {
0163     return readl(chan->iomem + reg);
0164 }
0165 
0166 static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
0167 {
0168     writel(data, chan->iomem + reg);
0169 }
0170 
0171 /* -----------------------------------------------------------------------------
0172  * Initialization and configuration
0173  */
0174 
0175 static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
0176 {
0177     u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
0178 
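    /*
     * The channel is considered busy while DE (DMA enable) is set and TE
     * (transfer end) has not been raised yet.
     */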
0179     return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
0180 }
0181 
0182 static u32 usb_dmac_calc_tend(u32 size)
0183 {
0184     /*
0185      * Please refer to the Figure "Example of Final Transaction Valid
0186      * Data Transfer Enable (EDTEN) Setting" in the data sheet.
0187      */
0188     return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
0189                         USB_DMAC_XFER_SIZE));
0190 }
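/*
 * Worked example for the mask above: for size = 100, size % 32 = 4, so the
 * result is 0xffffffff << (32 - 4) = 0xf0000000, presumably marking the
 * four valid bytes of the last 32-byte unit per the EDTEN figure referenced
 * above. For a multiple of 32 bytes the ?: operator selects
 * USB_DMAC_XFER_SIZE and the mask becomes 0xffffffff.
 */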
0191 
0192 /* This function must be called with vc.lock held */
0193 static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
0194                    unsigned int index)
0195 {
0196     struct usb_dmac_desc *desc = chan->desc;
0197     struct usb_dmac_sg *sg = desc->sg + index;
0198     dma_addr_t src_addr = 0, dst_addr = 0;
0199 
0200     WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));
0201 
0202     if (desc->direction == DMA_DEV_TO_MEM)
0203         dst_addr = sg->mem_addr;
0204     else
0205         src_addr = sg->mem_addr;
0206 
0207     dev_dbg(chan->vc.chan.device->dev,
0208         "chan%u: queue sg %p: %u@%pad -> %pad\n",
0209         chan->index, sg, sg->size, &src_addr, &dst_addr);
0210 
0211     usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
0212     usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
0213     usb_dmac_chan_write(chan, USB_DMATCR,
0214                 DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
0215     usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));
0216 
0217     usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
0218             USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
0219 }
0220 
0221 /* This function must be called with vc.lock held */
0222 static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
0223 {
0224     struct virt_dma_desc *vd;
0225 
0226     vd = vchan_next_desc(&chan->vc);
0227     if (!vd) {
0228         chan->desc = NULL;
0229         return;
0230     }
0231 
0232     /*
0233      * Remove this request from vc->desc_issued. Otherwise
0234      * vchan_next_desc() would keep returning this descriptor even
0235      * after the transfer has completed.
0236      */
0237     list_del(&vd->node);
0238 
0239     chan->desc = to_usb_dmac_desc(vd);
0240     chan->desc->sg_index = 0;
0241     usb_dmac_chan_start_sg(chan, 0);
0242 }
0243 
0244 static int usb_dmac_init(struct usb_dmac *dmac)
0245 {
0246     u16 dmaor;
0247 
0248     /* Clear all channels and enable the DMAC globally. */
0249     usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
0250 
0251     dmaor = usb_dmac_read(dmac, USB_DMAOR);
0252     if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
0253         dev_warn(dmac->dev, "DMAOR initialization failed.\n");
0254         return -EIO;
0255     }
0256 
0257     return 0;
0258 }
0259 
0260 /* -----------------------------------------------------------------------------
0261  * Descriptors allocation and free
0262  */
0263 static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
0264                    gfp_t gfp)
0265 {
0266     struct usb_dmac_desc *desc;
0267     unsigned long flags;
0268 
0269     desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
0270     if (!desc)
0271         return -ENOMEM;
0272 
0273     desc->sg_allocated_len = sg_len;
0274     INIT_LIST_HEAD(&desc->node);
0275 
0276     spin_lock_irqsave(&chan->vc.lock, flags);
0277     list_add_tail(&desc->node, &chan->desc_freed);
0278     spin_unlock_irqrestore(&chan->vc.lock, flags);
0279 
0280     return 0;
0281 }
0282 
0283 static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
0284 {
0285     struct usb_dmac_desc *desc, *_desc;
0286     LIST_HEAD(list);
0287 
0288     list_splice_init(&chan->desc_freed, &list);
0289     list_splice_init(&chan->desc_got, &list);
0290 
0291     list_for_each_entry_safe(desc, _desc, &list, node) {
0292         list_del(&desc->node);
0293         kfree(desc);
0294     }
0295     chan->descs_allocated = 0;
0296 }
0297 
0298 static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
0299                            unsigned int sg_len, gfp_t gfp)
0300 {
0301     struct usb_dmac_desc *desc = NULL;
0302     unsigned long flags;
0303 
0304     /* Get a descriptor from the free list */
0305     spin_lock_irqsave(&chan->vc.lock, flags);
0306     list_for_each_entry(desc, &chan->desc_freed, node) {
0307         if (sg_len <= desc->sg_allocated_len) {
0308             list_move_tail(&desc->node, &chan->desc_got);
0309             spin_unlock_irqrestore(&chan->vc.lock, flags);
0310             return desc;
0311         }
0312     }
0313     spin_unlock_irqrestore(&chan->vc.lock, flags);
0314 
0315     /* Allocate a new descriptor */
0316     if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
0317         /* The new descriptor was added to the tail of desc_freed */
0318         spin_lock_irqsave(&chan->vc.lock, flags);
0319         desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
0320                        node);
0321         list_move_tail(&desc->node, &chan->desc_got);
0322         spin_unlock_irqrestore(&chan->vc.lock, flags);
0323         return desc;
0324     }
0325 
0326     return NULL;
0327 }
0328 
0329 static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
0330                   struct usb_dmac_desc *desc)
0331 {
0332     unsigned long flags;
0333 
0334     spin_lock_irqsave(&chan->vc.lock, flags);
0335     list_move_tail(&desc->node, &chan->desc_freed);
0336     spin_unlock_irqrestore(&chan->vc.lock, flags);
0337 }
0338 
0339 /* -----------------------------------------------------------------------------
0340  * Stop and reset
0341  */
0342 
0343 static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
0344 {
0345     struct dma_chan *chan = &uchan->vc.chan;
0346     struct usb_dmac *dmac = to_usb_dmac(chan->device);
0347     int i;
0348 
0349     /* Don't issue a soft reset if any of the channels is busy */
0350     for (i = 0; i < dmac->n_channels; ++i) {
0351         if (usb_dmac_chan_is_busy(uchan))
0352             return;
0353     }
0354 
0355     usb_dmac_write(dmac, USB_DMAOR, 0);
0356     usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
0357     udelay(100);
0358     usb_dmac_write(dmac, USB_DMASWR, 0);
0359     usb_dmac_write(dmac, USB_DMAOR, 1);
0360 }
0361 
0362 static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
0363 {
0364     u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
0365 
0366     chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
0367     usb_dmac_chan_write(chan, USB_DMACHCR, chcr);
0368 
0369     usb_dmac_soft_reset(chan);
0370 }
0371 
0372 static void usb_dmac_stop(struct usb_dmac *dmac)
0373 {
0374     usb_dmac_write(dmac, USB_DMAOR, 0);
0375 }
0376 
0377 /* -----------------------------------------------------------------------------
0378  * DMA engine operations
0379  */
0380 
0381 static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
0382 {
0383     struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
0384     int ret;
0385 
0386     while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
0387         ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
0388                       GFP_KERNEL);
0389         if (ret < 0) {
0390             usb_dmac_desc_free(uchan);
0391             return ret;
0392         }
0393         uchan->descs_allocated++;
0394     }
0395 
0396     return pm_runtime_get_sync(chan->device->dev);
0397 }
0398 
0399 static void usb_dmac_free_chan_resources(struct dma_chan *chan)
0400 {
0401     struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
0402     unsigned long flags;
0403 
0404     /* Protect against ISR */
0405     spin_lock_irqsave(&uchan->vc.lock, flags);
0406     usb_dmac_chan_halt(uchan);
0407     spin_unlock_irqrestore(&uchan->vc.lock, flags);
0408 
0409     usb_dmac_desc_free(uchan);
0410     vchan_free_chan_resources(&uchan->vc);
0411 
0412     pm_runtime_put(chan->device->dev);
0413 }
0414 
0415 static struct dma_async_tx_descriptor *
0416 usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
0417                unsigned int sg_len, enum dma_transfer_direction dir,
0418                unsigned long dma_flags, void *context)
0419 {
0420     struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
0421     struct usb_dmac_desc *desc;
0422     struct scatterlist *sg;
0423     int i;
0424 
0425     if (!sg_len) {
0426         dev_warn(chan->device->dev,
0427              "%s: bad parameter: len=%d\n", __func__, sg_len);
0428         return NULL;
0429     }
0430 
0431     desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
0432     if (!desc)
0433         return NULL;
0434 
0435     desc->direction = dir;
0436     desc->sg_len = sg_len;
0437     for_each_sg(sgl, sg, sg_len, i) {
0438         desc->sg[i].mem_addr = sg_dma_address(sg);
0439         desc->sg[i].size = sg_dma_len(sg);
0440     }
0441 
0442     return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
0443 }
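/*
 * A minimal sketch of how a client driver would typically reach this prep
 * callback through the generic dmaengine API; "fifo_chan", "sgl" and
 * "sg_len" are placeholders, not identifiers from this driver:
 *
 *     struct dma_async_tx_descriptor *txd;
 *     dma_cookie_t cookie;
 *
 *     txd = dmaengine_prep_slave_sg(fifo_chan, sgl, sg_len,
 *                                   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *     if (!txd)
 *             return -EIO;
 *     cookie = dmaengine_submit(txd);
 *     dma_async_issue_pending(fifo_chan);
 *
 * dmaengine_prep_slave_sg() lands in usb_dmac_prep_slave_sg() above and
 * dma_async_issue_pending() in usb_dmac_issue_pending() below.
 */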
0444 
0445 static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
0446 {
0447     struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
0448     struct usb_dmac_desc *desc, *_desc;
0449     unsigned long flags;
0450     LIST_HEAD(head);
0451     LIST_HEAD(list);
0452 
0453     spin_lock_irqsave(&uchan->vc.lock, flags);
0454     usb_dmac_chan_halt(uchan);
0455     vchan_get_all_descriptors(&uchan->vc, &head);
0456     if (uchan->desc)
0457         uchan->desc = NULL;
0458     list_splice_init(&uchan->desc_got, &list);
0459     list_for_each_entry_safe(desc, _desc, &list, node)
0460         list_move_tail(&desc->node, &uchan->desc_freed);
0461     spin_unlock_irqrestore(&uchan->vc.lock, flags);
0462     vchan_dma_desc_free_list(&uchan->vc, &head);
0463 
0464     return 0;
0465 }
0466 
0467 static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
0468                          struct usb_dmac_desc *desc,
0469                          unsigned int sg_index)
0470 {
0471     struct usb_dmac_sg *sg = desc->sg + sg_index;
0472     u32 mem_addr = sg->mem_addr & 0xffffffff;
0473     unsigned int residue = sg->size;
0474 
0475     /*
0476      * We cannot use USB_DMATCR to calculate the residue because its
0477      * value is not suitable for this calculation.
0478      */
0479     if (desc->direction == DMA_DEV_TO_MEM)
0480         residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
0481     else
0482         residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;
0483 
0484     return residue;
0485 }
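/*
 * Example: if an 8 KiB DEV_TO_MEM segment started at mem_addr and
 * USB_DMADAR currently reads mem_addr + 0x1800, then 6144 bytes have been
 * written and the residue is 8192 - 6144 = 2048 bytes.
 */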
0486 
0487 static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
0488                          dma_cookie_t cookie)
0489 {
0490     struct usb_dmac_desc *desc;
0491     u32 residue = 0;
0492 
0493     list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
0494         if (desc->done_cookie == cookie) {
0495             residue = desc->residue;
0496             break;
0497         }
0498     }
0499 
0500     return residue;
0501 }
0502 
0503 static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
0504                      dma_cookie_t cookie)
0505 {
0506     u32 residue = 0;
0507     struct virt_dma_desc *vd;
0508     struct usb_dmac_desc *desc = chan->desc;
0509     int i;
0510 
0511     if (!desc) {
0512         vd = vchan_find_desc(&chan->vc, cookie);
0513         if (!vd)
0514             return 0;
0515         desc = to_usb_dmac_desc(vd);
0516     }
0517 
0518     /* Compute the size of all usb_dmac_sg still to be transferred */
0519     for (i = desc->sg_index + 1; i < desc->sg_len; i++)
0520         residue += desc->sg[i].size;
0521 
0522     /* Add the residue for the current sg */
0523     residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);
0524 
0525     return residue;
0526 }
0527 
0528 static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
0529                       dma_cookie_t cookie,
0530                       struct dma_tx_state *txstate)
0531 {
0532     struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
0533     enum dma_status status;
0534     unsigned int residue = 0;
0535     unsigned long flags;
0536 
0537     status = dma_cookie_status(chan, cookie, txstate);
0538     /* A client driver may still query the residue after DMA_COMPLETE */
0539     if (!txstate)
0540         return status;
0541 
0542     spin_lock_irqsave(&uchan->vc.lock, flags);
0543     if (status == DMA_COMPLETE)
0544         residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
0545     else
0546         residue = usb_dmac_chan_get_residue(uchan, cookie);
0547     spin_unlock_irqrestore(&uchan->vc.lock, flags);
0548 
0549     dma_set_residue(txstate, residue);
0550 
0551     return status;
0552 }
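/*
 * Sketch of the matching client-side query ("fifo_chan", "cookie" and
 * "total_len" are placeholders). Note that the residue reported here stays
 * valid even for DMA_COMPLETE, which is why the function above does not
 * bail out early on that status:
 *
 *     struct dma_tx_state state;
 *     enum dma_status status;
 *
 *     status = dmaengine_tx_status(fifo_chan, cookie, &state);
 *     if (status == DMA_COMPLETE)
 *             transferred = total_len - state.residue;
 */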
0553 
0554 static void usb_dmac_issue_pending(struct dma_chan *chan)
0555 {
0556     struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
0557     unsigned long flags;
0558 
0559     spin_lock_irqsave(&uchan->vc.lock, flags);
0560     if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
0561         usb_dmac_chan_start_desc(uchan);
0562     spin_unlock_irqrestore(&uchan->vc.lock, flags);
0563 }
0564 
0565 static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
0566 {
0567     struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
0568     struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);
0569 
0570     usb_dmac_desc_put(chan, desc);
0571 }
0572 
0573 /* -----------------------------------------------------------------------------
0574  * IRQ handling
0575  */
0576 
0577 static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
0578 {
0579     struct usb_dmac_desc *desc = chan->desc;
0580 
0581     BUG_ON(!desc);
0582 
0583     if (++desc->sg_index < desc->sg_len) {
0584         usb_dmac_chan_start_sg(chan, desc->sg_index);
0585     } else {
0586         desc->residue = usb_dmac_get_current_residue(chan, desc,
0587                             desc->sg_index - 1);
0588         desc->done_cookie = desc->vd.tx.cookie;
0589         desc->vd.tx_result.result = DMA_TRANS_NOERROR;
0590         desc->vd.tx_result.residue = desc->residue;
0591         vchan_cookie_complete(&desc->vd);
0592 
0593         /* Start the next transfer if another descriptor is queued */
0594         usb_dmac_chan_start_desc(chan);
0595     }
0596 }
0597 
0598 static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
0599 {
0600     struct usb_dmac_chan *chan = dev;
0601     irqreturn_t ret = IRQ_NONE;
0602     u32 mask = 0;
0603     u32 chcr;
0604     bool xfer_end = false;
0605 
0606     spin_lock(&chan->vc.lock);
0607 
0608     chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
0609     if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
0610         mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
0611         if (chcr & USB_DMACHCR_DE)
0612             xfer_end = true;
0613         ret |= IRQ_HANDLED;
0614     }
0615     if (chcr & USB_DMACHCR_NULL) {
0616         /* A TE interrupt will be raised after FTE is set */
0617         mask |= USB_DMACHCR_NULL;
0618         chcr |= USB_DMACHCR_FTE;
0619         ret |= IRQ_HANDLED;
0620     }
0621     if (mask)
0622         usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
0623 
0624     if (xfer_end)
0625         usb_dmac_isr_transfer_end(chan);
0626 
0627     spin_unlock(&chan->vc.lock);
0628 
0629     return ret;
0630 }
0631 
0632 /* -----------------------------------------------------------------------------
0633  * OF xlate and channel filter
0634  */
0635 
0636 static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
0637 {
0638     struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
0639     struct of_phandle_args *dma_spec = arg;
0640 
0641     /* Each USB-DMAC channel is tied to a fixed FIFO of the USB controller */
0642     if (uchan->index != dma_spec->args[0])
0643         return false;
0644 
0645     return true;
0646 }
0647 
0648 static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
0649                       struct of_dma *ofdma)
0650 {
0651     struct dma_chan *chan;
0652     dma_cap_mask_t mask;
0653 
0654     if (dma_spec->args_count != 1)
0655         return NULL;
0656 
0657     /* Only slave DMA channels can be allocated via DT */
0658     dma_cap_zero(mask);
0659     dma_cap_set(DMA_SLAVE, mask);
0660 
0661     chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
0662                      ofdma->of_node);
0663     if (!chan)
0664         return NULL;
0665 
0666     return chan;
0667 }
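/*
 * A device tree consumer selects a channel by its index through the single
 * specifier cell handled here, for example (labels and node names are
 * illustrative only):
 *
 *     &usbhs {
 *             dmas = <&usb_dmac0 0>, <&usb_dmac0 1>;
 *             dma-names = "ch0", "ch1";
 *     };
 */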
0668 
0669 /* -----------------------------------------------------------------------------
0670  * Power management
0671  */
0672 
0673 #ifdef CONFIG_PM
0674 static int usb_dmac_runtime_suspend(struct device *dev)
0675 {
0676     struct usb_dmac *dmac = dev_get_drvdata(dev);
0677     int i;
0678 
0679     for (i = 0; i < dmac->n_channels; ++i) {
0680         if (!dmac->channels[i].iomem)
0681             break;
0682         usb_dmac_chan_halt(&dmac->channels[i]);
0683     }
0684 
0685     return 0;
0686 }
0687 
0688 static int usb_dmac_runtime_resume(struct device *dev)
0689 {
0690     struct usb_dmac *dmac = dev_get_drvdata(dev);
0691 
0692     return usb_dmac_init(dmac);
0693 }
0694 #endif /* CONFIG_PM */
0695 
0696 static const struct dev_pm_ops usb_dmac_pm = {
0697     SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
0698                       pm_runtime_force_resume)
0699     SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
0700                NULL)
0701 };
0702 
0703 /* -----------------------------------------------------------------------------
0704  * Probe and remove
0705  */
0706 
0707 static int usb_dmac_chan_probe(struct usb_dmac *dmac,
0708                    struct usb_dmac_chan *uchan,
0709                    unsigned int index)
0710 {
0711     struct platform_device *pdev = to_platform_device(dmac->dev);
0712     char pdev_irqname[5];
0713     char *irqname;
0714     int ret;
0715 
0716     uchan->index = index;
0717     uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
0718 
0719     /* Request the channel interrupt. */
0720     sprintf(pdev_irqname, "ch%u", index);
0721     uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
0722     if (uchan->irq < 0)
0723         return -ENODEV;
0724 
0725     irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
0726                  dev_name(dmac->dev), index);
0727     if (!irqname)
0728         return -ENOMEM;
0729 
0730     ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
0731                    IRQF_SHARED, irqname, uchan);
0732     if (ret) {
0733         dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
0734             uchan->irq, ret);
0735         return ret;
0736     }
0737 
0738     uchan->vc.desc_free = usb_dmac_virt_desc_free;
0739     vchan_init(&uchan->vc, &dmac->engine);
0740     INIT_LIST_HEAD(&uchan->desc_freed);
0741     INIT_LIST_HEAD(&uchan->desc_got);
0742 
0743     return 0;
0744 }
0745 
0746 static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
0747 {
0748     struct device_node *np = dev->of_node;
0749     int ret;
0750 
0751     ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
0752     if (ret < 0) {
0753         dev_err(dev, "unable to read dma-channels property\n");
0754         return ret;
0755     }
0756 
0757     if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
0758         dev_err(dev, "invalid number of channels %u\n",
0759             dmac->n_channels);
0760         return -EINVAL;
0761     }
0762 
0763     return 0;
0764 }
0765 
0766 static int usb_dmac_probe(struct platform_device *pdev)
0767 {
0768     const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
0769     struct dma_device *engine;
0770     struct usb_dmac *dmac;
0771     struct resource *mem;
0772     unsigned int i;
0773     int ret;
0774 
0775     dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
0776     if (!dmac)
0777         return -ENOMEM;
0778 
0779     dmac->dev = &pdev->dev;
0780     platform_set_drvdata(pdev, dmac);
0781 
0782     ret = usb_dmac_parse_of(&pdev->dev, dmac);
0783     if (ret < 0)
0784         return ret;
0785 
0786     dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
0787                       sizeof(*dmac->channels), GFP_KERNEL);
0788     if (!dmac->channels)
0789         return -ENOMEM;
0790 
0791     /* Request resources. */
0792     mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0793     dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
0794     if (IS_ERR(dmac->iomem))
0795         return PTR_ERR(dmac->iomem);
0796 
0797     /* Enable runtime PM and initialize the device. */
0798     pm_runtime_enable(&pdev->dev);
0799     ret = pm_runtime_get_sync(&pdev->dev);
0800     if (ret < 0) {
0801         dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
0802         goto error_pm;
0803     }
0804 
0805     ret = usb_dmac_init(dmac);
0806 
0807     if (ret) {
0808         dev_err(&pdev->dev, "failed to reset device\n");
0809         goto error;
0810     }
0811 
0812     /* Initialize the channels. */
0813     INIT_LIST_HEAD(&dmac->engine.channels);
0814 
0815     for (i = 0; i < dmac->n_channels; ++i) {
0816         ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
0817         if (ret < 0)
0818             goto error;
0819     }
0820 
0821     /* Register the DMAC as a DMA provider for DT. */
0822     ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
0823                      NULL);
0824     if (ret < 0)
0825         goto error;
0826 
0827     /*
0828      * Register the DMA engine device.
0829      *
0830      * Default transfer size of 32 bytes requires 32-byte alignment.
0831      */
0832     engine = &dmac->engine;
0833     dma_cap_set(DMA_SLAVE, engine->cap_mask);
0834 
0835     engine->dev = &pdev->dev;
0836 
0837     engine->src_addr_widths = widths;
0838     engine->dst_addr_widths = widths;
0839     engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
0840     engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
0841 
0842     engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
0843     engine->device_free_chan_resources = usb_dmac_free_chan_resources;
0844     engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
0845     engine->device_terminate_all = usb_dmac_chan_terminate_all;
0846     engine->device_tx_status = usb_dmac_tx_status;
0847     engine->device_issue_pending = usb_dmac_issue_pending;
0848 
0849     ret = dma_async_device_register(engine);
0850     if (ret < 0)
0851         goto error;
0852 
0853     pm_runtime_put(&pdev->dev);
0854     return 0;
0855 
0856 error:
0857     of_dma_controller_free(pdev->dev.of_node);
0858 error_pm:
0859     pm_runtime_put(&pdev->dev);
0860     pm_runtime_disable(&pdev->dev);
0861     return ret;
0862 }
0863 
0864 static void usb_dmac_chan_remove(struct usb_dmac *dmac,
0865                  struct usb_dmac_chan *uchan)
0866 {
0867     usb_dmac_chan_halt(uchan);
0868     devm_free_irq(dmac->dev, uchan->irq, uchan);
0869 }
0870 
0871 static int usb_dmac_remove(struct platform_device *pdev)
0872 {
0873     struct usb_dmac *dmac = platform_get_drvdata(pdev);
0874     int i;
0875 
0876     for (i = 0; i < dmac->n_channels; ++i)
0877         usb_dmac_chan_remove(dmac, &dmac->channels[i]);
0878     of_dma_controller_free(pdev->dev.of_node);
0879     dma_async_device_unregister(&dmac->engine);
0880 
0881     pm_runtime_disable(&pdev->dev);
0882 
0883     return 0;
0884 }
0885 
0886 static void usb_dmac_shutdown(struct platform_device *pdev)
0887 {
0888     struct usb_dmac *dmac = platform_get_drvdata(pdev);
0889 
0890     usb_dmac_stop(dmac);
0891 }
0892 
0893 static const struct of_device_id usb_dmac_of_ids[] = {
0894     { .compatible = "renesas,usb-dmac", },
0895     { /* Sentinel */ }
0896 };
0897 MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
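/*
 * A controller node matched by the table above looks roughly like the
 * following; the SoC-specific compatible, unit address and interrupt
 * specifiers are illustrative only. "dma-channels" is parsed by
 * usb_dmac_parse_of() and the "chN" interrupt names by
 * usb_dmac_chan_probe():
 *
 *     usb_dmac0: dma-controller@e65a0000 {
 *             compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
 *             reg = <0 0xe65a0000 0 0x100>;
 *             interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
 *                          <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 *             interrupt-names = "ch0", "ch1";
 *             #dma-cells = <1>;
 *             dma-channels = <2>;
 *     };
 */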
0898 
0899 static struct platform_driver usb_dmac_driver = {
0900     .driver     = {
0901         .pm = &usb_dmac_pm,
0902         .name   = "usb-dmac",
0903         .of_match_table = usb_dmac_of_ids,
0904     },
0905     .probe      = usb_dmac_probe,
0906     .remove     = usb_dmac_remove,
0907     .shutdown   = usb_dmac_shutdown,
0908 };
0909 
0910 module_platform_driver(usb_dmac_driver);
0911 
0912 MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
0913 MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
0914 MODULE_LICENSE("GPL v2");