// SPDX-License-Identifier: GPL-2.0
/*
 * External DMA controller driver for UniPhier SoCs
 * Copyright 2019 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define XDMAC_CH_WIDTH      0x100

#define XDMAC_TFA       0x08
#define XDMAC_TFA_MCNT_MASK GENMASK(23, 16)
#define XDMAC_TFA_MASK      GENMASK(5, 0)
#define XDMAC_SADM      0x10
#define XDMAC_SADM_STW_MASK GENMASK(25, 24)
#define XDMAC_SADM_SAM      BIT(4)
#define XDMAC_SADM_SAM_FIXED    XDMAC_SADM_SAM
#define XDMAC_SADM_SAM_INC  0
#define XDMAC_DADM      0x14
#define XDMAC_DADM_DTW_MASK XDMAC_SADM_STW_MASK
#define XDMAC_DADM_DAM      XDMAC_SADM_SAM
#define XDMAC_DADM_DAM_FIXED    XDMAC_SADM_SAM_FIXED
#define XDMAC_DADM_DAM_INC  XDMAC_SADM_SAM_INC
#define XDMAC_EXSAD     0x18
#define XDMAC_EXDAD     0x1c
#define XDMAC_SAD       0x20
#define XDMAC_DAD       0x24
#define XDMAC_ITS       0x28
#define XDMAC_ITS_MASK      GENMASK(25, 0)
#define XDMAC_TNUM      0x2c
#define XDMAC_TNUM_MASK     GENMASK(15, 0)
#define XDMAC_TSS       0x30
#define XDMAC_TSS_REQ       BIT(0)
#define XDMAC_IEN       0x34
#define XDMAC_IEN_ERRIEN    BIT(1)
#define XDMAC_IEN_ENDIEN    BIT(0)
#define XDMAC_STAT      0x40
#define XDMAC_STAT_TENF     BIT(0)
#define XDMAC_IR        0x44
#define XDMAC_IR_ERRF       BIT(1)
#define XDMAC_IR_ENDF       BIT(0)
#define XDMAC_ID        0x48
#define XDMAC_ID_ERRIDF     BIT(1)
#define XDMAC_ID_ENDIDF     BIT(0)

#define XDMAC_MAX_CHANS     16
#define XDMAC_INTERVAL_CLKS 20
#define XDMAC_MAX_WORDS     XDMAC_TNUM_MASK
/* mask off the low bits to keep the maximum transfer size 16-byte aligned */
#define XDMAC_MAX_WORD_SIZE (XDMAC_ITS_MASK & ~GENMASK(3, 0))
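
/*
 * Derived limits (from the masks above): the driver programs XDMAC_ITS with
 * a burst size in bytes, at most XDMAC_MAX_WORD_SIZE = 0x3fffff0 (16 bytes
 * under 64 MiB), and XDMAC_TNUM with a burst count of at most 65535, so a
 * single descriptor node can move up to
 * XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS bytes.
 */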

#define UNIPHIER_XDMAC_BUSWIDTHS \
    (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct uniphier_xdmac_desc_node {
    dma_addr_t src;
    dma_addr_t dst;
    u32 burst_size;
    u32 nr_burst;
};

struct uniphier_xdmac_desc {
    struct virt_dma_desc vd;

    unsigned int nr_node;
    unsigned int cur_node;
    enum dma_transfer_direction dir;
    struct uniphier_xdmac_desc_node nodes[];
};

struct uniphier_xdmac_chan {
    struct virt_dma_chan vc;
    struct uniphier_xdmac_device *xdev;
    struct uniphier_xdmac_desc *xd;
    void __iomem *reg_ch_base;
    struct dma_slave_config sconfig;
    int id;
    unsigned int req_factor;
};

struct uniphier_xdmac_device {
    struct dma_device ddev;
    void __iomem *reg_base;
    int nr_chans;
    struct uniphier_xdmac_chan channels[];
};

static struct uniphier_xdmac_chan *
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
{
    return container_of(vc, struct uniphier_xdmac_chan, vc);
}

static struct uniphier_xdmac_desc *
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
{
    return container_of(vd, struct uniphier_xdmac_desc, vd);
}

/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
    struct virt_dma_desc *vd;

    vd = vchan_next_desc(&xc->vc);
    if (!vd)
        return NULL;

    list_del(&vd->node);

    return to_uniphier_xdmac_desc(vd);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
                      struct uniphier_xdmac_desc *xd)
{
    u32 src_mode, src_width;
    u32 dst_mode, dst_width;
    dma_addr_t src_addr, dst_addr;
    u32 val, its, tnum;
    enum dma_slave_buswidth buswidth;

    src_addr = xd->nodes[xd->cur_node].src;
    dst_addr = xd->nodes[xd->cur_node].dst;
    its      = xd->nodes[xd->cur_node].burst_size;
    tnum     = xd->nodes[xd->cur_node].nr_burst;

    /*
     * The bus width on the MEM side must be 4 or 8 bytes; it affects
     * neither the DEV-side bus width nor the transfer size.
     */
    if (xd->dir == DMA_DEV_TO_MEM) {
        src_mode = XDMAC_SADM_SAM_FIXED;
        buswidth = xc->sconfig.src_addr_width;
    } else {
        src_mode = XDMAC_SADM_SAM_INC;
        buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
    }
    src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

    if (xd->dir == DMA_MEM_TO_DEV) {
        dst_mode = XDMAC_DADM_DAM_FIXED;
        buswidth = xc->sconfig.dst_addr_width;
    } else {
        dst_mode = XDMAC_DADM_DAM_INC;
        buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
    }
    dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

    /* setup transfer factor */
    val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
    val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
    writel(val, xc->reg_ch_base + XDMAC_TFA);

    /* setup the channel */
    writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
    writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

    writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
    writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

    src_mode |= src_width;
    dst_mode |= dst_width;
    writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
    writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

    writel(its, xc->reg_ch_base + XDMAC_ITS);
    writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

    /* enable interrupt */
    writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
           xc->reg_ch_base + XDMAC_IEN);

    /* start XDMAC */
    val = readl(xc->reg_ch_base + XDMAC_TSS);
    val |= XDMAC_TSS_REQ;
    writel(val, xc->reg_ch_base + XDMAC_TSS);
}

/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
    u32 val;

    /* disable interrupt */
    val = readl(xc->reg_ch_base + XDMAC_IEN);
    val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
    writel(val, xc->reg_ch_base + XDMAC_IEN);

    /* stop XDMAC */
    val = readl(xc->reg_ch_base + XDMAC_TSS);
    val &= ~XDMAC_TSS_REQ;
    writel(val, xc->reg_ch_base + XDMAC_TSS);

    /*
     * Wait until the transfer is stopped: poll the TENF flag every
     * 100 us for up to 1 ms; returns 0 on success or -ETIMEDOUT.
     */
    return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
                     !(val & XDMAC_STAT_TENF), 100, 1000);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
    struct uniphier_xdmac_desc *xd;

    xd = uniphier_xdmac_next_desc(xc);
    if (xd)
        uniphier_xdmac_chan_start(xc, xd);

    /* set the descriptor on the channel even if xd is NULL */
    xc->xd = xd;
}

static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
    u32 stat;
    int ret;

    spin_lock(&xc->vc.lock);

    stat = readl(xc->reg_ch_base + XDMAC_ID);

    if (stat & XDMAC_ID_ERRIDF) {
        ret = uniphier_xdmac_chan_stop(xc);
        if (ret)
            dev_err(xc->xdev->ddev.dev,
                "DMA transfer error (channel failed to stop)\n");
        else
            dev_err(xc->xdev->ddev.dev,
                "DMA transfer error\n");

    } else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
        xc->xd->cur_node++;
        if (xc->xd->cur_node >= xc->xd->nr_node) {
            vchan_cookie_complete(&xc->xd->vd);
            uniphier_xdmac_start(xc);
        } else {
            uniphier_xdmac_chan_start(xc, xc->xd);
        }
    }

    /* write bits to clear */
    writel(stat, xc->reg_ch_base + XDMAC_IR);

    spin_unlock(&xc->vc.lock);
}

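/*
 * All channels share a single interrupt line (see the devm_request_irq()
 * call in probe), so the handler simply checks every channel;
 * uniphier_xdmac_chan_irq() acts only on channels that have a status flag
 * set.
 */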
static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
{
    struct uniphier_xdmac_device *xdev = dev_id;
    int i;

    for (i = 0; i < xdev->nr_chans; i++)
        uniphier_xdmac_chan_irq(&xdev->channels[i]);

    return IRQ_HANDLED;
}

static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
    vchan_free_chan_resources(to_virt_chan(chan));
}

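/*
 * A copy longer than XDMAC_MAX_WORD_SIZE is split across descriptor nodes:
 * each node carries as many maximum-size bursts as fit, and the remainder
 * moves to the next node. For example, a 160 MiB copy becomes
 * node 0 = { burst_size = XDMAC_MAX_WORD_SIZE, nr_burst = 2 } followed by
 * node 1 carrying the remaining ~32 MiB as a single burst.
 */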
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                   dma_addr_t src, size_t len, unsigned long flags)
{
    struct virt_dma_chan *vc = to_virt_chan(chan);
    struct uniphier_xdmac_desc *xd;
    unsigned int nr;
    size_t burst_size, tlen;
    int i;

    if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
        return NULL;

    nr = 1 + len / XDMAC_MAX_WORD_SIZE;

    xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
    if (!xd)
        return NULL;

    /* stop once the whole length is covered; trailing nodes stay unused */
    for (i = 0; i < nr && len; i++) {
        burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
        xd->nodes[i].src = src;
        xd->nodes[i].dst = dst;
        xd->nodes[i].burst_size = burst_size;
        xd->nodes[i].nr_burst = len / burst_size;
        tlen = rounddown(len, burst_size);
        src += tlen;
        dst += tlen;
        len -= tlen;
    }

    xd->dir = DMA_MEM_TO_MEM;
    xd->nr_node = i;
    xd->cur_node = 0;

    return vchan_tx_prep(vc, &xd->vd, flags);
}

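/*
 * A minimal sketch of how a dmaengine client might drive one of these
 * channels (dev, fifo_phys, sgl and nents are hypothetical, supplied by
 * the client driver):
 *
 *    struct dma_slave_config cfg = {
 *        .dst_addr       = fifo_phys,
 *        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *        .dst_maxburst   = 1,
 *    };
 *    struct dma_async_tx_descriptor *desc;
 *    struct dma_chan *chan;
 *
 *    chan = dma_request_chan(dev, "tx");
 *    dmaengine_slave_config(chan, &cfg);
 *    desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *                                   DMA_PREP_INTERRUPT);
 *    dmaengine_submit(desc);
 *    dma_async_issue_pending(chan);
 */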
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                 unsigned int sg_len,
                 enum dma_transfer_direction direction,
                 unsigned long flags, void *context)
{
    struct virt_dma_chan *vc = to_virt_chan(chan);
    struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
    struct uniphier_xdmac_desc *xd;
    struct scatterlist *sg;
    enum dma_slave_buswidth buswidth;
    u32 maxburst;
    int i;

    if (!is_slave_direction(direction))
        return NULL;

    if (direction == DMA_DEV_TO_MEM) {
        buswidth = xc->sconfig.src_addr_width;
        maxburst = xc->sconfig.src_maxburst;
    } else {
        buswidth = xc->sconfig.dst_addr_width;
        maxburst = xc->sconfig.dst_maxburst;
    }

    if (!maxburst)
        maxburst = 1;
    if (maxburst > xc->xdev->ddev.max_burst) {
        dev_err(xc->xdev->ddev.dev,
            "Exceeded the maximum number of burst words\n");
        return NULL;
    }

    xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
    if (!xd)
        return NULL;

    for_each_sg(sgl, sg, sg_len, i) {
        xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
            ? xc->sconfig.src_addr : sg_dma_address(sg);
        xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
            ? xc->sconfig.dst_addr : sg_dma_address(sg);
        xd->nodes[i].burst_size = maxburst * buswidth;
        xd->nodes[i].nr_burst =
            sg_dma_len(sg) / xd->nodes[i].burst_size;

        /*
         * A transfer whose size is not a multiple of the unit size
         * (the number of burst words * bus width) is currently not
         * allowed, because the driver has no way to transfer the
         * residue. In practice, to transfer a buffer of arbitrary
         * size, 'src_maxburst' or 'dst_maxburst' in dma_slave_config
         * must be set to 1.
         */
        if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
            dev_err(xc->xdev->ddev.dev,
                "Unaligned transfer size: %d\n", sg_dma_len(sg));
            kfree(xd);
            return NULL;
        }

        if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
            dev_err(xc->xdev->ddev.dev,
                "Exceeded maximum transfer size\n");
            kfree(xd);
            return NULL;
        }
    }

    xd->dir = direction;
    xd->nr_node = sg_len;
    xd->cur_node = 0;

    return vchan_tx_prep(vc, &xd->vd, flags);
}

static int uniphier_xdmac_slave_config(struct dma_chan *chan,
                       struct dma_slave_config *config)
{
    struct virt_dma_chan *vc = to_virt_chan(chan);
    struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

    memcpy(&xc->sconfig, config, sizeof(*config));

    return 0;
}

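/*
 * Tear-down contract (dmaengine core): dmaengine_terminate_async() calls
 * this hook; callers that must be certain no completion callback is still
 * running then call dmaengine_synchronize(), which lands in
 * uniphier_xdmac_synchronize() below. dmaengine_terminate_sync() combines
 * the two.
 */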
static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
    struct virt_dma_chan *vc = to_virt_chan(chan);
    struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
    unsigned long flags;
    int ret = 0;
    LIST_HEAD(head);

    spin_lock_irqsave(&vc->lock, flags);

    if (xc->xd) {
        vchan_terminate_vdesc(&xc->xd->vd);
        xc->xd = NULL;
        ret = uniphier_xdmac_chan_stop(xc);
    }

    vchan_get_all_descriptors(vc, &head);

    spin_unlock_irqrestore(&vc->lock, flags);

    vchan_dma_desc_free_list(vc, &head);

    return ret;
}

static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
    vchan_synchronize(to_virt_chan(chan));
}

static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
    struct virt_dma_chan *vc = to_virt_chan(chan);
    struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
    unsigned long flags;

    spin_lock_irqsave(&vc->lock, flags);

    if (vchan_issue_pending(vc) && !xc->xd)
        uniphier_xdmac_start(xc);

    spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
    kfree(to_uniphier_xdmac_desc(vd));
}

static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
                     int ch)
{
    struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

    xc->xdev = xdev;
    xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
    xc->vc.desc_free = uniphier_xdmac_desc_free;

    vchan_init(&xc->vc, &xdev->ddev);
}

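/*
 * Translate a two-cell DT specifier into a channel: cell 0 is the channel
 * index, cell 1 the transfer request factor written to XDMAC_TFA. A
 * consumer node would look roughly like this (a sketch; the label and
 * values are examples):
 *
 *    dmas = <&xdmac 4 22>;
 *    dma-names = "tx";
 */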
static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
                          struct of_dma *ofdma)
{
    struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
    int chan_id = dma_spec->args[0];

    if (chan_id >= xdev->nr_chans)
        return NULL;

    xdev->channels[chan_id].id = chan_id;
    xdev->channels[chan_id].req_factor = dma_spec->args[1];

    return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
}

static int uniphier_xdmac_probe(struct platform_device *pdev)
{
    struct uniphier_xdmac_device *xdev;
    struct device *dev = &pdev->dev;
    struct dma_device *ddev;
    int irq;
    u32 nr_chans;
    int i, ret;

    if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
        return -EINVAL;
    if (nr_chans > XDMAC_MAX_CHANS)
        nr_chans = XDMAC_MAX_CHANS;

    xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
                GFP_KERNEL);
    if (!xdev)
        return -ENOMEM;

    xdev->nr_chans = nr_chans;
    xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(xdev->reg_base))
        return PTR_ERR(xdev->reg_base);

    ddev = &xdev->ddev;
    ddev->dev = dev;
    dma_cap_zero(ddev->cap_mask);
    dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
    dma_cap_set(DMA_SLAVE, ddev->cap_mask);
    ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
    ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
    ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
               BIT(DMA_MEM_TO_MEM);
    ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
    ddev->max_burst = XDMAC_MAX_WORDS;
    ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
    ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
    ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
    ddev->device_config = uniphier_xdmac_slave_config;
    ddev->device_terminate_all = uniphier_xdmac_terminate_all;
    ddev->device_synchronize = uniphier_xdmac_synchronize;
    ddev->device_tx_status = dma_cookie_status;
    ddev->device_issue_pending = uniphier_xdmac_issue_pending;
    INIT_LIST_HEAD(&ddev->channels);

    for (i = 0; i < nr_chans; i++)
        uniphier_xdmac_chan_init(xdev, i);

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return irq;

    ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
                   IRQF_SHARED, "xdmac", xdev);
    if (ret) {
        dev_err(dev, "Failed to request IRQ\n");
        return ret;
    }

    ret = dma_async_device_register(ddev);
    if (ret) {
        dev_err(dev, "Failed to register XDMA device\n");
        return ret;
    }

    ret = of_dma_controller_register(dev->of_node,
                     of_dma_uniphier_xlate, xdev);
    if (ret) {
        dev_err(dev, "Failed to register XDMA controller\n");
        goto out_unregister_dmac;
    }

    platform_set_drvdata(pdev, xdev);

    dev_info(&pdev->dev, "UniPhier XDMAC driver (%u channels)\n",
         nr_chans);

    return 0;

out_unregister_dmac:
    dma_async_device_unregister(ddev);

    return ret;
}

static int uniphier_xdmac_remove(struct platform_device *pdev)
{
    struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
    struct dma_device *ddev = &xdev->ddev;
    struct dma_chan *chan;
    int ret;

    /*
     * Before we get here, almost all descriptors have been freed by the
     * ->device_free_chan_resources() hook. However, each channel might
     * still hold one descriptor that was in flight at that moment.
     * Terminate it to make sure the hardware is no longer running, then
     * free the channel resources once again to avoid a memory leak.
     */
    list_for_each_entry(chan, &ddev->channels, device_node) {
        ret = dmaengine_terminate_sync(chan);
        if (ret)
            return ret;
        uniphier_xdmac_free_chan_resources(chan);
    }

    of_dma_controller_free(pdev->dev.of_node);
    dma_async_device_unregister(ddev);

    return 0;
}

static const struct of_device_id uniphier_xdmac_match[] = {
    { .compatible = "socionext,uniphier-xdmac" },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);

static struct platform_driver uniphier_xdmac_driver = {
    .probe = uniphier_xdmac_probe,
    .remove = uniphier_xdmac_remove,
    .driver = {
        .name = "uniphier-xdmac",
        .of_match_table = uniphier_xdmac_match,
    },
};
module_platform_driver(uniphier_xdmac_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
MODULE_LICENSE("GPL v2");