// SPDX-License-Identifier: GPL-2.0-only
/*
 * MMP peripheral DMA (PDMA) engine driver
 *
 * Copyright Marvell International Ltd.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR            0x0000
#define DALGN           0x00a0
#define DINT            0x00f0
#define DDADR           0x0200
#define DSADR(n)        (0x0204 + ((n) << 4))
#define DTADR(n)        (0x0208 + ((n) << 4))
#define DCMD            0x020c

#define DCSR_RUN        BIT(31)
#define DCSR_NODESC     BIT(30)
#define DCSR_STOPIRQEN  BIT(29)
#define DCSR_REQPEND    BIT(8)
#define DCSR_STOPSTATE  BIT(3)
#define DCSR_ENDINTR    BIT(2)
#define DCSR_STARTINTR  BIT(1)
#define DCSR_BUSERR     BIT(0)

#define DCSR_EORIRQEN   BIT(28)
#define DCSR_EORJMPEN   BIT(27)
#define DCSR_EORSTOPEN  BIT(26)
#define DCSR_SETCMPST   BIT(25)
#define DCSR_CLRCMPST   BIT(24)
#define DCSR_CMPST      BIT(10)
#define DCSR_EORINTR    BIT(9)

#define DRCMR(n)        ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD    BIT(7)
#define DRCMR_CHLNUM    0x1f

#define DDADR_DESCADDR  0xfffffff0
#define DDADR_STOP      BIT(0)

#define DCMD_INCSRCADDR BIT(31)
#define DCMD_INCTRGADDR BIT(30)
#define DCMD_FLOWSRC    BIT(29)
#define DCMD_FLOWTRG    BIT(28)
#define DCMD_STARTIRQEN BIT(22)
#define DCMD_ENDIRQEN   BIT(21)
#define DCMD_ENDIAN     BIT(18)
#define DCMD_BURST8     (1 << 16)
#define DCMD_BURST16    (2 << 16)
#define DCMD_BURST32    (3 << 16)
#define DCMD_WIDTH1     (1 << 14)
#define DCMD_WIDTH2     (2 << 14)
#define DCMD_WIDTH4     (3 << 14)
#define DCMD_LENGTH     0x01fff

#define PDMA_MAX_DESC_BYTES     DCMD_LENGTH

struct mmp_pdma_desc_hw {
        u32 ddadr;      /* points to the next descriptor + flags */
        u32 dsadr;      /* source address for this transfer */
        u32 dtadr;      /* target address for this transfer */
        u32 dcmd;       /* command and length for this transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
        struct mmp_pdma_desc_hw desc;
        struct list_head node;
        struct list_head tx_list;
        struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
        struct device *dev;
        struct dma_chan chan;
        struct dma_async_tx_descriptor desc;
        struct mmp_pdma_phy *phy;
        enum dma_transfer_direction dir;
        struct dma_slave_config slave_config;

        struct mmp_pdma_desc_sw *cyclic_first;  /* first desc_sw of a cyclic transfer */

        /* channel's basic info */
        struct tasklet_struct tasklet;
        u32 dcmd;
        u32 drcmr;
        u32 dev_addr;

        /* descriptor lists, protected by desc_lock */
        spinlock_t desc_lock;
        struct list_head chain_pending; /* descriptors queued, not yet started */
        struct list_head chain_running; /* descriptors handed to the hardware */
        bool idle;                      /* channel state machine */
        bool byte_align;

        struct dma_pool *desc_pool;     /* pool for hardware descriptors */
};

struct mmp_pdma_phy {
        int idx;
        void __iomem *base;
        struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
        int dma_channels;
        void __iomem *base;
        struct device *dev;
        struct dma_device device;
        struct mmp_pdma_phy *phy;
        spinlock_t phy_lock;            /* protects physical channel allocation */
};

#define tx_to_mmp_pdma_desc(tx) \
        container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) \
        container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) \
        container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) \
        container_of(dmadev, struct mmp_pdma_device, device)

static int mmp_pdma_config_write(struct dma_chan *dchan,
                                 struct dma_slave_config *cfg,
                                 enum dma_transfer_direction direction);

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
        u32 reg = (phy->idx << 4) + DDADR;

        writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
        u32 reg, dalgn;

        if (!phy->vchan)
                return;

        reg = DRCMR(phy->vchan->drcmr);
        writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

        dalgn = readl(phy->base + DALGN);
        if (phy->vchan->byte_align)
                dalgn |= 1 << phy->idx;
        else
                dalgn &= ~(1 << phy->idx);
        writel(dalgn, phy->base + DALGN);

        reg = (phy->idx << 2) + DCSR;
        writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
        u32 reg;

        if (!phy)
                return;

        reg = (phy->idx << 2) + DCSR;
        writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
        u32 dcsr;
        u32 dint = readl(phy->base + DINT);
        u32 reg = (phy->idx << 2) + DCSR;

        if (!(dint & BIT(phy->idx)))
                return -EAGAIN;

        /* clear irq: write the set status bits back to acknowledge them */
        dcsr = readl(phy->base + reg);
        writel(dcsr, phy->base + reg);
        if ((dcsr & DCSR_BUSERR) && (phy->vchan))
                dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

        return 0;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
        struct mmp_pdma_phy *phy = dev_id;

        if (clear_chan_irq(phy) != 0)
                return IRQ_NONE;

        tasklet_schedule(&phy->vchan->tasklet);
        return IRQ_HANDLED;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
        struct mmp_pdma_device *pdev = dev_id;
        struct mmp_pdma_phy *phy;
        u32 dint = readl(pdev->base + DINT);
        int i, ret;
        int irq_num = 0;

        while (dint) {
                i = __ffs(dint);
                /* only handle interrupts that belong to this controller */
                if (i >= pdev->dma_channels)
                        break;
                dint &= (dint - 1);
                phy = &pdev->phy[i];
                ret = mmp_pdma_chan_handler(irq, phy);
                if (ret == IRQ_HANDLED)
                        irq_num++;
        }

        if (irq_num)
                return IRQ_HANDLED;

        return IRQ_NONE;
}

/* look up a free physical channel, scanning the priority classes in order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
        int prio, i;
        struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
        struct mmp_pdma_phy *phy, *found = NULL;
        unsigned long flags;

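        /*
         * DMA channel priority classes on this controller:
         *   ch 0-3,   16-19  -> class 0 (highest)
         *   ch 4-7,   20-23  -> class 1
         *   ch 8-11,  24-27  -> class 2
         *   ch 12-15, 28-31  -> class 3 (lowest)
         * The classes are walked in order so a free channel from the
         * highest-priority class is handed out first.
         */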
        spin_lock_irqsave(&pdev->phy_lock, flags);
        for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
                for (i = 0; i < pdev->dma_channels; i++) {
                        if (prio != (i & 0xf) >> 2)
                                continue;
                        phy = &pdev->phy[i];
                        if (!phy->vchan) {
                                phy->vchan = pchan;
                                found = phy;
                                goto out_unlock;
                        }
                }
        }

out_unlock:
        spin_unlock_irqrestore(&pdev->phy_lock, flags);
        return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
        struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
        unsigned long flags;
        u32 reg;

        if (!pchan->phy)
                return;

        /* clear the channel mapping in DRCMR */
        reg = DRCMR(pchan->drcmr);
        writel(0, pchan->phy->base + reg);

        spin_lock_irqsave(&pdev->phy_lock, flags);
        pchan->phy->vchan = NULL;
        pchan->phy = NULL;
        spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

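/*
 * start_pending_queue - kick off any queued transactions
 *
 * Moves everything from chain_pending onto chain_running, grabs a free
 * physical channel if the virtual channel does not hold one yet, and
 * programs the first descriptor's address before setting DCSR_RUN.
 * Must be called with chan->desc_lock held.
 */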
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
        struct mmp_pdma_desc_sw *desc;

        /* still running: the interrupt/tasklet will restart the pending list */
        if (!chan->idle) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
                return;
        }

        if (list_empty(&chan->chain_pending)) {
                /* nothing pending: release the physical channel */
                mmp_pdma_free_phy(chan);
                dev_dbg(chan->dev, "no pending list\n");
                return;
        }

        if (!chan->phy) {
                chan->phy = lookup_phy(chan);
                if (!chan->phy) {
                        dev_dbg(chan->dev, "no free dma channel\n");
                        return;
                }
        }

        /*
         * pending -> running
         * desc now points to the first descriptor of the running list
         */
        desc = list_first_entry(&chan->chain_pending,
                                struct mmp_pdma_desc_sw, node);
        list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

        /* program the descriptor's address and start the DMA transaction */
        set_desc(chan->phy, desc->async_tx.phys);
        enable_chan(chan->phy);
        chan->idle = false;
}

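/*
 * mmp_pdma_tx_submit - queue a prepared descriptor chain
 *
 * Assigns cookies to every descriptor in the chain and appends the whole
 * tx_list to chain_pending. The hardware is not touched here; the transfer
 * starts when mmp_pdma_issue_pending() is called.
 */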
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
        struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
        struct mmp_pdma_desc_sw *child;
        unsigned long flags;
        dma_cookie_t cookie = -EBUSY;

        spin_lock_irqsave(&chan->desc_lock, flags);

        list_for_each_entry(child, &desc->tx_list, node) {
                cookie = dma_cookie_assign(&child->async_tx);
        }

        /* softly link to pending list: desc->tx_list ==> pending list */
        list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
        struct mmp_pdma_desc_sw *desc;
        dma_addr_t pdesc;

        desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (!desc) {
                dev_err(chan->dev, "out of memory for link descriptor\n");
                return NULL;
        }

        INIT_LIST_HEAD(&desc->tx_list);
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
        /* each descriptor carries its own submit hook and DMA address */
        desc->async_tx.tx_submit = mmp_pdma_tx_submit;
        desc->async_tx.phys = pdesc;

        return desc;
}

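/*
 * mmp_pdma_alloc_chan_resources - allocate resources for a DMA channel
 *
 * Creates the dma_pool used for hardware descriptors, releases any stale
 * physical channel and marks the channel idle.
 *
 * Return: 1 on success (the channel is usable), -ENOMEM on failure.
 */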
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

        if (chan->desc_pool)
                return 1;

        chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
                                          chan->dev,
                                          sizeof(struct mmp_pdma_desc_sw),
                                          __alignof__(struct mmp_pdma_desc_sw),
                                          0);
        if (!chan->desc_pool) {
                dev_err(chan->dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }

        mmp_pdma_free_phy(chan);
        chan->idle = true;
        chan->dev_addr = 0;
        return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
                                    struct list_head *list)
{
        struct mmp_pdma_desc_sw *desc, *_desc;

        list_for_each_entry_safe(desc, _desc, list, node) {
                list_del(&desc->node);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        mmp_pdma_free_desc_list(chan, &chan->chain_pending);
        mmp_pdma_free_desc_list(chan, &chan->chain_running);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
        chan->idle = true;
        chan->dev_addr = 0;
        mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
                     dma_addr_t dma_dst, dma_addr_t dma_src,
                     size_t len, unsigned long flags)
{
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
        size_t copy = 0;

        if (!dchan)
                return NULL;

        if (!len)
                return NULL;

        chan = to_mmp_pdma_chan(dchan);
        chan->byte_align = false;

        if (!chan->dir) {
                chan->dir = DMA_MEM_TO_MEM;
                chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
                chan->dcmd |= DCMD_BURST32;
        }

        do {
                /* allocate and populate the link descriptor */
                new = mmp_pdma_alloc_descriptor(chan);
                if (!new) {
                        dev_err(chan->dev, "no memory for desc\n");
                        goto fail;
                }

                copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
                if (dma_src & 0x7 || dma_dst & 0x7)
                        chan->byte_align = true;

                new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
                new->desc.dsadr = dma_src;
                new->desc.dtadr = dma_dst;

                if (!first)
                        first = new;
                else
                        prev->desc.ddadr = new->async_tx.phys;

                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);

                prev = new;
                len -= copy;

                if (chan->dir == DMA_MEM_TO_DEV) {
                        dma_src += copy;
                } else if (chan->dir == DMA_DEV_TO_MEM) {
                        dma_dst += copy;
                } else if (chan->dir == DMA_MEM_TO_MEM) {
                        dma_src += copy;
                        dma_dst += copy;
                }

                /* insert the link descriptor into the LD ring */
                list_add_tail(&new->node, &first->tx_list);
        } while (len);

        first->async_tx.flags = flags;
        first->async_tx.cookie = -EBUSY;

        /* last descriptor in the chain: stop and raise the end interrupt */
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;

        chan->cyclic_first = NULL;

        return &first->async_tx;

fail:
        if (first)
                mmp_pdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                       unsigned int sg_len, enum dma_transfer_direction dir,
                       unsigned long flags, void *context)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
        size_t len, avail;
        struct scatterlist *sg;
        dma_addr_t addr;
        int i;

        if ((sgl == NULL) || (sg_len == 0))
                return NULL;

        chan->byte_align = false;

        mmp_pdma_config_write(dchan, &chan->slave_config, dir);

        for_each_sg(sgl, sg, sg_len, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);

                do {
                        len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
                        if (addr & 0x7)
                                chan->byte_align = true;

                        /* allocate and populate the link descriptor */
                        new = mmp_pdma_alloc_descriptor(chan);
                        if (!new) {
                                dev_err(chan->dev, "no memory for desc\n");
                                goto fail;
                        }

                        new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
                        if (dir == DMA_MEM_TO_DEV) {
                                new->desc.dsadr = addr;
                                new->desc.dtadr = chan->dev_addr;
                        } else {
                                new->desc.dsadr = chan->dev_addr;
                                new->desc.dtadr = addr;
                        }

                        if (!first)
                                first = new;
                        else
                                prev->desc.ddadr = new->async_tx.phys;

                        new->async_tx.cookie = 0;
                        async_tx_ack(&new->async_tx);
                        prev = new;

                        /* insert the link descriptor into the LD ring */
                        list_add_tail(&new->node, &first->tx_list);

                        /* update metadata */
                        addr += len;
                        avail -= len;
                } while (avail);
        }

        first->async_tx.cookie = -EBUSY;
        first->async_tx.flags = flags;

        /* last descriptor in the chain: stop and raise the end interrupt */
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;

        chan->dir = dir;
        chan->cyclic_first = NULL;

        return &first->async_tx;

fail:
        if (first)
                mmp_pdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
                         dma_addr_t buf_addr, size_t len, size_t period_len,
                         enum dma_transfer_direction direction,
                         unsigned long flags)
{
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
        dma_addr_t dma_src, dma_dst;

        if (!dchan || !len || !period_len)
                return NULL;

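        /* the buffer length must be a multiple of period_len */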
        if (len % period_len != 0)
                return NULL;

        if (period_len > PDMA_MAX_DESC_BYTES)
                return NULL;

        chan = to_mmp_pdma_chan(dchan);
        mmp_pdma_config_write(dchan, &chan->slave_config, direction);

        switch (direction) {
        case DMA_MEM_TO_DEV:
                dma_src = buf_addr;
                dma_dst = chan->dev_addr;
                break;
        case DMA_DEV_TO_MEM:
                dma_dst = buf_addr;
                dma_src = chan->dev_addr;
                break;
        default:
                dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
                return NULL;
        }

        chan->dir = direction;

        do {
                /* allocate and populate the link descriptor */
                new = mmp_pdma_alloc_descriptor(chan);
                if (!new) {
                        dev_err(chan->dev, "no memory for desc\n");
                        goto fail;
                }

                new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
                                  (DCMD_LENGTH & period_len));
                new->desc.dsadr = dma_src;
                new->desc.dtadr = dma_dst;

                if (!first)
                        first = new;
                else
                        prev->desc.ddadr = new->async_tx.phys;

                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);

                prev = new;
                len -= period_len;

                if (chan->dir == DMA_MEM_TO_DEV)
                        dma_src += period_len;
                else
                        dma_dst += period_len;

                /* insert the link descriptor into the LD ring */
                list_add_tail(&new->node, &first->tx_list);
        } while (len);

        first->async_tx.flags = flags;
        first->async_tx.cookie = -EBUSY;

        /* close the ring: point the last descriptor back at the first */
        new->desc.ddadr = first->async_tx.phys;
        chan->cyclic_first = first;

        return &first->async_tx;

fail:
        if (first)
                mmp_pdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

static int mmp_pdma_config_write(struct dma_chan *dchan,
                                 struct dma_slave_config *cfg,
                                 enum dma_transfer_direction direction)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (!dchan)
                return -EINVAL;

        if (direction == DMA_DEV_TO_MEM) {
                chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
                maxburst = cfg->src_maxburst;
                width = cfg->src_addr_width;
                addr = cfg->src_addr;
        } else if (direction == DMA_MEM_TO_DEV) {
                chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
                maxburst = cfg->dst_maxburst;
                width = cfg->dst_addr_width;
                addr = cfg->dst_addr;
        }

        if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
                chan->dcmd |= DCMD_WIDTH1;
        else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
                chan->dcmd |= DCMD_WIDTH2;
        else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
                chan->dcmd |= DCMD_WIDTH4;

        if (maxburst == 8)
                chan->dcmd |= DCMD_BURST8;
        else if (maxburst == 16)
                chan->dcmd |= DCMD_BURST16;
        else if (maxburst == 32)
                chan->dcmd |= DCMD_BURST32;

        chan->dir = direction;
        chan->dev_addr = addr;

        return 0;
}

static int mmp_pdma_config(struct dma_chan *dchan,
                           struct dma_slave_config *cfg)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

        memcpy(&chan->slave_config, cfg, sizeof(*cfg));
        return 0;
}

static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        unsigned long flags;

        if (!dchan)
                return -EINVAL;

        disable_chan(chan->phy);
        mmp_pdma_free_phy(chan);
        spin_lock_irqsave(&chan->desc_lock, flags);
        mmp_pdma_free_desc_list(chan, &chan->chain_pending);
        mmp_pdma_free_desc_list(chan, &chan->chain_running);
        spin_unlock_irqrestore(&chan->desc_lock, flags);
        chan->idle = true;

        return 0;
}

static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
                                     dma_cookie_t cookie)
{
        struct mmp_pdma_desc_sw *sw;
        u32 curr, residue = 0;
        bool passed = false;
        bool cyclic = chan->cyclic_first != NULL;

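        /*
         * If the channel does not have a phy pointer anymore, it has already
         * been completed. Therefore, its residue is 0.
         */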
        if (!chan->phy)
                return 0;

        if (chan->dir == DMA_DEV_TO_MEM)
                curr = readl(chan->phy->base + DTADR(chan->phy->idx));
        else
                curr = readl(chan->phy->base + DSADR(chan->phy->idx));

        list_for_each_entry(sw, &chan->chain_running, node) {
                u32 start, end, len;

                if (chan->dir == DMA_DEV_TO_MEM)
                        start = sw->desc.dtadr;
                else
                        start = sw->desc.dsadr;

                len = sw->desc.dcmd & DCMD_LENGTH;
                end = start + len;

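                /*
                 * 'passed' is latched once we hit the descriptor whose
                 * [start, end] window contains the current hardware address.
                 * Every descriptor after that one has not been processed yet,
                 * so its full length is added to the residue.
                 */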
                if (passed) {
                        residue += len;
                } else if (curr >= start && curr <= end) {
                        residue += end - curr;
                        passed = true;
                }

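                /*
                 * Descriptors that have already completed were removed from
                 * chain_running by the tasklet, so only in-flight and
                 * not-yet-started descriptors are walked here. For non-cyclic
                 * transfers, a descriptor with DCMD_ENDIRQEN marks the end of
                 * a transaction: if its cookie matches the one we were asked
                 * about, the accumulated residue is the answer; otherwise the
                 * accumulator is reset and the search continues with the next
                 * transaction in the list.
                 */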
                if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
                        continue;

                if (sw->async_tx.cookie == cookie) {
                        return residue;
                } else {
                        residue = 0;
                        passed = false;
                }
        }

        /* we should only get here in case of cyclic transactions */
        return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        enum dma_status ret;

        ret = dma_cookie_status(dchan, cookie, txstate);
        if (likely(ret != DMA_ERROR))
                dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

        return ret;
}

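/*
 * mmp_pdma_issue_pending - start any queued transactions
 *
 * Takes desc_lock and hands the pending list to the hardware via
 * start_pending_queue().
 */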
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

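/*
 * dma_do_tasklet - bottom half of the channel interrupt
 *
 * For cyclic transfers, only the period callback is invoked. Otherwise the
 * completed descriptors are moved off chain_running, their cookies are
 * completed, any pending work is started, and finally the callbacks are run
 * and the descriptors are returned to the pool outside of desc_lock.
 */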
static void dma_do_tasklet(struct tasklet_struct *t)
{
        struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
        struct mmp_pdma_desc_sw *desc, *_desc;
        LIST_HEAD(chain_cleanup);
        unsigned long flags;
        struct dmaengine_desc_callback cb;

        if (chan->cyclic_first) {
                spin_lock_irqsave(&chan->desc_lock, flags);
                desc = chan->cyclic_first;
                dmaengine_desc_get_callback(&desc->async_tx, &cb);
                spin_unlock_irqrestore(&chan->desc_lock, flags);

                dmaengine_desc_callback_invoke(&cb, NULL);

                return;
        }

        /* submit pending list; callback for each desc; free desc */
        spin_lock_irqsave(&chan->desc_lock, flags);

        list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
                /*
                 * move the descriptors to a temporary list so we can drop
                 * the lock during the entire cleanup operation
                 */
                list_move(&desc->node, &chain_cleanup);

                /*
                 * Look for the first list entry which has the ENDIRQEN flag
                 * set. That is the descriptor we got an interrupt for, so
                 * complete that transaction and its cookie.
                 */
                if (desc->desc.dcmd & DCMD_ENDIRQEN) {
                        dma_cookie_t cookie = desc->async_tx.cookie;
                        dma_cookie_complete(&desc->async_tx);
                        dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
                        break;
                }
        }

        /*
         * The hardware is idle and ready for more when the
         * chain_running list is empty.
         */
        chan->idle = list_empty(&chan->chain_running);

        /* Start any pending transactions automatically */
        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        /* Run the callback for each descriptor, in order */
        list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
                struct dma_async_tx_descriptor *txd = &desc->async_tx;

                /* Remove from the list of transactions */
                list_del(&desc->node);
                /* Run the link descriptor callback function */
                dmaengine_desc_get_callback(txd, &cb);
                dmaengine_desc_callback_invoke(&cb, NULL);

                dma_pool_free(chan->desc_pool, desc, txd->phys);
        }
}

static int mmp_pdma_remove(struct platform_device *op)
{
        struct mmp_pdma_device *pdev = platform_get_drvdata(op);
        struct mmp_pdma_phy *phy;
        int i, irq = 0, irq_num = 0;

        if (op->dev.of_node)
                of_dma_controller_free(op->dev.of_node);

        for (i = 0; i < pdev->dma_channels; i++) {
                if (platform_get_irq(op, i) > 0)
                        irq_num++;
        }

        if (irq_num != pdev->dma_channels) {
                irq = platform_get_irq(op, 0);
                devm_free_irq(&op->dev, irq, pdev);
        } else {
                for (i = 0; i < pdev->dma_channels; i++) {
                        phy = &pdev->phy[i];
                        irq = platform_get_irq(op, i);
                        devm_free_irq(&op->dev, irq, phy);
                }
        }

        dma_async_device_unregister(&pdev->device);
        return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
        struct mmp_pdma_phy *phy = &pdev->phy[idx];
        struct mmp_pdma_chan *chan;
        int ret;

        chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
        if (chan == NULL)
                return -ENOMEM;

        phy->idx = idx;
        phy->base = pdev->base;

        if (irq) {
                ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
                                       IRQF_SHARED, "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
                }
        }

        spin_lock_init(&chan->desc_lock);
        chan->dev = pdev->dev;
        chan->chan.device = &pdev->device;
        tasklet_setup(&chan->tasklet, dma_do_tasklet);
        INIT_LIST_HEAD(&chan->chain_pending);
        INIT_LIST_HEAD(&chan->chain_running);

        /* add the virtual channel to the dmac's channel list */
        list_add_tail(&chan->chan.device_node, &pdev->device.channels);

        return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
        { .compatible = "marvell,pdma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
                                           struct of_dma *ofdma)
{
        struct mmp_pdma_device *d = ofdma->of_dma_data;
        struct dma_chan *chan;

        chan = dma_get_any_slave_channel(&d->device);
        if (!chan)
                return NULL;

        to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

        return chan;
}

static int mmp_pdma_probe(struct platform_device *op)
{
        struct mmp_pdma_device *pdev;
        const struct of_device_id *of_id;
        struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
        struct resource *iores;
        int i, ret, irq = 0;
        int dma_channels = 0, irq_num = 0;
        const enum dma_slave_buswidth widths =
                DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
                DMA_SLAVE_BUSWIDTH_4_BYTES;

        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return -ENOMEM;

        pdev->dev = &op->dev;

        spin_lock_init(&pdev->phy_lock);

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        pdev->base = devm_ioremap_resource(pdev->dev, iores);
        if (IS_ERR(pdev->base))
                return PTR_ERR(pdev->base);

        of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
        if (of_id) {
                /* prefer "dma-channels", fall back to the legacy "#dma-channels" */
                if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
                                         &dma_channels))
                        of_property_read_u32(pdev->dev->of_node, "#dma-channels",
                                             &dma_channels);
        } else if (pdata && pdata->dma_channels) {
                dma_channels = pdata->dma_channels;
        } else {
                dma_channels = 32;      /* default to 32 channels */
        }
        pdev->dma_channels = dma_channels;

        for (i = 0; i < dma_channels; i++) {
                if (platform_get_irq_optional(op, i) > 0)
                        irq_num++;
        }

        pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
                                 GFP_KERNEL);
        if (pdev->phy == NULL)
                return -ENOMEM;

        INIT_LIST_HEAD(&pdev->device.channels);

        if (irq_num != dma_channels) {
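                /* all channels share one IRQ, demultiplexed in mmp_pdma_int_handler */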
                irq = platform_get_irq(op, 0);
                ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
                                       IRQF_SHARED, "pdma", pdev);
                if (ret)
                        return ret;
        }

        for (i = 0; i < dma_channels; i++) {
                irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
                ret = mmp_pdma_chan_init(pdev, i, irq);
                if (ret)
                        return ret;
        }

        dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
        dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
        dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
        dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
        pdev->device.dev = &op->dev;
        pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
        pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
        pdev->device.device_tx_status = mmp_pdma_tx_status;
        pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
        pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
        pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
        pdev->device.device_issue_pending = mmp_pdma_issue_pending;
        pdev->device.device_config = mmp_pdma_config;
        pdev->device.device_terminate_all = mmp_pdma_terminate_all;
        pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
        pdev->device.src_addr_widths = widths;
        pdev->device.dst_addr_widths = widths;
        pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        if (pdev->dev->coherent_dma_mask)
                dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
        else
                dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

        ret = dma_async_device_register(&pdev->device);
        if (ret) {
                dev_err(pdev->device.dev, "unable to register\n");
                return ret;
        }

        if (op->dev.of_node) {
                /* Device-tree DMA controller registration */
                ret = of_dma_controller_register(op->dev.of_node,
                                                 mmp_pdma_dma_xlate, pdev);
                if (ret < 0) {
                        dev_err(&op->dev, "of_dma_controller_register failed\n");
                        dma_async_device_unregister(&pdev->device);
                        return ret;
                }
        }

        platform_set_drvdata(op, pdev);
        dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
        return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
        { "mmp-pdma", },
        { },
};

static struct platform_driver mmp_pdma_driver = {
        .driver = {
                .name = "mmp-pdma",
                .of_match_table = mmp_pdma_dt_ids,
        },
        .id_table = mmp_pdma_id_table,
        .probe = mmp_pdma_probe,
        .remove = mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");