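// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */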
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
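
/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent descriptor's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */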
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};
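
/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */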
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));
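
/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA transfer descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */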
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
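
/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */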
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))
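
/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */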
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};
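
/*
 * struct rcar_dmac_chan_map - Map of slave device physical to DMA address
 * @addr: slave DMA address
 * @dir: direction of the mapping
 * @slave: slave configuration that is mapped
 */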
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};
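
/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA controller channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @map: currently mapped slave address
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of done descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */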
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)
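
/*
 * struct rcar_dmac - R-Car Gen2 DMA controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @dmac_base: remapped base register block
 * @chan_base: remapped channel register block (optional)
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @channels_mask: bitfield of which DMA channels are managed by this driver
 * @modules: bitmask of client modules in use
 */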
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *dmac_base;
	void __iomem *chan_base;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;
	u32 channels_mask;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

#define for_each_rcar_dmac_chan(i, dmac, chan)						\
	for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++)	\
		if (!((dmac)->channels_mask & BIT(i))) continue; else
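
/*
 * struct rcar_dmac_of_data - This driver's OF data
 * @chan_offset_base: DMAC channels base offset
 * @chan_offset_stride: DMAC channels offset stride
 */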
struct rcar_dmac_of_data {
	u32 chan_offset_base;
	u32 chan_offset_stride;
};
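
/* -----------------------------------------------------------------------------
 * Registers
 */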
#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* On R-Car Gen4 the per-channel clear register lives in the channel block. */
#define RCAR_GEN4_DMACHCLR		0x0100

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4
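
/* -----------------------------------------------------------------------------
 * Device access
 */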
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->dmac_base + reg);
	else
		writel(data, dmac->dmac_base + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->dmac_base + reg);
	else
		return readl(dmac->dmac_base + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
				 struct rcar_dmac_chan *chan)
{
	if (dmac->chan_base)
		rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
	else
		rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
}

static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
{
	struct rcar_dmac_chan *chan;
	unsigned int i;

	if (dmac->chan_base) {
		for_each_rcar_dmac_chan(i, dmac, chan)
			rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
	} else {
		rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
	}
}
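
/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */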
static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * The descriptor memory mode is expected to load DMADAR from
		 * the first hardware descriptor, but the register can be left
		 * untouched at the start of the transfer. Prime it with the
		 * first chunk's destination address so the device never writes
		 * through a stale address.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the
		 * end of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_chan_clear_all(dmac);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}
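
/* -----------------------------------------------------------------------------
 * Descriptors submission
 */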
static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
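
/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */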
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
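
/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free lists. The descriptor's chunks list is reinitialized to an empty list
 * as a result.
 *
 * The descriptor must have been removed from the channel's lists before
 * calling this function.
 */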
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * Temporarily move all descriptors from the wait list to a local list:
	 * iterating over the wait list, even with list_for_each_entry_safe,
	 * isn't safe while the channel lock is dropped around the
	 * rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}
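
/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */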
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return desc;
}
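
/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */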
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
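
/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * chunk can be allocated.
 */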
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free chunks, allocate a page worth of them and try again,
		 * as someone else could race us to get the newly allocated
		 * chunks. If the allocation fails return an error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return chunk;
}

static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change, align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}
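
/* -----------------------------------------------------------------------------
 * Stop and reset
 */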
static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
{
	u32 chcr;
	unsigned int i;

	/*
	 * Ensure that the setting of the DE bit is actually 0 after
	 * clearing it.
	 */
	for (i = 0; i < 1024; i++) {
		chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
		if (!(chcr & RCAR_DMACHCR_DE))
			return;
		udelay(1);
	}

	dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}

static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	/* Set DE=0 and flush remaining data. */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));

	/* Make sure all remaining data was flushed. */
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
		  RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local list. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
	struct rcar_dmac_chan *chan;
	unsigned int i;

	/* Stop all channels. */
	for_each_rcar_dmac_chan(i, dmac, chan) {
		spin_lock_irq(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock_irq(&chan->lock);
	}
}

static int rcar_dmac_chan_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_clear_chcr_de(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	return 0;
}
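
/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */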
static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src.xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst.xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}
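
/*
 * rcar_dmac_chan_prep_sg - Prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and list
 * manipulation. For slave DMA the direction carries the usual meaning: the SG
 * list describes memory and dev_addr is the (already mapped) device address.
 */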
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool cross_boundary = false;
	unsigned int i;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	u32 high_dev_addr;
	u32 high_mem_addr;
#endif

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (i == 0) {
			high_dev_addr = dev_addr >> 32;
			high_mem_addr = mem_addr >> 32;
		}

		if ((dev_addr >> 32 != high_dev_addr) ||
		    (mem_addr >> 32 != high_mem_addr))
			cross_boundary = true;
#endif
		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
				cross_boundary = true;
			}
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
				cross_boundary = true;
			}
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * The source and destination addresses must stay within the same 4GiB
	 * region of the 40-bit address space when hardware descriptors are
	 * used, which is what cross_boundary tracks.
	 */
	desc->hwdescs.use = !cross_boundary && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}
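
/* -----------------------------------------------------------------------------
 * DMA engine operations
 */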
static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_chan_map *map = &rchan->map;
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against the ISR. */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/*
	 * Now no new interrupts will occur, but one might already be running.
	 * Wait for it to finish before freeing resources.
	 */
	synchronize_irq(rchan->irq);

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex. */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice_init(&rchan->desc.free, &list);
	list_splice_init(&rchan->desc.pending, &list);
	list_splice_init(&rchan->desc.active, &list);
	list_splice_init(&rchan->desc.done, &list);
	list_splice_init(&rchan->desc.wait, &list);

	rchan->desc.running = NULL;

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	/* Remove the slave mapping if present. */
	if (map->slave.xfer_size) {
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
		map->slave.xfer_size = 0;
	}

	pm_runtime_put(chan->device->dev);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
				    enum dma_transfer_direction dir)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac_chan_map *map = &rchan->map;
	phys_addr_t dev_addr;
	size_t dev_size;
	enum dma_data_direction dev_dir;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = rchan->src.slave_addr;
		dev_size = rchan->src.xfer_size;
		dev_dir = DMA_TO_DEVICE;
	} else {
		dev_addr = rchan->dst.slave_addr;
		dev_size = rchan->dst.xfer_size;
		dev_dir = DMA_FROM_DEVICE;
	}

	/* Reuse the current map if possible. */
	if (dev_addr == map->slave.slave_addr &&
	    dev_size == map->slave.xfer_size &&
	    dev_dir == map->dir)
		return 0;

	/* Remove the old mapping if present. */
	if (map->slave.xfer_size)
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
	map->slave.xfer_size = 0;

	/* Create a new slave address map. */
	map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
				     dev_dir, 0);

	if (dma_mapping_error(chan->device->dev, map->addr)) {
		dev_err(chan->device->dev,
			"chan%u: failed to map %zx@%pap", rchan->index,
			dev_size, &dev_addr);
		return -EIO;
	}

	dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
		rchan->index, dev_size, &dev_addr, &map->addr,
		dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");

	map->slave.slave_addr = dev_addr;
	map->slave.xfer_size = dev_size;
	map->dir = dev_dir;

	return 0;
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, false);
}

#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, true);

	kfree(sgl);
	return desc;
}

static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the channel
	 * while using it.
	 */
	rchan->src.slave_addr = cfg->src_addr;
	rchan->dst.slave_addr = cfg->dst_addr;
	rchan->src.xfer_size = cfg->src_addr_width;
	rchan->dst.xfer_size = cfg->dst_addr_width;

	return 0;
}

static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might
	 * still be running.
	 */
	rcar_dmac_chan_reinit(rchan);

	return 0;
}

static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	enum dma_status status;
	unsigned int residue = 0;
	unsigned int dptr = 0;
	unsigned int chcrb;
	unsigned int tcrb;
	unsigned int i;

	if (!desc)
		return 0;

	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by
	 * the caller but without holding the channel lock, so the descriptor
	 * could now be complete.
	 */
	status = dma_cookie_status(&chan->chan, cookie, NULL);
	if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size. A client may also call this
	 * function before the IRQ thread has run, in which case the matching
	 * descriptor sits on the done list and its residue is zero.
	 */
	if (cookie != desc->async_tx.cookie) {
		list_for_each_entry(desc, &chan->desc.done, node) {
			if (cookie == desc->async_tx.cookie)
				return 0;
		}
		list_for_each_entry(desc, &chan->desc.pending, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}
		list_for_each_entry(desc, &chan->desc.active, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * We need to read two registers. Make sure the control register does
	 * not skip to the next chunk while reading the counter. Three attempts
	 * (initial read plus two retries) should be enough.
	 */
	for (i = 0; i < 3; i++) {
		chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
					    RCAR_DMACHCRB_DPTR_MASK;
		tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
		/* Still the same? */
		if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			      RCAR_DMACHCRB_DPTR_MASK))
			break;
	}
	WARN_ONCE(i >= 3, "residue might be not continuous!");

	/*
	 * In descriptor mode the descriptor running pointer is not maintained
	 * by the interrupt handler, find the running descriptor from the
	 * descriptor pointer field in the CHCRB register. In non-descriptor
	 * mode just use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
		if (dptr == 0)
			dptr = desc->nchunks;
		dptr--;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += tcrb << desc->xfer_shift;

	return residue;
}

static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;
	bool cyclic;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
	spin_unlock_irqrestore(&rchan->lock, flags);

	/* If there's no residue, the cookie is complete. */
	if (!residue && !cyclic)
		return DMA_COMPLETE;

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

static void rcar_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	synchronize_irq(rchan->irq);
}
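
/* -----------------------------------------------------------------------------
 * IRQ handling
 */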
static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered. Warn
		 * and return.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	bool reinit = false;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	if (chcr & RCAR_DMACHCR_CAE) {
		struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);

		/*
		 * A channel address error means the channel has already
		 * stopped, so there is no need to halt it. Clear the channel
		 * and check the DE bit as recovery.
		 */
		rcar_dmac_chan_clear(dmac, chan);
		rcar_dmac_chcr_de_barrier(chan);
		reinit = true;
		goto spin_lock_end;
	}

	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
	if (mask & RCAR_DMACHCR_DE)
		rcar_dmac_chcr_de_barrier(chan);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

spin_lock_end:
	spin_unlock(&chan->lock);

	if (reinit) {
		dev_err(chan->chan.device->dev, "Channel Address Error\n");

		rcar_dmac_chan_reinit(chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;
	struct dmaengine_desc_callback cb;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		desc = chan->desc.running;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}
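
/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 *
 * Channels are requested through the generic dmaengine OF bindings with a
 * single specifier cell carrying the MID/RID value. Illustrative consumer
 * snippet only; node names and the 0xNN values depend on the platform DT:
 *
 *	dmas = <&dmac0 0xNN>, <&dmac0 0xNN>;
 *	dma-names = "tx", "rx";
 */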
static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * The filter is called for every channel of every registered DMA
	 * engine. Reject channels that don't belong to this driver, then
	 * reserve the requested MID/RID module if it isn't already in use.
	 */
	if (chan->device->device_config != rcar_dmac_device_config)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
				     ofdma->of_node);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}
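
/* -----------------------------------------------------------------------------
 * Power management
 */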
#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	/*
	 * System sleep is routed through the runtime PM handlers, so the
	 * controller is reinitialized on resume via rcar_dmac_runtime_resume().
	 */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};
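
/* -----------------------------------------------------------------------------
 * Probe and remove
 */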
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", rchan->index);
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0)
		return -ENODEV;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), rchan->index);
	if (!irqname)
		return -ENOMEM;

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
					rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			rchan->irq, ret);
		return ret;
	}

	return 0;
}

#define RCAR_DMAC_MAX_CHANNELS	32

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	/* The hardware and driver don't support more than 32 bits in CHCLR. */
	if (dmac->n_channels <= 0 ||
	    dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	/*
	 * If the dma-channel-mask property is absent, assume all channels are
	 * usable.
	 */
	dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
	of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);

	/* If the property contains out-of-range channels, clear them. */
	dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);

	return 0;
}

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	const struct rcar_dmac_of_data *data;
	struct rcar_dmac_chan *chan;
	struct dma_device *engine;
	void __iomem *chan_base;
	struct rcar_dmac *dmac;
	unsigned int i;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);
	ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
	 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
	 * is connected to microTLB 0 on currently supported platforms, so we
	 * can't use it with the IPMMU. As the IOMMU API operates at the device
	 * level we can't disable it selectively, so ignore channel 0 for now
	 * if the device is part of an IOMMU group.
	 */
	if (device_iommu_mapped(&pdev->dev))
		dmac->channels_mask &= ~BIT(0);

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->dmac_base))
		return PTR_ERR(dmac->dmac_base);

	if (!data->chan_offset_base) {
		dmac->chan_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(dmac->chan_base))
			return PTR_ERR(dmac->chan_base);

		chan_base = dmac->chan_base;
	} else {
		chan_base = dmac->dmac_base + data->chan_offset_base;
	}

	for_each_rcar_dmac_chan(i, dmac, chan) {
		chan->index = i;
		chan->iomem = chan_base + i * data->chan_offset_stride;
	}

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		goto err_pm_disable;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto err_pm_disable;
	}

	/* Initialize the DMA engine. */
	engine = &dmac->engine;

	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_pause = rcar_dmac_chan_pause;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;
	engine->device_synchronize = rcar_dmac_device_synchronize;

	INIT_LIST_HEAD(&engine->channels);

	for_each_rcar_dmac_chan(i, dmac, chan) {
		ret = rcar_dmac_chan_probe(dmac, chan);
		if (ret < 0)
			goto err_pm_disable;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err_pm_disable;

	/* Register the DMA engine device. */
	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto err_dma_free;

	return 0;

err_dma_free:
	of_dma_controller_free(pdev->dev.of_node);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop_all_chan(dmac);
}

static const struct rcar_dmac_of_data rcar_dmac_data = {
	.chan_offset_base	= 0x8000,
	.chan_offset_stride	= 0x80,
};

static const struct rcar_dmac_of_data rcar_gen4_dmac_data = {
	.chan_offset_base	= 0x0,
	.chan_offset_stride	= 0x1000,
};

static const struct of_device_id rcar_dmac_of_ids[] = {
	{
		.compatible = "renesas,rcar-dmac",
		.data = &rcar_dmac_data,
	}, {
		.compatible = "renesas,rcar-gen4-dmac",
		.data = &rcar_gen4_dmac_data,
	}, {
		.compatible = "renesas,dmac-r8a779a0",
		.data = &rcar_gen4_dmac_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");