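/*
 * Driver for the Atmel AHB DMA Controller (HDMAC), as found on AT91 SoCs.
 */
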
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"
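
/*
 * Naming conventions used throughout this driver:
 *	at_dma_ / atdma	: the AHB DMA controller itself
 *	atc_ / atchan	: a single DMA channel of that controller
 */
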
#define ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10
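
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */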
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
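
/**
 * struct at_dma_platform_data - controller configuration parameters
 * @nr_channels: number of channels supported by the hardware
 * @cap_mask: dma_capability flags supported by the platform
 */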
struct at_dma_platform_data {
	unsigned int	nr_channels;
	dma_cap_mask_t	cap_mask;
};
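
/**
 * struct at_dma_slave - controller-specific information about a slave
 * @dma_dev: DMA controller device this slave is bound to
 * @cfg: platform-specific initializer for the channel CFG register
 */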
struct at_dma_slave {
	struct device	*dma_dev;
	u32		cfg;
};

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);
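
/**
 * atc_get_xfer_width - get the transfer width for given addresses and length
 * @src: DMA source address
 * @dst: DMA destination address
 * @len: transfer length
 *
 * Return: 2 when source, destination and length are all word aligned,
 * 1 when they are halfword aligned, 0 otherwise (byte width).
 */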
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
					      size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
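
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: the ack bit is set at creation time to make the initial allocation
 * more convenient; it is cleared again when the descriptor is marked in use.
 */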
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc *desc = NULL;
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}
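
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */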
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
			"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		 "scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret)
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);

	return ret;
}
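
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */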
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}
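
/**
 * atc_desc_chain - build a chain by adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions.
 */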
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor into the LD ring */
		list_add_tail(&desc->desc_node,
			      &(*first)->tx_list);
	}
	*prev = desc;
}
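
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held.
 */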
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			" channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* the tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
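
/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 *
 * Scans both the queued and the active lists; returns NULL if not found.
 */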
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
					      dma_cookie_t cookie)
{
	struct at_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	return NULL;
}
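
/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 */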
static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

	/*
	 * When reading CTRLA, the Buffer Transfer Size (btsize) bitfield
	 * refers to the number of transfers completed on the Source
	 * Interface, so btsize is always a number of source-width transfers.
	 */
	return current_len - (btsize << src_width);
}
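
/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */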
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc;
	int ret;
	u32 ctrla, dscr, trials;

	/*
	 * If the cookie doesn't match the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Find the child descriptor currently being transferred: the
		 * channel DSCR register holds the address of the next link
		 * descriptor to be fetched, and CTRLA tells how much of the
		 * current one has already been read from the source.
		 *
		 * DSCR and CTRLA cannot be read atomically together, so the
		 * transfer may progress between the two reads and make them
		 * refer to different child descriptors. Rather than pausing
		 * the channel (which could cause RX overruns or TX underruns),
		 * re-read DSCR until two consecutive reads return the same
		 * value; the CTRLA value read inbetween then belongs to the
		 * same child descriptor as DSCR.
		 */
		dscr = channel_readl(atchan, DSCR);
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
			u32 new_dscr;

			rmb(); /* ensure DSCR is read after CTRLA */
			new_dscr = channel_readl(atchan, DSCR);

			/*
			 * If no transfer completed since the previous read of
			 * DSCR, the DSCR/CTRLA pair is consistent: use it.
			 */
			if (likely(new_dscr == dscr))
				break;

			/*
			 * DSCR has changed: the CTRLA value may not match the
			 * descriptor pointed to by the new DSCR, so read CTRLA
			 * again and retry.
			 */
			dscr = new_dscr;
			rmb(); /* ensure DSCR is read before CTRLA */
			ctrla = channel_readl(atchan, CTRLA);
		}
		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
			return -ETIMEDOUT;

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla);

		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the current descriptor in the chain we can calculate
		 * the remaining bytes using the channel's CTRLA register.
		 */
		ret = atc_calc_bytes_left(ret, ctrla);
	} else {
		/* single transfer */
		ctrla = channel_readl(atchan, CTRLA);
		ret = atc_calc_bytes_left(ret, ctrla);
	}

	return ret;
}
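
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 */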
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
	unsigned long flags;

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&atchan->lock, flags);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	spin_unlock_irqrestore(&atchan->lock, flags);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers, no need to replay callback when stopping */
	if (!atc_chan_is_cyclic(atchan))
		dmaengine_desc_get_callback_invoke(txd, NULL);

	dma_run_dependencies(txd);
}
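
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any.
 *
 * Assume channel is idle while calling this function.
 */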
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	spin_lock_irqsave(&atchan->lock, flags);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	spin_unlock_irqrestore(&atchan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
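
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 */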
static void atc_advance_work(struct at_dma_chan *atchan)
{
	unsigned long flags;
	int ret;

	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	spin_lock_irqsave(&atchan->lock, flags);
	ret = atc_chan_is_enabled(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (ret)
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list))
		return atc_complete_all(atchan);

	atc_chain_complete(atchan, atc_first_active(atchan));

	/* advance work */
	spin_lock_irqsave(&atchan->lock, flags);
	atc_dostart(atchan, atc_first_active(atchan));
	spin_unlock_irqrestore(&atchan->lock, flags);
}
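
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 */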
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * Reporting this at KERN_CRIT may seem harsh, but since this only
	 * happens when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the controller
	 * flagged an error instead of scribbling over random memory.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
		 "Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
		 " cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	spin_unlock_irqrestore(&atchan->lock, flags);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
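
/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 */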
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc *first = atc_first_active(atchan);
	struct dma_async_tx_descriptor *txd = &first->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "new cyclic period llp 0x%08x\n",
		 channel_readl(atchan, DSCR));

	dmaengine_desc_get_callback_invoke(txd, NULL);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(struct tasklet_struct *t)
{
	struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);

	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		return atc_handle_error(atchan);

	if (atc_chan_is_cyclic(atchan))
		return atc_handle_cyclic(atchan);

	atc_advance_work(atchan);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = (struct at_dma *)dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}
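
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already.
 *
 * Cookie increment and adding to active_list or queue must be atomic.
 */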
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc *desc = txd_to_at_desc(tx);
	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
			 desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
			 desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
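
/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: interleaved transfer template
 * @flags: tx descriptor status flags
 */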
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct data_chunk *first;
	struct at_desc *desc = NULL;
	size_t xfer_count;
	unsigned int dwidth;
	u32 ctrla;
	u32 ctrlb;
	size_t len = 0;
	int i;

	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	first = xt->sgl;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		 __func__, &xt->src_start, &xt->dst_start, xt->numf,
		 xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fits that
	 * description, i.e. a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start,
				    xt->dst_start, len);

	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = ATC_SRC_WIDTH(dwidth) |
		ATC_DST_WIDTH(dwidth);

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_SRC_PIP
		| ATC_DST_PIP
		| ATC_FC_MEM2MEM;

	/* Grab our descriptor */
	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan),
			"%s: couldn't allocate our descriptor\n", __func__);
		return NULL;
	}

	desc->lli.saddr = xt->src_start;
	desc->lli.daddr = xt->dst_start;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	desc->txd.cookie = -EBUSY;
	desc->total_len = desc->len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &desc->txd;
}
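
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation destination address
 * @src: operation source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */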
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
		 &dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				   ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = xfer_count << src_width;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
					      dma_addr_t psrc,
					      dma_addr_t pdst,
					      size_t len)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc;
	size_t xfer_count;

	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		    ATC_SRC_ADDR_MODE_FIXED |
		    ATC_DST_ADDR_MODE_INCR |
		    ATC_FC_MEM2MEM;

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n",
			__func__);
		return NULL;
	}

	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
			__func__);
		return NULL;
	}

	desc->lli.saddr = psrc;
	desc->lli.daddr = pdst;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->txd.cookie = 0;
	desc->len = len;

	return desc;
}
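
/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation destination address
 * @value: value to set the buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */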
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	void __iomem *vaddr;
	dma_addr_t paddr;
	char fill_pattern;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		 &dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}

	/* Only the first byte of value is to be used according to dmaengine */
	fill_pattern = (char)value;

	*(u32*)vaddr = (fill_pattern << 24) |
		       (fill_pattern << 16) |
		       (fill_pattern << 8) |
		       fill_pattern;

	desc = atc_create_memset_desc(chan, paddr, dest, len);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
			__func__);
		goto err_free_buffer;
	}

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->txd.cookie = -EBUSY;
	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;

err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}

static struct dma_async_tx_descriptor *
atc_prep_dma_memset_sg(struct dma_chan *chan,
		       struct scatterlist *sgl,
		       unsigned int sg_len, int value,
		       unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	void __iomem *vaddr;
	dma_addr_t paddr;
	size_t total_len = 0;
	int i;

	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
		 value, sg_len, flags);

	if (unlikely(!sgl || !sg_len)) {
		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32*)vaddr = value;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
				__func__);
			goto err_put_desc;
		}

		desc = atc_create_memset_desc(chan, paddr, dest, len);
		if (!desc)
			goto err_put_desc;

		atc_desc_chain(&first, &prev, desc);

		total_len += len;
	}

	/*
	 * Only set the buffer pointers on the last descriptor to
	 * avoid freeing while we have our transfer still going
	 */
	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_put_desc:
	atc_desc_put(atchan, first);
	return NULL;
}
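
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */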
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long flags, void *context)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
		 sg_len,
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
	      | ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |= ATC_DST_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |= ATC_SRC_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
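
/*
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */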
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
			    size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
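
/*
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */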
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
			 unsigned int period_index, dma_addr_t buf_addr,
			 unsigned int reg_width, size_t period_len,
			 enum dma_transfer_direction direction)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	u32 ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
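
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */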
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		    size_t period_len, enum dma_transfer_direction direction,
		    unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 &buf_addr,
		 periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc *desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* lets make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

static int atc_config(struct dma_chan *chan,
		      struct dma_slave_config *sconfig)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}

static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	struct at_desc *desc, *_desc;
	unsigned long flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	spin_unlock_irqrestore(&atchan->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	return 0;
}
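
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL, updated with the transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver internal
 * state and can be used with dma_async_is_complete() to check the status
 * of multiple cookies without re-checking hardware state.
 */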
static enum dma_status
atc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/* Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
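
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */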
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	atc_advance_work(atchan);
}
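
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Return: the number of allocated descriptors
 */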
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	struct at_dma_slave *atslave;
	int i;
	u32 cfg;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&atchan->free_list)) {
		dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_list);
	}

	dma_cookie_init(chan);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n", i);

	return i;
}
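
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */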
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->status = 0;

	/*
	 * Free the per-channel slave data (allocated in at_dma_xlate())
	 */
	kfree(chan->private);
	chan->private = NULL;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}

static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);
	if (!dmac_pdev)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
	if (!atslave) {
		put_device(&dmac_pdev->dev);
		return NULL;
	}

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan) {
		put_device(&dmac_pdev->dev);
		kfree(atslave);
		return NULL;
	}

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif

/*--  Module Management  -----------------------------------------------*/

static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
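
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */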
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource *io;
	struct at_dma *atdma;
	size_t size;
	int irq;
	int err;
	int i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* create a pool of consistent memory blocks for memset blocks */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
			&pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan *atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
			      &atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_setup(&atchan->tasklet, atc_tasklet);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
		 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
		 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
		 plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting a channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;
	struct resource *io;

	at_dma_off(atdma);
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan *chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
			 "cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");