// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale Elo series DMA engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * DMA engine driver for the Elo (MPC83xx), Elo Plus (MPC85xx/MPC86xx),
 * and Elo3 DMA controllers.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

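/*
 * Register Helpers
 */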
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_mr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->mr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_bcr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
}

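/*
 * Descriptor Helpers
 */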
static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

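/*
 * DMA Engine Hardware Control Helpers
 */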
static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	set_mr(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/*
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
			| FSL_DMA_MR_EOLNIE);
		break;
	case FSL_DMA_IP_83XX:
		/*
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

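/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */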
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = get_mr(chan);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		set_bcr(chan, 0);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	set_mr(chan, mode);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = get_mr(chan);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		set_mr(chan, mode);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	set_mr(chan, mode);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

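/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Program the source address hold (loop) transfer size. With a loop size
 * of, for example, 4, the DMA reads from the source address (SA), SA + 1,
 * SA + 2, SA + 3, then loops back to SA, SA + 1 ... and so on.
 */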
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_SAHTS_MASK;
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	set_mr(chan, mode);
}

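/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Program the destination address hold (loop) transfer size. With a loop
 * size of, for example, 4, the DMA writes to the destination address (TA),
 * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1 ... and so on.
 */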
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_DAHTS_MASK;
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	set_mr(chan, mode);
}

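/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * The maximum size is 1024.
 */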
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = get_mr(chan);
	mode &= ~FSL_DMA_MR_BWC_MASK;
	mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;

	set_mr(chan, mode);
}

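/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */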
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

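/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * With external start enabled, the channel is started by the external
 * DMA start pin, so dma_start() does not begin the transfer immediately;
 * the DMA channel waits for the control pin to be asserted.
 */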
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

int fsl_dma_external_start(struct dma_chan *dchan, int enable)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	fsl_chan_toggle_ext_start(chan, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_dma_external_start);

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
	if (unlikely(chan->pm_state != RUNNING)) {
		chan_dbg(chan, "cannot submit due to suspend\n");
		spin_unlock_bh(&chan->desc_lock);
		return -1;
	}
#endif

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_bh(&chan->desc_lock);

	return cookie;
}

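/**
 * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 * @desc : descriptor to be freed
 */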
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
				    struct fsl_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

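/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */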
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

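/**
 * fsldma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan : Freescale DMA channel
 *
 * This function is used on all completed and acked descriptors.
 * All descriptors should only be freed in this function.
 */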
static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;

	/* free every descriptor which the async_tx API has acked */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
		if (async_tx_test_ack(&desc->async_tx))
			fsl_dma_free_descriptor(chan, desc);
}

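/**
 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan : Freescale DMA channel
 * @desc : descriptor to cleanup
 * @cookie : Freescale DMA transaction identifier
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks and submit any dependencies.
 */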
static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	dma_cookie_t ret = cookie;

	BUG_ON(txd->cookie < 0);

	if (txd->cookie > 0) {
		ret = txd->cookie;

		dma_descriptor_unmap(txd);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	return ret;
}

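/**
 * fsldma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan : Freescale DMA channel
 * @desc : the descriptor which is completed
 *
 * Free the descriptor directly if acked by the async_tx API, else move
 * it to the ld_completed queue.
 */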
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
					    struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

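/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * LOCKING: must hold chan->desc_lock
 */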
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

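/**
 * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan : Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */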
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}

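/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @dchan : DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */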
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32bytes
	 * for meeting FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

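/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan : Freescale DMA channel
 * @list : the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */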
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

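/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @dchan : DMA channel
 */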
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	spin_unlock_bh(&chan->desc_lock);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);

	/* Halt the DMA engine */
	dma_halt(chan);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	chan->idle = true;

	spin_unlock_bh(&chan->desc_lock);
	return 0;
}

static int fsl_dma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct fsldma_chan *chan;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* make sure the channel supports setting burst size */
	if (!chan->set_request_count)
		return -ENXIO;

	/* we set the controller burst size depending on direction */
	if (config->direction == DMA_MEM_TO_DEV)
		size = config->dst_addr_width * config->dst_maxburst;
	else
		size = config->src_addr_width * config->src_maxburst;

	chan->set_request_count(chan, size);
	return 0;
}

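/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @dchan : DMA channel
 */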
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

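/**
 * fsl_tx_status - Determine the DMA status
 * @dchan : DMA channel
 */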
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	return dma_cookie_status(dchan, cookie, txstate);
}

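/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/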
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If this was an end-of-link interrupt, update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct fsldma_chan *chan = from_tasklet(chan, t, tasklet);

	chan_dbg(chan, "tasklet entry\n");

	spin_lock(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

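/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/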
static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, there is something wrong.
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_setup(&chan->tasklet, dma_do_tasklet);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		fallthrough;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	chan->idle = true;
#ifdef CONFIG_PM
	chan->pm_state = RUNNING;
#endif

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	unsigned int i;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_config = fsl_dma_device_config;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
out_free:
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

#ifdef CONFIG_PM
static int fsldma_suspend_late(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		if (unlikely(!chan->idle))
			goto out;
		chan->regs_save.mr = get_mr(chan);
		chan->pm_state = SUSPENDED;
		spin_unlock_bh(&chan->desc_lock);
	}
	return 0;

out:
	/* only the channel that failed the idle check is still locked */
	spin_unlock_bh(&chan->desc_lock);

	/* mark every channel visited so far as running again */
	for (; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;
		chan->pm_state = RUNNING;
	}
	return -EBUSY;
}

static int fsldma_resume_early(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	u32 mode;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		mode = chan->regs_save.mr
			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
		set_mr(chan, mode);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}

	return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late = fsldma_suspend_late,
	.resume_early = fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

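/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/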
static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");