// SPDX-License-Identifier: GPL-2.0
/*
 * DMA driver for the Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_dma.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *                 bit 23:16 read burst
 *                 bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *          bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};
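
/*
 * Illustrative example (not part of the driver): a single extended
 * descriptor for a 4 KiB memory-to-memory copy, roughly as
 * msgdma_desc_config() later in this file fills it in. The addresses
 * are made up.
 *
 *	desc.read_addr_lo  = 0x10000000;	// lower 32 bits of source
 *	desc.read_addr_hi  = 0x00000000;	// upper 32 bits of source
 *	desc.write_addr_lo = 0x20000000;	// lower 32 bits of destination
 *	desc.write_addr_hi = 0x00000000;	// upper 32 bits of destination
 *	desc.len           = 0x1000;		// 4096 bytes
 *	desc.burst_seq_num = 0;
 *	desc.stride        = MSGDMA_DESC_STRIDE_RW;
 *	desc.control       = MSGDMA_DESC_CTL_TR_ERR_IRQ |
 *			     MSGDMA_DESC_CTL_END_ON_LEN |
 *			     MSGDMA_DESC_CTL_TR_COMP_IRQ |	// last descriptor only
 *			     MSGDMA_DESC_CTL_GO;
 */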

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write sequence number */
						/* 15:00 - read sequence number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP		BIT(0)
#define MSGDMA_CSR_CTL_RESET		BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR	BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY	BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR	BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS	BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)		(((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node to move from the free list to the tx list
 * @tx_list: transmit list node
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Free a descriptor back to the free list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_add_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free a list of descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List of descriptors to free
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dst addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dst addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit a DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
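
/*
 * Illustrative client usage (not part of the driver): a dmaengine consumer
 * issuing a memcpy through this channel. Error handling is omitted; the
 * chan pointer and the dst_dma/src_dma handles are hypothetical and would
 * come from dma_request_chan() and the DMA mapping API respectively.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		// ends up in msgdma_tx_submit()
 *	dma_async_issue_pending(chan);		// ends up in msgdma_issue_pending()
 */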

/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Scatterlist to transfer from/to
 * @sg_len: Number of entries in the scatterlist
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;
	unsigned long irqflags;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}
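
/*
 * Illustrative client usage (not part of the driver): configuring the
 * channel for a MEM_TO_DEV slave transfer before preparing a scatter-gather
 * transaction. The FIFO address is hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = 0xff200000,		// device data register (example)
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	// calls msgdma_dma_config()
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */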

static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check if the DESC FIFO is full. If it is, wait until at least
	 * one entry becomes free again.
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor is copied into the descriptor FIFO of the DMA
	 * controller. The controller flushes the descriptor to the FIFO
	 * once the last word (the control word) is written. Since memcpy()
	 * gives no guarantee about the order in which the words reach the
	 * FIFO, copy everything except the control word first and then
	 * write the control word explicitly, framed by wmb() barriers.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - Copy a descriptor chain into the ctrl FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: First transaction descriptor of the chain
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Must be called with mdev->lock held.
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Clean up completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Called with mdev->lock held; the lock is dropped around client callbacks.
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		struct dmaengine_desc_callback cb;

		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock(&mdev->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock(&mdev->lock);
		}

		/* Return the descriptor (and its children) to the free list */
		msgdma_free_descriptor(mdev, desc);
	}
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Handle completed descriptors and run their callbacks
 * @t: Pointer to the tasklet embedded in the mSGDMA device structure
 */
static void msgdma_tasklet(struct tasklet_struct *t)
{
	struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	if (mdev->resp) {
		/* Read number of responses that are available */
		count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
		dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
			__func__, __LINE__, count);
	} else {
		count = 1;
	}

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not
		 * carry real values (such as transferred bytes or error
		 * bits), so the values read here are simply dropped.
		 */
		if (mdev->resp) {
			size = ioread32(mdev->resp +
					MSGDMA_RESP_BYTES_TRANSFERRED);
			status = ioread32(mdev->resp +
					  MSGDMA_RESP_STATUS);
		}

		msgdma_complete_descriptor(mdev);
		msgdma_chan_desc_cleanup(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_irq_handler - mSGDMA interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove routine
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr,
			   bool optional)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		if (optional) {
			*ptr = NULL;
			dev_info(device, "optional resource %s not defined\n",
				 name);
			return 0;
		}
		dev_err(device, "mandatory resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap(device, region->start,
			    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap of %s failed!\n", name);
		return -ENOMEM;
	}

	return 0;
}

/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
	if (ret)
		return ret;

	/* Map response space */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return -ENXIO;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
			      BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Set DMA mask to 64 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret == -EINVAL)
		dev_warn(&pdev->dev, "device was not probed from DT\n");
	else if (ret && ret != -ENODEV)
		goto fail;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id msgdma_match[] = {
	{ .compatible = "altr,socfpga-msgdma", },
	{ }
};

MODULE_DEVICE_TABLE(of, msgdma_match);
#endif
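
/*
 * Illustrative device tree node (not a binding document): the "csr",
 * "desc" and "resp" reg-names match the resources requested in
 * msgdma_probe(); the addresses, sizes and interrupt specifier are
 * made up.
 *
 *	msgdma0: dma-controller@ff200000 {
 *		compatible = "altr,socfpga-msgdma";
 *		reg = <0xff200000 0x20>,
 *		      <0xff200020 0x20>,
 *		      <0xff200040 0x8>;
 *		reg-names = "csr", "desc", "resp";
 *		interrupts = <0 40 4>;
 *	};
 */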

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
		.of_match_table = of_match_ptr(msgdma_match),
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");