// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Author: Mika Westerberg <mika.westerberg@iki.fi>
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only).
 *                This is set via .device_config before the slave operation
 *                is prepared
 * @runtime_ctrl: M2M runtime values for the control register
 * @slave_config: last slave configuration passed to this channel
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors
 * we use a slightly different scheme: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active list,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
	struct dma_slave_config		slave_config;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with the
 * channel lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before
 * calling this function.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor
		 * in the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * There is no easy way of reporting the error back to the
		 * client, so we just log the failed transfer here and
		 * continue as usual.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * STALL and NFB are handled the same way: either one means that the
	 * current buffer has been consumed and the next one can be filled.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * A PWSC (peripheral wait states count) of less than 5 makes
		 * the channel perform only a partial transfer, in which case
		 * the DONE interrupt is never raised.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * PWSC values for the IDE case are taken from the EP93xx
		 * User's Guide.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger is used. The
		 * previous write enabled the channel, this one starts the
		 * transfer.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * The M2M DMA controller asserts the DONE interrupt when all transactions
 * have completed. In practice the DONE bit can be set while the channel is
 * still running (Buffer FSM in BUF_ON state, Control FSM in MEMRD state), so
 * disabling the channel on DONE alone could abort a transfer that is still
 * in flight. The Buffer and Control FSM states from the status register are
 * therefore used to decide what to do.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines the action to take on interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the Buffer FSM and Control FSM to decide the current state of
	 * the DMA channel; the DONE and NFB bits alone are not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when the Buffer FSM is in
		 * BUF_NO state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit. To keep
		 * things simple, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM is in BUF_NO state
	 * and the Control FSM is in STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(struct tasklet_struct *t)
{
	struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor to the
	 * pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. Negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_prepare_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable_unprepare(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable_unprepare(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure. Transfers
 * larger than %DMA_MAX_CHAN_BYTES are split into chained descriptors.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting in a cyclic fashion for the
 * buffer until the channel is stopped. This is called from the client driver
 * when periodic DMA is needed.
 *
 * Due to EP93xx design the cyclic transfer is broken into smaller pieces of
 * size @period_len.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 *			    current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated with
 * them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	memcpy(&edmac->slave_config, config, sizeof(*config));

	return 0;
}

static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_tx_status - query tx status of given DMA channel
 * @chan: channel
 * @cookie: cookie
 * @state: state of the transaction
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];

			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");