// SPDX-License-Identifier: GPL-2.0+
/*
 * DMA driver for the Freescale MPC512x and MPC8308 DMA controllers.
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>.
 */
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer Descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Offset of the hardware TCD array within the register block */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants, and the
 * maximum channel count over all supported controllers (used for
 * data structure sizing).
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes in the DMA error status register (DMAES) */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

/* TCD transfer size encodings (bytes = 1 << value) */
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

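/* DMA engine registers memory map */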
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high (channels 63..32) */
	u32 dmaerql;		/* DMA enable request low (channels 31..0) */
	u32 dmaeeih;		/* DMA enable error interrupt high (ch 63..32) */
	u32 dmaeeil;		/* DMA enable error interrupt low (ch 31..0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high (ch 63..32) */
	u32 dmaintl;		/* DMA interrupt request low (ch 31..0) */
	u32 dmaerrh;		/* DMA error high (ch 63..32) */
	u32 dmaerrl;		/* DMA error low (ch 31..0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hardware request status high (ch 63..32) */
	u32 dmahrsl;		/* DMA hardware request status low (ch 31..0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select (ch 63..32) */
		u32 dmagpor;	/* General purpose register (MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select (ch 31..0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];	/* DMA channel (0..63) priority */
};

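/* Transfer Control Descriptor (TCD): the hardware's per-transfer record */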
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner ("minor") byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current ("major") iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last destination address adjustment or
				 * scatter/gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for minor loop complete */
	u32 biter:9;		/* Beginning ("major") iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on
				 * major loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter
				 * is half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

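/* Software descriptor wrapping one hardware TCD */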
struct mpc_dma_desc {
	struct dma_async_tx_descriptor desc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	int error;
	struct list_head node;
	int will_access_peripheral;
};

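/* Per-channel state; the descriptor lists and settings are protected by 'lock' */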
struct mpc_dma_chan {
	struct dma_chan chan;
	struct list_head free;
	struct list_head prepared;
	struct list_head queued;
	struct list_head active;
	struct list_head completed;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t src_per_paddr;
	u32 src_tcd_nunits;
	u8 swidth;
	dma_addr_t dst_per_paddr;
	u32 dst_tcd_nunits;
	u8 dwidth;

	/* Lock for this structure */
	spinlock_t lock;
};

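/* Per-controller state */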
struct mpc_dma {
	struct dma_device dma;
	struct tasklet_struct tasklet;
	struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem *regs;
	struct mpc_dma_tcd __iomem *tcd;
	int irq;
	int irq2;
	uint error_status;
	int is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

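/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met when calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */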
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
					 struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor; don't mix
		 * mem-to-mem and peripheral transfer descriptors within
		 * the same command.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into single transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved: start by enabling the request line */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}
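/* Handle interrupt on one half of DMA controller (32 channels) */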
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		/* Clear interrupt and error status for this channel */
		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Complete active descriptors and execute queued ones */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
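/* Interrupt handler */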
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
				    in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
			    in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule postprocessing of completed transfers */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
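/* Process completed descriptors: invoke callbacks and recycle descriptors */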
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			dmaengine_desc_get_callback_invoke(desc, NULL);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
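/* DMA Tasklet */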
static void mpc_dma_tasklet(struct tasklet_struct *t)
{
	struct mpc_dma *mdma = from_tasklet(mdma, t, tasklet);
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}
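/* Submit descriptor to hardware */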
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
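/* Allocate channel resources */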
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
				 MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
				 &tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				   "Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
				  MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
				  tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
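/* Free channel resources */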
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Grab the descriptor list and TCD memory under the lock */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			  MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			  tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
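/*
 * Send all pending descriptors to hardware.
 * Descriptors are posted to the hardware as soon as they are submitted
 * in tx_submit(), so this callback has nothing left to do.
 */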
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
}
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
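/* Prepare descriptor for memory to memory copy */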
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	/* Use the widest transfer size the alignment of src, dst and len allows */
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
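/*
 * Convert a bus width in bytes (a power of two) to the log2 encoding
 * used by the TCD ssize/dsize fields.
 */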
static inline u8 buswidth_to_dmatsize(u8 buswidth)
{
	u8 res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}
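/* Prepare descriptor for peripheral transfer */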
static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len, enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		/*
		 * list_first_entry() never returns NULL, so test for an
		 * empty list explicitly before taking a descriptor.
		 */
		if (list_empty(&mchan->free)) {
			spin_unlock_irqrestore(&mchan->lock, iflags);

			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		mdesc = list_first_entry(&mchan->free,
					 struct mpc_dma_desc, node);
		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
				goto err_prep;

			tcd->soff = 0;
			tcd->doff = mchan->dwidth;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
				goto err_prep;

			tcd->soff = mchan->swidth;
			tcd->doff = 0;
		}

		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

		if (mdma->is_mpc8308) {
			tcd->nbytes = sg_dma_len(sg);
			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
				goto err_prep;

			/* No major loops for MPC8308 */
			tcd->biter = 1;
			tcd->citer = 1;
		} else {
			len = sg_dma_len(sg);
			/*
			 * The minor loop moves maxburst words of the
			 * peripheral's bus width per hardware request;
			 * nbytes counts bytes, so scale the burst length
			 * by the width in bytes rather than by the
			 * log2-encoded transfer size.
			 */
			if (direction == DMA_DEV_TO_MEM)
				tcd->nbytes = tcd_nunits * mchan->swidth;
			else
				tcd->nbytes = tcd_nunits * mchan->dwidth;
			if (!IS_ALIGNED(len, tcd->nbytes))
				goto err_prep;

			iter = len / tcd->nbytes;
			if (iter >= 1 << 15) {
				/* len is too big */
				goto err_prep;
			}

			/* citer_linkch contains the high bits of iter */
			tcd->biter = iter & 0x1ff;
			tcd->biter_linkch = iter >> 9;
			tcd->citer = tcd->biter;
			tcd->citer_linkch = tcd->biter_linkch;
		}

		/* Hardware clears the request line when the transfer completes */
		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}
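/* Check whether the given bus width is supported by the hardware */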
static inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		/* MPC8308 doesn't support 16-byte transfers */
		if (is_mpc8308)
			return false;
		break;
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}
static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *    supported
	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *    are supported, and, consequently, source and destination
	 *    addresses must be aligned accordingly
	 *  - during the transfer, the RAM address is incremented by the size
	 *    of the transfer chunk
	 *  - the peripheral port's address is constant during the transfer.
	 */

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
		return -EINVAL;
	}

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
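/* Disable channel requests and return all descriptors to the free list */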
static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (!mdma->irq) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (!mdma->irq2) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
				     DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_setup(&mdma->tasklet, mpc_dma_tasklet);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* Enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupt status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					     MPC_DMA_DMACR_ERGA |
					     MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupt status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						    of_dma_xlate_by_chan_id,
						    mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);
	tasklet_kill(&mdma->tasklet);

	return 0;
}

1105
1106 static const struct of_device_id mpc_dma_match[] = {
1107 { .compatible = "fsl,mpc5121-dma", },
1108 { .compatible = "fsl,mpc8308-dma", },
1109 {},
1110 };
1111 MODULE_DEVICE_TABLE(of, mpc_dma_match);
1112
1113 static struct platform_driver mpc_dma_driver = {
1114 .probe = mpc_dma_probe,
1115 .remove = mpc_dma_remove,
1116 .driver = {
1117 .name = DRV_NAME,
1118 .of_match_table = mpc_dma_match,
1119 },
1120 };
1121
1122 module_platform_driver(mpc_dma_driver);
1123
1124 MODULE_LICENSE("GPL");
1125 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");