0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063 #include <linux/amba/bus.h>
0064 #include <linux/amba/pl08x.h>
0065 #include <linux/debugfs.h>
0066 #include <linux/delay.h>
0067 #include <linux/device.h>
0068 #include <linux/dmaengine.h>
0069 #include <linux/dmapool.h>
0070 #include <linux/dma-mapping.h>
0071 #include <linux/export.h>
0072 #include <linux/init.h>
0073 #include <linux/interrupt.h>
0074 #include <linux/module.h>
0075 #include <linux/of.h>
0076 #include <linux/of_dma.h>
0077 #include <linux/pm_runtime.h>
0078 #include <linux/seq_file.h>
0079 #include <linux/slab.h>
0080 #include <linux/amba/pl080.h>
0081
0082 #include "dmaengine.h"
0083 #include "virt-dma.h"
0084
#define DRIVER_NAME	"pl08xdmac"

/*
 * Slave bus widths advertised through the dmaengine capability API;
 * the hardware control word supports 1/2/4-byte element widths.
 */
#define PL80X_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/* Forward declarations: the amba driver object and the driver state. */
static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
/**
 * struct vendor_data - per-variant hardware parameters for PL08x derivatives
 * @config_offset: offset of the per-channel configuration register
 * @channels: number of physical DMA channels in this variant
 * @signals: number of DMA request signals the mux can hand out
 * @dualmaster: true if the variant has two AHB masters
 * @nomadik: quirk flag selecting Nomadik-variant behaviour
 * @pl080s: quirk flag for the PL080S variant (extra CCTL2 LLI word)
 * @ftdmac020: quirk flag for the Faraday FTDMAC020 variant (different
 *	register layout; LLI control bits must be translated to CSR bits)
 * @max_transfer_size: maximum transfer-size field of one LLI, in bus-width
 *	units (used to bound bytes per LLI)
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	u8 signals;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	bool ftdmac020;
	u32 max_transfer_size;
};
0121
0122
0123
0124
0125
0126
0127
0128
/**
 * struct pl08x_bus_data - descriptor of one side (source or destination)
 * of an in-progress transfer while LLIs are being built
 * @addr: current bus address, advanced as LLIs are filled in
 * @maxwidth: maximum element width this side supports, in bytes
 * @buswidth: element width currently in use, in bytes (1, 2 or 4)
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/* True when the bus's current address is aligned to its element width */
#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
/**
 * struct pl08x_phy_chan - one physical (hardware) DMA channel
 * @id: physical channel index, also the bit position used in the
 *	shared enable/TC/error registers
 * @base: channel register block base
 * @reg_config: channel configuration register
 * @reg_control: channel control (CSR) register
 * @reg_src: source address register
 * @reg_dst: destination address register
 * @reg_lli: linked-list item pointer register
 * @reg_busy: dedicated busy register (FTDMAC020 only; NULL otherwise,
 *	in which case the ACTIVE bit of @reg_config signals busy)
 * @lock: protects claiming/releasing of @serving
 * @serving: virtual channel currently using this physical channel
 * @locked: if true this channel is never handed out (reserved elsewhere)
 * @ftdmac020: variant flag, selects FTDMAC020 register semantics
 * @pl080s: variant flag, selects the extra PL080S CCTL2 register
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	void __iomem *reg_config;
	void __iomem *reg_control;
	void __iomem *reg_src;
	void __iomem *reg_dst;
	void __iomem *reg_lli;
	void __iomem *reg_busy;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
	bool ftdmac020;
	bool pl080s;
};
0171
0172
0173
0174
0175
0176
0177
0178
/**
 * struct pl08x_sg - one contiguous chunk of a transfer
 * @src_addr: source bus address of the chunk
 * @dst_addr: destination bus address of the chunk
 * @len: length of the chunk in bytes
 * @node: entry in the owning descriptor's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
/**
 * struct pl08x_txd - a DMA transfer descriptor
 * @vd: virtual-DMA descriptor embedding (provides the cookie/tx glue)
 * @dsg_list: list of struct pl08x_sg chunks making up the transfer
 * @llis_bus: DMA (bus) address of the LLI chain allocated from the pool
 * @llis_va: CPU virtual address of the LLI chain (array of u32 words)
 * @cctl: control word template used when building each LLI
 * @ccfg: channel configuration word written when the transfer starts
 * @done: set once the transfer completed and the mux was released, so
 *	the free path knows not to release it again
 * @cyclic: true for cyclic transfers; the last LLI then points back to
 *	the first instead of terminating
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/* Settings to be put into the physical channel when we start */
	u32 ccfg;
	bool done;
	bool cyclic;
};
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
/**
 * enum pl08x_dma_chan_state - virtual channel scheduling states
 * @PL08X_CHAN_IDLE: no transfer queued, no physical channel held
 * @PL08X_CHAN_RUNNING: owns a physical channel and is transferring
 * @PL08X_CHAN_PAUSED: transfer halted, physical channel still held
 * @PL08X_CHAN_WAITING: has work queued but no physical channel was
 *	free; eligible for reassignment when one is released
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
/**
 * struct pl08x_dma_chan - a virtual DMA channel as seen by dmaengine
 * @vc: embedded virtual-DMA channel (lock, descriptor lists)
 * @phychan: physical channel currently backing this one, or NULL
 * @name: channel name used in diagnostics
 * @cd: platform channel data (request signal hooks etc.)
 * @cfg: last dma_slave_config applied by the client
 * @at: descriptor currently being transferred, or NULL
 * @host: owning driver instance
 * @state: scheduling state, see enum pl08x_dma_chan_state
 * @slave: true for a slave (peripheral) channel, false for memcpy
 * @signal: mux request signal currently held, or -1 when none
 * @mux_use: reference count of mux users (see pl08x_request_mux())
 * @waiting_at: jiffies timestamp of entering PL08X_CHAN_WAITING; used
 *	to hand a freed physical channel to the longest waiter first
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
	unsigned long waiting_at;
};
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
/**
 * struct pl08x_driver_data - driver instance state for one controller
 * @slave: dmaengine device exposing the slave (peripheral) channels
 * @memcpy: dmaengine device exposing the memcpy channels
 * @has_slave: true when @slave was registered (variant supports slaves)
 * @base: controller register base
 * @adev: the AMBA device we probe against
 * @vd: vendor/variant parameters
 * @pd: platform data (signal mux hooks, memcpy settings)
 * @phy_chans: array of physical channel state, @vd->channels entries
 * @pool: DMA pool that LLI chains are allocated from
 * @lli_buses: AHB master(s) the LLI fetches use (PL08X_AHB1/AHB2)
 * @mem_buses: AHB master(s) memory accesses use
 * @lli_words: u32 words per LLI (PL080_LLI_WORDS or PL080S_LLI_WORDS)
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	bool has_slave;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
};
0292
0293
0294
0295
0296
0297
/* Word offsets of the fields within one hardware LLI */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* u32 words occupied by one LLI (PL080S carries an extra control word) */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

/*
 * Maximum number of LLIs per pool allocation; pl08x_fill_llis_for_desc()
 * fails a transfer that would need more than this many.
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8
0314
0315 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
0316 {
0317 return container_of(chan, struct pl08x_dma_chan, vc.chan);
0318 }
0319
0320 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
0321 {
0322 return container_of(tx, struct pl08x_txd, vd.tx);
0323 }
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333 static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
0334 {
0335 const struct pl08x_platform_data *pd = plchan->host->pd;
0336 int ret;
0337
0338 if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
0339 ret = pd->get_xfer_signal(plchan->cd);
0340 if (ret < 0) {
0341 plchan->mux_use = 0;
0342 return ret;
0343 }
0344
0345 plchan->signal = ret;
0346 }
0347 return 0;
0348 }
0349
0350 static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
0351 {
0352 const struct pl08x_platform_data *pd = plchan->host->pd;
0353
0354 if (plchan->signal >= 0) {
0355 WARN_ON(plchan->mux_use == 0);
0356
0357 if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
0358 pd->put_xfer_signal(plchan->cd, plchan->signal);
0359 plchan->signal = -1;
0360 }
0361 }
0362 }
0363
0364
0365
0366
0367
0368
0369 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
0370 {
0371 unsigned int val;
0372
0373
0374 if (ch->reg_busy) {
0375 val = readl(ch->reg_busy);
0376 return !!(val & BIT(ch->id));
0377 }
0378 val = readl(ch->reg_config);
0379 return val & PL080_CONFIG_ACTIVE;
0380 }
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
/*
 * Write the first LLI of a transfer directly into the channel registers.
 * The src/dst/lli/control words are written with relaxed accessors; the
 * final writel() of ccfg provides the barrier and is what arms the
 * channel configuration.
 *
 * On the FTDMAC020 the control word cannot be written verbatim: the LLI
 * bit layout differs from the channel CSR layout, so the relevant fields
 * (TC mask, widths, address-control, source/dest selects) are translated
 * bit-group by bit-group, and burst size / protection bits are taken
 * from the platform data rather than the LLI.
 */
static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
	if (pl08x->vd->pl080s)
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
			lli[PL080S_LLI_CCTL2], ccfg);
	else
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

	writel_relaxed(lli[PL080_LLI_SRC], phychan->reg_src);
	writel_relaxed(lli[PL080_LLI_DST], phychan->reg_dst);
	writel_relaxed(lli[PL080_LLI_LLI], phychan->reg_lli);

	/*
	 * The FTDMAC020 needs the LLI-format control word translated into
	 * its channel CSR layout before it can be written.
	 */
	if (phychan->ftdmac020) {
		u32 llictl = lli[PL080_LLI_CCTL];
		u32 val = 0;

		/* Transfer size lives in its own register on this variant */
		writel_relaxed(llictl & FTDMAC020_LLI_TRANSFER_SIZE_MASK,
			       phychan->base + FTDMAC020_CH_SIZE);

		/*
		 * Translate each LLI bit-field to the corresponding CSR
		 * position by shifting right by the difference between the
		 * two field offsets.
		 */
		if (llictl & FTDMAC020_LLI_TC_MSK)
			val |= FTDMAC020_CH_CSR_TC_MSK;
		val |= ((llictl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
			(FTDMAC020_LLI_SRC_WIDTH_SHIFT -
			 FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
			(FTDMAC020_LLI_DST_WIDTH_SHIFT -
			 FTDMAC020_CH_CSR_DST_WIDTH_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_SRCAD_CTL_MSK) >>
			(FTDMAC020_LLI_SRCAD_CTL_SHIFT -
			 FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_DSTAD_CTL_MSK) >>
			(FTDMAC020_LLI_DSTAD_CTL_SHIFT -
			 FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT));
		if (llictl & FTDMAC020_LLI_SRC_SEL)
			val |= FTDMAC020_CH_CSR_SRC_SEL;
		if (llictl & FTDMAC020_LLI_DST_SEL)
			val |= FTDMAC020_CH_CSR_DST_SEL;

		/*
		 * Burst size is not carried in the LLI on this variant;
		 * take it from the platform's memcpy settings.
		 */
		switch (pl08x->pd->memcpy_burst_size) {
		default:
		case PL08X_BURST_SZ_1:
			val |= PL080_BSIZE_1 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_4:
			val |= PL080_BSIZE_4 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_8:
			val |= PL080_BSIZE_8 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_16:
			val |= PL080_BSIZE_16 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_32:
			val |= PL080_BSIZE_32 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_64:
			val |= PL080_BSIZE_64 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_128:
			val |= PL080_BSIZE_128 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_256:
			val |= PL080_BSIZE_256 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		}

		/* Protection flags likewise come from the platform data */
		if (pl08x->pd->memcpy_prot_buff)
			val |= FTDMAC020_CH_CSR_PROT2;
		if (pl08x->pd->memcpy_prot_cache)
			val |= FTDMAC020_CH_CSR_PROT3;
		/* We are the kernel, so we are in privileged mode */
		val |= FTDMAC020_CH_CSR_PROT1;

		writel_relaxed(val, phychan->reg_control);
	} else {
		/* Bits are just identical */
		writel_relaxed(lli[PL080_LLI_CCTL], phychan->reg_control);
	}

	/* Second control word for the PL080s */
	if (pl08x->vd->pl080s)
		writel_relaxed(lli[PL080S_LLI_CCTL2],
				phychan->base + PL080S_CH_CONTROL2);

	/* Non-relaxed write: orders everything above before arming */
	writel(ccfg, phychan->reg_config);
}
0520
0521
0522
0523
0524
0525
0526
/*
 * Set the initial DMA register values and start the first transfer
 * queued on the virtual channel.
 *
 * Must be called with the channel's vc.lock held, with a physical
 * channel assigned, and with at least one descriptor queued: the
 * vchan_next_desc() result is dereferenced unconditionally.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	u32 val;

	/* Take it off the pending list; it is now the in-flight txd */
	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);

	/* Spin until the channel's enable bit has cleared */
	while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	if (phychan->ftdmac020) {
		val = readl(phychan->reg_config);
		while (val & FTDMAC020_CH_CFG_BUSY)
			val = readl(phychan->reg_config);

		val = readl(phychan->reg_control);
		while (val & FTDMAC020_CH_CSR_EN)
			val = readl(phychan->reg_control);

		/* Enable via the channel CSR on this variant */
		writel(val | FTDMAC020_CH_CSR_EN,
		       phychan->reg_control);
	} else {
		val = readl(phychan->reg_config);
		while ((val & PL080_CONFIG_ACTIVE) ||
		       (val & PL080_CONFIG_ENABLE))
			val = readl(phychan->reg_config);

		/* Enable via the channel configuration register */
		writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
	}
}
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
/*
 * Pause the channel by halting it and letting any in-flight data drain.
 * On the FTDMAC020 there is no halt bit, so the channel is simply
 * disabled via its CSR enable bit instead. On PL080-style hardware the
 * HALT bit is set and we poll (up to ~1ms) for the channel to go idle.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	if (ch->ftdmac020) {
		/* Use the enable bit on the FTDMAC020 */
		val = readl(ch->reg_control);
		val &= ~FTDMAC020_CH_CSR_EN;
		writel(val, ch->reg_control);
		return;
	}

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->reg_config);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->reg_config);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}
0609
/*
 * Resume a previously paused channel: re-enable on the FTDMAC020,
 * clear the HALT bit on PL080-style hardware.
 */
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Use the enable bit on the FTDMAC020 */
	if (ch->ftdmac020) {
		val = readl(ch->reg_control);
		val |= FTDMAC020_CH_CSR_EN;
		writel(val, ch->reg_control);
		return;
	}

	/* Clear the HALT bit */
	val = readl(ch->reg_config);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->reg_config);
}
0627
0628
0629
0630
0631
0632
0633
/*
 * Stop the channel and clear its interrupts. This discards any data in
 * the DMA FIFO: on the FTDMAC020 the channel is aborted (masking its
 * interrupts first), on PL080-style hardware the channel is disabled
 * with its error/TC interrupts masked. Any latched TC/error status for
 * this channel is then cleared in the shared clear registers.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val;

	/* The layout for the FTDMAC020 is different */
	if (ch->ftdmac020) {
		/* Mask out all interrupts from this channel */
		val = readl(ch->reg_config);
		val |= (FTDMAC020_CH_CFG_INT_ABT_MASK |
			FTDMAC020_CH_CFG_INT_ERR_MASK |
			FTDMAC020_CH_CFG_INT_TC_MASK);
		writel(val, ch->reg_config);

		/* Abort and disable channel */
		val = readl(ch->reg_control);
		val &= ~FTDMAC020_CH_CSR_EN;
		val |= FTDMAC020_CH_CSR_ABT;
		writel(val, ch->reg_control);

		/*
		 * Clear pending TC and error status; the abort status
		 * bits live 16 positions above the error bits, hence
		 * the second BIT(ch->id + 16).
		 */
		writel(BIT(ch->id) | BIT(ch->id + 16),
		       pl08x->base + PL080_ERR_CLEAR);
		writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);

		return;
	}

	/* Disable the channel with its interrupts masked */
	val = readl(ch->reg_config);
	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);
	writel(val, ch->reg_config);

	/* Clear any latched status for this channel */
	writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
	writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
}
0670
/*
 * Read back how many bytes remain in the currently executing LLI of a
 * physical channel: the hardware transfer-size counter (in source-width
 * units) scaled by the source element width.
 */
static u32 get_bytes_in_phy_channel(struct pl08x_phy_chan *ch)
{
	u32 val;
	u32 bytes;

	if (ch->ftdmac020) {
		/* Transfer size counter lives in a dedicated register */
		bytes = readl(ch->base + FTDMAC020_CH_SIZE);

		val = readl(ch->reg_control);
		val &= FTDMAC020_CH_CSR_SRC_WIDTH_MSK;
		val >>= FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT;
	} else if (ch->pl080s) {
		/* PL080S keeps the transfer size in the second control word */
		val = readl(ch->base + PL080S_CH_CONTROL2);
		bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;

		val = readl(ch->reg_control);
		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	} else {
		/* Plain PL08x: size and width share the control register */
		val = readl(ch->reg_control);
		bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;

		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	}

	/* Scale the element count by the source element width */
	switch (val) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
0710
/*
 * Compute the byte count described by one in-memory LLI: its
 * transfer-size field (in source-width units) scaled by the source
 * element width, with the variant-specific field layouts handled.
 */
static u32 get_bytes_in_lli(struct pl08x_phy_chan *ch, const u32 *llis_va)
{
	u32 val;
	u32 bytes;

	if (ch->ftdmac020) {
		val = llis_va[PL080_LLI_CCTL];
		bytes = val & FTDMAC020_LLI_TRANSFER_SIZE_MASK;

		val = llis_va[PL080_LLI_CCTL];
		val &= FTDMAC020_LLI_SRC_WIDTH_MSK;
		val >>= FTDMAC020_LLI_SRC_WIDTH_SHIFT;
	} else if (ch->pl080s) {
		/* PL080S keeps the transfer size in the extra CCTL2 word */
		val = llis_va[PL080S_LLI_CCTL2];
		bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;

		val = llis_va[PL080_LLI_CCTL];
		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	} else {
		/* Plain PL08x: size and width share the control word */
		val = llis_va[PL080_LLI_CCTL];
		bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;

		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	}

	/* Scale the element count by the source element width */
	switch (val) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
0751
0752
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	const u32 *llis_va, *llis_va_limit;
	struct pl08x_phy_chan *ch;
	dma_addr_t llis_bus;
	struct pl08x_txd *txd;
	u32 llis_max_words;
	size_t bytes;
	u32 clli;

	ch = plchan->phychan;
	txd = plchan->at;

	/* Nothing in flight: no residue */
	if (!ch || !txd)
		return 0;

	/*
	 * Read the next-LLI pointer the hardware will fetch, masking off
	 * the AHB master selection bit so it compares as a plain address.
	 */
	clli = readl(ch->reg_lli) & ~PL080_LLI_LM_AHB2;

	/* First get the remaining bytes in the active transfer */
	bytes = get_bytes_in_phy_channel(ch);

	/* A zero next-pointer means we are on the final LLI already */
	if (!clli)
		return bytes;

	llis_va = txd->llis_va;
	llis_bus = txd->llis_bus;

	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
	/* The next pointer must land inside this descriptor's LLI pool entry */
	BUG_ON(clli < llis_bus || clli >= llis_bus +
						sizeof(u32) * llis_max_words);

	/*
	 * Locate the next LLI in the virtual mapping: same offset from
	 * the chain start as the bus address has.
	 */
	llis_va += (clli - llis_bus) / sizeof(u32);

	llis_va_limit = llis_va + llis_max_words;

	/*
	 * Sum the bytes of every LLI still to be executed. The chain is
	 * laid out with ascending next-pointers, so a next-pointer that
	 * does not advance past clli marks the final LLI (it is 0 for a
	 * terminated chain, or wraps backwards for a cyclic one).
	 */
	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
		bytes += get_bytes_in_lli(ch, llis_va);

		if (llis_va[PL080_LLI_LLI] <= clli)
			break;
	}

	return bytes;
}
0809
0810
0811
0812
0813
0814
0815
0816
0817 static struct pl08x_phy_chan *
0818 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
0819 struct pl08x_dma_chan *virt_chan)
0820 {
0821 struct pl08x_phy_chan *ch = NULL;
0822 unsigned long flags;
0823 int i;
0824
0825 for (i = 0; i < pl08x->vd->channels; i++) {
0826 ch = &pl08x->phy_chans[i];
0827
0828 spin_lock_irqsave(&ch->lock, flags);
0829
0830 if (!ch->locked && !ch->serving) {
0831 ch->serving = virt_chan;
0832 spin_unlock_irqrestore(&ch->lock, flags);
0833 break;
0834 }
0835
0836 spin_unlock_irqrestore(&ch->lock, flags);
0837 }
0838
0839 if (i == pl08x->vd->channels) {
0840
0841 return NULL;
0842 }
0843
0844 return ch;
0845 }
0846
0847
/* Mark the physical channel as free.  Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}
0853
0854
0855
0856
0857
0858
0859 static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
0860 {
0861 struct pl08x_driver_data *pl08x = plchan->host;
0862 struct pl08x_phy_chan *ch;
0863
0864 ch = pl08x_get_phy_channel(pl08x, plchan);
0865 if (!ch) {
0866 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
0867 plchan->state = PL08X_CHAN_WAITING;
0868 plchan->waiting_at = jiffies;
0869 return;
0870 }
0871
0872 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
0873 ch->id, plchan->name);
0874
0875 plchan->phychan = ch;
0876 plchan->state = PL08X_CHAN_RUNNING;
0877 pl08x_start_next_txd(plchan);
0878 }
0879
/*
 * Hand an already-claimed physical channel over to a waiting virtual
 * channel and start its next descriptor. Called from pl08x_phy_free()
 * with the new owner's vc.lock held; the channel was terminated by the
 * caller, so it is idle and safe to reprogram.
 */
static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}
0898
0899
0900
0901
0902
/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel that has been waiting. Picks the waiter with the
 * oldest waiting_at timestamp across both the memcpy and (if present)
 * slave channel lists, terminates the physical channel, and either
 * hands it directly to that waiter or returns it to the free pool.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;
	unsigned long waiting_at;
retry:
	next = NULL;
	waiting_at = jiffies;

	/*
	 * Find a waiting virtual channel for the next transfer.
	 * To be fair, time when each channel reached waiting state is
	 * compared to select channel that is waiting for the longest time.
	 */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING &&
		    p->waiting_at <= waiting_at) {
			next = p;
			waiting_at = p->waiting_at;
		}

	if (!next && pl08x->has_slave) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING &&
			    p->waiting_at <= waiting_at) {
				next = p;
				waiting_at = p->waiting_at;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * NOTE(review): next->vc.lock is taken while the caller
		 * holds plchan->vc.lock; the retry loop re-checks the
		 * WAITING state under the lock in case the waiter changed
		 * state in between.
		 */
		spin_lock(&next->vc.lock);
		/* This will happen soon as it is dropping its phy channel */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}
0961
0962
0963
0964
0965
0966 static inline unsigned int
0967 pl08x_get_bytes_for_lli(struct pl08x_driver_data *pl08x,
0968 u32 cctl,
0969 bool source)
0970 {
0971 u32 val;
0972
0973 if (pl08x->vd->ftdmac020) {
0974 if (source)
0975 val = (cctl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
0976 FTDMAC020_LLI_SRC_WIDTH_SHIFT;
0977 else
0978 val = (cctl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
0979 FTDMAC020_LLI_DST_WIDTH_SHIFT;
0980 } else {
0981 if (source)
0982 val = (cctl & PL080_CONTROL_SWIDTH_MASK) >>
0983 PL080_CONTROL_SWIDTH_SHIFT;
0984 else
0985 val = (cctl & PL080_CONTROL_DWIDTH_MASK) >>
0986 PL080_CONTROL_DWIDTH_SHIFT;
0987 }
0988
0989 switch (val) {
0990 case PL080_WIDTH_8BIT:
0991 return 1;
0992 case PL080_WIDTH_16BIT:
0993 return 2;
0994 case PL080_WIDTH_32BIT:
0995 return 4;
0996 default:
0997 break;
0998 }
0999 BUG();
1000 return 0;
1001 }
1002
/*
 * Take a template control word and stamp the source width, destination
 * width and transfer size into it, returning the completed control
 * word. All other bits of the template are preserved. The FTDMAC020
 * uses different field positions, hence the two symmetrical halves.
 * BUGs on a width that is not 1, 2 or 4 bytes.
 */
static inline u32 pl08x_lli_control_bits(struct pl08x_driver_data *pl08x,
					 u32 cctl,
					 u8 srcwidth, u8 dstwidth,
					 size_t tsize)
{
	u32 retbits = cctl;

	/*
	 * Remove all src, dst and transfer size bits, then set the
	 * width and size according to the parameters. The bit offsets
	 * are different in the FTDMAC020 so we need to accound for this.
	 */
	if (pl08x->vd->ftdmac020) {
		retbits &= ~FTDMAC020_LLI_DST_WIDTH_MSK;
		retbits &= ~FTDMAC020_LLI_SRC_WIDTH_MSK;
		retbits &= ~FTDMAC020_LLI_TRANSFER_SIZE_MASK;

		switch (srcwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		switch (dstwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		/* Clip the size to what the field can carry */
		tsize &= FTDMAC020_LLI_TRANSFER_SIZE_MASK;
		retbits |= tsize << FTDMAC020_LLI_TRANSFER_SIZE_SHIFT;
	} else {
		retbits &= ~PL080_CONTROL_DWIDTH_MASK;
		retbits &= ~PL080_CONTROL_SWIDTH_MASK;
		retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

		switch (srcwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		switch (dstwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		/* Clip the size to what the field can carry */
		tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
		retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	}

	return retbits;
}
1105
/**
 * struct pl08x_lli_build_data - scratch state while building LLI chains
 * @txd: the descriptor whose LLI chain is being filled in
 * @srcbus: source side (address advanced as LLIs are emitted)
 * @dstbus: destination side
 * @remainder: bytes of the current scatter chunk still to be covered
 * @lli_bus: AHB master selection bit OR'd into every next-LLI pointer
 */
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123 static void pl08x_choose_master_bus(struct pl08x_driver_data *pl08x,
1124 struct pl08x_lli_build_data *bd,
1125 struct pl08x_bus_data **mbus,
1126 struct pl08x_bus_data **sbus,
1127 u32 cctl)
1128 {
1129 bool dst_incr;
1130 bool src_incr;
1131
1132
1133
1134
1135
1136 if (pl08x->vd->ftdmac020) {
1137 dst_incr = true;
1138 src_incr = true;
1139 } else {
1140 dst_incr = !!(cctl & PL080_CONTROL_DST_INCR);
1141 src_incr = !!(cctl & PL080_CONTROL_SRC_INCR);
1142 }
1143
1144
1145
1146
1147
1148 if (!dst_incr) {
1149 *mbus = &bd->dstbus;
1150 *sbus = &bd->srcbus;
1151 } else if (!src_incr) {
1152 *mbus = &bd->srcbus;
1153 *sbus = &bd->dstbus;
1154 } else {
1155 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
1156 *mbus = &bd->dstbus;
1157 *sbus = &bd->srcbus;
1158 } else {
1159 *mbus = &bd->srcbus;
1160 *sbus = &bd->dstbus;
1161 }
1162 }
1163 }
1164
1165
1166
1167
/*
 * Fill in LLI number @num_llis of the descriptor's chain, link it to
 * the following slot, and advance the build state: the incrementing
 * side(s) move forward by @len bytes and the chunk remainder shrinks
 * accordingly. @cctl2 is only stored on PL080S (the extra CCTL2 word).
 */
static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_lli_build_data *bd,
				    int num_llis, int len, u32 cctl, u32 cctl2)
{
	u32 offset = num_llis * pl08x->lli_words;
	u32 *llis_va = bd->txd->llis_va + offset;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	/* Advance the offset to next LLI. */
	offset += pl08x->lli_words;

	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
	llis_va[PL080_LLI_CCTL] = cctl;
	if (pl08x->vd->pl080s)
		llis_va[PL080S_LLI_CCTL2] = cctl2;

	if (pl08x->vd->ftdmac020) {
		/* FIXME: only memcpy so far so both increase */
		bd->srcbus.addr += len;
		bd->dstbus.addr += len;
	} else {
		if (cctl & PL080_CONTROL_SRC_INCR)
			bd->srcbus.addr += len;
		if (cctl & PL080_CONTROL_DST_INCR)
			bd->dstbus.addr += len;
	}

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}
1204
1205 static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
1206 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
1207 int num_llis, size_t *total_bytes)
1208 {
1209 *cctl = pl08x_lli_control_bits(pl08x, *cctl, 1, 1, len);
1210 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
1211 (*total_bytes) += len;
1212 }
1213
/*
 * Dump an LLI chain via dev_vdbg(). Compiled in unconditionally here
 * (the #if 1), with a stub alternative kept below for builds that want
 * to drop it; output is only emitted when verbose debug is enabled.
 */
#if 1
static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
			   const u32 *llis_va, int num_llis)
{
	int i;

	if (pl08x->vd->pl080s) {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s  %-10s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL],
				llis_va[PL080S_LLI_CCTL2]);
			llis_va += pl08x->lli_words;
		}
	} else {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s  %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL]);
			llis_va += pl08x->lli_words;
		}
	}
}
#else
static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
				  const u32 *llis_va, int num_llis) {}
#endif
1251
1252
1253
1254
1255
1256
/*
 * This fills in the table of LLIs for the transfer descriptor.
 * Note that we assume we never have to change the burst sizes.
 *
 * For each scatter chunk, unaligned head/tail bytes are emitted as
 * single-byte LLIs and the aligned middle is emitted as maximally-sized
 * LLIs at the chosen bus widths. Returns the number of LLIs written,
 * or 0 on any failure (the caller frees the descriptor, which also
 * returns the pool allocation).
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	u32 *llis_va, *last_lli;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, true);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, false);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(pl08x, &bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev,
			"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
			(u64)bd.srcbus.addr,
			cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			(u64)bd.dstbus.addr,
			cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these conditions are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, Only one sg will be passed by user driver, with
		 *   memory address and zero length. We pass this to controller
		 *   and after the transfer it will receive the last burst
		 *   request from peripheral and so transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, Obviously as DMA controller doesn't know when a lli's
		 *   transfer gets over, it can't load next lli. So in this
		 *   case, there has to be an assumption that only one lli is
		 *   supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc;

			/* FTDMAC020 only does memory-to-memory */
			if (pl08x->vd->ftdmac020)
				fc = PL080_FLOW_MEM2MEM;
			else
				fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
					PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if (!IS_BUS_ALIGNED(&bd.srcbus) ||
				!IS_BUS_ALIGNED(&bd.dstbus)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_lli_control_bits(pl08x, cctl,
					bd.srcbus.buswidth, bd.dstbus.buswidth,
					0);
			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
					0, cctl, 0);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if (!IS_BUS_ALIGNED(mbus)) {
			early_bytes = mbus->buswidth -
				(mbus->addr & (mbus->buswidth - 1));
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08zx)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
				num_llis++, &total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (!IS_BUS_ALIGNED(sbus)) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
						pl08x->vd->max_transfer_size;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width an get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_lli_control_bits(pl08x, cctl,
					bd.srcbus.buswidth, bd.dstbus.buswidth,
					tsize);
				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
						lli_len, cctl, tsize);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(pl08x, &bd, &cctl,
					bd.remainder, num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;

	if (txd->cyclic) {
		/* Link back to the first LLI. */
		last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
	} else {
		/* The final LLI terminates the LLI. */
		last_lli[PL080_LLI_LLI] = 0;
		/* The final LLI element shall also fire an interrupt. */
		if (pl08x->vd->ftdmac020)
			last_lli[PL080_LLI_CCTL] &= ~FTDMAC020_LLI_TC_MSK;
		else
			last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
	}

	pl08x_dump_lli(pl08x, llis_va, num_llis);

	return num_llis;
}
1491
1492 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
1493 struct pl08x_txd *txd)
1494 {
1495 struct pl08x_sg *dsg, *_dsg;
1496
1497 if (txd->llis_va)
1498 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
1499
1500 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
1501 list_del(&dsg->node);
1502 kfree(dsg);
1503 }
1504
1505 kfree(txd);
1506 }
1507
/*
 * virt-dma desc_free callback: unmap the descriptor, release the mux
 * claim if the transfer never completed (a completed transfer releases
 * it in the completion path and sets txd->done), then free the txd.
 */
static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	dma_descriptor_unmap(&vd->tx);
	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}
1519
/*
 * Collect every descriptor queued on the virtual channel and free them
 * via the virt-dma free-list helper (which invokes pl08x_desc_free()).
 */
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}
1528
1529
1530
1531
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * Could schedule where indicated. If any IRQs are pending, the descriptors
 * are cleaned up by the virtual channel layer.
 */
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}
1537
1538
1539
1540
1541
1542
/*
 * Report the status of a cookie on a channel.
 *
 * Fast path: if the cookie already completed, or the caller did not
 * supply a txstate to receive the residue, answer without taking the
 * channel lock.  Otherwise re-check under vc.lock and compute the
 * residue: a descriptor still on the issued list contributes its full
 * length, while the in-flight descriptor is measured from hardware.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	/* Re-check under the lock: it may have completed meanwhile */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			/* Running on hardware: read progress from the phy channel */
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet.
	 * Report number of bytes left in the active transactions and queue.
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}
1595
1596
/* Maps a burst length in (bus-width) words to a PL080 CCTL burst code */
struct burst_table {
	u32 burstwords;		/* smallest burst length this entry covers */
	u32 reg;		/* corresponding PL080_BSIZE_* encoding */
};

/*
 * Ordered largest-first so that pl08x_burst() selects the biggest
 * burst not exceeding the caller's maxburst.  The terminating entry
 * (burstwords == 0) matches any request and yields single transfers.
 */
static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};
1636
1637
1638
1639
1640
1641
/*
 * Given the allowed AHB master sets for source and destination
 * (PL08X_AHB1/PL08X_AHB2 bitmasks), return the control-word bits
 * that route each side.  A side goes to AHB2 when it cannot use AHB1,
 * or when the two sides can be split across the two masters.  The bit
 * encodings differ between the Faraday FTDMAC020 and the ARM PL08x.
 */
static u32 pl08x_select_bus(bool ftdmac020, u8 src, u8 dst)
{
	u32 cctl = 0;
	u32 dst_ahb2;
	u32 src_ahb2;

	/* The FTDMAC020 carries the AHB selection in LLI bits instead */
	if (ftdmac020) {
		dst_ahb2 = FTDMAC020_LLI_DST_SEL;
		src_ahb2 = FTDMAC020_LLI_SRC_SEL;
	} else {
		dst_ahb2 = PL080_CONTROL_DST_AHB2;
		src_ahb2 = PL080_CONTROL_SRC_AHB2;
	}

	/* Route dst to AHB2 if it must, or if src needs AHB1 for itself */
	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= dst_ahb2;
	/* Route src to AHB2 if it must, or if dst did not claim AHB2 */
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= src_ahb2;

	return cctl;
}
1664
1665 static u32 pl08x_cctl(u32 cctl)
1666 {
1667 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1668 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1669 PL080_CONTROL_PROT_MASK);
1670
1671
1672 return cctl | PL080_CONTROL_PROT_SYS;
1673 }
1674
1675 static u32 pl08x_width(enum dma_slave_buswidth width)
1676 {
1677 switch (width) {
1678 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1679 return PL080_WIDTH_8BIT;
1680 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1681 return PL080_WIDTH_16BIT;
1682 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1683 return PL080_WIDTH_32BIT;
1684 default:
1685 return ~0;
1686 }
1687 }
1688
1689 static u32 pl08x_burst(u32 maxburst)
1690 {
1691 int i;
1692
1693 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1694 if (burst_sizes[i].burstwords <= maxburst)
1695 break;
1696
1697 return burst_sizes[i].reg;
1698 }
1699
/*
 * Build the CCTL control word for a slave transfer from the configured
 * bus width and maximum burst.  Returns ~0 when the width is unusable
 * so the caller can detect a botched slave configuration.
 */
static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
	enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}
1726
1727
1728
1729
1730
1731 static void pl08x_issue_pending(struct dma_chan *chan)
1732 {
1733 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1734 unsigned long flags;
1735
1736 spin_lock_irqsave(&plchan->vc.lock, flags);
1737 if (vchan_issue_pending(&plchan->vc)) {
1738 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
1739 pl08x_phy_alloc_and_start(plchan);
1740 }
1741 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1742 }
1743
1744 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
1745 {
1746 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1747
1748 if (txd)
1749 INIT_LIST_HEAD(&txd->dsg_list);
1750 return txd;
1751 }
1752
1753 static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x)
1754 {
1755 u32 cctl = 0;
1756
1757
1758 switch (pl08x->pd->memcpy_burst_size) {
1759 default:
1760 dev_err(&pl08x->adev->dev,
1761 "illegal burst size for memcpy, set to 1\n");
1762 fallthrough;
1763 case PL08X_BURST_SZ_1:
1764 cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
1765 PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
1766 break;
1767 case PL08X_BURST_SZ_4:
1768 cctl |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
1769 PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
1770 break;
1771 case PL08X_BURST_SZ_8:
1772 cctl |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
1773 PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
1774 break;
1775 case PL08X_BURST_SZ_16:
1776 cctl |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
1777 PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
1778 break;
1779 case PL08X_BURST_SZ_32:
1780 cctl |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
1781 PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
1782 break;
1783 case PL08X_BURST_SZ_64:
1784 cctl |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
1785 PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
1786 break;
1787 case PL08X_BURST_SZ_128:
1788 cctl |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
1789 PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
1790 break;
1791 case PL08X_BURST_SZ_256:
1792 cctl |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
1793 PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
1794 break;
1795 }
1796
1797 switch (pl08x->pd->memcpy_bus_width) {
1798 default:
1799 dev_err(&pl08x->adev->dev,
1800 "illegal bus width for memcpy, set to 8 bits\n");
1801 fallthrough;
1802 case PL08X_BUS_WIDTH_8_BITS:
1803 cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
1804 PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
1805 break;
1806 case PL08X_BUS_WIDTH_16_BITS:
1807 cctl |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
1808 PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
1809 break;
1810 case PL08X_BUS_WIDTH_32_BITS:
1811 cctl |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
1812 PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
1813 break;
1814 }
1815
1816
1817 if (pl08x->pd->memcpy_prot_buff)
1818 cctl |= PL080_CONTROL_PROT_BUFF;
1819 if (pl08x->pd->memcpy_prot_cache)
1820 cctl |= PL080_CONTROL_PROT_CACHE;
1821
1822
1823 cctl |= PL080_CONTROL_PROT_SYS;
1824
1825
1826 cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1827
1828 if (pl08x->vd->dualmaster)
1829 cctl |= pl08x_select_bus(false,
1830 pl08x->mem_buses,
1831 pl08x->mem_buses);
1832
1833 return cctl;
1834 }
1835
1836 static u32 pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x)
1837 {
1838 u32 cctl = 0;
1839
1840
1841 switch (pl08x->pd->memcpy_bus_width) {
1842 default:
1843 dev_err(&pl08x->adev->dev,
1844 "illegal bus width for memcpy, set to 8 bits\n");
1845 fallthrough;
1846 case PL08X_BUS_WIDTH_8_BITS:
1847 cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
1848 PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
1849 break;
1850 case PL08X_BUS_WIDTH_16_BITS:
1851 cctl |= PL080_WIDTH_16BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
1852 PL080_WIDTH_16BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
1853 break;
1854 case PL08X_BUS_WIDTH_32_BITS:
1855 cctl |= PL080_WIDTH_32BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
1856 PL080_WIDTH_32BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
1857 break;
1858 }
1859
1860
1861
1862
1863
1864 cctl |= FTDMAC020_LLI_TC_MSK;
1865
1866
1867
1868
1869
1870 if (pl08x->vd->dualmaster)
1871 cctl |= pl08x_select_bus(true,
1872 pl08x->mem_buses,
1873 pl08x->mem_buses);
1874
1875 return cctl;
1876 }
1877
1878
1879
1880
/*
 * Prepare a single-segment memory-to-memory transfer: allocate a txd
 * and one sg entry, pick the vendor-appropriate cctl/ccfg, build the
 * LLI chain and hand the descriptor to the virtual-channel layer.
 * Returns NULL on any allocation or LLI-build failure.
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;
	if (pl08x->vd->ftdmac020) {
		/* FTDMAC020 has no config register: everything is in cctl */
		txd->ccfg = 0;
		txd->cctl = pl08x_ftdmac020_memcpy_cctl(pl08x);
	} else {
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			PL080_CONFIG_TC_IRQ_MASK |
			PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl = pl08x_memcpy_cctl(pl08x);
	}

	/* Zero return means the LLI chain could not be built */
	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
1927
/*
 * Common slave-transfer setup shared by the sg and cyclic prep paths:
 * allocate a txd, derive cctl/ccfg from the slave configuration for
 * the given direction, and claim a request-mux signal.  On success the
 * txd owns the mux signal (released by pl08x_desc_free or an error
 * path in the caller); *slave_addr is set to the device FIFO address.
 */
static struct pl08x_txd *pl08x_init_txd(
		struct dma_chan *chan,
		enum dma_transfer_direction direction,
		dma_addr_t *slave_addr)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	enum dma_slave_buswidth addr_width;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will always be fetched from the slave configuration:
	 * the memory side increments, the peripheral side does not.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		*slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		*slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	/* ~0 signals an invalid bus width in the slave config */
	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(false, src_buses, dst_buses);

	/* Flow-control source: peripheral if device_fc, else the DMAC */
	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
		PL080_CONFIG_TC_IRQ_MASK |
		tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		 plchan->signal, plchan->name);

	/* Assign the signal to the proper side of the transfer */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	return txd;
}
2014
2015 static int pl08x_tx_add_sg(struct pl08x_txd *txd,
2016 enum dma_transfer_direction direction,
2017 dma_addr_t slave_addr,
2018 dma_addr_t buf_addr,
2019 unsigned int len)
2020 {
2021 struct pl08x_sg *dsg;
2022
2023 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
2024 if (!dsg)
2025 return -ENOMEM;
2026
2027 list_add_tail(&dsg->node, &txd->dsg_list);
2028
2029 dsg->len = len;
2030 if (direction == DMA_MEM_TO_DEV) {
2031 dsg->src_addr = buf_addr;
2032 dsg->dst_addr = slave_addr;
2033 } else {
2034 dsg->src_addr = slave_addr;
2035 dsg->dst_addr = buf_addr;
2036 }
2037
2038 return 0;
2039 }
2040
/*
 * Prepare a slave scatter/gather transfer: build a txd via
 * pl08x_init_txd() (which also claims the mux signal), add one sg
 * segment per sg entry, then generate the LLI chain.  All error paths
 * after init must release the mux explicitly since the txd is freed
 * before it was ever marked done.
 */
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct scatterlist *sg;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, tmp) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      sg_dma_address(sg),
				      sg_dma_len(sg));
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
	}

	/* Zero return means the LLI chain could not be built */
	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
2082
/*
 * Prepare a cyclic (e.g. audio) transfer: one sg segment per period,
 * with the LLI chain closed into a loop by the fill code when
 * txd->cyclic is set.  TC interrupts fire on every period so the
 * vchan cyclic callback can run.
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev,
		"%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
		__func__, period_len, buf_len,
		direction == DMA_MEM_TO_DEV ? "to" : "from",
		plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	txd->cyclic = true;
	/* Interrupt at the end of every period */
	txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
	for (tmp = 0; tmp < buf_len; tmp += period_len) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      buf_addr + tmp, period_len);
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			return NULL;
		}
	}

	/* Zero return means the LLI chain could not be built */
	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
2125
/*
 * Store a new slave configuration after validating it: only slave
 * channels accept one, 8-byte bus widths are beyond the hardware, and
 * the PL080S variant cannot do peripheral flow control.
 */
static int pl08x_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (config->device_fc && pl08x->vd->pl080s) {
		dev_err(&pl08x->adev->dev,
			"%s: PL080S does not support peripheral flow control\n",
			__func__);
		return -EINVAL;
	}

	plchan->cfg = *config;

	return 0;
}
2151
/*
 * Abort everything on the channel: stop and free the physical channel
 * (if one is held), detach the in-flight descriptor for deferred
 * freeing, and drop all queued descriptors.  All under vc.lock so the
 * IRQ handler cannot race the teardown.
 */
static int pl08x_terminate_all(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		/* Nothing running or queued on hardware: nothing to do */
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	plchan->state = PL08X_CHAN_IDLE;

	if (plchan->phychan) {
		/*
		 * Mark physical channel as free and free any slave
		 * signal
		 */
		pl08x_phy_free(plchan);
	}

	/* Dequeue jobs and free LLIs */
	if (plchan->at) {
		/* vchan defers the actual free until synchronize time */
		vchan_terminate_vdesc(&plchan->at->vd);
		plchan->at = NULL;
	}

	/* Dequeue jobs not yet fired as well */
	pl08x_free_txd_list(pl08x, plchan);

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
2185
/*
 * Wait for any in-flight completion callbacks to finish and free
 * descriptors deferred by pl08x_terminate_all().
 */
static void pl08x_synchronize(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	vchan_synchronize(&plchan->vc);
}
2192
/*
 * Pause the transfer currently running on the physical channel.
 * Anything succeeding here that is not already running is simply not
 * started yet, so reporting success for an idle channel is correct.
 */
static int pl08x_pause(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_pause_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
2215
/*
 * Resume a channel previously halted by pl08x_pause().  Mirrors the
 * pause path: an idle channel with nothing queued succeeds trivially.
 */
static int pl08x_resume(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_resume_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_RUNNING;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
2238
2239 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
2240 {
2241 struct pl08x_dma_chan *plchan;
2242 char *name = chan_id;
2243
2244
2245 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
2246 return false;
2247
2248 plchan = to_pl08x_chan(chan);
2249
2250
2251 if (!strcmp(plchan->name, name))
2252 return true;
2253
2254 return false;
2255 }
2256 EXPORT_SYMBOL_GPL(pl08x_filter_id);
2257
/*
 * Filter used with the dmaengine slave map: match a channel by its
 * platform channel-data pointer.
 */
static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	return plchan->cd == chan_id;
}
2264
2265
2266
2267
2268
2269
2270
2271 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
2272 {
2273
2274 if (pl08x->vd->nomadik)
2275 return;
2276
2277 if (pl08x->vd->ftdmac020) {
2278 writel(PL080_CONFIG_ENABLE, pl08x->base + FTDMAC020_CSR);
2279 return;
2280 }
2281 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
2282 }
2283
/*
 * Interrupt handler shared by all physical channels.  Reads and clears
 * the error and terminal-count status, then for every flagged channel:
 * cyclic descriptors get their period callback; one-shot descriptors
 * are completed, their mux released, and the next queued transfer (if
 * any) is started, otherwise the physical channel is freed.
 */
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((BIT(i) & err) || (BIT(i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			/* Plain spin_lock: we're already in IRQ context */
			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx && tx->cyclic) {
				vchan_cyclic_callback(&tx->vd);
			} else if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= BIT(i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
2348
/*
 * Initialize a virtual channel from its platform channel data: mark it
 * as a slave channel and seed both slave addresses with the device's
 * FIFO address (direction picks which one is actually used).
 */
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
2356
2357
2358
2359
2360
/*
 * Initialise the virtual channels for either the memcpy or the slave
 * dma_device.  Slave channels take their channel data from platform
 * data (signal fixed to the index); memcpy channels get synthesized
 * channel data and a "memcpyN" name.  Returns the number of channels
 * created, or -ENOMEM.
 * NOTE(review): on a mid-loop allocation failure, channels created in
 * earlier iterations are left on the list — presumably the probe error
 * path frees them via pl08x_free_virtual_channels(); confirm.
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return -ENOMEM;

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			/*
			 * Some implementations have muxed signals, whereas some
			 * use a mux in front of the signals and need dynamic
			 * assignment of signals.
			 */
			chan->signal = i;
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = kzalloc(sizeof(*chan->cd), GFP_KERNEL);
			if (!chan->cd) {
				kfree(chan);
				return -ENOMEM;
			}
			chan->cd->bus_id = "memcpy";
			chan->cd->periph_buses = pl08x->pd->mem_buses;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan->cd);
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
2418
/*
 * Unlink and free every virtual channel on a dma_device.
 * NOTE(review): chan->cd and chan->name allocated for memcpy channels
 * are not freed here — looks like a small teardown leak, but freeing
 * them blindly would be wrong for slave channels whose cd points into
 * platform data; confirm before changing.
 */
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}
2430
2431 #ifdef CONFIG_DEBUG_FS
2432 static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
2433 {
2434 switch (state) {
2435 case PL08X_CHAN_IDLE:
2436 return "idle";
2437 case PL08X_CHAN_RUNNING:
2438 return "running";
2439 case PL08X_CHAN_PAUSED:
2440 return "paused";
2441 case PL08X_CHAN_WAITING:
2442 return "waiting";
2443 default:
2444 break;
2445 }
2446 return "UNKNOWN STATE";
2447 }
2448
2449 static int pl08x_debugfs_show(struct seq_file *s, void *data)
2450 {
2451 struct pl08x_driver_data *pl08x = s->private;
2452 struct pl08x_dma_chan *chan;
2453 struct pl08x_phy_chan *ch;
2454 unsigned long flags;
2455 int i;
2456
2457 seq_printf(s, "PL08x physical channels:\n");
2458 seq_printf(s, "CHANNEL:\tUSER:\n");
2459 seq_printf(s, "--------\t-----\n");
2460 for (i = 0; i < pl08x->vd->channels; i++) {
2461 struct pl08x_dma_chan *virt_chan;
2462
2463 ch = &pl08x->phy_chans[i];
2464
2465 spin_lock_irqsave(&ch->lock, flags);
2466 virt_chan = ch->serving;
2467
2468 seq_printf(s, "%d\t\t%s%s\n",
2469 ch->id,
2470 virt_chan ? virt_chan->name : "(none)",
2471 ch->locked ? " LOCKED" : "");
2472
2473 spin_unlock_irqrestore(&ch->lock, flags);
2474 }
2475
2476 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
2477 seq_printf(s, "CHANNEL:\tSTATE:\n");
2478 seq_printf(s, "--------\t------\n");
2479 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
2480 seq_printf(s, "%s\t\t%s\n", chan->name,
2481 pl08x_state_str(chan->state));
2482 }
2483
2484 if (pl08x->has_slave) {
2485 seq_printf(s, "\nPL08x virtual slave channels:\n");
2486 seq_printf(s, "CHANNEL:\tSTATE:\n");
2487 seq_printf(s, "--------\t------\n");
2488 list_for_each_entry(chan, &pl08x->slave.channels,
2489 vc.chan.device_node) {
2490 seq_printf(s, "%s\t\t%s\n", chan->name,
2491 pl08x_state_str(chan->state));
2492 }
2493 }
2494
2495 return 0;
2496 }
2497
2498 DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs);
2499
static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
			    NULL, pl08x, &pl08x_debugfs_fops);
}
2506
2507 #else
/* Stub when CONFIG_DEBUG_FS is disabled: no debugfs file is created */
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
2511 #endif
2512
2513 #ifdef CONFIG_OF
2514 static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
2515 u32 id)
2516 {
2517 struct pl08x_dma_chan *chan;
2518
2519
2520 if (!pl08x->has_slave)
2521 return NULL;
2522
2523 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
2524 if (chan->signal == id)
2525 return &chan->vc.chan;
2526 }
2527
2528 return NULL;
2529 }
2530
/*
 * Device-tree translation: cell 0 is the request-signal number, cell 1
 * is the AHB master mask the peripheral sits on.  Stores the master
 * mask in the matched channel's channel data before handing it out.
 */
static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
	struct dma_chan *dma_chan;
	struct pl08x_dma_chan *plchan;

	if (!pl08x)
		return NULL;

	if (dma_spec->args_count != 2) {
		dev_err(&pl08x->adev->dev,
			"DMA channel translation requires two cells\n");
		return NULL;
	}

	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
	if (!dma_chan) {
		dev_err(&pl08x->adev->dev,
			"DMA slave channel not found\n");
		return NULL;
	}

	plchan = to_pl08x_chan(dma_chan);
	dev_dbg(&pl08x->adev->dev,
		"translated channel for signal %d\n",
		dma_spec->args[0]);

	/* Augment channel data for applicable AHB buses */
	plchan->cd->periph_buses = dma_spec->args[1];
	return dma_get_slave_channel(dma_chan);
}
2563
/*
 * Build platform data from the device tree: LLI and memory bus-master
 * masks, memcpy burst size and bus width, and one synthesized channel
 * data entry per request signal.  Finishes by registering the OF DMA
 * translation callback.
 */
static int pl08x_of_probe(struct amba_device *adev,
			  struct pl08x_driver_data *pl08x,
			  struct device_node *np)
{
	struct pl08x_platform_data *pd;
	struct pl08x_channel_data *chanp = NULL;
	u32 val;
	int ret;
	int i;

	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	/* Eligible bus masters for fetching LLIs */
	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
		pd->lli_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
		pd->lli_buses |= PL08X_AHB2;
	if (!pd->lli_buses) {
		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Eligible bus masters for memory access */
	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
		pd->mem_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
		pd->mem_buses |= PL08X_AHB2;
	if (!pd->mem_buses) {
		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Parse the memcpy channel properties */
	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
		val = 1;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
		fallthrough;
	case 1:
		pd->memcpy_burst_size = PL08X_BURST_SZ_1;
		break;
	case 4:
		pd->memcpy_burst_size = PL08X_BURST_SZ_4;
		break;
	case 8:
		pd->memcpy_burst_size = PL08X_BURST_SZ_8;
		break;
	case 16:
		pd->memcpy_burst_size = PL08X_BURST_SZ_16;
		break;
	case 32:
		pd->memcpy_burst_size = PL08X_BURST_SZ_32;
		break;
	case 64:
		pd->memcpy_burst_size = PL08X_BURST_SZ_64;
		break;
	case 128:
		pd->memcpy_burst_size = PL08X_BURST_SZ_128;
		break;
	case 256:
		pd->memcpy_burst_size = PL08X_BURST_SZ_256;
		break;
	}

	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
		val = 8;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
		fallthrough;
	case 8:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS;
		break;
	case 16:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_16_BITS;
		break;
	case 32:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS;
		break;
	}

	/*
	 * Allocate channel data for all possible slave channels (one
	 * for each possible signal), channels will then be allocated
	 * for a device and have it's AHB interfaces set up at
	 * translation time.
	 */
	if (pl08x->vd->signals) {
		chanp = devm_kcalloc(&adev->dev,
				     pl08x->vd->signals,
				     sizeof(struct pl08x_channel_data),
				     GFP_KERNEL);
		if (!chanp)
			return -ENOMEM;

		pd->slave_channels = chanp;
		for (i = 0; i < pl08x->vd->signals; i++) {
			/*
			 * chanp->signal is ignored: the channel's fixed
			 * signal was assigned at channel-init time.
			 * NOTE(review): kasprintf() result is unchecked;
			 * a NULL bus_id here would leave the channel
			 * nameless — confirm whether this needs handling.
			 */
			chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
			chanp++;
		}
		pd->num_slave_channels = pl08x->vd->signals;
	}

	pl08x->pd = pd;

	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
					  pl08x);
}
2684 #else
2685 static inline int pl08x_of_probe(struct amba_device *adev,
2686 struct pl08x_driver_data *pl08x,
2687 struct device_node *np)
2688 {
2689 return -EINVAL;
2690 }
2691 #endif
2692
2693 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2694 {
2695 struct pl08x_driver_data *pl08x;
2696 struct vendor_data *vd = id->data;
2697 struct device_node *np = adev->dev.of_node;
2698 u32 tsfr_size;
2699 int ret = 0;
2700 int i;
2701
2702 ret = amba_request_regions(adev, NULL);
2703 if (ret)
2704 return ret;
2705
2706
2707 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2708 if (ret)
2709 goto out_no_pl08x;
2710
2711
2712 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
2713 if (!pl08x) {
2714 ret = -ENOMEM;
2715 goto out_no_pl08x;
2716 }
2717
2718
2719 pl08x->adev = adev;
2720 pl08x->vd = vd;
2721
2722 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2723 if (!pl08x->base) {
2724 ret = -ENOMEM;
2725 goto out_no_ioremap;
2726 }
2727
2728 if (vd->ftdmac020) {
2729 u32 val;
2730
2731 val = readl(pl08x->base + FTDMAC020_REVISION);
2732 dev_info(&pl08x->adev->dev, "FTDMAC020 %d.%d rel %d\n",
2733 (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
2734 val = readl(pl08x->base + FTDMAC020_FEATURE);
2735 dev_info(&pl08x->adev->dev, "FTDMAC020 %d channels, "
2736 "%s built-in bridge, %s, %s linked lists\n",
2737 (val >> 12) & 0x0f,
2738 (val & BIT(10)) ? "no" : "has",
2739 (val & BIT(9)) ? "AHB0 and AHB1" : "AHB0",
2740 (val & BIT(8)) ? "supports" : "does not support");
2741
2742
2743 if (!(val & BIT(8)))
2744 dev_warn(&pl08x->adev->dev,
2745 "linked lists not supported, required\n");
2746 vd->channels = (val >> 12) & 0x0f;
2747 vd->dualmaster = !!(val & BIT(9));
2748 }
2749
2750
2751 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
2752 pl08x->memcpy.dev = &adev->dev;
2753 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
2754 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
2755 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
2756 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
2757 pl08x->memcpy.device_config = pl08x_config;
2758 pl08x->memcpy.device_pause = pl08x_pause;
2759 pl08x->memcpy.device_resume = pl08x_resume;
2760 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2761 pl08x->memcpy.device_synchronize = pl08x_synchronize;
2762 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2763 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2764 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
2765 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2766 if (vd->ftdmac020)
2767 pl08x->memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
2768
2769
2770
2771
2772
2773
2774 if (vd->signals) {
2775 pl08x->has_slave = true;
2776 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
2777 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
2778 pl08x->slave.dev = &adev->dev;
2779 pl08x->slave.device_free_chan_resources =
2780 pl08x_free_chan_resources;
2781 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
2782 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2783 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2784 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
2785 pl08x->slave.device_config = pl08x_config;
2786 pl08x->slave.device_pause = pl08x_pause;
2787 pl08x->slave.device_resume = pl08x_resume;
2788 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2789 pl08x->slave.device_synchronize = pl08x_synchronize;
2790 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2791 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2792 pl08x->slave.directions =
2793 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2794 pl08x->slave.residue_granularity =
2795 DMA_RESIDUE_GRANULARITY_SEGMENT;
2796 }
2797
2798
2799 pl08x->pd = dev_get_platdata(&adev->dev);
2800 if (!pl08x->pd) {
2801 if (np) {
2802 ret = pl08x_of_probe(adev, pl08x, np);
2803 if (ret)
2804 goto out_no_platdata;
2805 } else {
2806 dev_err(&adev->dev, "no platform data supplied\n");
2807 ret = -EINVAL;
2808 goto out_no_platdata;
2809 }
2810 } else {
2811 pl08x->slave.filter.map = pl08x->pd->slave_map;
2812 pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
2813 pl08x->slave.filter.fn = pl08x_filter_fn;
2814 }
2815
2816
2817 pl08x->lli_buses = PL08X_AHB1;
2818 pl08x->mem_buses = PL08X_AHB1;
2819 if (pl08x->vd->dualmaster) {
2820 pl08x->lli_buses = pl08x->pd->lli_buses;
2821 pl08x->mem_buses = pl08x->pd->mem_buses;
2822 }
2823
2824 if (vd->pl080s)
2825 pl08x->lli_words = PL080S_LLI_WORDS;
2826 else
2827 pl08x->lli_words = PL080_LLI_WORDS;
2828 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
2829
2830
2831 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
2832 tsfr_size, PL08X_ALIGN, 0);
2833 if (!pl08x->pool) {
2834 ret = -ENOMEM;
2835 goto out_no_lli_pool;
2836 }
2837
2838
2839 pl08x_ensure_on(pl08x);
2840
2841
2842 if (vd->ftdmac020)
2843
2844 writel(0x0000FFFF, pl08x->base + PL080_ERR_CLEAR);
2845 else
2846 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2847 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2848
2849
2850 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
2851 if (ret) {
2852 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2853 __func__, adev->irq[0]);
2854 goto out_no_irq;
2855 }
2856
2857
2858 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
2859 GFP_KERNEL);
2860 if (!pl08x->phy_chans) {
2861 ret = -ENOMEM;
2862 goto out_no_phychans;
2863 }
2864
2865 for (i = 0; i < vd->channels; i++) {
2866 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2867
2868 ch->id = i;
2869 ch->base = pl08x->base + PL080_Cx_BASE(i);
2870 if (vd->ftdmac020) {
2871
2872 ch->reg_busy = ch->base + FTDMAC020_CH_BUSY;
2873 ch->reg_config = ch->base + FTDMAC020_CH_CFG;
2874 ch->reg_control = ch->base + FTDMAC020_CH_CSR;
2875 ch->reg_src = ch->base + FTDMAC020_CH_SRC_ADDR;
2876 ch->reg_dst = ch->base + FTDMAC020_CH_DST_ADDR;
2877 ch->reg_lli = ch->base + FTDMAC020_CH_LLP;
2878 ch->ftdmac020 = true;
2879 } else {
2880 ch->reg_config = ch->base + vd->config_offset;
2881 ch->reg_control = ch->base + PL080_CH_CONTROL;
2882 ch->reg_src = ch->base + PL080_CH_SRC_ADDR;
2883 ch->reg_dst = ch->base + PL080_CH_DST_ADDR;
2884 ch->reg_lli = ch->base + PL080_CH_LLI;
2885 }
2886 if (vd->pl080s)
2887 ch->pl080s = true;
2888
2889 spin_lock_init(&ch->lock);
2890
2891
2892
2893
2894
2895
2896 if (vd->nomadik) {
2897 u32 val;
2898
2899 val = readl(ch->reg_config);
2900 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
2901 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
2902 ch->locked = true;
2903 }
2904 }
2905
2906 dev_dbg(&adev->dev, "physical channel %d is %s\n",
2907 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
2908 }
2909
2910
2911 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
2912 pl08x->vd->channels, false);
2913 if (ret <= 0) {
2914 dev_warn(&pl08x->adev->dev,
2915 "%s failed to enumerate memcpy channels - %d\n",
2916 __func__, ret);
2917 goto out_no_memcpy;
2918 }
2919
2920
2921 if (pl08x->has_slave) {
2922 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
2923 pl08x->pd->num_slave_channels, true);
2924 if (ret < 0) {
2925 dev_warn(&pl08x->adev->dev,
2926 "%s failed to enumerate slave channels - %d\n",
2927 __func__, ret);
2928 goto out_no_slave;
2929 }
2930 }
2931
2932 ret = dma_async_device_register(&pl08x->memcpy);
2933 if (ret) {
2934 dev_warn(&pl08x->adev->dev,
2935 "%s failed to register memcpy as an async device - %d\n",
2936 __func__, ret);
2937 goto out_no_memcpy_reg;
2938 }
2939
2940 if (pl08x->has_slave) {
2941 ret = dma_async_device_register(&pl08x->slave);
2942 if (ret) {
2943 dev_warn(&pl08x->adev->dev,
2944 "%s failed to register slave as an async device - %d\n",
2945 __func__, ret);
2946 goto out_no_slave_reg;
2947 }
2948 }
2949
2950 amba_set_drvdata(adev, pl08x);
2951 init_pl08x_debugfs(pl08x);
2952 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
2953 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
2954 (unsigned long long)adev->res.start, adev->irq[0]);
2955
2956 return 0;
2957
2958 out_no_slave_reg:
2959 dma_async_device_unregister(&pl08x->memcpy);
2960 out_no_memcpy_reg:
2961 if (pl08x->has_slave)
2962 pl08x_free_virtual_channels(&pl08x->slave);
2963 out_no_slave:
2964 pl08x_free_virtual_channels(&pl08x->memcpy);
2965 out_no_memcpy:
2966 kfree(pl08x->phy_chans);
2967 out_no_phychans:
2968 free_irq(adev->irq[0], pl08x);
2969 out_no_irq:
2970 dma_pool_destroy(pl08x->pool);
2971 out_no_lli_pool:
2972 out_no_platdata:
2973 iounmap(pl08x->base);
2974 out_no_ioremap:
2975 kfree(pl08x);
2976 out_no_pl08x:
2977 amba_release_regions(adev);
2978 return ret;
2979 }
2980
2981
/*
 * ARM PL080: 8 physical channels, 16 peripheral request signals,
 * two AHB masters (dualmaster). Per-channel config register lives at
 * the standard PL080_CH_CONFIG offset.
 */
static struct vendor_data vendor_pl080 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 16,
	.dualmaster = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
2989
/*
 * ST-Ericsson Nomadik variant of the PL080: 8 channels, 32 request
 * signals, dual master. The .nomadik flag makes probe check each
 * channel's config register for secure-only reservation (see the
 * PL080N_CONFIG_ITPROT/SECPROT handling in pl08x_probe).
 */
static struct vendor_data vendor_nomadik = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.dualmaster = true,
	.nomadik = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
2998
/*
 * PL080S variant (exact-ID matched in pl08x_ids, presumably the
 * Samsung-modified PL080 -- confirm against the SoC docs): single
 * master, 32 signals, wider transfer-size field and an extra LLI
 * word (PL080S_LLI_WORDS / PL080S_CH_CONFIG used when .pl080s set).
 */
static struct vendor_data vendor_pl080s = {
	.config_offset = PL080S_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.pl080s = true,
	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};
3006
/*
 * ARM PL081: the small sibling of the PL080 -- only 2 physical
 * channels and a single AHB master; otherwise register-compatible.
 */
static struct vendor_data vendor_pl081 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 2,
	.signals = 16,
	.dualmaster = false,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
3014
/*
 * Faraday FTDMAC020: .channels and .dualmaster are deliberately left
 * unset here -- probe reads them from the FTDMAC020_FEATURE register
 * at runtime. No peripheral request signals (.signals == 0), so only
 * the memcpy DMA device is registered for this variant.
 */
static struct vendor_data vendor_ftdmac020 = {
	.config_offset = PL080_CH_CONFIG,
	.ftdmac020 = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
3020
/*
 * AMBA/PrimeCell peripheral ID match table. More specific masks must
 * come before the generic ones: the full-ID PL080S entry would also
 * match the generic 0x00041080/0x000fffff PL080 entry below it.
 */
static const struct amba_id pl08x_ids[] = {
	/* PL080S -- exact-ID match (modified PL080; see vendor_pl080s) */
	{
		.id = 0x0a141080,
		.mask = 0xffffffff,
		.data = &vendor_pl080s,
	},
	/* Standard ARM PL080 */
	{
		.id = 0x00041080,
		.mask = 0x000fffff,
		.data = &vendor_pl080,
	},
	/* ARM PL081 (2-channel, single master) */
	{
		.id = 0x00041081,
		.mask = 0x000fffff,
		.data = &vendor_pl081,
	},
	/* Nomadik variant */
	{
		.id = 0x00280080,
		.mask = 0x00ffffff,
		.data = &vendor_nomadik,
	},
	/* Faraday FTDMAC020 */
	{
		.id = 0x0003b080,
		.mask = 0x000fffff,
		.data = &vendor_ftdmac020,
	},
	{ 0, 0 },	/* sentinel */
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);
3056
/*
 * AMBA bus driver object, matched against pl08x_ids. No .remove
 * callback: the driver is registered from a subsys_initcall and is
 * not expected to be unbound.
 */
static struct amba_driver pl08x_amba_driver = {
	.drv.name = DRIVER_NAME,
	.id_table = pl08x_ids,
	.probe = pl08x_probe,
};
3062
3063 static int __init pl08x_init(void)
3064 {
3065 int retval;
3066 retval = amba_driver_register(&pl08x_amba_driver);
3067 if (retval)
3068 printk(KERN_WARNING DRIVER_NAME
3069 "failed to register as an AMBA device (%d)\n",
3070 retval);
3071 return retval;
3072 }
3073 subsys_initcall(pl08x_init);