// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell PXA peripheral DMA driver
 *
 * Author: Robert Jarzmik <robert.jarzmik@free.fr>
 */
0006 #include <linux/err.h>
0007 #include <linux/module.h>
0008 #include <linux/init.h>
0009 #include <linux/types.h>
0010 #include <linux/interrupt.h>
0011 #include <linux/dma-mapping.h>
0012 #include <linux/slab.h>
0013 #include <linux/dmaengine.h>
0014 #include <linux/platform_device.h>
0015 #include <linux/device.h>
0016 #include <linux/platform_data/mmp_dma.h>
0017 #include <linux/dmapool.h>
0018 #include <linux/of_device.h>
0019 #include <linux/of_dma.h>
0020 #include <linux/of.h>
0021 #include <linux/wait.h>
0022 #include <linux/dma/pxa-dma.h>
0023
0024 #include "dmaengine.h"
0025 #include "virt-dma.h"
0026
0027 #define DCSR(n) (0x0000 + ((n) << 2))
0028 #define DALGN(n) 0x00a0
0029 #define DINT 0x00f0
0030 #define DDADR(n) (0x0200 + ((n) << 4))
0031 #define DSADR(n) (0x0204 + ((n) << 4))
0032 #define DTADR(n) (0x0208 + ((n) << 4))
0033 #define DCMD(n) (0x020c + ((n) << 4))
0034
0035 #define PXA_DCSR_RUN BIT(31)
0036 #define PXA_DCSR_NODESC BIT(30)
0037 #define PXA_DCSR_STOPIRQEN BIT(29)
0038 #define PXA_DCSR_REQPEND BIT(8)
0039 #define PXA_DCSR_STOPSTATE BIT(3)
0040 #define PXA_DCSR_ENDINTR BIT(2)
0041 #define PXA_DCSR_STARTINTR BIT(1)
0042 #define PXA_DCSR_BUSERR BIT(0)
0043
0044 #define PXA_DCSR_EORIRQEN BIT(28)
0045 #define PXA_DCSR_EORJMPEN BIT(27)
0046 #define PXA_DCSR_EORSTOPEN BIT(26)
0047 #define PXA_DCSR_SETCMPST BIT(25)
0048 #define PXA_DCSR_CLRCMPST BIT(24)
0049 #define PXA_DCSR_CMPST BIT(10)
0050 #define PXA_DCSR_EORINTR BIT(9)
0051
0052 #define DRCMR_MAPVLD BIT(7)
0053 #define DRCMR_CHLNUM 0x1f
0054
0055 #define DDADR_DESCADDR 0xfffffff0
0056 #define DDADR_STOP BIT(0)
0057
0058 #define PXA_DCMD_INCSRCADDR BIT(31)
0059 #define PXA_DCMD_INCTRGADDR BIT(30)
0060 #define PXA_DCMD_FLOWSRC BIT(29)
0061 #define PXA_DCMD_FLOWTRG BIT(28)
0062 #define PXA_DCMD_STARTIRQEN BIT(22)
0063 #define PXA_DCMD_ENDIRQEN BIT(21)
0064 #define PXA_DCMD_ENDIAN BIT(18)
0065 #define PXA_DCMD_BURST8 (1 << 16)
0066 #define PXA_DCMD_BURST16 (2 << 16)
0067 #define PXA_DCMD_BURST32 (3 << 16)
0068 #define PXA_DCMD_WIDTH1 (1 << 14)
0069 #define PXA_DCMD_WIDTH2 (2 << 14)
0070 #define PXA_DCMD_WIDTH4 (3 << 14)
0071 #define PXA_DCMD_LENGTH 0x01fff
0072
0073 #define PDMA_ALIGNMENT 3
0074 #define PDMA_MAX_DESC_BYTES (PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))
0075
struct pxad_desc_hw {
u32 ddadr; /* Points to the next descriptor + flags */
u32 dsadr; /* DSADR value for the current transfer */
u32 dtadr; /* DTADR value for the current transfer */
u32 dcmd; /* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
struct virt_dma_desc vd; /* Virtual descriptor */
int nb_desc; /* Number of hw descriptors */
size_t len; /* Number of bytes transferred */
dma_addr_t first; /* First descriptor's addr */

/* At least one descriptor has a src/dst address not a multiple of 8 */
bool misaligned;
bool cyclic;
struct dma_pool *desc_pool; /* Channel's used allocator */

struct pxad_desc_hw *hw_desc[]; /* DMA coherent descriptors */
};
0096
struct pxad_phy {
int idx; /* Index of the physical channel */
void __iomem *base; /* Controller registers base */
struct pxad_chan *vchan; /* Virtual channel currently using this phy */
};

struct pxad_chan {
struct virt_dma_chan vc; /* Virtual channel */
u32 drcmr; /* Requestor of the channel */
enum pxad_chan_prio prio; /* Required priority of phy */
/*
 * At least one descriptor in a submitted or issued transfer on this
 * channel has an address not a multiple of 8. This implies setting
 * the DALGN bit of the phy.
 */
bool misaligned;
struct dma_slave_config cfg; /* Current dma slave config */

/* protected by vc->lock */
struct pxad_phy *phy; /* Phy this channel is currently mapped to */
struct dma_pool *desc_pool; /* Descriptors pool */
dma_cookie_t bus_error; /* Cookie of the transfer that hit a bus error */

wait_queue_head_t wq_state;
};
0122
0123 struct pxad_device {
0124 struct dma_device slave;
0125 int nr_chans;
0126 int nr_requestors;
0127 void __iomem *base;
0128 struct pxad_phy *phys;
0129 spinlock_t phy_lock;
0130 #ifdef CONFIG_DEBUG_FS
0131 struct dentry *dbgfs_root;
0132 struct dentry **dbgfs_chan;
0133 #endif
0134 };
0135
0136 #define tx_to_pxad_desc(tx) \
0137 container_of(tx, struct pxad_desc_sw, async_tx)
0138 #define to_pxad_chan(dchan) \
0139 container_of(dchan, struct pxad_chan, vc.chan)
0140 #define to_pxad_dev(dmadev) \
0141 container_of(dmadev, struct pxad_device, slave)
0142 #define to_pxad_sw_desc(_vd) \
0143 container_of((_vd), struct pxad_desc_sw, vd)
0144
0145 #define _phy_readl_relaxed(phy, _reg) \
0146 readl_relaxed((phy)->base + _reg((phy)->idx))
0147 #define phy_readl_relaxed(phy, _reg) \
0148 ({ \
0149 u32 _v; \
0150 _v = readl_relaxed((phy)->base + _reg((phy)->idx)); \
0151 dev_vdbg(&phy->vchan->vc.chan.dev->device, \
0152 "%s(): readl(%s): 0x%08x\n", __func__, #_reg, \
0153 _v); \
0154 _v; \
0155 })
0156 #define phy_writel(phy, val, _reg) \
0157 do { \
0158 writel((val), (phy)->base + _reg((phy)->idx)); \
0159 dev_vdbg(&phy->vchan->vc.chan.dev->device, \
0160 "%s(): writel(0x%08x, %s)\n", \
0161 __func__, (u32)(val), #_reg); \
0162 } while (0)
0163 #define phy_writel_relaxed(phy, val, _reg) \
0164 do { \
0165 writel_relaxed((val), (phy)->base + _reg((phy)->idx)); \
0166 dev_vdbg(&phy->vchan->vc.chan.dev->device, \
0167 "%s(): writel_relaxed(0x%08x, %s)\n", \
0168 __func__, (u32)(val), #_reg); \
0169 } while (0)
0170
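/*
 * pxad_drcmr - Return the register offset of the DRCMR register for a DMA
 * requestor line: lines 0..63 live at 0x100 + line * 4, lines 64 and above
 * at 0x1000 + line * 4.
 */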
0171 static unsigned int pxad_drcmr(unsigned int line)
0172 {
0173 if (line < 64)
0174 return 0x100 + line * 4;
0175 return 0x1000 + line * 4;
0176 }
0177
static bool pxad_filter_fn(struct dma_chan *chan, void *param);

/*
 * Debug fs
 */
0183 #ifdef CONFIG_DEBUG_FS
0184 #include <linux/debugfs.h>
0185 #include <linux/uaccess.h>
0186 #include <linux/seq_file.h>
0187
0188 static int requester_chan_show(struct seq_file *s, void *p)
0189 {
0190 struct pxad_phy *phy = s->private;
0191 int i;
0192 u32 drcmr;
0193
0194 seq_printf(s, "DMA channel %d requester :\n", phy->idx);
0195 for (i = 0; i < 70; i++) {
0196 drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
0197 if ((drcmr & DRCMR_CHLNUM) == phy->idx)
0198 seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
0199 !!(drcmr & DRCMR_MAPVLD));
0200 }
0201 return 0;
0202 }
0203
0204 static inline int dbg_burst_from_dcmd(u32 dcmd)
0205 {
0206 int burst = (dcmd >> 16) & 0x3;
0207
0208 return burst ? 4 << burst : 0;
0209 }
0210
0211 static int is_phys_valid(unsigned long addr)
0212 {
0213 return pfn_valid(__phys_to_pfn(addr));
0214 }
0215
0216 #define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
0217 #define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")
0218
0219 static int descriptors_show(struct seq_file *s, void *p)
0220 {
0221 struct pxad_phy *phy = s->private;
0222 int i, max_show = 20, burst, width;
0223 u32 dcmd;
0224 unsigned long phys_desc, ddadr;
0225 struct pxad_desc_hw *desc;
0226
0227 phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);
0228
0229 seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
0230 seq_printf(s, "[%03d] First descriptor unknown\n", 0);
0231 for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
0232 desc = phys_to_virt(phys_desc);
0233 dcmd = desc->dcmd;
0234 burst = dbg_burst_from_dcmd(dcmd);
0235 width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
0236
0237 seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
0238 i, phys_desc, desc);
0239 seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
0240 seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
0241 seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
0242 seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
0243 dcmd,
0244 PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
0245 PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
0246 PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
0247 PXA_DCMD_STR(ENDIAN), burst, width,
0248 dcmd & PXA_DCMD_LENGTH);
0249 phys_desc = desc->ddadr;
0250 }
0251 if (i == max_show)
0252 seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
0253 i, phys_desc);
0254 else
0255 seq_printf(s, "[%03d] Desc at %08lx is %s\n",
0256 i, phys_desc, phys_desc == DDADR_STOP ?
0257 "DDADR_STOP" : "invalid");
0258
0259 return 0;
0260 }
0261
0262 static int chan_state_show(struct seq_file *s, void *p)
0263 {
0264 struct pxad_phy *phy = s->private;
0265 u32 dcsr, dcmd;
0266 int burst, width;
0267 static const char * const str_prio[] = {
0268 "high", "normal", "low", "invalid"
0269 };
0270
0271 dcsr = _phy_readl_relaxed(phy, DCSR);
0272 dcmd = _phy_readl_relaxed(phy, DCMD);
0273 burst = dbg_burst_from_dcmd(dcmd);
0274 width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
0275
0276 seq_printf(s, "DMA channel %d\n", phy->idx);
0277 seq_printf(s, "\tPriority : %s\n",
0278 str_prio[(phy->idx & 0xf) / 4]);
0279 seq_printf(s, "\tUnaligned transfer bit: %s\n",
0280 _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
0281 "yes" : "no");
0282 seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
0283 dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
0284 PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
0285 PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
0286 PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
0287 PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
0288 PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
0289 PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
0290 PXA_DCSR_STR(BUSERR));
0291
0292 seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
0293 dcmd,
0294 PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
0295 PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
0296 PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
0297 PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
0298 seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
0299 seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
0300 seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));
0301
0302 return 0;
0303 }
0304
0305 static int state_show(struct seq_file *s, void *p)
0306 {
0307 struct pxad_device *pdev = s->private;
0308
0309
0310 seq_puts(s, "DMA engine status\n");
0311 seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);
0312
0313 return 0;
0314 }
0315
0316 DEFINE_SHOW_ATTRIBUTE(state);
0317 DEFINE_SHOW_ATTRIBUTE(chan_state);
0318 DEFINE_SHOW_ATTRIBUTE(descriptors);
0319 DEFINE_SHOW_ATTRIBUTE(requester_chan);
0320
0321 static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
0322 int ch, struct dentry *chandir)
0323 {
0324 char chan_name[11];
0325 struct dentry *chan;
0326 void *dt;
0327
0328 scnprintf(chan_name, sizeof(chan_name), "%d", ch);
0329 chan = debugfs_create_dir(chan_name, chandir);
0330 dt = (void *)&pdev->phys[ch];
0331
0332 debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
0333 debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
0334 debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);
0335
0336 return chan;
0337 }
0338
0339 static void pxad_init_debugfs(struct pxad_device *pdev)
0340 {
0341 int i;
0342 struct dentry *chandir;
0343
0344 pdev->dbgfs_chan =
0345 kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
0346 GFP_KERNEL);
0347 if (!pdev->dbgfs_chan)
0348 return;
0349
0350 pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
0351
0352 debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);
0353
0354 chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
0355
0356 for (i = 0; i < pdev->nr_chans; i++)
0357 pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
0358 }
0359
0360 static void pxad_cleanup_debugfs(struct pxad_device *pdev)
0361 {
0362 debugfs_remove_recursive(pdev->dbgfs_root);
0363 }
0364 #else
0365 static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
0366 static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
0367 #endif
0368
0369 static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
0370 {
0371 int prio, i;
0372 struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
0373 struct pxad_phy *phy, *found = NULL;
0374 unsigned long flags;

/*
 * dma channel priorities
 * ch 0 - 3,  16 - 19  <--> (0) highest
 * ch 4 - 7,  20 - 23  <--> (1)
 * ch 8 - 11, 24 - 27  <--> (2)
 * ch 12 - 15, 28 - 31 <--> (3) lowest
 */
0384 spin_lock_irqsave(&pdev->phy_lock, flags);
0385 for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
0386 for (i = 0; i < pdev->nr_chans; i++) {
0387 if (prio != (i & 0xf) >> 2)
0388 continue;
0389 phy = &pdev->phys[i];
0390 if (!phy->vchan) {
0391 phy->vchan = pchan;
0392 found = phy;
0393 goto out_unlock;
0394 }
0395 }
0396 }
0397
0398 out_unlock:
0399 spin_unlock_irqrestore(&pdev->phy_lock, flags);
0400 dev_dbg(&pchan->vc.chan.dev->device,
0401 "%s(): phy=%p(%d)\n", __func__, found,
0402 found ? found->idx : -1);
0403
0404 return found;
0405 }
0406
0407 static void pxad_free_phy(struct pxad_chan *chan)
0408 {
0409 struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
0410 unsigned long flags;
0411 u32 reg;
0412
0413 dev_dbg(&chan->vc.chan.dev->device,
0414 "%s(): freeing\n", __func__);
0415 if (!chan->phy)
0416 return;

/* clear the channel mapping in DRCMR */
0419 if (chan->drcmr <= pdev->nr_requestors) {
0420 reg = pxad_drcmr(chan->drcmr);
0421 writel_relaxed(0, chan->phy->base + reg);
0422 }
0423
0424 spin_lock_irqsave(&pdev->phy_lock, flags);
0425 chan->phy->vchan = NULL;
0426 chan->phy = NULL;
0427 spin_unlock_irqrestore(&pdev->phy_lock, flags);
0428 }
0429
0430 static bool is_chan_running(struct pxad_chan *chan)
0431 {
0432 u32 dcsr;
0433 struct pxad_phy *phy = chan->phy;
0434
0435 if (!phy)
0436 return false;
0437 dcsr = phy_readl_relaxed(phy, DCSR);
0438 return dcsr & PXA_DCSR_RUN;
0439 }
0440
0441 static bool is_running_chan_misaligned(struct pxad_chan *chan)
0442 {
0443 u32 dalgn;
0444
0445 BUG_ON(!chan->phy);
0446 dalgn = phy_readl_relaxed(chan->phy, DALGN);
0447 return dalgn & (BIT(chan->phy->idx));
0448 }
0449
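/*
 * phy_enable - Map the channel's requestor onto this phy (DRCMR), program
 * the misalignment bit in DALGN, then write DCSR with the RUN bit set
 * (enabling the stop interrupt and clearing any pending end/bus-error
 * status).
 */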
0450 static void phy_enable(struct pxad_phy *phy, bool misaligned)
0451 {
0452 struct pxad_device *pdev;
0453 u32 reg, dalgn;
0454
0455 if (!phy->vchan)
0456 return;
0457
0458 dev_dbg(&phy->vchan->vc.chan.dev->device,
0459 "%s(); phy=%p(%d) misaligned=%d\n", __func__,
0460 phy, phy->idx, misaligned);
0461
0462 pdev = to_pxad_dev(phy->vchan->vc.chan.device);
0463 if (phy->vchan->drcmr <= pdev->nr_requestors) {
0464 reg = pxad_drcmr(phy->vchan->drcmr);
0465 writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
0466 }
0467
0468 dalgn = phy_readl_relaxed(phy, DALGN);
0469 if (misaligned)
0470 dalgn |= BIT(phy->idx);
0471 else
0472 dalgn &= ~BIT(phy->idx);
0473 phy_writel_relaxed(phy, dalgn, DALGN);
0474
0475 phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
0476 PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
0477 }
0478
0479 static void phy_disable(struct pxad_phy *phy)
0480 {
0481 u32 dcsr;
0482
0483 if (!phy)
0484 return;
0485
0486 dcsr = phy_readl_relaxed(phy, DCSR);
0487 dev_dbg(&phy->vchan->vc.chan.dev->device,
0488 "%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
0489 phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
0490 }
0491
0492 static void pxad_launch_chan(struct pxad_chan *chan,
0493 struct pxad_desc_sw *desc)
0494 {
0495 dev_dbg(&chan->vc.chan.dev->device,
0496 "%s(): desc=%p\n", __func__, desc);
0497 if (!chan->phy) {
0498 chan->phy = lookup_phy(chan);
0499 if (!chan->phy) {
0500 dev_dbg(&chan->vc.chan.dev->device,
0501 "%s(): no free dma channel\n", __func__);
0502 return;
0503 }
0504 }
0505 chan->bus_error = 0;

/*
 * Program the first descriptor's address into the DMA controller,
 * then start the DMA transaction
 */
0511 phy_writel(chan->phy, desc->first, DDADR);
0512 phy_enable(chan->phy, chan->misaligned);
0513 wake_up(&chan->wq_state);
0514 }
0515
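/*
 * set_updater_desc - Program the last hw descriptor as an "updater": a tiny
 * transfer that copies 4 bytes of the updater descriptor over its own dtadr
 * field. That overwrite is what is_desc_completed() tests to detect that the
 * whole chain has finished. DMA_PREP_INTERRUPT enables the end interrupt on
 * this descriptor, and cyclic transfers re-link the previous descriptor back
 * to the first one.
 */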
0516 static void set_updater_desc(struct pxad_desc_sw *sw_desc,
0517 unsigned long flags)
0518 {
0519 struct pxad_desc_hw *updater =
0520 sw_desc->hw_desc[sw_desc->nb_desc - 1];
0521 dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;
0522
0523 updater->ddadr = DDADR_STOP;
0524 updater->dsadr = dma;
0525 updater->dtadr = dma + 8;
0526 updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
0527 (PXA_DCMD_LENGTH & sizeof(u32));
0528 if (flags & DMA_PREP_INTERRUPT)
0529 updater->dcmd |= PXA_DCMD_ENDIRQEN;
0530 if (sw_desc->cyclic)
0531 sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
0532 }
0533
0534 static bool is_desc_completed(struct virt_dma_desc *vd)
0535 {
0536 struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
0537 struct pxad_desc_hw *updater =
0538 sw_desc->hw_desc[sw_desc->nb_desc - 1];
0539
0540 return updater->dtadr != (updater->dsadr + 8);
0541 }
0542
0543 static void pxad_desc_chain(struct virt_dma_desc *vd1,
0544 struct virt_dma_desc *vd2)
0545 {
0546 struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
0547 struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
0548 dma_addr_t dma_to_chain;
0549
0550 dma_to_chain = desc2->first;
0551 desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
0552 }
0553
0554 static bool pxad_try_hotchain(struct virt_dma_chan *vc,
0555 struct virt_dma_desc *vd)
0556 {
0557 struct virt_dma_desc *vd_last_issued = NULL;
0558 struct pxad_chan *chan = to_pxad_chan(&vc->chan);

/*
 * Dma tx desc chaining: hot-chain the new descriptor onto the phy only
 * if the phy is still running and the chaining does not change the
 * DALGN alignment setting. Chaining is successful if the channel is
 * still running afterwards, or if the chained descriptor has already
 * completed by the time it was linked in.
 */
0567 if (is_chan_running(chan)) {
0568 BUG_ON(list_empty(&vc->desc_issued));
0569
0570 if (!is_running_chan_misaligned(chan) &&
0571 to_pxad_sw_desc(vd)->misaligned)
0572 return false;
0573
0574 vd_last_issued = list_entry(vc->desc_issued.prev,
0575 struct virt_dma_desc, node);
0576 pxad_desc_chain(vd_last_issued, vd);
0577 if (is_chan_running(chan) || is_desc_completed(vd))
0578 return true;
0579 }
0580
0581 return false;
0582 }
0583
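/*
 * clear_chan_irq - Acknowledge the interrupt of one phy. Returns PXA_DCSR_RUN
 * if this phy did not raise an interrupt (its DINT bit is clear); otherwise
 * the DCSR status bits are written back to clear them and returned.
 */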
0584 static unsigned int clear_chan_irq(struct pxad_phy *phy)
0585 {
0586 u32 dcsr;
0587 u32 dint = readl(phy->base + DINT);
0588
0589 if (!(dint & BIT(phy->idx)))
0590 return PXA_DCSR_RUN;

/* clear the IRQ by writing the read DCSR status bits back */
0593 dcsr = phy_readl_relaxed(phy, DCSR);
0594 phy_writel(phy, dcsr, DCSR);
0595 if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
0596 dev_warn(&phy->vchan->vc.chan.dev->device,
0597 "%s(chan=%p): PXA_DCSR_BUSERR\n",
0598 __func__, &phy->vchan);
0599
0600 return dcsr & ~PXA_DCSR_RUN;
0601 }
0602
0603 static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
0604 {
0605 struct pxad_phy *phy = dev_id;
0606 struct pxad_chan *chan = phy->vchan;
0607 struct virt_dma_desc *vd, *tmp;
0608 unsigned int dcsr;
0609 bool vd_completed;
0610 dma_cookie_t last_started = 0;
0611
0612 BUG_ON(!chan);
0613
0614 dcsr = clear_chan_irq(phy);
0615 if (dcsr & PXA_DCSR_RUN)
0616 return IRQ_NONE;
0617
0618 spin_lock(&chan->vc.lock);
0619 list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
0620 vd_completed = is_desc_completed(vd);
0621 dev_dbg(&chan->vc.chan.dev->device,
0622 "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
0623 __func__, vd, vd->tx.cookie, vd_completed,
0624 dcsr);
0625 last_started = vd->tx.cookie;
0626 if (to_pxad_sw_desc(vd)->cyclic) {
0627 vchan_cyclic_callback(vd);
0628 break;
0629 }
0630 if (vd_completed) {
0631 list_del(&vd->node);
0632 vchan_cookie_complete(vd);
0633 } else {
0634 break;
0635 }
0636 }
0637
0638 if (dcsr & PXA_DCSR_BUSERR) {
0639 chan->bus_error = last_started;
0640 phy_disable(phy);
0641 }
0642
0643 if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
0644 dev_dbg(&chan->vc.chan.dev->device,
0645 "%s(): channel stopped, submitted_empty=%d issued_empty=%d",
0646 __func__,
0647 list_empty(&chan->vc.desc_submitted),
0648 list_empty(&chan->vc.desc_issued));
0649 phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);
0650
0651 if (list_empty(&chan->vc.desc_issued)) {
0652 chan->misaligned =
0653 !list_empty(&chan->vc.desc_submitted);
0654 } else {
0655 vd = list_first_entry(&chan->vc.desc_issued,
0656 struct virt_dma_desc, node);
0657 pxad_launch_chan(chan, to_pxad_sw_desc(vd));
0658 }
0659 }
0660 spin_unlock(&chan->vc.lock);
0661 wake_up(&chan->wq_state);
0662
0663 return IRQ_HANDLED;
0664 }
0665
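/*
 * pxad_int_handler - Top-level interrupt handler: walk the DINT bitmask and
 * dispatch to pxad_chan_handler() for every phy that raised an interrupt.
 */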
0666 static irqreturn_t pxad_int_handler(int irq, void *dev_id)
0667 {
0668 struct pxad_device *pdev = dev_id;
0669 struct pxad_phy *phy;
0670 u32 dint = readl(pdev->base + DINT);
0671 int i, ret = IRQ_NONE;
0672
0673 while (dint) {
0674 i = __ffs(dint);
0675 dint &= (dint - 1);
0676 phy = &pdev->phys[i];
0677 if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
0678 ret = IRQ_HANDLED;
0679 }
0680
0681 return ret;
0682 }
0683
0684 static int pxad_alloc_chan_resources(struct dma_chan *dchan)
0685 {
0686 struct pxad_chan *chan = to_pxad_chan(dchan);
0687 struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
0688
0689 if (chan->desc_pool)
0690 return 1;
0691
0692 chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
0693 pdev->slave.dev,
0694 sizeof(struct pxad_desc_hw),
0695 __alignof__(struct pxad_desc_hw),
0696 0);
0697 if (!chan->desc_pool) {
0698 dev_err(&chan->vc.chan.dev->device,
0699 "%s(): unable to allocate descriptor pool\n",
0700 __func__);
0701 return -ENOMEM;
0702 }
0703
0704 return 1;
0705 }
0706
0707 static void pxad_free_chan_resources(struct dma_chan *dchan)
0708 {
0709 struct pxad_chan *chan = to_pxad_chan(dchan);
0710
0711 vchan_free_chan_resources(&chan->vc);
0712 dma_pool_destroy(chan->desc_pool);
0713 chan->desc_pool = NULL;
0714
0715 chan->drcmr = U32_MAX;
0716 chan->prio = PXAD_PRIO_LOWEST;
0717 }
0718
0719 static void pxad_free_desc(struct virt_dma_desc *vd)
0720 {
0721 int i;
0722 dma_addr_t dma;
0723 struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
0724
0725 BUG_ON(sw_desc->nb_desc == 0);
0726 for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
0727 if (i > 0)
0728 dma = sw_desc->hw_desc[i - 1]->ddadr;
0729 else
0730 dma = sw_desc->first;
0731 dma_pool_free(sw_desc->desc_pool,
0732 sw_desc->hw_desc[i], dma);
0733 }
0734 sw_desc->nb_desc = 0;
0735 kfree(sw_desc);
0736 }
0737
0738 static struct pxad_desc_sw *
0739 pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
0740 {
0741 struct pxad_desc_sw *sw_desc;
0742 dma_addr_t dma;
0743 int i;
0744
0745 sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
0746 GFP_NOWAIT);
0747 if (!sw_desc)
0748 return NULL;
0749 sw_desc->desc_pool = chan->desc_pool;
0750
0751 for (i = 0; i < nb_hw_desc; i++) {
0752 sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
0753 GFP_NOWAIT, &dma);
0754 if (!sw_desc->hw_desc[i]) {
0755 dev_err(&chan->vc.chan.dev->device,
0756 "%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
0757 __func__, i, sw_desc->desc_pool);
0758 goto err;
0759 }
0760
0761 if (i == 0)
0762 sw_desc->first = dma;
0763 else
0764 sw_desc->hw_desc[i - 1]->ddadr = dma;
0765 sw_desc->nb_desc++;
0766 }
0767
0768 return sw_desc;
0769 err:
0770 pxad_free_desc(&sw_desc->vd);
0771 return NULL;
0772 }
0773
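/*
 * pxad_tx_submit - Assign a cookie and queue the descriptor. If the phy is
 * still running and alignment permits, the descriptor is hot-chained straight
 * onto the issued list; otherwise it is chained behind the last submitted
 * descriptor (when alignment allows) and left on the submitted list.
 */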
0774 static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
0775 {
0776 struct virt_dma_chan *vc = to_virt_chan(tx->chan);
0777 struct pxad_chan *chan = to_pxad_chan(&vc->chan);
0778 struct virt_dma_desc *vd_chained = NULL,
0779 *vd = container_of(tx, struct virt_dma_desc, tx);
0780 dma_cookie_t cookie;
0781 unsigned long flags;
0782
0783 set_updater_desc(to_pxad_sw_desc(vd), tx->flags);
0784
0785 spin_lock_irqsave(&vc->lock, flags);
0786 cookie = dma_cookie_assign(tx);
0787
0788 if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
0789 list_move_tail(&vd->node, &vc->desc_issued);
0790 dev_dbg(&chan->vc.chan.dev->device,
0791 "%s(): txd %p[%x]: submitted (hot linked)\n",
0792 __func__, vd, cookie);
0793 goto out;
0794 }

/*
 * Fallback to placing the tx in the submitted queue
 */
0799 if (!list_empty(&vc->desc_submitted)) {
0800 vd_chained = list_entry(vc->desc_submitted.prev,
0801 struct virt_dma_desc, node);
/*
 * Only chain the descriptors if no new misalignment is introduced.
 * If a new misalignment is chained, let the channel stop, and be
 * relaunched in misalign mode from the irq handler.
 */
0808 if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
0809 pxad_desc_chain(vd_chained, vd);
0810 else
0811 vd_chained = NULL;
0812 }
0813 dev_dbg(&chan->vc.chan.dev->device,
0814 "%s(): txd %p[%x]: submitted (%s linked)\n",
0815 __func__, vd, cookie, vd_chained ? "cold" : "not");
0816 list_move_tail(&vd->node, &vc->desc_submitted);
0817 chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;
0818
0819 out:
0820 spin_unlock_irqrestore(&vc->lock, flags);
0821 return cookie;
0822 }
0823
0824 static void pxad_issue_pending(struct dma_chan *dchan)
0825 {
0826 struct pxad_chan *chan = to_pxad_chan(dchan);
0827 struct virt_dma_desc *vd_first;
0828 unsigned long flags;
0829
0830 spin_lock_irqsave(&chan->vc.lock, flags);
0831 if (list_empty(&chan->vc.desc_submitted))
0832 goto out;
0833
0834 vd_first = list_first_entry(&chan->vc.desc_submitted,
0835 struct virt_dma_desc, node);
0836 dev_dbg(&chan->vc.chan.dev->device,
0837 "%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);
0838
0839 vchan_issue_pending(&chan->vc);
0840 if (!pxad_try_hotchain(&chan->vc, vd_first))
0841 pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
0842 out:
0843 spin_unlock_irqrestore(&chan->vc.lock, flags);
0844 }
0845
0846 static inline struct dma_async_tx_descriptor *
0847 pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
0848 unsigned long tx_flags)
0849 {
0850 struct dma_async_tx_descriptor *tx;
0851 struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
0852
0853 INIT_LIST_HEAD(&vd->node);
0854 tx = vchan_tx_prep(vc, vd, tx_flags);
0855 tx->tx_submit = pxad_tx_submit;
0856 dev_dbg(&chan->vc.chan.dev->device,
0857 "%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
0858 vc, vd, vd->tx.cookie,
0859 tx_flags);
0860
0861 return tx;
0862 }
0863
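/*
 * pxad_get_config - Translate the channel's slave configuration and the
 * transfer direction into a DCMD value and the device-side source/target
 * addresses.
 */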
0864 static void pxad_get_config(struct pxad_chan *chan,
0865 enum dma_transfer_direction dir,
0866 u32 *dcmd, u32 *dev_src, u32 *dev_dst)
0867 {
0868 u32 maxburst = 0, dev_addr = 0;
0869 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
0870 struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
0871
0872 *dcmd = 0;
0873 if (dir == DMA_DEV_TO_MEM) {
0874 maxburst = chan->cfg.src_maxburst;
0875 width = chan->cfg.src_addr_width;
0876 dev_addr = chan->cfg.src_addr;
0877 *dev_src = dev_addr;
0878 *dcmd |= PXA_DCMD_INCTRGADDR;
0879 if (chan->drcmr <= pdev->nr_requestors)
0880 *dcmd |= PXA_DCMD_FLOWSRC;
0881 }
0882 if (dir == DMA_MEM_TO_DEV) {
0883 maxburst = chan->cfg.dst_maxburst;
0884 width = chan->cfg.dst_addr_width;
0885 dev_addr = chan->cfg.dst_addr;
0886 *dev_dst = dev_addr;
0887 *dcmd |= PXA_DCMD_INCSRCADDR;
0888 if (chan->drcmr <= pdev->nr_requestors)
0889 *dcmd |= PXA_DCMD_FLOWTRG;
0890 }
0891 if (dir == DMA_MEM_TO_MEM)
0892 *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
0893 PXA_DCMD_INCSRCADDR;
0894
0895 dev_dbg(&chan->vc.chan.dev->device,
0896 "%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
0897 __func__, dev_addr, maxburst, width, dir);
0898
0899 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
0900 *dcmd |= PXA_DCMD_WIDTH1;
0901 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
0902 *dcmd |= PXA_DCMD_WIDTH2;
0903 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
0904 *dcmd |= PXA_DCMD_WIDTH4;
0905
0906 if (maxburst == 8)
0907 *dcmd |= PXA_DCMD_BURST8;
0908 else if (maxburst == 16)
0909 *dcmd |= PXA_DCMD_BURST16;
0910 else if (maxburst == 32)
0911 *dcmd |= PXA_DCMD_BURST32;
0912 }
0913
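/*
 * pxad_prep_memcpy - Split a memory-to-memory copy into hw descriptors of at
 * most PDMA_MAX_DESC_BYTES each, plus one trailing updater descriptor.
 */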
0914 static struct dma_async_tx_descriptor *
0915 pxad_prep_memcpy(struct dma_chan *dchan,
0916 dma_addr_t dma_dst, dma_addr_t dma_src,
0917 size_t len, unsigned long flags)
0918 {
0919 struct pxad_chan *chan = to_pxad_chan(dchan);
0920 struct pxad_desc_sw *sw_desc;
0921 struct pxad_desc_hw *hw_desc;
0922 u32 dcmd;
0923 unsigned int i, nb_desc = 0;
0924 size_t copy;
0925
0926 if (!dchan || !len)
0927 return NULL;
0928
0929 dev_dbg(&chan->vc.chan.dev->device,
0930 "%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
0931 __func__, (unsigned long)dma_dst, (unsigned long)dma_src,
0932 len, flags);
0933 pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);
0934
0935 nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
0936 sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
0937 if (!sw_desc)
0938 return NULL;
0939 sw_desc->len = len;
0940
0941 if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
0942 !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
0943 sw_desc->misaligned = true;
0944
0945 i = 0;
0946 do {
0947 hw_desc = sw_desc->hw_desc[i++];
0948 copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
0949 hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
0950 hw_desc->dsadr = dma_src;
0951 hw_desc->dtadr = dma_dst;
0952 len -= copy;
0953 dma_src += copy;
0954 dma_dst += copy;
0955 } while (len);
0956 set_updater_desc(sw_desc, flags);
0957
0958 return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
0959 }
0960
0961 static struct dma_async_tx_descriptor *
0962 pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
0963 unsigned int sg_len, enum dma_transfer_direction dir,
0964 unsigned long flags, void *context)
0965 {
0966 struct pxad_chan *chan = to_pxad_chan(dchan);
0967 struct pxad_desc_sw *sw_desc;
0968 size_t len, avail;
0969 struct scatterlist *sg;
0970 dma_addr_t dma;
0971 u32 dcmd, dsadr = 0, dtadr = 0;
0972 unsigned int nb_desc = 0, i, j = 0;
0973
0974 if ((sgl == NULL) || (sg_len == 0))
0975 return NULL;
0976
0977 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
0978 dev_dbg(&chan->vc.chan.dev->device,
0979 "%s(): dir=%d flags=%lx\n", __func__, dir, flags);
0980
0981 for_each_sg(sgl, sg, sg_len, i)
0982 nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
0983 sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
0984 if (!sw_desc)
0985 return NULL;
0986
0987 for_each_sg(sgl, sg, sg_len, i) {
0988 dma = sg_dma_address(sg);
0989 avail = sg_dma_len(sg);
0990 sw_desc->len += avail;
0991
0992 do {
0993 len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
0994 if (dma & 0x7)
0995 sw_desc->misaligned = true;
0996
0997 sw_desc->hw_desc[j]->dcmd =
0998 dcmd | (PXA_DCMD_LENGTH & len);
0999 sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
1000 sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;
1001
1002 dma += len;
1003 avail -= len;
1004 } while (avail);
1005 }
1006 set_updater_desc(sw_desc, flags);
1007
1008 return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1009 }
1010
1011 static struct dma_async_tx_descriptor *
1012 pxad_prep_dma_cyclic(struct dma_chan *dchan,
1013 dma_addr_t buf_addr, size_t len, size_t period_len,
1014 enum dma_transfer_direction dir, unsigned long flags)
1015 {
1016 struct pxad_chan *chan = to_pxad_chan(dchan);
1017 struct pxad_desc_sw *sw_desc;
1018 struct pxad_desc_hw **phw_desc;
1019 dma_addr_t dma;
1020 u32 dcmd, dsadr = 0, dtadr = 0;
1021 unsigned int nb_desc = 0;
1022
1023 if (!dchan || !len || !period_len)
1024 return NULL;
1025 if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
1026 dev_err(&chan->vc.chan.dev->device,
1027 "Unsupported direction for cyclic DMA\n");
1028 return NULL;
1029 }
1030
1031 if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
1032 !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
1033 return NULL;
1034
1035 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1036 dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
1037 dev_dbg(&chan->vc.chan.dev->device,
1038 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
1039 __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
1040
1041 nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
1042 nb_desc *= DIV_ROUND_UP(len, period_len);
1043 sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
1044 if (!sw_desc)
1045 return NULL;
1046 sw_desc->cyclic = true;
1047 sw_desc->len = len;
1048
1049 phw_desc = sw_desc->hw_desc;
1050 dma = buf_addr;
1051 do {
1052 phw_desc[0]->dsadr = dsadr ? dsadr : dma;
1053 phw_desc[0]->dtadr = dtadr ? dtadr : dma;
1054 phw_desc[0]->dcmd = dcmd;
1055 phw_desc++;
1056 dma += period_len;
1057 len -= period_len;
1058 } while (len);
1059 set_updater_desc(sw_desc, flags);
1060
1061 return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1062 }
1063
1064 static int pxad_config(struct dma_chan *dchan,
1065 struct dma_slave_config *cfg)
1066 {
1067 struct pxad_chan *chan = to_pxad_chan(dchan);
1068
1069 if (!dchan)
1070 return -EINVAL;
1071
1072 chan->cfg = *cfg;
1073 return 0;
1074 }
1075
1076 static int pxad_terminate_all(struct dma_chan *dchan)
1077 {
1078 struct pxad_chan *chan = to_pxad_chan(dchan);
1079 struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
1080 struct virt_dma_desc *vd = NULL;
1081 unsigned long flags;
1082 struct pxad_phy *phy;
1083 LIST_HEAD(head);
1084
1085 dev_dbg(&chan->vc.chan.dev->device,
1086 "%s(): vchan %p: terminate all\n", __func__, &chan->vc);
1087
1088 spin_lock_irqsave(&chan->vc.lock, flags);
1089 vchan_get_all_descriptors(&chan->vc, &head);
1090
1091 list_for_each_entry(vd, &head, node) {
1092 dev_dbg(&chan->vc.chan.dev->device,
1093 "%s(): cancelling txd %p[%x] (completed=%d)", __func__,
1094 vd, vd->tx.cookie, is_desc_completed(vd));
1095 }
1096
1097 phy = chan->phy;
1098 if (phy) {
1099 phy_disable(chan->phy);
1100 pxad_free_phy(chan);
1101 chan->phy = NULL;
1102 spin_lock(&pdev->phy_lock);
1103 phy->vchan = NULL;
1104 spin_unlock(&pdev->phy_lock);
1105 }
1106 spin_unlock_irqrestore(&chan->vc.lock, flags);
1107 vchan_dma_desc_free_list(&chan->vc, &head);
1108
1109 return 0;
1110 }
1111
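/*
 * pxad_residue - Compute the number of bytes left to transfer for @cookie by
 * comparing the phy's current source (or target) address against the address
 * range covered by each hw descriptor.
 */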
1112 static unsigned int pxad_residue(struct pxad_chan *chan,
1113 dma_cookie_t cookie)
1114 {
1115 struct virt_dma_desc *vd = NULL;
1116 struct pxad_desc_sw *sw_desc = NULL;
1117 struct pxad_desc_hw *hw_desc = NULL;
1118 u32 curr, start, len, end, residue = 0;
1119 unsigned long flags;
1120 bool passed = false;
1121 int i;

/*
 * If the channel does not have a phy pointer anymore, it has already
 * been completed. Therefore, its residue is 0.
 */
1127 if (!chan->phy)
1128 return 0;
1129
1130 spin_lock_irqsave(&chan->vc.lock, flags);
1131
1132 vd = vchan_find_desc(&chan->vc, cookie);
1133 if (!vd)
1134 goto out;
1135
1136 sw_desc = to_pxad_sw_desc(vd);
1137 if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
1138 curr = phy_readl_relaxed(chan->phy, DSADR);
1139 else
1140 curr = phy_readl_relaxed(chan->phy, DTADR);

/*
 * curr has to be actually read before checking descriptor completion,
 * so that a curr value inside the status updater descriptor implies
 * that the following completion test returns true; the barrier
 * prevents reordering of the curr load and the test.
 */
1148 rmb();
1149 if (is_desc_completed(vd))
1150 goto out;
1151
1152 for (i = 0; i < sw_desc->nb_desc - 1; i++) {
1153 hw_desc = sw_desc->hw_desc[i];
1154 if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
1155 start = hw_desc->dsadr;
1156 else
1157 start = hw_desc->dtadr;
1158 len = hw_desc->dcmd & PXA_DCMD_LENGTH;
1159 end = start + len;

/*
 * 'passed' is latched once we find the descriptor whose boundaries
 * contain curr. All descriptors that occur in the list _after_ that
 * partially handled descriptor are still to be processed and are
 * hence added to the residual.
 */
1170 if (passed) {
1171 residue += len;
1172 } else if (curr >= start && curr <= end) {
1173 residue += end - curr;
1174 passed = true;
1175 }
1176 }
1177 if (!passed)
1178 residue = sw_desc->len;
1179
1180 out:
1181 spin_unlock_irqrestore(&chan->vc.lock, flags);
1182 dev_dbg(&chan->vc.chan.dev->device,
1183 "%s(): txd %p[%x] sw_desc=%p: %d\n",
1184 __func__, vd, cookie, sw_desc, residue);
1185 return residue;
1186 }
1187
1188 static enum dma_status pxad_tx_status(struct dma_chan *dchan,
1189 dma_cookie_t cookie,
1190 struct dma_tx_state *txstate)
1191 {
1192 struct pxad_chan *chan = to_pxad_chan(dchan);
1193 enum dma_status ret;
1194
1195 if (cookie == chan->bus_error)
1196 return DMA_ERROR;
1197
1198 ret = dma_cookie_status(dchan, cookie, txstate);
1199 if (likely(txstate && (ret != DMA_ERROR)))
1200 dma_set_residue(txstate, pxad_residue(chan, cookie));
1201
1202 return ret;
1203 }
1204
1205 static void pxad_synchronize(struct dma_chan *dchan)
1206 {
1207 struct pxad_chan *chan = to_pxad_chan(dchan);
1208
1209 wait_event(chan->wq_state, !is_chan_running(chan));
1210 vchan_synchronize(&chan->vc);
1211 }
1212
1213 static void pxad_free_channels(struct dma_device *dmadev)
1214 {
1215 struct pxad_chan *c, *cn;
1216
1217 list_for_each_entry_safe(c, cn, &dmadev->channels,
1218 vc.chan.device_node) {
1219 list_del(&c->vc.chan.device_node);
1220 tasklet_kill(&c->vc.task);
1221 }
1222 }
1223
1224 static int pxad_remove(struct platform_device *op)
1225 {
1226 struct pxad_device *pdev = platform_get_drvdata(op);
1227
1228 pxad_cleanup_debugfs(pdev);
1229 pxad_free_channels(&pdev->slave);
1230 return 0;
1231 }
1232
1233 static int pxad_init_phys(struct platform_device *op,
1234 struct pxad_device *pdev,
1235 unsigned int nb_phy_chans)
1236 {
int irq0, irq, nr_irq = 0, i, ret = 0;
1238 struct pxad_phy *phy;
1239
1240 irq0 = platform_get_irq(op, 0);
1241 if (irq0 < 0)
1242 return irq0;
1243
1244 pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
1245 sizeof(pdev->phys[0]), GFP_KERNEL);
1246 if (!pdev->phys)
1247 return -ENOMEM;
1248
1249 for (i = 0; i < nb_phy_chans; i++)
1250 if (platform_get_irq(op, i) > 0)
1251 nr_irq++;
1252
1253 for (i = 0; i < nb_phy_chans; i++) {
1254 phy = &pdev->phys[i];
1255 phy->base = pdev->base;
1256 phy->idx = i;
1257 irq = platform_get_irq(op, i);
1258 if ((nr_irq > 1) && (irq > 0))
1259 ret = devm_request_irq(&op->dev, irq,
1260 pxad_chan_handler,
1261 IRQF_SHARED, "pxa-dma", phy);
1262 if ((nr_irq == 1) && (i == 0))
1263 ret = devm_request_irq(&op->dev, irq0,
1264 pxad_int_handler,
1265 IRQF_SHARED, "pxa-dma", pdev);
1266 if (ret) {
1267 dev_err(pdev->slave.dev,
1268 "%s(): can't request irq %d:%d\n", __func__,
1269 irq, ret);
1270 return ret;
1271 }
1272 }
1273
1274 return 0;
1275 }
1276
1277 static const struct of_device_id pxad_dt_ids[] = {
1278 { .compatible = "marvell,pdma-1.0", },
1279 {}
1280 };
1281 MODULE_DEVICE_TABLE(of, pxad_dt_ids);
1282
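/*
 * pxad_dma_xlate - Device-tree #dma-cells translation: args[0] is the DRCMR
 * requestor line and args[1] the requested channel priority.
 */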
1283 static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
1284 struct of_dma *ofdma)
1285 {
1286 struct pxad_device *d = ofdma->of_dma_data;
1287 struct dma_chan *chan;
1288
1289 chan = dma_get_any_slave_channel(&d->slave);
1290 if (!chan)
1291 return NULL;
1292
1293 to_pxad_chan(chan)->drcmr = dma_spec->args[0];
1294 to_pxad_chan(chan)->prio = dma_spec->args[1];
1295
1296 return chan;
1297 }
1298
1299 static int pxad_init_dmadev(struct platform_device *op,
1300 struct pxad_device *pdev,
1301 unsigned int nr_phy_chans,
1302 unsigned int nr_requestors)
1303 {
1304 int ret;
1305 unsigned int i;
1306 struct pxad_chan *c;
1307
1308 pdev->nr_chans = nr_phy_chans;
1309 pdev->nr_requestors = nr_requestors;
1310 INIT_LIST_HEAD(&pdev->slave.channels);
1311 pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
1312 pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
1313 pdev->slave.device_tx_status = pxad_tx_status;
1314 pdev->slave.device_issue_pending = pxad_issue_pending;
1315 pdev->slave.device_config = pxad_config;
1316 pdev->slave.device_synchronize = pxad_synchronize;
1317 pdev->slave.device_terminate_all = pxad_terminate_all;
1318
1319 if (op->dev.coherent_dma_mask)
1320 dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
1321 else
1322 dma_set_mask(&op->dev, DMA_BIT_MASK(32));
1323
1324 ret = pxad_init_phys(op, pdev, nr_phy_chans);
1325 if (ret)
1326 return ret;
1327
1328 for (i = 0; i < nr_phy_chans; i++) {
1329 c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
1330 if (!c)
1331 return -ENOMEM;
1332
1333 c->drcmr = U32_MAX;
1334 c->prio = PXAD_PRIO_LOWEST;
1335 c->vc.desc_free = pxad_free_desc;
1336 vchan_init(&c->vc, &pdev->slave);
1337 init_waitqueue_head(&c->wq_state);
1338 }
1339
1340 return dmaenginem_async_device_register(&pdev->slave);
1341 }
1342
1343 static int pxad_probe(struct platform_device *op)
1344 {
1345 struct pxad_device *pdev;
1346 const struct of_device_id *of_id;
1347 const struct dma_slave_map *slave_map = NULL;
1348 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
1349 struct resource *iores;
1350 int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
1351 const enum dma_slave_buswidth widths =
1352 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
1353 DMA_SLAVE_BUSWIDTH_4_BYTES;
1354
1355 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
1356 if (!pdev)
1357 return -ENOMEM;
1358
1359 spin_lock_init(&pdev->phy_lock);
1360
1361 iores = platform_get_resource(op, IORESOURCE_MEM, 0);
1362 pdev->base = devm_ioremap_resource(&op->dev, iores);
1363 if (IS_ERR(pdev->base))
1364 return PTR_ERR(pdev->base);
1365
1366 of_id = of_match_device(pxad_dt_ids, &op->dev);
1367 if (of_id) {
/* Parse the new and the deprecated dma-channels properties */
1369 if (of_property_read_u32(op->dev.of_node, "dma-channels",
1370 &dma_channels))
1371 of_property_read_u32(op->dev.of_node, "#dma-channels",
1372 &dma_channels);
/* Parse the new and the deprecated dma-requests properties */
1374 ret = of_property_read_u32(op->dev.of_node, "dma-requests",
1375 &nb_requestors);
1376 if (ret)
1377 ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
1378 &nb_requestors);
1379 if (ret) {
1380 dev_warn(pdev->slave.dev,
1381 "#dma-requests set to default 32 as missing in OF: %d",
1382 ret);
1383 nb_requestors = 32;
1384 }
1385 } else if (pdata && pdata->dma_channels) {
1386 dma_channels = pdata->dma_channels;
1387 nb_requestors = pdata->nb_requestors;
1388 slave_map = pdata->slave_map;
1389 slave_map_cnt = pdata->slave_map_cnt;
1390 } else {
1391 dma_channels = 32;
1392 }
1393
1394 dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
1395 dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
1396 dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
1397 dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
1398 pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
1399 pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
1400 pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
1401 pdev->slave.filter.map = slave_map;
1402 pdev->slave.filter.mapcnt = slave_map_cnt;
1403 pdev->slave.filter.fn = pxad_filter_fn;
1404
1405 pdev->slave.copy_align = PDMA_ALIGNMENT;
1406 pdev->slave.src_addr_widths = widths;
1407 pdev->slave.dst_addr_widths = widths;
1408 pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1409 pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1410 pdev->slave.descriptor_reuse = true;
1411
1412 pdev->slave.dev = &op->dev;
1413 ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
1414 if (ret) {
1415 dev_err(pdev->slave.dev, "unable to register\n");
1416 return ret;
1417 }
1418
1419 if (op->dev.of_node) {
/* Device-tree DMA controller registration */
1421 ret = of_dma_controller_register(op->dev.of_node,
1422 pxad_dma_xlate, pdev);
1423 if (ret < 0) {
1424 dev_err(pdev->slave.dev,
1425 "of_dma_controller_register failed\n");
1426 return ret;
1427 }
1428 }
1429
1430 platform_set_drvdata(op, pdev);
1431 pxad_init_debugfs(pdev);
1432 dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
1433 dma_channels, nb_requestors);
1434 return 0;
1435 }
1436
1437 static const struct platform_device_id pxad_id_table[] = {
1438 { "pxa-dma", },
1439 { },
1440 };
1441
1442 static struct platform_driver pxad_driver = {
1443 .driver = {
1444 .name = "pxa-dma",
1445 .of_match_table = pxad_dt_ids,
1446 },
1447 .id_table = pxad_id_table,
1448 .probe = pxad_probe,
1449 .remove = pxad_remove,
1450 };
1451
1452 static bool pxad_filter_fn(struct dma_chan *chan, void *param)
1453 {
1454 struct pxad_chan *c = to_pxad_chan(chan);
1455 struct pxad_param *p = param;
1456
1457 if (chan->device->dev->driver != &pxad_driver.driver)
1458 return false;
1459
1460 c->drcmr = p->drcmr;
1461 c->prio = p->prio;
1462
1463 return true;
1464 }
1465
1466 module_platform_driver(pxad_driver);
1467
1468 MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
1469 MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
1470 MODULE_LICENSE("GPL v2");