/*
 * TI EDMA3 (Enhanced DMA) dmaengine driver
 */

0008 #include <linux/dmaengine.h>
0009 #include <linux/dma-mapping.h>
0010 #include <linux/bitmap.h>
0011 #include <linux/err.h>
0012 #include <linux/init.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/list.h>
0015 #include <linux/module.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/slab.h>
0018 #include <linux/spinlock.h>
0019 #include <linux/of.h>
0020 #include <linux/of_dma.h>
0021 #include <linux/of_irq.h>
0022 #include <linux/of_address.h>
0023 #include <linux/of_device.h>
0024 #include <linux/pm_runtime.h>
0025
0026 #include <linux/platform_data/edma.h>
0027
0028 #include "../dmaengine.h"
0029 #include "../virt-dma.h"

/* Offsets matching "struct edmacc_param" */
0032 #define PARM_OPT 0x00
0033 #define PARM_SRC 0x04
0034 #define PARM_A_B_CNT 0x08
0035 #define PARM_DST 0x0c
0036 #define PARM_SRC_DST_BIDX 0x10
0037 #define PARM_LINK_BCNTRLD 0x14
0038 #define PARM_SRC_DST_CIDX 0x18
0039 #define PARM_CCNT 0x1c
0040
0041 #define PARM_SIZE 0x20

/* Offsets for EDMA CC global channel registers and their shadows */
0044 #define SH_ER 0x00
0045 #define SH_ECR 0x08
0046 #define SH_ESR 0x10
0047 #define SH_CER 0x18
0048 #define SH_EER 0x20
0049 #define SH_EECR 0x28
0050 #define SH_EESR 0x30
0051 #define SH_SER 0x38
0052 #define SH_SECR 0x40
0053 #define SH_IER 0x50
0054 #define SH_IECR 0x58
0055 #define SH_IESR 0x60
0056 #define SH_IPR 0x68
0057 #define SH_ICR 0x70
0058 #define SH_IEVAL 0x78
0059 #define SH_QER 0x80
0060 #define SH_QEER 0x84
0061 #define SH_QEECR 0x88
0062 #define SH_QEESR 0x8c
0063 #define SH_QSER 0x90
0064 #define SH_QSECR 0x94
0065 #define SH_SIZE 0x200

/* Offsets for EDMA CC global registers */
0068 #define EDMA_REV 0x0000
0069 #define EDMA_CCCFG 0x0004
0070 #define EDMA_QCHMAP 0x0200
0071 #define EDMA_DMAQNUM 0x0240
0072 #define EDMA_QDMAQNUM 0x0260
0073 #define EDMA_QUETCMAP 0x0280
0074 #define EDMA_QUEPRI 0x0284
0075 #define EDMA_EMR 0x0300
0076 #define EDMA_EMCR 0x0308
0077 #define EDMA_QEMR 0x0310
0078 #define EDMA_QEMCR 0x0314
0079 #define EDMA_CCERR 0x0318
0080 #define EDMA_CCERRCLR 0x031c
0081 #define EDMA_EEVAL 0x0320
0082 #define EDMA_DRAE 0x0340
0083 #define EDMA_QRAE 0x0380
0084 #define EDMA_QUEEVTENTRY 0x0400
0085 #define EDMA_QSTAT 0x0600
0086 #define EDMA_QWMTHRA 0x0620
0087 #define EDMA_QWMTHRB 0x0624
0088 #define EDMA_CCSTAT 0x0640
0089
0090 #define EDMA_M 0x1000
0091 #define EDMA_ECR 0x1008
0092 #define EDMA_ECRH 0x100C
0093 #define EDMA_SHADOW0 0x2000
0094 #define EDMA_PARM 0x4000
0095
0096 #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
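
/*
 * Illustration: PaRAM slot 3 lives at PARM_OFFSET(3) = 0x4000 + (3 << 5) =
 * 0x4060; each slot is PARM_SIZE (0x20) bytes, i.e. one struct edmacc_param.
 */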
0097
0098 #define EDMA_DCHMAP 0x0100

/* CCCFG register */
0101 #define GET_NUM_DMACH(x) (x & 0x7)
0102 #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4)
0103 #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12)
0104 #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16)
0105 #define GET_NUM_REGN(x) ((x & 0x300000) >> 20)
0106 #define CHMAP_EXIST BIT(24)

/* CCSTAT register */
0109 #define EDMA_CCSTAT_ACTV BIT(4)

/*
 * Use at most 20 PaRAM slots per channel: descriptors with more SG
 * elements than this are split and re-programmed in MAX_NR_SG sized
 * batches by edma_execute().
 */
0118 #define MAX_NR_SG 20
0119 #define EDMA_MAX_SLOTS MAX_NR_SG
0120 #define EDMA_DESCRIPTORS 16
0121
0122 #define EDMA_CHANNEL_ANY -1
0123 #define EDMA_SLOT_ANY -1
0124 #define EDMA_CONT_PARAMS_ANY 1001
0125 #define EDMA_CONT_PARAMS_FIXED_EXACT 1002
0126 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/*
 * 64bit array registers are split into two 32bit registers:
 * reg0: channel/event 0-31
 * reg1: channel/event 32-63
 *
 * bit 5 in the channel number tells the array index (0/1)
 * bit 0-4 (0x1f) is the bit offset within the register
 */
0136 #define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5)
0137 #define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f))
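
/*
 * Example: channel 40 maps to array index 40 >> 5 = 1 and bit 40 & 0x1f = 8,
 * i.e. bit 8 of the second register of the pair.
 */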

/* PaRAM slots are laid out like this */
0140 struct edmacc_param {
0141 u32 opt;
0142 u32 src;
0143 u32 a_b_cnt;
0144 u32 dst;
0145 u32 src_dst_bidx;
0146 u32 link_bcntrld;
0147 u32 src_dst_cidx;
0148 u32 ccnt;
0149 } __packed;

/* Fields of the OPT word in a PaRAM slot (edmacc_param.opt) */
0152 #define SAM BIT(0)
0153 #define DAM BIT(1)
0154 #define SYNCDIM BIT(2)
0155 #define STATIC BIT(3)
0156 #define EDMA_FWID (0x07 << 8)
0157 #define TCCMODE BIT(11)
0158 #define EDMA_TCC(t) ((t) << 12)
0159 #define TCINTEN BIT(20)
0160 #define ITCINTEN BIT(21)
0161 #define TCCHEN BIT(22)
0162 #define ITCCHEN BIT(23)
0163
0164 struct edma_pset {
0165 u32 len;
0166 dma_addr_t addr;
0167 struct edmacc_param param;
0168 };
0169
0170 struct edma_desc {
0171 struct virt_dma_desc vdesc;
0172 struct list_head node;
0173 enum dma_transfer_direction direction;
0174 int cyclic;
0175 bool polled;
0176 int absync;
0177 int pset_nr;
0178 struct edma_chan *echan;
0179 int processed;

/*
 * The following four fields are used for residue accounting.
 *
 * - processed_stat: number of SG elements already accounted for in the
 *   residue; always <= processed, since 'processed' counts the elements
 *   that have been programmed to the EDMA controller so far.
 * - sg_len: length of the current intermediate transfer, needed to
 *   update the residue when an intermediate transfer completes.
 * - residue: bytes left to transfer for this descriptor.
 * - residue_stat: residue snapshot used by edma_residue() while walking
 *   the already processed psets.
 */
0201 int processed_stat;
0202 u32 sg_len;
0203 u32 residue;
0204 u32 residue_stat;
0205
0206 struct edma_pset pset[];
0207 };
0208
0209 struct edma_cc;
0210
0211 struct edma_tc {
0212 struct device_node *node;
0213 u16 id;
0214 };
0215
0216 struct edma_chan {
0217 struct virt_dma_chan vchan;
0218 struct list_head node;
0219 struct edma_desc *edesc;
0220 struct edma_cc *ecc;
0221 struct edma_tc *tc;
0222 int ch_num;
0223 bool alloced;
0224 bool hw_triggered;
0225 int slot[EDMA_MAX_SLOTS];
0226 int missed;
0227 struct dma_slave_config cfg;
0228 };
0229
0230 struct edma_cc {
0231 struct device *dev;
0232 struct edma_soc_info *info;
0233 void __iomem *base;
0234 int id;
0235 bool legacy_mode;

/* eDMA3 resource information */
0238 unsigned num_channels;
0239 unsigned num_qchannels;
0240 unsigned num_region;
0241 unsigned num_slots;
0242 unsigned num_tc;
0243 bool chmap_exist;
0244 enum dma_event_q default_queue;
0245
0246 unsigned int ccint;
0247 unsigned int ccerrint;

/*
 * The slot_inuse bit for each PaRAM slot is set while the slot is
 * allocated (or reserved for another user such as a DSP).
 */
0253 unsigned long *slot_inuse;

/*
 * For tracking reserved channels used by DSP.
 * If the bit is cleared, the channel is allocated to be used by DSP
 * and Linux must not touch it.
 */
0260 unsigned long *channels_mask;
0261
0262 struct dma_device dma_slave;
0263 struct dma_device *dma_memcpy;
0264 struct edma_chan *slave_chans;
0265 struct edma_tc *tc_list;
0266 int dummy_slot;
0267 };

/* Dummy PaRAM set used to (re)initialize parameter RAM slots */
0270 static const struct edmacc_param dummy_paramset = {
0271 .link_bcntrld = 0xffff,
0272 .ccnt = 1,
0273 };
0274
0275 #define EDMA_BINDING_LEGACY 0
0276 #define EDMA_BINDING_TPCC 1
0277 static const u32 edma_binding_type[] = {
0278 [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
0279 [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
0280 };
0281
0282 static const struct of_device_id edma_of_ids[] = {
0283 {
0284 .compatible = "ti,edma3",
0285 .data = &edma_binding_type[EDMA_BINDING_LEGACY],
0286 },
0287 {
0288 .compatible = "ti,edma3-tpcc",
0289 .data = &edma_binding_type[EDMA_BINDING_TPCC],
0290 },
0291 {}
0292 };
0293 MODULE_DEVICE_TABLE(of, edma_of_ids);
0294
0295 static const struct of_device_id edma_tptc_of_ids[] = {
0296 { .compatible = "ti,edma3-tptc", },
0297 {}
0298 };
0299 MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
0300
0301 static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
0302 {
0303 return (unsigned int)__raw_readl(ecc->base + offset);
0304 }
0305
0306 static inline void edma_write(struct edma_cc *ecc, int offset, int val)
0307 {
0308 __raw_writel(val, ecc->base + offset);
0309 }
0310
0311 static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
0312 unsigned or)
0313 {
0314 unsigned val = edma_read(ecc, offset);
0315
0316 val &= and;
0317 val |= or;
0318 edma_write(ecc, offset, val);
0319 }
0320
0321 static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
0322 {
0323 unsigned val = edma_read(ecc, offset);
0324
0325 val &= and;
0326 edma_write(ecc, offset, val);
0327 }
0328
0329 static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
0330 {
0331 unsigned val = edma_read(ecc, offset);
0332
0333 val |= or;
0334 edma_write(ecc, offset, val);
0335 }
0336
0337 static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
0338 int i)
0339 {
0340 return edma_read(ecc, offset + (i << 2));
0341 }
0342
0343 static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
0344 unsigned val)
0345 {
0346 edma_write(ecc, offset + (i << 2), val);
0347 }
0348
0349 static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
0350 unsigned and, unsigned or)
0351 {
0352 edma_modify(ecc, offset + (i << 2), and, or);
0353 }
0354
0355 static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
0356 unsigned or)
0357 {
0358 edma_or(ecc, offset + (i << 2), or);
0359 }
0360
0361 static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
0362 unsigned or)
0363 {
0364 edma_or(ecc, offset + ((i * 2 + j) << 2), or);
0365 }
0366
0367 static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
0368 int j, unsigned val)
0369 {
0370 edma_write(ecc, offset + ((i * 2 + j) << 2), val);
0371 }
0372
0373 static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
0374 {
0375 return edma_read(ecc, EDMA_SHADOW0 + offset);
0376 }
0377
0378 static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
0379 int offset, int i)
0380 {
0381 return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
0382 }
0383
0384 static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
0385 unsigned val)
0386 {
0387 edma_write(ecc, EDMA_SHADOW0 + offset, val);
0388 }
0389
0390 static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
0391 int i, unsigned val)
0392 {
0393 edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
0394 }
0395
0396 static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
0397 int param_no)
0398 {
0399 return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
0400 }
0401
0402 static inline void edma_param_write(struct edma_cc *ecc, int offset,
0403 int param_no, unsigned val)
0404 {
0405 edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
0406 }
0407
0408 static inline void edma_param_modify(struct edma_cc *ecc, int offset,
0409 int param_no, unsigned and, unsigned or)
0410 {
0411 edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
0412 }
0413
0414 static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
0415 unsigned and)
0416 {
0417 edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
0418 }
0419
0420 static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
0421 unsigned or)
0422 {
0423 edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
0424 }
0425
0426 static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
0427 int priority)
0428 {
0429 int bit = queue_no * 4;
0430
0431 edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
0432 }
0433
0434 static void edma_set_chmap(struct edma_chan *echan, int slot)
0435 {
0436 struct edma_cc *ecc = echan->ecc;
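/*
 * Note: ch_num carries the controller id in its upper bits (see
 * EDMA_CTLR_CHAN() in the platform_data edma header); EDMA_CHAN_SLOT()
 * strips it to get the raw channel/slot index used for register accesses.
 */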
0437 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0438
0439 if (ecc->chmap_exist) {
0440 slot = EDMA_CHAN_SLOT(slot);
0441 edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
0442 }
0443 }
0444
0445 static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
0446 {
0447 struct edma_cc *ecc = echan->ecc;
0448 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0449 int idx = EDMA_REG_ARRAY_INDEX(channel);
0450 int ch_bit = EDMA_CHANNEL_BIT(channel);
0451
0452 if (enable) {
0453 edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
0454 edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
0455 } else {
0456 edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
0457 }
0458 }
0459
/*
 * PaRAM slot management functions
 */
0463 static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
0464 const struct edmacc_param *param)
0465 {
0466 slot = EDMA_CHAN_SLOT(slot);
0467 if (slot >= ecc->num_slots)
0468 return;
0469 memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
0470 }
0471
0472 static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
0473 struct edmacc_param *param)
0474 {
0475 slot = EDMA_CHAN_SLOT(slot);
0476 if (slot >= ecc->num_slots)
0477 return -EINVAL;
0478 memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
0479
0480 return 0;
0481 }
0482

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
0498 static int edma_alloc_slot(struct edma_cc *ecc, int slot)
0499 {
0500 if (slot >= 0) {
0501 slot = EDMA_CHAN_SLOT(slot);
0502
0503 if (ecc->chmap_exist && slot < ecc->num_channels)
0504 slot = EDMA_SLOT_ANY;
0505 }
0506
0507 if (slot < 0) {
0508 if (ecc->chmap_exist)
0509 slot = 0;
0510 else
0511 slot = ecc->num_channels;
0512 for (;;) {
0513 slot = find_next_zero_bit(ecc->slot_inuse,
0514 ecc->num_slots,
0515 slot);
0516 if (slot == ecc->num_slots)
0517 return -ENOMEM;
0518 if (!test_and_set_bit(slot, ecc->slot_inuse))
0519 break;
0520 }
0521 } else if (slot >= ecc->num_slots) {
0522 return -EINVAL;
0523 } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
0524 return -EBUSY;
0525 }
0526
0527 edma_write_slot(ecc, slot, &dummy_paramset);
0528
0529 return EDMA_CTLR_CHAN(ecc->id, slot);
0530 }
0531
0532 static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
0533 {
0534 slot = EDMA_CHAN_SLOT(slot);
0535 if (slot >= ecc->num_slots)
0536 return;
0537
0538 edma_write_slot(ecc, slot, &dummy_paramset);
0539 clear_bit(slot, ecc->slot_inuse);
0540 }
0541
/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
0550 static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
0551 {
0552 if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
0553 dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
0554
0555 from = EDMA_CHAN_SLOT(from);
0556 to = EDMA_CHAN_SLOT(to);
0557 if (from >= ecc->num_slots || to >= ecc->num_slots)
0558 return;
0559
0560 edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
0561 PARM_OFFSET(to));
0562 }

/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
0572 static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
0573 bool dst)
0574 {
0575 u32 offs;
0576
0577 slot = EDMA_CHAN_SLOT(slot);
0578 offs = PARM_OFFSET(slot);
0579 offs += dst ? PARM_DST : PARM_SRC;
0580
0581 return edma_read(ecc, offs);
0582 }
0583
/*
 * Start a transfer on the channel: channels without a hardware event
 * association are triggered manually via the event set register, while
 * hardware triggered channels have stale events and errors cleared and
 * event reception (re)enabled.
 */
0590 static void edma_start(struct edma_chan *echan)
0591 {
0592 struct edma_cc *ecc = echan->ecc;
0593 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0594 int idx = EDMA_REG_ARRAY_INDEX(channel);
0595 int ch_bit = EDMA_CHANNEL_BIT(channel);
0596
0597 if (!echan->hw_triggered) {
0598
0599 dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
0600 edma_shadow0_read_array(ecc, SH_ESR, idx));
0601 edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
0602 } else {
0603
0604 dev_dbg(ecc->dev, "ER%d %08x\n", idx,
0605 edma_shadow0_read_array(ecc, SH_ER, idx));
0606
0607 edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
0608 edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
0609
0610 edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
0611 edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
0612 dev_dbg(ecc->dev, "EER%d %08x\n", idx,
0613 edma_shadow0_read_array(ecc, SH_EER, idx));
0614 }
0615 }
0616
0617 static void edma_stop(struct edma_chan *echan)
0618 {
0619 struct edma_cc *ecc = echan->ecc;
0620 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0621 int idx = EDMA_REG_ARRAY_INDEX(channel);
0622 int ch_bit = EDMA_CHANNEL_BIT(channel);
0623
0624 edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
0625 edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
0626 edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
0627 edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);

/* Clear possibly pending completion interrupt */
0630 edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
0631
0632 dev_dbg(ecc->dev, "EER%d %08x\n", idx,
0633 edma_shadow0_read_array(ecc, SH_EER, idx));
0634
0635
0636
0637
0638 }
0639
/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
0644 static void edma_pause(struct edma_chan *echan)
0645 {
0646 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0647
0648 edma_shadow0_write_array(echan->ecc, SH_EECR,
0649 EDMA_REG_ARRAY_INDEX(channel),
0650 EDMA_CHANNEL_BIT(channel));
0651 }

/* Re-enable EDMA hardware events on the specified channel */
0654 static void edma_resume(struct edma_chan *echan)
0655 {
0656 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0657
0658 edma_shadow0_write_array(echan->ecc, SH_EESR,
0659 EDMA_REG_ARRAY_INDEX(channel),
0660 EDMA_CHANNEL_BIT(channel));
0661 }
0662
0663 static void edma_trigger_channel(struct edma_chan *echan)
0664 {
0665 struct edma_cc *ecc = echan->ecc;
0666 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0667 int idx = EDMA_REG_ARRAY_INDEX(channel);
0668 int ch_bit = EDMA_CHANNEL_BIT(channel);
0669
0670 edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
0671
0672 dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
0673 edma_shadow0_read_array(ecc, SH_ESR, idx));
0674 }
0675
0676 static void edma_clean_channel(struct edma_chan *echan)
0677 {
0678 struct edma_cc *ecc = echan->ecc;
0679 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0680 int idx = EDMA_REG_ARRAY_INDEX(channel);
0681 int ch_bit = EDMA_CHANNEL_BIT(channel);
0682
0683 dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
0684 edma_read_array(ecc, EDMA_EMR, idx));
0685 edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
0686
0687 edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
0688
0689 edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
0690 edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
0691 }

/* Move the channel to the given event queue (and its transfer controller) */
0694 static void edma_assign_channel_eventq(struct edma_chan *echan,
0695 enum dma_event_q eventq_no)
0696 {
0697 struct edma_cc *ecc = echan->ecc;
0698 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0699 int bit = (channel & 0x7) * 4;
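
/*
 * The DMAQNUM write below places, for example, channel 10's queue number
 * at register index 10 >> 3 = 1, bits 8..10 ((10 & 0x7) * 4 = 8).
 */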

/* Map EVENTQ_DEFAULT to the controller's default queue */
0702 if (eventq_no == EVENTQ_DEFAULT)
0703 eventq_no = ecc->default_queue;
0704 if (eventq_no >= ecc->num_tc)
0705 return;
0706
0707 eventq_no &= 7;
0708 edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
0709 eventq_no << bit);
0710 }
0711
0712 static int edma_alloc_channel(struct edma_chan *echan,
0713 enum dma_event_q eventq_no)
0714 {
0715 struct edma_cc *ecc = echan->ecc;
0716 int channel = EDMA_CHAN_SLOT(echan->ch_num);
0717
0718 if (!test_bit(echan->ch_num, ecc->channels_mask)) {
0719 dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
0720 echan->ch_num);
0721 return -EINVAL;
0722 }

/* Ensure access through shadow region 0 */
0725 edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
0726 EDMA_CHANNEL_BIT(channel));

/* Ensure no events are pending */
0729 edma_stop(echan);
0730
0731 edma_setup_interrupt(echan, true);
0732
0733 edma_assign_channel_eventq(echan, eventq_no);
0734
0735 return 0;
0736 }
0737
0738 static void edma_free_channel(struct edma_chan *echan)
0739 {
0740
0741 edma_stop(echan);
0742
0743 edma_setup_interrupt(echan, false);
0744 }
0745
0746 static inline struct edma_cc *to_edma_cc(struct dma_device *d)
0747 {
0748 return container_of(d, struct edma_cc, dma_slave);
0749 }
0750
0751 static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
0752 {
0753 return container_of(c, struct edma_chan, vchan.chan);
0754 }
0755
0756 static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
0757 {
0758 return container_of(tx, struct edma_desc, vdesc.tx);
0759 }
0760
0761 static void edma_desc_free(struct virt_dma_desc *vdesc)
0762 {
0763 kfree(container_of(vdesc, struct edma_desc, vdesc));
0764 }

/* Dispatch a queued descriptor to the hardware; caller holds the vchan lock */
0767 static void edma_execute(struct edma_chan *echan)
0768 {
0769 struct edma_cc *ecc = echan->ecc;
0770 struct virt_dma_desc *vdesc;
0771 struct edma_desc *edesc;
0772 struct device *dev = echan->vchan.chan.device->dev;
0773 int i, j, left, nslots;
0774
0775 if (!echan->edesc) {
0776
0777 vdesc = vchan_next_desc(&echan->vchan);
0778 if (!vdesc)
0779 return;
0780 list_del(&vdesc->node);
0781 echan->edesc = to_edma_desc(&vdesc->tx);
0782 }
0783
0784 edesc = echan->edesc;
0785
0786
0787 left = edesc->pset_nr - edesc->processed;
0788 nslots = min(MAX_NR_SG, left);
0789 edesc->sg_len = 0;
0790
0791
0792 for (i = 0; i < nslots; i++) {
0793 j = i + edesc->processed;
0794 edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
0795 edesc->sg_len += edesc->pset[j].len;
0796 dev_vdbg(dev,
0797 "\n pset[%d]:\n"
0798 " chnum\t%d\n"
0799 " slot\t%d\n"
0800 " opt\t%08x\n"
0801 " src\t%08x\n"
0802 " dst\t%08x\n"
0803 " abcnt\t%08x\n"
0804 " ccnt\t%08x\n"
0805 " bidx\t%08x\n"
0806 " cidx\t%08x\n"
0807 " lkrld\t%08x\n",
0808 j, echan->ch_num, echan->slot[i],
0809 edesc->pset[j].param.opt,
0810 edesc->pset[j].param.src,
0811 edesc->pset[j].param.dst,
0812 edesc->pset[j].param.a_b_cnt,
0813 edesc->pset[j].param.ccnt,
0814 edesc->pset[j].param.src_dst_bidx,
0815 edesc->pset[j].param.src_dst_cidx,
0816 edesc->pset[j].param.link_bcntrld);
0817
0818 if (i != (nslots - 1))
0819 edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
0820 }
0821
0822 edesc->processed += nslots;
0823
/*
 * When the last pset of the descriptor has been programmed, close the
 * chain: cyclic transfers link the final slot back into the ring, all
 * other transfers link it to the dummy slot so that any further events
 * are absorbed.
 */
0829 if (edesc->processed == edesc->pset_nr) {
0830 if (edesc->cyclic)
0831 edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
0832 else
0833 edma_link(ecc, echan->slot[nslots - 1],
0834 echan->ecc->dummy_slot);
0835 }
0836
0837 if (echan->missed) {
/*
 * A missed event occurred while the channel was stopped between
 * intermediate transfers of a long SG list; clean the channel and
 * manually (re)trigger it to resume.
 */
0843 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
0844 edma_clean_channel(echan);
0845 edma_stop(echan);
0846 edma_start(echan);
0847 edma_trigger_channel(echan);
0848 echan->missed = 0;
0849 } else if (edesc->processed <= MAX_NR_SG) {
0850 dev_dbg(dev, "first transfer starting on channel %d\n",
0851 echan->ch_num);
0852 edma_start(echan);
0853 } else {
0854 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
0855 echan->ch_num, edesc->processed);
0856 edma_resume(echan);
0857 }
0858 }
0859
0860 static int edma_terminate_all(struct dma_chan *chan)
0861 {
0862 struct edma_chan *echan = to_edma_chan(chan);
0863 unsigned long flags;
0864 LIST_HEAD(head);
0865
0866 spin_lock_irqsave(&echan->vchan.lock, flags);

/*
 * Stop DMA activity: we assume the callback will not be called
 * after edma_terminate_all() returns (even if it does, it will see
 * echan->edesc is NULL and exit.)
 */
0873 if (echan->edesc) {
0874 edma_stop(echan);
/* Move the cyclic channel back to default queue */
0876 if (!echan->tc && echan->edesc->cyclic)
0877 edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
0878
0879 vchan_terminate_vdesc(&echan->edesc->vdesc);
0880 echan->edesc = NULL;
0881 }
0882
0883 vchan_get_all_descriptors(&echan->vchan, &head);
0884 spin_unlock_irqrestore(&echan->vchan.lock, flags);
0885 vchan_dma_desc_free_list(&echan->vchan, &head);
0886
0887 return 0;
0888 }
0889
0890 static void edma_synchronize(struct dma_chan *chan)
0891 {
0892 struct edma_chan *echan = to_edma_chan(chan);
0893
0894 vchan_synchronize(&echan->vchan);
0895 }
0896
0897 static int edma_slave_config(struct dma_chan *chan,
0898 struct dma_slave_config *cfg)
0899 {
0900 struct edma_chan *echan = to_edma_chan(chan);
0901
0902 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
0903 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
0904 return -EINVAL;
0905
0906 if (cfg->src_maxburst > chan->device->max_burst ||
0907 cfg->dst_maxburst > chan->device->max_burst)
0908 return -EINVAL;
0909
0910 memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
0911
0912 return 0;
0913 }
0914
0915 static int edma_dma_pause(struct dma_chan *chan)
0916 {
0917 struct edma_chan *echan = to_edma_chan(chan);
0918
0919 if (!echan->edesc)
0920 return -EINVAL;
0921
0922 edma_pause(echan);
0923 return 0;
0924 }
0925
0926 static int edma_dma_resume(struct dma_chan *chan)
0927 {
0928 struct edma_chan *echan = to_edma_chan(chan);
0929
0930 edma_resume(echan);
0931 return 0;
0932 }
0933
/*
 * Set up one PaRAM set for a chunk of a transfer; shared by the slave SG,
 * cyclic and memcpy prep callbacks.
 * @chan: channel whose PaRAM set is being configured
 * @epset: pset to initialize
 * @src_addr: source DMA address
 * @dst_addr: destination DMA address
 * @burst: maxburst in units of @acnt (elements per frame)
 * @acnt: element size in bytes (the device/FIFO width)
 * @dma_length: total length of this chunk in bytes
 * @direction: transfer direction
 *
 * Returns 0 for A-synchronized, 1 for AB-synchronized setup, or -EINVAL.
 */
0945 static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
0946 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
0947 unsigned int acnt, unsigned int dma_length,
0948 enum dma_transfer_direction direction)
0949 {
0950 struct edma_chan *echan = to_edma_chan(chan);
0951 struct device *dev = chan->device->dev;
0952 struct edmacc_param *param = &epset->param;
0953 int bcnt, ccnt, cidx;
0954 int src_bidx, dst_bidx, src_cidx, dst_cidx;
0955 int absync;
0956
0957
0958 if (!burst)
0959 burst = 1;
/*
 * A burst of one element means every event moves a single element:
 * use A-synchronized transfers so that a large contiguous buffer can
 * still be covered by a single PaRAM set (BCNT/CCNT computed below).
 */
0965 if (burst == 1) {
/*
 * For the A-sync case, bcnt and ccnt are the remainder and quotient
 * respectively of (dma_length / acnt) divided by (SZ_64K - 1). This
 * is so that in case bcnt overflows, we have ccnt to use.
 * Note: In A-sync transfer only, bcntrld is used, but it only applies
 * for sg_dma_len(sg) >= SZ_64K. In this case, the best way adopted
 * is: bcnt for the first frame will be the remainder below, then for
 * every successive frame bcnt will be SZ_64K - 1. This is assured
 * because bcntrld is set to 0xffff at the end of this function.
 */
0978 absync = false;
0979 ccnt = dma_length / acnt / (SZ_64K - 1);
0980 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);

/*
 * If bcnt is non-zero, we have a remainder and hence an extra frame
 * to transfer, so increment ccnt.
 */
0985 if (bcnt)
0986 ccnt++;
0987 else
0988 bcnt = SZ_64K - 1;
0989 cidx = acnt;
0990 } else {
/*
 * If maxburst is greater than the fifo address_width, use
 * AB-synced transfers where A count is the fifo address_width and
 * B count is the maxburst. In this case, we are limited to
 * transfers of C count frames of (address_width * maxburst) where
 * C count is limited to SZ_64K - 1. This places an upper bound on
 * the length of an SG segment that can be handled.
 */
1000 absync = true;
1001 bcnt = burst;
1002 ccnt = dma_length / (acnt * bcnt);
1003 if (ccnt > (SZ_64K - 1)) {
1004 dev_err(dev, "Exceeded max SG segment size\n");
1005 return -EINVAL;
1006 }
1007 cidx = acnt * bcnt;
1008 }
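
/*
 * Worked example (A-sync, illustrative numbers): acnt = 4 and
 * dma_length = 1 MiB give 262144 elements; ccnt = 262144 / 65535 = 4 and
 * bcnt = 262144 - 4 * 65535 = 4, so ccnt is bumped to 5 and BCNTRLD
 * (0xffff, set below) reloads BCNT to 65535 for the remaining frames.
 */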
1009
1010 epset->len = dma_length;
1011
1012 if (direction == DMA_MEM_TO_DEV) {
1013 src_bidx = acnt;
1014 src_cidx = cidx;
1015 dst_bidx = 0;
1016 dst_cidx = 0;
1017 epset->addr = src_addr;
1018 } else if (direction == DMA_DEV_TO_MEM) {
1019 src_bidx = 0;
1020 src_cidx = 0;
1021 dst_bidx = acnt;
1022 dst_cidx = cidx;
1023 epset->addr = dst_addr;
1024 } else if (direction == DMA_MEM_TO_MEM) {
1025 src_bidx = acnt;
1026 src_cidx = cidx;
1027 dst_bidx = acnt;
1028 dst_cidx = cidx;
1029 epset->addr = src_addr;
1030 } else {
1031 dev_err(dev, "%s: direction not implemented yet\n", __func__);
1032 return -EINVAL;
1033 }
1034
1035 param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
1036
1037 if (absync)
1038 param->opt |= SYNCDIM;
1039
1040 param->src = src_addr;
1041 param->dst = dst_addr;
1042
1043 param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
1044 param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
1045
1046 param->a_b_cnt = bcnt << 16 | acnt;
1047 param->ccnt = ccnt;

/*
 * Only time when (bcntrld) auto reload is required is for A-sync
 * case, and in this case, a requirement of reload value of SZ_64K-1
 * only is assured. 'link' is initially set to NULL and then later
 * will be populated by edma_execute.
 */
1054 param->link_bcntrld = 0xffffffff;
1055 return absync;
1056 }
1057
1058 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
1059 struct dma_chan *chan, struct scatterlist *sgl,
1060 unsigned int sg_len, enum dma_transfer_direction direction,
1061 unsigned long tx_flags, void *context)
1062 {
1063 struct edma_chan *echan = to_edma_chan(chan);
1064 struct device *dev = chan->device->dev;
1065 struct edma_desc *edesc;
1066 dma_addr_t src_addr = 0, dst_addr = 0;
1067 enum dma_slave_buswidth dev_width;
1068 u32 burst;
1069 struct scatterlist *sg;
1070 int i, nslots, ret;
1071
1072 if (unlikely(!echan || !sgl || !sg_len))
1073 return NULL;
1074
1075 if (direction == DMA_DEV_TO_MEM) {
1076 src_addr = echan->cfg.src_addr;
1077 dev_width = echan->cfg.src_addr_width;
1078 burst = echan->cfg.src_maxburst;
1079 } else if (direction == DMA_MEM_TO_DEV) {
1080 dst_addr = echan->cfg.dst_addr;
1081 dev_width = echan->cfg.dst_addr_width;
1082 burst = echan->cfg.dst_maxburst;
1083 } else {
1084 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1085 return NULL;
1086 }
1087
1088 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1089 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1090 return NULL;
1091 }
1092
1093 edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
1094 if (!edesc)
1095 return NULL;
1096
1097 edesc->pset_nr = sg_len;
1098 edesc->residue = 0;
1099 edesc->direction = direction;
1100 edesc->echan = echan;
1101
1102
1103 nslots = min_t(unsigned, MAX_NR_SG, sg_len);
1104
1105 for (i = 0; i < nslots; i++) {
1106 if (echan->slot[i] < 0) {
1107 echan->slot[i] =
1108 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1109 if (echan->slot[i] < 0) {
1110 kfree(edesc);
1111 dev_err(dev, "%s: Failed to allocate slot\n",
1112 __func__);
1113 return NULL;
1114 }
1115 }
1116 }

/* Configure PaRAM sets for each SG */
1119 for_each_sg(sgl, sg, sg_len, i) {
1120
1121 if (direction == DMA_DEV_TO_MEM)
1122 dst_addr = sg_dma_address(sg);
1123 else
1124 src_addr = sg_dma_address(sg);
1125
1126 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1127 dst_addr, burst, dev_width,
1128 sg_dma_len(sg), direction);
1129 if (ret < 0) {
1130 kfree(edesc);
1131 return NULL;
1132 }
1133
1134 edesc->absync = ret;
1135 edesc->residue += sg_dma_len(sg);
1136
1137 if (i == sg_len - 1)
/* Enable completion interrupt on the last SG element */
1139 edesc->pset[i].param.opt |= TCINTEN;
1140 else if (!((i+1) % MAX_NR_SG))
/*
 * Enable early completion interrupt for the intermediate set:
 * the driver is notified when this PaRAM set is submitted to the
 * TC (TCCMODE), which leaves more time to set up the next batch
 * of MAX_NR_SG slots.
 */
1147 edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
1148 }
1149 edesc->residue_stat = edesc->residue;
1150
1151 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1152 }
1153
1154 static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1155 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1156 size_t len, unsigned long tx_flags)
1157 {
1158 int ret, nslots;
1159 struct edma_desc *edesc;
1160 struct device *dev = chan->device->dev;
1161 struct edma_chan *echan = to_edma_chan(chan);
1162 unsigned int width, pset_len, array_size;
1163
1164 if (unlikely(!echan || !len))
1165 return NULL;
1166
/* Pick the array size (ACNT block) to match the transfer alignment */
1168 switch (__ffs((src | dest | len))) {
1169 case 0:
1170 array_size = SZ_32K - 1;
1171 break;
1172 case 1:
1173 array_size = SZ_32K - 2;
1174 break;
1175 default:
1176 array_size = SZ_32K - 4;
1177 break;
1178 }
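
/*
 * Illustration: if src, dest and len are all at least 4-byte aligned
 * (__ffs >= 2), array_size is SZ_32K - 4 so every array stays a
 * multiple of the access width; byte alignment falls back to SZ_32K - 1.
 */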
1179
1180 if (len < SZ_64K) {
/*
 * Transfer sizes below 64K can be handled with a single PaRAM
 * slot and a single "burst": ACNT = length.
 */
1186 width = len;
1187 pset_len = len;
1188 nslots = 1;
1189 } else {
/*
 * Transfer sizes of 64K or more are handled with at most two PaRAM
 * slots:
 * slot1: rounddown(len, array_size) bytes as (len / array_size)
 *        arrays of ACNT = array_size bytes each.
 * slot2: the remaining len % array_size bytes (ACNT = remainder).
 *
 * When len is a multiple of array_size, a single slot is enough.
 */
1201 width = array_size;
1202 pset_len = rounddown(len, width);
1203
1204 if (unlikely(pset_len == len))
1205 nslots = 1;
1206 else
1207 nslots = 2;
1208 }
1209
1210 edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
1211 if (!edesc)
1212 return NULL;
1213
1214 edesc->pset_nr = nslots;
1215 edesc->residue = edesc->residue_stat = len;
1216 edesc->direction = DMA_MEM_TO_MEM;
1217 edesc->echan = echan;
1218
1219 ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
1220 width, pset_len, DMA_MEM_TO_MEM);
1221 if (ret < 0) {
1222 kfree(edesc);
1223 return NULL;
1224 }
1225
1226 edesc->absync = ret;
1227
1228 edesc->pset[0].param.opt |= ITCCHEN;
1229 if (nslots == 1) {
1230
1231 if (tx_flags & DMA_PREP_INTERRUPT)
1232 edesc->pset[0].param.opt |= TCINTEN;
1233 } else {
1234
1235 edesc->pset[0].param.opt |= TCCHEN;
1236
1237 if (echan->slot[1] < 0) {
1238 echan->slot[1] = edma_alloc_slot(echan->ecc,
1239 EDMA_SLOT_ANY);
1240 if (echan->slot[1] < 0) {
1241 kfree(edesc);
1242 dev_err(dev, "%s: Failed to allocate slot\n",
1243 __func__);
1244 return NULL;
1245 }
1246 }
1247 dest += pset_len;
1248 src += pset_len;
1249 pset_len = width = len % array_size;
1250
1251 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
1252 width, pset_len, DMA_MEM_TO_MEM);
1253 if (ret < 0) {
1254 kfree(edesc);
1255 return NULL;
1256 }
1257
1258 edesc->pset[1].param.opt |= ITCCHEN;
1259
1260 if (tx_flags & DMA_PREP_INTERRUPT)
1261 edesc->pset[1].param.opt |= TCINTEN;
1262 }
1263
1264 if (!(tx_flags & DMA_PREP_INTERRUPT))
1265 edesc->polled = true;
1266
1267 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1268 }
1269
1270 static struct dma_async_tx_descriptor *
1271 edma_prep_dma_interleaved(struct dma_chan *chan,
1272 struct dma_interleaved_template *xt,
1273 unsigned long tx_flags)
1274 {
1275 struct device *dev = chan->device->dev;
1276 struct edma_chan *echan = to_edma_chan(chan);
1277 struct edmacc_param *param;
1278 struct edma_desc *edesc;
1279 size_t src_icg, dst_icg;
1280 int src_bidx, dst_bidx;
1281
/* Slave directions are not supported for interleaved transfers */
1283 if (is_slave_direction(xt->dir))
1284 return NULL;
1285
1286 if (xt->frame_size != 1 || xt->numf == 0)
1287 return NULL;
1288
1289 if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
1290 return NULL;
1291
1292 src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
1293 if (src_icg) {
1294 src_bidx = src_icg + xt->sgl[0].size;
1295 } else if (xt->src_inc) {
1296 src_bidx = xt->sgl[0].size;
1297 } else {
1298 dev_err(dev, "%s: SRC constant addressing is not supported\n",
1299 __func__);
1300 return NULL;
1301 }
1302
1303 dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
1304 if (dst_icg) {
1305 dst_bidx = dst_icg + xt->sgl[0].size;
1306 } else if (xt->dst_inc) {
1307 dst_bidx = xt->sgl[0].size;
1308 } else {
1309 dev_err(dev, "%s: DST constant addressing is not supported\n",
1310 __func__);
1311 return NULL;
1312 }
1313
1314 if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
1315 return NULL;
1316
1317 edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
1318 if (!edesc)
1319 return NULL;
1320
1321 edesc->direction = DMA_MEM_TO_MEM;
1322 edesc->echan = echan;
1323 edesc->pset_nr = 1;
1324
1325 param = &edesc->pset[0].param;
1326
1327 param->src = xt->src_start;
1328 param->dst = xt->dst_start;
1329 param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
1330 param->ccnt = 1;
1331 param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
1332 param->src_dst_cidx = 0;
1333
1334 param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
1335 param->opt |= ITCCHEN;
1336
1337 if (tx_flags & DMA_PREP_INTERRUPT)
1338 param->opt |= TCINTEN;
1339 else
1340 edesc->polled = true;
1341
1342 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1343 }
1344
1345 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1346 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1347 size_t period_len, enum dma_transfer_direction direction,
1348 unsigned long tx_flags)
1349 {
1350 struct edma_chan *echan = to_edma_chan(chan);
1351 struct device *dev = chan->device->dev;
1352 struct edma_desc *edesc;
1353 dma_addr_t src_addr, dst_addr;
1354 enum dma_slave_buswidth dev_width;
1355 bool use_intermediate = false;
1356 u32 burst;
1357 int i, ret, nslots;
1358
1359 if (unlikely(!echan || !buf_len || !period_len))
1360 return NULL;
1361
1362 if (direction == DMA_DEV_TO_MEM) {
1363 src_addr = echan->cfg.src_addr;
1364 dst_addr = buf_addr;
1365 dev_width = echan->cfg.src_addr_width;
1366 burst = echan->cfg.src_maxburst;
1367 } else if (direction == DMA_MEM_TO_DEV) {
1368 src_addr = buf_addr;
1369 dst_addr = echan->cfg.dst_addr;
1370 dev_width = echan->cfg.dst_addr_width;
1371 burst = echan->cfg.dst_maxburst;
1372 } else {
1373 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1374 return NULL;
1375 }
1376
1377 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1378 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1379 return NULL;
1380 }
1381
1382 if (unlikely(buf_len % period_len)) {
1383 dev_err(dev, "Period should be multiple of Buffer length\n");
1384 return NULL;
1385 }
1386
1387 nslots = (buf_len / period_len) + 1;
1388
/*
 * Cyclic DMA users such as audio cannot tolerate delays introduced
 * by cases where the number of periods is more than the maximum
 * number of SG segments the driver can handle at a time. Instead of
 * splitting the transfer as the slave SG path does, either fall back
 * to a two slot setup with intermediate interrupts (below) or refuse
 * the request.
 */
1397 if (nslots > MAX_NR_SG) {
/*
 * If the burst and period sizes are the same, we can put the whole
 * buffer into a single period and activate intermediate interrupts.
 * This will produce interrupts after each burst, which is also after
 * each desired period.
 */
1404 if (burst == period_len) {
1405 period_len = buf_len;
1406 nslots = 2;
1407 use_intermediate = true;
1408 } else {
1409 return NULL;
1410 }
1411 }
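
/*
 * Example: buf_len = 64000 and period_len = 1600 would need 41 slots;
 * with burst == period_len this collapses to the two slot scheme above,
 * otherwise the request is rejected.
 */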
1412
1413 edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
1414 if (!edesc)
1415 return NULL;
1416
1417 edesc->cyclic = 1;
1418 edesc->pset_nr = nslots;
1419 edesc->residue = edesc->residue_stat = buf_len;
1420 edesc->direction = direction;
1421 edesc->echan = echan;
1422
1423 dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
1424 __func__, echan->ch_num, nslots, period_len, buf_len);
1425
1426 for (i = 0; i < nslots; i++) {
/* Allocate a PaRAM slot, if needed */
1428 if (echan->slot[i] < 0) {
1429 echan->slot[i] =
1430 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1431 if (echan->slot[i] < 0) {
1432 kfree(edesc);
1433 dev_err(dev, "%s: Failed to allocate slot\n",
1434 __func__);
1435 return NULL;
1436 }
1437 }
1438
1439 if (i == nslots - 1) {
1440 memcpy(&edesc->pset[i], &edesc->pset[0],
1441 sizeof(edesc->pset[0]));
1442 break;
1443 }
1444
1445 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1446 dst_addr, burst, dev_width, period_len,
1447 direction);
1448 if (ret < 0) {
1449 kfree(edesc);
1450 return NULL;
1451 }
1452
1453 if (direction == DMA_DEV_TO_MEM)
1454 dst_addr += period_len;
1455 else
1456 src_addr += period_len;
1457
1458 dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
1459 dev_vdbg(dev,
1460 "\n pset[%d]:\n"
1461 " chnum\t%d\n"
1462 " slot\t%d\n"
1463 " opt\t%08x\n"
1464 " src\t%08x\n"
1465 " dst\t%08x\n"
1466 " abcnt\t%08x\n"
1467 " ccnt\t%08x\n"
1468 " bidx\t%08x\n"
1469 " cidx\t%08x\n"
1470 " lkrld\t%08x\n",
1471 i, echan->ch_num, echan->slot[i],
1472 edesc->pset[i].param.opt,
1473 edesc->pset[i].param.src,
1474 edesc->pset[i].param.dst,
1475 edesc->pset[i].param.a_b_cnt,
1476 edesc->pset[i].param.ccnt,
1477 edesc->pset[i].param.src_dst_bidx,
1478 edesc->pset[i].param.src_dst_cidx,
1479 edesc->pset[i].param.link_bcntrld);
1480
1481 edesc->absync = ret;
1482
/*
 * Enable period interrupt only if it is requested
 */
1486 if (tx_flags & DMA_PREP_INTERRUPT) {
1487 edesc->pset[i].param.opt |= TCINTEN;

/* Also enable intermediate interrupts if necessary */
1490 if (use_intermediate)
1491 edesc->pset[i].param.opt |= ITCINTEN;
1492 }
1493 }
1494
/* Place the cyclic channel to highest priority queue */
1496 if (!echan->tc)
1497 edma_assign_channel_eventq(echan, EVENTQ_0);
1498
1499 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1500 }
1501
1502 static void edma_completion_handler(struct edma_chan *echan)
1503 {
1504 struct device *dev = echan->vchan.chan.device->dev;
1505 struct edma_desc *edesc;
1506
1507 spin_lock(&echan->vchan.lock);
1508 edesc = echan->edesc;
1509 if (edesc) {
1510 if (edesc->cyclic) {
1511 vchan_cyclic_callback(&edesc->vdesc);
1512 spin_unlock(&echan->vchan.lock);
1513 return;
1514 } else if (edesc->processed == edesc->pset_nr) {
1515 edesc->residue = 0;
1516 edma_stop(echan);
1517 vchan_cookie_complete(&edesc->vdesc);
1518 echan->edesc = NULL;
1519
1520 dev_dbg(dev, "Transfer completed on channel %d\n",
1521 echan->ch_num);
1522 } else {
1523 dev_dbg(dev, "Sub transfer completed on channel %d\n",
1524 echan->ch_num);
1525
1526 edma_pause(echan);
1527
1528
1529 edesc->residue -= edesc->sg_len;
1530 edesc->residue_stat = edesc->residue;
1531 edesc->processed_stat = edesc->processed;
1532 }
1533 edma_execute(echan);
1534 }
1535
1536 spin_unlock(&echan->vchan.lock);
1537 }
1538
/* eDMA transfer completion interrupt handler */
1540 static irqreturn_t dma_irq_handler(int irq, void *data)
1541 {
1542 struct edma_cc *ecc = data;
1543 int ctlr;
1544 u32 sh_ier;
1545 u32 sh_ipr;
1546 u32 bank;
1547
1548 ctlr = ecc->id;
1549 if (ctlr < 0)
1550 return IRQ_NONE;
1551
1552 dev_vdbg(ecc->dev, "dma_irq_handler\n");
1553
1554 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
1555 if (!sh_ipr) {
1556 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
1557 if (!sh_ipr)
1558 return IRQ_NONE;
1559 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
1560 bank = 1;
1561 } else {
1562 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
1563 bank = 0;
1564 }
1565
1566 do {
1567 u32 slot;
1568 u32 channel;
1569
1570 slot = __ffs(sh_ipr);
1571 sh_ipr &= ~(BIT(slot));
1572
1573 if (sh_ier & BIT(slot)) {
1574 channel = (bank << 5) | slot;
1575
1576 edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
1577 edma_completion_handler(&ecc->slave_chans[channel]);
1578 }
1579 } while (sh_ipr);
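
/* Ask the CC to re-evaluate pending interrupts before returning */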
1580
1581 edma_shadow0_write(ecc, SH_IEVAL, 1);
1582 return IRQ_HANDLED;
1583 }
1584
1585 static void edma_error_handler(struct edma_chan *echan)
1586 {
1587 struct edma_cc *ecc = echan->ecc;
1588 struct device *dev = echan->vchan.chan.device->dev;
1589 struct edmacc_param p;
1590 int err;
1591
1592 if (!echan->edesc)
1593 return;
1594
1595 spin_lock(&echan->vchan.lock);
1596
1597 err = edma_read_slot(ecc, echan->slot[0], &p);

/*
 * Issue later based on missed flag which will be sure to happen as:
 * (1) we finished transmitting an intermediate slot and edma_execute()
 *     is coming up.
 * (2) or we finished the current transfer and issue_pending will call
 *     edma_execute().
 *
 * Important note: issuing can be dangerous here and lead to some nasty
 * recursion when we are in a NULL slot. So we avoid doing so and set
 * the missed flag.
 */
1611 if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
1612 dev_dbg(dev, "Error on null slot, setting miss\n");
1613 echan->missed = 1;
1614 } else {
/*
 * The slot is already programmed but the event got missed, so it
 * is safe to (re)trigger the channel from here.
 */
1619 dev_dbg(dev, "Missed event, TRIGGERING\n");
1620 edma_clean_channel(echan);
1621 edma_stop(echan);
1622 edma_start(echan);
1623 edma_trigger_channel(echan);
1624 }
1625 spin_unlock(&echan->vchan.lock);
1626 }
1627
1628 static inline bool edma_error_pending(struct edma_cc *ecc)
1629 {
1630 if (edma_read_array(ecc, EDMA_EMR, 0) ||
1631 edma_read_array(ecc, EDMA_EMR, 1) ||
1632 edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
1633 return true;
1634
1635 return false;
1636 }
1637
/* eDMA error interrupt handler */
1639 static irqreturn_t dma_ccerr_handler(int irq, void *data)
1640 {
1641 struct edma_cc *ecc = data;
1642 int i, j;
1643 int ctlr;
1644 unsigned int cnt = 0;
1645 unsigned int val;
1646
1647 ctlr = ecc->id;
1648 if (ctlr < 0)
1649 return IRQ_NONE;
1650
1651 dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
1652
1653 if (!edma_error_pending(ecc)) {
/*
 * The registers indicate no pending error event but the interrupt
 * handler has been called.
 * Ask eDMA to re-evaluate the error registers.
 */
1659 dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
1660 __func__);
1661 edma_write(ecc, EDMA_EEVAL, 1);
1662 return IRQ_NONE;
1663 }
1664
1665 while (1) {
1666
1667 for (j = 0; j < 2; j++) {
1668 unsigned long emr;
1669
1670 val = edma_read_array(ecc, EDMA_EMR, j);
1671 if (!val)
1672 continue;
1673
1674 dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
1675 emr = val;
1676 for_each_set_bit(i, &emr, 32) {
1677 int k = (j << 5) + i;
1678
1679
1680 edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
1681
1682 edma_shadow0_write_array(ecc, SH_SECR, j,
1683 BIT(i));
1684 edma_error_handler(&ecc->slave_chans[k]);
1685 }
1686 }
1687
1688 val = edma_read(ecc, EDMA_QEMR);
1689 if (val) {
1690 dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
1691
1692 edma_write(ecc, EDMA_QEMCR, val);
1693 edma_shadow0_write(ecc, SH_QSECR, val);
1694 }
1695
1696 val = edma_read(ecc, EDMA_CCERR);
1697 if (val) {
1698 dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
1699
1700 edma_write(ecc, EDMA_CCERRCLR, val);
1701 }
1702
1703 if (!edma_error_pending(ecc))
1704 break;
1705 cnt++;
1706 if (cnt > 10)
1707 break;
1708 }
1709 edma_write(ecc, EDMA_EEVAL, 1);
1710 return IRQ_HANDLED;
1711 }
1712
1713
1714 static int edma_alloc_chan_resources(struct dma_chan *chan)
1715 {
1716 struct edma_chan *echan = to_edma_chan(chan);
1717 struct edma_cc *ecc = echan->ecc;
1718 struct device *dev = ecc->dev;
1719 enum dma_event_q eventq_no = EVENTQ_DEFAULT;
1720 int ret;
1721
1722 if (echan->tc) {
1723 eventq_no = echan->tc->id;
1724 } else if (ecc->tc_list) {
1725
1726 echan->tc = &ecc->tc_list[ecc->info->default_queue];
1727 eventq_no = echan->tc->id;
1728 }
1729
1730 ret = edma_alloc_channel(echan, eventq_no);
1731 if (ret)
1732 return ret;
1733
1734 echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
1735 if (echan->slot[0] < 0) {
1736 dev_err(dev, "Entry slot allocation failed for channel %u\n",
1737 EDMA_CHAN_SLOT(echan->ch_num));
1738 ret = echan->slot[0];
1739 goto err_slot;
1740 }
1741
/* Set up channel -> slot mapping for the entry slot */
1743 edma_set_chmap(echan, echan->slot[0]);
1744 echan->alloced = true;
1745
1746 dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
1747 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
1748 echan->hw_triggered ? "HW" : "SW");
1749
1750 return 0;
1751
1752 err_slot:
1753 edma_free_channel(echan);
1754 return ret;
1755 }
1756
1757
1758 static void edma_free_chan_resources(struct dma_chan *chan)
1759 {
1760 struct edma_chan *echan = to_edma_chan(chan);
1761 struct device *dev = echan->ecc->dev;
1762 int i;
1763
1764
1765 edma_stop(echan);
1766
1767 vchan_free_chan_resources(&echan->vchan);
1768
1769
1770 for (i = 0; i < EDMA_MAX_SLOTS; i++) {
1771 if (echan->slot[i] >= 0) {
1772 edma_free_slot(echan->ecc, echan->slot[i]);
1773 echan->slot[i] = -1;
1774 }
1775 }
1776
1777
1778 edma_set_chmap(echan, echan->ecc->dummy_slot);
1779
1780
1781 if (echan->alloced) {
1782 edma_free_channel(echan);
1783 echan->alloced = false;
1784 }
1785
1786 echan->tc = NULL;
1787 echan->hw_triggered = false;
1788
1789 dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
1790 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
1791 }
1792
1793
1794 static void edma_issue_pending(struct dma_chan *chan)
1795 {
1796 struct edma_chan *echan = to_edma_chan(chan);
1797 unsigned long flags;
1798
1799 spin_lock_irqsave(&echan->vchan.lock, flags);
1800 if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
1801 edma_execute(echan);
1802 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1803 }
1804

/*
 * This limit exists to avoid a possible infinite loop when waiting
 * for proof that a particular transfer is completed. This limit can
 * be hit if there are large bursts to/from slow devices or the CPU
 * is never able to catch the monitored DMA position while it is
 * being updated.
 */
1812 #define EDMA_MAX_TR_WAIT_LOOPS 1000
1813
1814 static u32 edma_residue(struct edma_desc *edesc)
1815 {
1816 bool dst = edesc->direction == DMA_DEV_TO_MEM;
1817 int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1818 struct edma_chan *echan = edesc->echan;
1819 struct edma_pset *pset = edesc->pset;
1820 dma_addr_t done, pos, pos_old;
1821 int channel = EDMA_CHAN_SLOT(echan->ch_num);
1822 int idx = EDMA_REG_ARRAY_INDEX(channel);
1823 int ch_bit = EDMA_CHANNEL_BIT(channel);
1824 int event_reg;
1825 int i;
1826
/*
 * We always read the dst/src position from the first PaRAM slot of
 * the channel: that is the one which is currently active.
 */
1831 pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1832

/*
 * "pos" may represent a transfer request that is still being
 * processed by the EDMA CC or TC. We will busy wait until any one
 * of the following situations occurs:
 *   1. the event for the channel is no longer pending
 *   2. the position updated
 *   3. we hit the loop limit
 */
1841 if (is_slave_direction(edesc->direction))
1842 event_reg = SH_ER;
1843 else
1844 event_reg = SH_ESR;
1845
1846 pos_old = pos;
1847 while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
1848 pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1849 if (pos != pos_old)
1850 break;
1851
1852 if (!--loop_count) {
1853 dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1854 "%s: timeout waiting for PaRAM update\n",
1855 __func__);
1856 break;
1857 }
1858
1859 cpu_relax();
1860 }
1861
/*
 * Cyclic is simple. Just subtract pset[0].addr from pos.
 *
 * We never update edesc->residue in the cyclic case, so we can tell
 * the remaining room to the end of the circular buffer.
 */
1869 if (edesc->cyclic) {
1870 done = pos - pset->addr;
1871 edesc->residue_stat = edesc->residue - done;
1872 return edesc->residue_stat;
1873 }
1874
/*
 * If the position is 0, then EDMA has loaded the link (dummy) set:
 * the programmed transfer is complete, report no residue.
 */
1879 if (!pos)
1880 return 0;
1881
/*
 * For SG operation we catch up with the last processed pset.
 */
1885 pset += edesc->processed_stat;
1886
1887 for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
/*
 * If we are inside this pset address range, we know this is the
 * active one. Get the current delta and stop walking the psets.
 */
1893 if (pos >= pset->addr && pos < pset->addr + pset->len)
1894 return edesc->residue_stat - (pos - pset->addr);

/* Otherwise mark it done and update residue_stat. */
1897 edesc->processed_stat++;
1898 edesc->residue_stat -= pset->len;
1899 }
1900 return edesc->residue_stat;
1901 }
1902
1903
1904 static enum dma_status edma_tx_status(struct dma_chan *chan,
1905 dma_cookie_t cookie,
1906 struct dma_tx_state *txstate)
1907 {
1908 struct edma_chan *echan = to_edma_chan(chan);
1909 struct dma_tx_state txstate_tmp;
1910 enum dma_status ret;
1911 unsigned long flags;
1912
1913 ret = dma_cookie_status(chan, cookie, txstate);
1914
1915 if (ret == DMA_COMPLETE)
1916 return ret;
1917
/* Provide a dummy dma_tx_state for completion checking */
1919 if (!txstate)
1920 txstate = &txstate_tmp;
1921
1922 spin_lock_irqsave(&echan->vchan.lock, flags);
1923 if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
1924 txstate->residue = edma_residue(echan->edesc);
1925 } else {
1926 struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
1927 cookie);
1928
1929 if (vdesc)
1930 txstate->residue = to_edma_desc(&vdesc->tx)->residue;
1931 else
1932 txstate->residue = 0;
1933 }

/*
 * Mark the cookie completed for a polled (no-interrupt) transfer
 * once the hardware reports no residue, since no completion
 * interrupt will ever fire for it.
 */
1939 if (ret != DMA_COMPLETE && !txstate->residue &&
1940 echan->edesc && echan->edesc->polled &&
1941 echan->edesc->vdesc.tx.cookie == cookie) {
1942 edma_stop(echan);
1943 vchan_cookie_complete(&echan->edesc->vdesc);
1944 echan->edesc = NULL;
1945 edma_execute(echan);
1946 ret = DMA_COMPLETE;
1947 }
1948
1949 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1950
1951 return ret;
1952 }
1953
1954 static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
1955 {
1956 if (!memcpy_channels)
1957 return false;
1958 while (*memcpy_channels != -1) {
1959 if (*memcpy_channels == ch_num)
1960 return true;
1961 memcpy_channels++;
1962 }
1963 return false;
1964 }
1965
1966 #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1967 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1968 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
1969 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1970
1971 static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
1972 {
1973 struct dma_device *s_ddev = &ecc->dma_slave;
1974 struct dma_device *m_ddev = NULL;
1975 s32 *memcpy_channels = ecc->info->memcpy_channels;
1976 int i, j;
1977
1978 dma_cap_zero(s_ddev->cap_mask);
1979 dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
1980 dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
1981 if (ecc->legacy_mode && !memcpy_channels) {
1982 dev_warn(ecc->dev,
1983 "Legacy memcpy is enabled, things might not work\n");
1984
1985 dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
1986 dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
1987 s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1988 s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
1989 s_ddev->directions = BIT(DMA_MEM_TO_MEM);
1990 }
1991
1992 s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
1993 s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
1994 s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1995 s_ddev->device_free_chan_resources = edma_free_chan_resources;
1996 s_ddev->device_issue_pending = edma_issue_pending;
1997 s_ddev->device_tx_status = edma_tx_status;
1998 s_ddev->device_config = edma_slave_config;
1999 s_ddev->device_pause = edma_dma_pause;
2000 s_ddev->device_resume = edma_dma_resume;
2001 s_ddev->device_terminate_all = edma_terminate_all;
2002 s_ddev->device_synchronize = edma_synchronize;
2003
2004 s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
2005 s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
2006 s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
2007 s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2008 s_ddev->max_burst = SZ_32K - 1;
2009
2010 s_ddev->dev = ecc->dev;
2011 INIT_LIST_HEAD(&s_ddev->channels);
2012
2013 if (memcpy_channels) {
2014 m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
2015 if (!m_ddev) {
2016 dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
2017 memcpy_channels = NULL;
2018 goto ch_setup;
2019 }
2020 ecc->dma_memcpy = m_ddev;
2021
2022 dma_cap_zero(m_ddev->cap_mask);
2023 dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
2024 dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
2025
2026 m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
2027 m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
2028 m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
2029 m_ddev->device_free_chan_resources = edma_free_chan_resources;
2030 m_ddev->device_issue_pending = edma_issue_pending;
2031 m_ddev->device_tx_status = edma_tx_status;
2032 m_ddev->device_config = edma_slave_config;
2033 m_ddev->device_pause = edma_dma_pause;
2034 m_ddev->device_resume = edma_dma_resume;
2035 m_ddev->device_terminate_all = edma_terminate_all;
2036 m_ddev->device_synchronize = edma_synchronize;
2037
2038 m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
2039 m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
2040 m_ddev->directions = BIT(DMA_MEM_TO_MEM);
2041 m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2042
2043 m_ddev->dev = ecc->dev;
2044 INIT_LIST_HEAD(&m_ddev->channels);
2045 } else if (!ecc->legacy_mode) {
2046 dev_info(ecc->dev, "memcpy is disabled\n");
2047 }
2048
2049 ch_setup:
2050 for (i = 0; i < ecc->num_channels; i++) {
2051 struct edma_chan *echan = &ecc->slave_chans[i];
2052 echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
2053 echan->ecc = ecc;
2054 echan->vchan.desc_free = edma_desc_free;
2055
2056 if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
2057 vchan_init(&echan->vchan, m_ddev);
2058 else
2059 vchan_init(&echan->vchan, s_ddev);
2060
2061 INIT_LIST_HEAD(&echan->node);
2062 for (j = 0; j < EDMA_MAX_SLOTS; j++)
2063 echan->slot[j] = -1;
2064 }
2065 }
2066
2067 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
2068 struct edma_cc *ecc)
2069 {
2070 int i;
2071 u32 value, cccfg;
2072 s8 (*queue_priority_map)[2];
2073
/* Decode the eDMA3 configuration from the CCCFG register */
2075 cccfg = edma_read(ecc, EDMA_CCCFG);
2076
2077 value = GET_NUM_REGN(cccfg);
2078 ecc->num_region = BIT(value);
2079
2080 value = GET_NUM_DMACH(cccfg);
2081 ecc->num_channels = BIT(value + 1);
2082
2083 value = GET_NUM_QDMACH(cccfg);
2084 ecc->num_qchannels = value * 2;
2085
2086 value = GET_NUM_PAENTRY(cccfg);
2087 ecc->num_slots = BIT(value + 4);
2088
2089 value = GET_NUM_EVQUE(cccfg);
2090 ecc->num_tc = value + 1;
2091
2092 ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
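
/*
 * Example decode: a CCCFG value reporting NUM_DMACH = 5 yields
 * BIT(5 + 1) = 64 DMA channels, and NUM_PAENTRY = 4 yields
 * BIT(4 + 4) = 256 PaRAM slots.
 */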
2093
2094 dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
2095 dev_dbg(dev, "num_region: %u\n", ecc->num_region);
2096 dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
2097 dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
2098 dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
2099 dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
2100 dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
2101
/* Nothing needs to be done if a queue priority mapping was provided */
2103 if (pdata->queue_priority_mapping)
2104 return 0;
2105
/*
 * Configure TC/queue priority as follows:
 * Q0 - priority 0
 * Q1 - priority 1
 * Q2 - priority 2
 * ...
 * The meaning of priority numbers: 0 highest priority, 7 lowest
 * priority. So Q0 is the highest priority queue and the last queue
 * has the lowest priority.
 */
2116 queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
2117 GFP_KERNEL);
2118 if (!queue_priority_map)
2119 return -ENOMEM;
2120
2121 for (i = 0; i < ecc->num_tc; i++) {
2122 queue_priority_map[i][0] = i;
2123 queue_priority_map[i][1] = i;
2124 }
2125 queue_priority_map[i][0] = -1;
2126 queue_priority_map[i][1] = -1;
2127
2128 pdata->queue_priority_mapping = queue_priority_map;
/* Default queue has the lowest priority */
2130 pdata->default_queue = i - 1;
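
/*
 * For instance, with num_tc = 3 the generated map is {0,0}, {1,1},
 * {2,2}, {-1,-1} and the default queue becomes Q2, the lowest
 * priority one.
 */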
2131
2132 return 0;
2133 }
2134
2135 #if IS_ENABLED(CONFIG_OF)
2136 static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
2137 size_t sz)
2138 {
2139 const char pname[] = "ti,edma-xbar-event-map";
2140 struct resource res;
2141 void __iomem *xbar;
2142 s16 (*xbar_chans)[2];
2143 size_t nelm = sz / sizeof(s16);
2144 u32 shift, offset, mux;
2145 int ret, i;
2146
2147 xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
2148 if (!xbar_chans)
2149 return -ENOMEM;
2150
2151 ret = of_address_to_resource(dev->of_node, 1, &res);
2152 if (ret)
2153 return -ENOMEM;
2154
2155 xbar = devm_ioremap(dev, res.start, resource_size(&res));
2156 if (!xbar)
2157 return -ENOMEM;
2158
2159 ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
2160 nelm);
2161 if (ret)
2162 return -EIO;
2163
2164
2165 nelm >>= 1;
2166 xbar_chans[nelm][0] = -1;
2167 xbar_chans[nelm][1] = -1;
2168
2169 for (i = 0; i < nelm; i++) {
2170 shift = (xbar_chans[i][1] & 0x03) << 3;
2171 offset = xbar_chans[i][1] & 0xfffffffc;
2172 mux = readl(xbar + offset);
2173 mux &= ~(0xff << shift);
2174 mux |= xbar_chans[i][0] << shift;
2175 writel(mux, (xbar + offset));
2176 }
2177
2178 pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
2179 return 0;
2180 }
2181
2182 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2183 bool legacy_mode)
2184 {
2185 struct edma_soc_info *info;
2186 struct property *prop;
2187 int sz, ret;
2188
2189 info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
2190 if (!info)
2191 return ERR_PTR(-ENOMEM);
2192
2193 if (legacy_mode) {
2194 prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
2195 &sz);
2196 if (prop) {
2197 ret = edma_xbar_event_map(dev, info, sz);
2198 if (ret)
2199 return ERR_PTR(ret);
2200 }
2201 return info;
2202 }
2203
/* Get the list of channels allocated for memcpy */
2205 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
2206 if (prop) {
2207 const char pname[] = "ti,edma-memcpy-channels";
2208 size_t nelm = sz / sizeof(s32);
2209 s32 *memcpy_ch;
2210
2211 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
2212 GFP_KERNEL);
2213 if (!memcpy_ch)
2214 return ERR_PTR(-ENOMEM);
2215
2216 ret = of_property_read_u32_array(dev->of_node, pname,
2217 (u32 *)memcpy_ch, nelm);
2218 if (ret)
2219 return ERR_PTR(ret);
2220
2221 memcpy_ch[nelm] = -1;
2222 info->memcpy_channels = memcpy_ch;
2223 }
2224
2225 prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
2226 &sz);
2227 if (prop) {
2228 const char pname[] = "ti,edma-reserved-slot-ranges";
2229 u32 (*tmp)[2];
2230 s16 (*rsv_slots)[2];
2231 size_t nelm = sz / sizeof(*tmp);
2232 struct edma_rsv_info *rsv_info;
2233 int i;
2234
2235 if (!nelm)
2236 return info;
2237
2238 tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
2239 if (!tmp)
2240 return ERR_PTR(-ENOMEM);
2241
2242 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
2243 if (!rsv_info) {
2244 kfree(tmp);
2245 return ERR_PTR(-ENOMEM);
2246 }
2247
2248 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
2249 GFP_KERNEL);
2250 if (!rsv_slots) {
2251 kfree(tmp);
2252 return ERR_PTR(-ENOMEM);
2253 }
2254
2255 ret = of_property_read_u32_array(dev->of_node, pname,
2256 (u32 *)tmp, nelm * 2);
2257 if (ret) {
2258 kfree(tmp);
2259 return ERR_PTR(ret);
2260 }
2261
2262 for (i = 0; i < nelm; i++) {
2263 rsv_slots[i][0] = tmp[i][0];
2264 rsv_slots[i][1] = tmp[i][1];
2265 }
2266 rsv_slots[nelm][0] = -1;
2267 rsv_slots[nelm][1] = -1;
2268
2269 info->rsv = rsv_info;
2270 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
2271
2272 kfree(tmp);
2273 }
2274
2275 return info;
2276 }
2277
2278 static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2279 struct of_dma *ofdma)
2280 {
2281 struct edma_cc *ecc = ofdma->of_dma_data;
2282 struct dma_chan *chan = NULL;
2283 struct edma_chan *echan;
2284 int i;
2285
2286 if (!ecc || dma_spec->args_count < 1)
2287 return NULL;
2288
2289 for (i = 0; i < ecc->num_channels; i++) {
2290 echan = &ecc->slave_chans[i];
2291 if (echan->ch_num == dma_spec->args[0]) {
2292 chan = &echan->vchan.chan;
2293 break;
2294 }
2295 }
2296
2297 if (!chan)
2298 return NULL;
2299
2300 if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
2301 goto out;
2302
2303 if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
2304 dma_spec->args[1] < echan->ecc->num_tc) {
2305 echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
2306 goto out;
2307 }
2308
2309 return NULL;
2310 out:
2311
2312 echan->hw_triggered = true;
2313 return dma_get_slave_channel(chan);
2314 }
2315 #else
2316 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2317 bool legacy_mode)
2318 {
2319 return ERR_PTR(-EINVAL);
2320 }
2321
2322 static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2323 struct of_dma *ofdma)
2324 {
2325 return NULL;
2326 }
2327 #endif
2328
2329 static bool edma_filter_fn(struct dma_chan *chan, void *param);
2330
2331 static int edma_probe(struct platform_device *pdev)
2332 {
2333 struct edma_soc_info *info = pdev->dev.platform_data;
2334 s8 (*queue_priority_mapping)[2];
2335 const s16 (*reserved)[2];
2336 int i, irq;
2337 char *irq_name;
2338 struct resource *mem;
2339 struct device_node *node = pdev->dev.of_node;
2340 struct device *dev = &pdev->dev;
2341 struct edma_cc *ecc;
2342 bool legacy_mode = true;
2343 int ret;
2344
2345 if (node) {
2346 const struct of_device_id *match;
2347
2348 match = of_match_node(edma_of_ids, node);
2349 if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
2350 legacy_mode = false;
2351
2352 info = edma_setup_info_from_dt(dev, legacy_mode);
2353 if (IS_ERR(info)) {
2354 dev_err(dev, "failed to get DT data\n");
2355 return PTR_ERR(info);
2356 }
2357 }
2358
2359 if (!info)
2360 return -ENODEV;
2361
2362 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2363 if (ret)
2364 return ret;
2365
2366 ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
2367 if (!ecc)
2368 return -ENOMEM;
2369
2370 ecc->dev = dev;
2371 ecc->id = pdev->id;
2372 ecc->legacy_mode = legacy_mode;
2373
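	/* When booting with DT the pdev->id is -1 */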
2374 if (ecc->id < 0)
2375 ecc->id = 0;
2376
2377 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
2378 if (!mem) {
2379 dev_dbg(dev, "mem resource not found, using index 0\n");
2380 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2381 if (!mem) {
2382 dev_err(dev, "no mem resource?\n");
2383 return -ENODEV;
2384 }
2385 }
2386 ecc->base = devm_ioremap_resource(dev, mem);
2387 if (IS_ERR(ecc->base))
2388 return PTR_ERR(ecc->base);
2389
2390 platform_set_drvdata(pdev, ecc);
2391
2392 pm_runtime_enable(dev);
2393 ret = pm_runtime_get_sync(dev);
2394 if (ret < 0) {
2395 dev_err(dev, "pm_runtime_get_sync() failed\n");
2396 pm_runtime_disable(dev);
2397 return ret;
2398 }
2399
2400
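	/* Get eDMA3 configuration from IP */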
2401 ret = edma_setup_from_hw(dev, info, ecc);
2402 if (ret)
2403 goto err_disable_pm;
2404
2405
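	/* Allocate memory based on the information we got from the IP */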
2406 ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
2407 sizeof(*ecc->slave_chans), GFP_KERNEL);
2408
2409 ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
2410 sizeof(unsigned long), GFP_KERNEL);
2411
2412 ecc->channels_mask = devm_kcalloc(dev,
2413 BITS_TO_LONGS(ecc->num_channels),
2414 sizeof(unsigned long), GFP_KERNEL);
2415 if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
2416 ret = -ENOMEM;
2417 goto err_disable_pm;
2418 }
2419
2420
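	/* Mark all channels available initially */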
2421 bitmap_fill(ecc->channels_mask, ecc->num_channels);
2422
2423 ecc->default_queue = info->default_queue;
2424
2425 if (info->rsv) {
2426
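		/* Set the reserved slots in inuse list */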
2427 reserved = info->rsv->rsv_slots;
2428 if (reserved) {
2429 for (i = 0; reserved[i][0] != -1; i++)
2430 bitmap_set(ecc->slot_inuse, reserved[i][0],
2431 reserved[i][1]);
2432 }
2433
2434
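		/* Clear channels not usable for Linux */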
2435 reserved = info->rsv->rsv_chans;
2436 if (reserved) {
2437 for (i = 0; reserved[i][0] != -1; i++)
2438 bitmap_clear(ecc->channels_mask, reserved[i][0],
2439 reserved[i][1]);
2440 }
2441 }
2442
2443 for (i = 0; i < ecc->num_slots; i++) {
2444
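		/* Reset only unused - not reserved - paRAM slots */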
2445 if (!test_bit(i, ecc->slot_inuse))
2446 edma_write_slot(ecc, i, &dummy_paramset);
2447 }
2448
2449 irq = platform_get_irq_byname(pdev, "edma3_ccint");
2450 if (irq < 0 && node)
2451 irq = irq_of_parse_and_map(node, 0);
2452
2453 if (irq >= 0) {
2454 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
2455 dev_name(dev));
2456 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
2457 ecc);
2458 if (ret) {
2459 dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
2460 goto err_disable_pm;
2461 }
2462 ecc->ccint = irq;
2463 }
2464
2465 irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
2466 if (irq < 0 && node)
2467 irq = irq_of_parse_and_map(node, 2);
2468
2469 if (irq >= 0) {
2470 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
2471 dev_name(dev));
2472 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
2473 ecc);
2474 if (ret) {
2475 dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
2476 goto err_disable_pm;
2477 }
2478 ecc->ccerrint = irq;
2479 }
2480
2481 ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
2482 if (ecc->dummy_slot < 0) {
2483 dev_err(dev, "Can't allocate PaRAM dummy slot\n");
2484 ret = ecc->dummy_slot;
2485 goto err_disable_pm;
2486 }
2487
2488 queue_priority_mapping = info->queue_priority_mapping;
2489
2490 if (!ecc->legacy_mode) {
2491 int lowest_priority = 0;
2492 unsigned int array_max;
2493 struct of_phandle_args tc_args;
2494
2495 ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
2496 sizeof(*ecc->tc_list), GFP_KERNEL);
2497 if (!ecc->tc_list) {
2498 ret = -ENOMEM;
2499 goto err_reg1;
2500 }
2501
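		/*
		 * Walk the ti,tptcs phandles: the phandle argument is the
		 * event queue priority, and the lowest-priority TC becomes
		 * the default queue.
		 */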
2502 for (i = 0;; i++) {
2503 ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
2504 1, i, &tc_args);
2505 if (ret || i == ecc->num_tc)
2506 break;
2507
2508 ecc->tc_list[i].node = tc_args.np;
2509 ecc->tc_list[i].id = i;
2510 queue_priority_mapping[i][1] = tc_args.args[0];
2511 if (queue_priority_mapping[i][1] > lowest_priority) {
2512 lowest_priority = queue_priority_mapping[i][1];
2513 info->default_queue = i;
2514 }
2515 }
2516
2517
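		/* See if we have optional dma-channel-mask array */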
2518 array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
2519 ret = of_property_read_variable_u32_array(node,
2520 "dma-channel-mask",
2521 (u32 *)ecc->channels_mask,
2522 1, array_max);
2523 if (ret > 0 && ret != array_max)
2524 dev_warn(dev, "dma-channel-mask is not complete.\n");
2525 else if (ret == -EOVERFLOW || ret == -ENODATA)
2526 dev_warn(dev,
2527 "dma-channel-mask is out of range or empty\n");
2528 }
2529
2530
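	/* Event queue priority mapping */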
2531 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2532 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2533 queue_priority_mapping[i][1]);
2534
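	/*
	 * Start with shadow region 0 access (DRAE/QRAE) disabled; the bits
	 * are set once channels are actually allocated.
	 */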
2535 edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
2536 edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
2537 edma_write_array(ecc, EDMA_QRAE, 0, 0x0);
2538
2539 ecc->info = info;
2540
2541
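	/* Init the dma device and channels */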
2542 edma_dma_init(ecc, legacy_mode);
2543
2544 for (i = 0; i < ecc->num_channels; i++) {
2545
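		/* Do not touch reserved channels */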
2546 if (!test_bit(i, ecc->channels_mask))
2547 continue;
2548
2549
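		/* Assign all channels to the default queue */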
2550 edma_assign_channel_eventq(&ecc->slave_chans[i],
2551 info->default_queue);
2552
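		/* Set entry slot to the dummy slot */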
2553 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
2554 }
2555
2556 ecc->dma_slave.filter.map = info->slave_map;
2557 ecc->dma_slave.filter.mapcnt = info->slavecnt;
2558 ecc->dma_slave.filter.fn = edma_filter_fn;
2559
2560 ret = dma_async_device_register(&ecc->dma_slave);
2561 if (ret) {
2562 dev_err(dev, "slave ddev registration failed (%d)\n", ret);
2563 goto err_reg1;
2564 }
2565
2566 if (ecc->dma_memcpy) {
2567 ret = dma_async_device_register(ecc->dma_memcpy);
2568 if (ret) {
2569 dev_err(dev, "memcpy ddev registration failed (%d)\n",
2570 ret);
2571 dma_async_device_unregister(&ecc->dma_slave);
2572 goto err_reg1;
2573 }
2574 }
2575
2576 if (node)
2577 of_dma_controller_register(node, of_edma_xlate, ecc);
2578
2579 dev_info(dev, "TI EDMA DMA engine driver\n");
2580
2581 return 0;
2582
2583 err_reg1:
2584 edma_free_slot(ecc, ecc->dummy_slot);
2585 err_disable_pm:
2586 pm_runtime_put_sync(dev);
2587 pm_runtime_disable(dev);
2588 return ret;
2589 }
2590
2591 static void edma_cleanup_vchan(struct dma_device *dmadev)
2592 {
2593 struct edma_chan *echan, *_echan;
2594
2595 list_for_each_entry_safe(echan, _echan,
2596 &dmadev->channels, vchan.chan.device_node) {
2597 list_del(&echan->vchan.chan.device_node);
2598 tasklet_kill(&echan->vchan.task);
2599 }
2600 }
2601
2602 static int edma_remove(struct platform_device *pdev)
2603 {
2604 struct device *dev = &pdev->dev;
2605 struct edma_cc *ecc = dev_get_drvdata(dev);
2606
2607 devm_free_irq(dev, ecc->ccint, ecc);
2608 devm_free_irq(dev, ecc->ccerrint, ecc);
2609
2610 edma_cleanup_vchan(&ecc->dma_slave);
2611
2612 if (dev->of_node)
2613 of_dma_controller_free(dev->of_node);
2614 dma_async_device_unregister(&ecc->dma_slave);
2615 if (ecc->dma_memcpy)
2616 dma_async_device_unregister(ecc->dma_memcpy);
2617 edma_free_slot(ecc, ecc->dummy_slot);
2618 pm_runtime_put_sync(dev);
2619 pm_runtime_disable(dev);
2620
2621 return 0;
2622 }
2623
2624 #ifdef CONFIG_PM_SLEEP
2625 static int edma_pm_suspend(struct device *dev)
2626 {
2627 struct edma_cc *ecc = dev_get_drvdata(dev);
2628 struct edma_chan *echan = ecc->slave_chans;
2629 int i;
2630
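	/* Disable completion interrupts for the channels currently in use */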
2631 for (i = 0; i < ecc->num_channels; i++) {
2632 if (echan[i].alloced)
2633 edma_setup_interrupt(&echan[i], false);
2634 }
2635
2636 return 0;
2637 }
2638
2639 static int edma_pm_resume(struct device *dev)
2640 {
2641 struct edma_cc *ecc = dev_get_drvdata(dev);
2642 struct edma_chan *echan = ecc->slave_chans;
2643 int i;
2644 s8 (*queue_priority_mapping)[2];
2645
2646
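	/* Re-initialize the dummy slot to the dummy paRAM set */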
2647 edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
2648
2649 queue_priority_mapping = ecc->info->queue_priority_mapping;
2650
2651
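	/* Event queue priority mapping */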
2652 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2653 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2654 queue_priority_mapping[i][1]);
2655
2656 for (i = 0; i < ecc->num_channels; i++) {
2657 if (echan[i].alloced) {
2658
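			/* Ensure access through shadow region 0 */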
2659 edma_or_array2(ecc, EDMA_DRAE, 0,
2660 EDMA_REG_ARRAY_INDEX(i),
2661 EDMA_CHANNEL_BIT(i));
2662
2663 edma_setup_interrupt(&echan[i], true);
2664
2665
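			/* Set up channel -> slot mapping for the entry slot */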
2666 edma_set_chmap(&echan[i], echan[i].slot[0]);
2667 }
2668 }
2669
2670 return 0;
2671 }
2672 #endif
2673
2674 static const struct dev_pm_ops edma_pm_ops = {
2675 SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
2676 };
2677
2678 static struct platform_driver edma_driver = {
2679 .probe = edma_probe,
2680 .remove = edma_remove,
2681 .driver = {
2682 .name = "edma",
2683 .pm = &edma_pm_ops,
2684 .of_match_table = edma_of_ids,
2685 },
2686 };
2687
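/*
 * A TPTC needs no programming from this driver; just enable runtime PM so
 * the transfer controller is powered up.
 */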
2688 static int edma_tptc_probe(struct platform_device *pdev)
2689 {
2690 pm_runtime_enable(&pdev->dev);
2691 return pm_runtime_get_sync(&pdev->dev);
2692 }
2693
2694 static struct platform_driver edma_tptc_driver = {
2695 .probe = edma_tptc_probe,
2696 .driver = {
2697 .name = "edma3-tptc",
2698 .of_match_table = edma_tptc_of_ids,
2699 },
2700 };
2701
2702 static bool edma_filter_fn(struct dma_chan *chan, void *param)
2703 {
2704 bool match = false;
2705
2706 if (chan->device->dev->driver == &edma_driver.driver) {
2707 struct edma_chan *echan = to_edma_chan(chan);
2708 unsigned ch_req = *(unsigned *)param;
2709 if (ch_req == echan->ch_num) {
2710
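			/* The channel is going to be used as HW synchronized */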
2711 echan->hw_triggered = true;
2712 match = true;
2713 }
2714 }
2715 return match;
2716 }
2717
2718 static int edma_init(void)
2719 {
2720 int ret;
2721
2722 ret = platform_driver_register(&edma_tptc_driver);
2723 if (ret)
2724 return ret;
2725
2726 return platform_driver_register(&edma_driver);
2727 }
2728 subsys_initcall(edma_init);
2729
2730 static void __exit edma_exit(void)
2731 {
2732 platform_driver_unregister(&edma_driver);
2733 platform_driver_unregister(&edma_tptc_driver);
2734 }
2735 module_exit(edma_exit);
2736
2737 MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
2738 MODULE_DESCRIPTION("TI EDMA DMA engine driver");
2739 MODULE_LICENSE("GPL v2");