/*
 * DMA driver for Xilinx DMA/Video DMA Engines
 *
 * This driver supports four Xilinx soft IP cores:
 *
 * The AXI Video Direct Memory Access (AXI VDMA) core provides
 * high-bandwidth direct memory access between memory and AXI4-Stream
 * video-type target peripherals, with independent MM2S (read) and
 * S2MM (write) channels.
 *
 * The AXI Direct Memory Access (AXI DMA) core provides high-bandwidth
 * one-dimensional DMA between memory and AXI4-Stream target peripherals.
 *
 * The AXI Central DMA (AXI CDMA) core provides memory-mapped to
 * memory-mapped transfers.
 *
 * The AXI Multichannel DMA (AXI MCDMA) core provides scatter-gather DMA
 * with multiple independently configurable channels.
 */
0036 #include <linux/bitops.h>
0037 #include <linux/dmapool.h>
0038 #include <linux/dma/xilinx_dma.h>
0039 #include <linux/init.h>
0040 #include <linux/interrupt.h>
0041 #include <linux/io.h>
0042 #include <linux/iopoll.h>
0043 #include <linux/module.h>
0044 #include <linux/of_address.h>
0045 #include <linux/of_dma.h>
0046 #include <linux/of_platform.h>
0047 #include <linux/of_irq.h>
0048 #include <linux/slab.h>
0049 #include <linux/clk.h>
0050 #include <linux/io-64-nonatomic-lo-hi.h>
0051
0052 #include "../dmaengine.h"
0053
0054
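/* Register/Descriptor Offsets */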
0055 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
0056 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
0057 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
0058 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
0059
0060
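/* Control Registers */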
0061 #define XILINX_DMA_REG_DMACR 0x0000
0062 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
0063 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
0064 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
0065 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
0066 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
0067 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
0068 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
0069 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
0070 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
0071 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
0072 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
0073 #define XILINX_DMA_DMACR_RESET BIT(2)
0074 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
0075 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
0076 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
0077 #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
0078 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
0079 #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
0080
0081 #define XILINX_DMA_REG_DMASR 0x0004
0082 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
0083 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
0084 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
0085 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
0086 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
0087 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
0088 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
0089 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
0090 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
0091 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
0092 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
0093 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
0094 #define XILINX_DMA_DMASR_SG_MASK BIT(3)
0095 #define XILINX_DMA_DMASR_IDLE BIT(1)
0096 #define XILINX_DMA_DMASR_HALTED BIT(0)
0097 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
0098 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
0099
0100 #define XILINX_DMA_REG_CURDESC 0x0008
0101 #define XILINX_DMA_REG_TAILDESC 0x0010
0102 #define XILINX_DMA_REG_REG_INDEX 0x0014
0103 #define XILINX_DMA_REG_FRMSTORE 0x0018
0104 #define XILINX_DMA_REG_THRESHOLD 0x001c
0105 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
0106 #define XILINX_DMA_REG_PARK_PTR 0x0028
0107 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
0108 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
0109 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
0110 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
0111 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
0112
0113
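/* Register Direct Mode Registers */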
0114 #define XILINX_DMA_REG_VSIZE 0x0000
0115 #define XILINX_DMA_REG_HSIZE 0x0004
0116
0117 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
0118 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
0119 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
0120
0121 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
0122 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
0123
0124 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
0125 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
0126
0127
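/* HW specific definitions */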
0128 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
0129 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
0130 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
0131
0132 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
0133 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
0134 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
0135 XILINX_DMA_DMASR_ERR_IRQ)
0136
0137 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
0138 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
0139 XILINX_DMA_DMASR_SOF_LATE_ERR | \
0140 XILINX_DMA_DMASR_SG_DEC_ERR | \
0141 XILINX_DMA_DMASR_SG_SLV_ERR | \
0142 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
0143 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
0144 XILINX_DMA_DMASR_DMA_DEC_ERR | \
0145 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
0146 XILINX_DMA_DMASR_DMA_INT_ERR)
0147
/*
 * Recoverable errors. These are only treated as recoverable when the
 * channel is operating in flush-on-frame-sync mode; otherwise any error
 * flags the channel as failed.
 */
0153 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
0154 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
0155 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
0156 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
0157 XILINX_DMA_DMASR_DMA_INT_ERR)
0158
0159
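/* AXI VDMA Flush on FSync bits */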
0160 #define XILINX_DMA_FLUSH_S2MM 3
0161 #define XILINX_DMA_FLUSH_MM2S 2
0162 #define XILINX_DMA_FLUSH_BOTH 1
0163
0164
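/* Delay loop counter to prevent hardware failure */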
0165 #define XILINX_DMA_LOOP_COUNT 1000000
0166
0167
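/* AXI DMA Specific Registers/Offsets */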
0168 #define XILINX_DMA_REG_SRCDSTADDR 0x18
0169 #define XILINX_DMA_REG_BTT 0x28
0170
0171
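/* AXI DMA Specific Masks/Bit fields */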
0172 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
0173 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
0174 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
0175 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
0176 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
0177 #define XILINX_DMA_CR_COALESCE_SHIFT 16
0178 #define XILINX_DMA_BD_SOP BIT(27)
0179 #define XILINX_DMA_BD_EOP BIT(26)
0180 #define XILINX_DMA_COALESCE_MAX 255
0181 #define XILINX_DMA_NUM_DESCS 255
0182 #define XILINX_DMA_NUM_APP_WORDS 5
0183
0184
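/* AXI CDMA Specific Registers/Offsets */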
0185 #define XILINX_CDMA_REG_SRCADDR 0x18
0186 #define XILINX_CDMA_REG_DSTADDR 0x20
0187
0188
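/* AXI CDMA Specific Masks */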
0189 #define XILINX_CDMA_CR_SGMODE BIT(3)
0190
0191 #define xilinx_prep_dma_addr_t(addr) \
0192 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
0193
0194
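/* AXI MCDMA Specific Registers/Offsets */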
0195 #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
0196 #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
0197 #define XILINX_MCDMA_CHEN_OFFSET 0x0008
0198 #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
0199 #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
0200 #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
0201 #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
0202 #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
0203 #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
0204 #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
0205
0206
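/* AXI MCDMA Specific Masks/Shifts */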
0207 #define XILINX_MCDMA_COALESCE_SHIFT 16
0208 #define XILINX_MCDMA_COALESCE_MAX 24
0209 #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
0210 #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
0211 #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
0212 #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
0213 #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
0214 #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
0215 #define XILINX_MCDMA_BD_EOP BIT(30)
0216 #define XILINX_MCDMA_BD_SOP BIT(31)
0217
0218
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first pixels of each horizontal line @0x18
 */
0229 struct xilinx_vdma_desc_hw {
0230 u32 next_desc;
0231 u32 pad1;
0232 u32 buf_addr;
0233 u32 buf_addr_msb;
0234 u32 vsize;
0235 u32 hsize;
0236 u32 stride;
0237 } __aligned(64);
0238
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
0251 struct xilinx_axidma_desc_hw {
0252 u32 next_desc;
0253 u32 next_desc_msb;
0254 u32 buf_addr;
0255 u32 buf_addr_msb;
0256 u32 reserved1;
0257 u32 reserved2;
0258 u32 control;
0259 u32 status;
0260 u32 app[XILINX_DMA_NUM_APP_WORDS];
0261 } __aligned(64);
0262
/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved @0x10
 * @control: Control field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
0275 struct xilinx_aximcdma_desc_hw {
0276 u32 next_desc;
0277 u32 next_desc_msb;
0278 u32 buf_addr;
0279 u32 buf_addr_msb;
0280 u32 rsvd;
0281 u32 control;
0282 u32 status;
0283 u32 sideband_status;
0284 u32 app[XILINX_DMA_NUM_APP_WORDS];
0285 } __aligned(64);
0286
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor for AXI CDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
0298 struct xilinx_cdma_desc_hw {
0299 u32 next_desc;
0300 u32 next_desc_msb;
0301 u32 src_addr;
0302 u32 src_addr_msb;
0303 u32 dest_addr;
0304 u32 dest_addr_msb;
0305 u32 control;
0306 u32 status;
0307 } __aligned(64);
0308
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
0315 struct xilinx_vdma_tx_segment {
0316 struct xilinx_vdma_desc_hw hw;
0317 struct list_head node;
0318 dma_addr_t phys;
0319 } __aligned(64);
0320
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
0327 struct xilinx_axidma_tx_segment {
0328 struct xilinx_axidma_desc_hw hw;
0329 struct list_head node;
0330 dma_addr_t phys;
0331 } __aligned(64);
0332
/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
0339 struct xilinx_aximcdma_tx_segment {
0340 struct xilinx_aximcdma_desc_hw hw;
0341 struct list_head node;
0342 dma_addr_t phys;
0343 } __aligned(64);
0344
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
0351 struct xilinx_cdma_tx_segment {
0352 struct xilinx_cdma_desc_hw hw;
0353 struct list_head node;
0354 dma_addr_t phys;
0355 } __aligned(64);
0356
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers
 * @err: Whether the descriptor has an error
 * @residue: Residue of the completed descriptor
 */
0366 struct xilinx_dma_tx_descriptor {
0367 struct dma_async_tx_descriptor async_tx;
0368 struct list_head segments;
0369 struct list_head node;
0370 bool cyclic;
0371 bool err;
0372 u32 residue;
0373 };
0374
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @terminating: Check for channel being synchronized by user
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
0414 struct xilinx_dma_chan {
0415 struct xilinx_dma_device *xdev;
0416 u32 ctrl_offset;
0417 u32 desc_offset;
0418 spinlock_t lock;
0419 struct list_head pending_list;
0420 struct list_head active_list;
0421 struct list_head done_list;
0422 struct list_head free_seg_list;
0423 struct dma_chan common;
0424 struct dma_pool *desc_pool;
0425 struct device *dev;
0426 int irq;
0427 int id;
0428 enum dma_transfer_direction direction;
0429 int num_frms;
0430 bool has_sg;
0431 bool cyclic;
0432 bool genlock;
0433 bool err;
0434 bool idle;
0435 bool terminating;
0436 struct tasklet_struct tasklet;
0437 struct xilinx_vdma_config config;
0438 bool flush_on_fsync;
0439 u32 desc_pendingcount;
0440 bool ext_addr;
0441 u32 desc_submitcount;
0442 struct xilinx_axidma_tx_segment *seg_v;
0443 struct xilinx_aximcdma_tx_segment *seg_mv;
0444 dma_addr_t seg_p;
0445 struct xilinx_axidma_tx_segment *cyclic_seg_v;
0446 dma_addr_t cyclic_seg_p;
0447 void (*start_transfer)(struct xilinx_dma_chan *chan);
0448 int (*stop_transfer)(struct xilinx_dma_chan *chan);
0449 u16 tdest;
0450 bool has_vflip;
0451 };
0452
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 */
0462 enum xdma_ip_type {
0463 XDMA_TYPE_AXIDMA = 0,
0464 XDMA_TYPE_CDMA,
0465 XDMA_TYPE_VDMA,
0466 XDMA_TYPE_AXIMCDMA
0467 };
0468
0469 struct xilinx_dma_config {
0470 enum xdma_ip_type dmatype;
0471 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
0472 struct clk **tx_clk, struct clk **txs_clk,
0473 struct clk **rx_clk, struct clk **rxs_clk);
0474 irqreturn_t (*irq_handler)(int irq, void *data);
0475 const int max_channels;
0476 };
0477
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 */
0497 struct xilinx_dma_device {
0498 void __iomem *regs;
0499 struct device *dev;
0500 struct dma_device common;
0501 struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
0502 u32 flush_on_fsync;
0503 bool ext_addr;
0504 struct platform_device *pdev;
0505 const struct xilinx_dma_config *dma_config;
0506 struct clk *axi_clk;
0507 struct clk *tx_clk;
0508 struct clk *txs_clk;
0509 struct clk *rx_clk;
0510 struct clk *rxs_clk;
0511 u32 s2mm_chan_id;
0512 u32 mm2s_chan_id;
0513 u32 max_buffer_len;
0514 };
0515
0516
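/* Macros */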
0517 #define to_xilinx_chan(chan) \
0518 container_of(chan, struct xilinx_dma_chan, common)
0519 #define to_dma_tx_descriptor(tx) \
0520 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
0521 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
0522 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
0523 val, cond, delay_us, timeout_us)
0524
0525
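/* IO accessors */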
0526 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
0527 {
0528 return ioread32(chan->xdev->regs + reg);
0529 }
0530
0531 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
0532 {
0533 iowrite32(value, chan->xdev->regs + reg);
0534 }
0535
0536 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
0537 u32 value)
0538 {
0539 dma_write(chan, chan->desc_offset + reg, value);
0540 }
0541
0542 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
0543 {
0544 return dma_read(chan, chan->ctrl_offset + reg);
0545 }
0546
0547 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
0548 u32 value)
0549 {
0550 dma_write(chan, chan->ctrl_offset + reg, value);
0551 }
0552
0553 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
0554 u32 clr)
0555 {
0556 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
0557 }
0558
0559 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
0560 u32 set)
0561 {
0562 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
0563 }
0564
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor
 * @value_msb: upper address of the descriptor
 *
 * The target register offset is not 64-bit aligned, so the value is
 * written as two separate 32-bit writes (LSB first, then MSB).
 */
0576 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
0577 u32 value_lsb, u32 value_msb)
0578 {
0579
0580 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
0581
0582
0583 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
0584 }
0585
0586 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
0587 {
0588 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
0589 }
0590
0591 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
0592 dma_addr_t addr)
0593 {
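/*
 * Buffer/descriptor addresses are programmed with a single 64-bit write
 * when the core is configured for extended (>32-bit) addressing, and
 * with a plain 32-bit register write otherwise.
 */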
0594 if (chan->ext_addr)
0595 dma_writeq(chan, reg, addr);
0596 else
0597 dma_ctrl_write(chan, reg, addr);
0598 }
0599
0600 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
0601 struct xilinx_axidma_desc_hw *hw,
0602 dma_addr_t buf_addr, size_t sg_used,
0603 size_t period_len)
0604 {
0605 if (chan->ext_addr) {
0606 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
0607 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
0608 period_len);
0609 } else {
0610 hw->buf_addr = buf_addr + sg_used + period_len;
0611 }
0612 }
0613
0614 static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
0615 struct xilinx_aximcdma_desc_hw *hw,
0616 dma_addr_t buf_addr, size_t sg_used)
0617 {
0618 if (chan->ext_addr) {
0619 hw->buf_addr = lower_32_bits(buf_addr + sg_used);
0620 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
0621 } else {
0622 hw->buf_addr = buf_addr + sg_used;
0623 }
0624 }
0625
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
0636 static struct xilinx_vdma_tx_segment *
0637 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
0638 {
0639 struct xilinx_vdma_tx_segment *segment;
0640 dma_addr_t phys;
0641
0642 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
0643 if (!segment)
0644 return NULL;
0645
0646 segment->phys = phys;
0647
0648 return segment;
0649 }
0650
0651
0652
0653
0654
0655
0656
0657 static struct xilinx_cdma_tx_segment *
0658 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
0659 {
0660 struct xilinx_cdma_tx_segment *segment;
0661 dma_addr_t phys;
0662
0663 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
0664 if (!segment)
0665 return NULL;
0666
0667 segment->phys = phys;
0668
0669 return segment;
0670 }
0671
0672
0673
0674
0675
0676
0677
0678 static struct xilinx_axidma_tx_segment *
0679 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
0680 {
0681 struct xilinx_axidma_tx_segment *segment = NULL;
0682 unsigned long flags;
0683
0684 spin_lock_irqsave(&chan->lock, flags);
0685 if (!list_empty(&chan->free_seg_list)) {
0686 segment = list_first_entry(&chan->free_seg_list,
0687 struct xilinx_axidma_tx_segment,
0688 node);
0689 list_del(&segment->node);
0690 }
0691 spin_unlock_irqrestore(&chan->lock, flags);
0692
0693 if (!segment)
0694 dev_dbg(chan->dev, "Could not find free tx segment\n");
0695
0696 return segment;
0697 }
0698
0699
0700
0701
0702
0703
0704
0705 static struct xilinx_aximcdma_tx_segment *
0706 xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
0707 {
0708 struct xilinx_aximcdma_tx_segment *segment = NULL;
0709 unsigned long flags;
0710
0711 spin_lock_irqsave(&chan->lock, flags);
0712 if (!list_empty(&chan->free_seg_list)) {
0713 segment = list_first_entry(&chan->free_seg_list,
0714 struct xilinx_aximcdma_tx_segment,
0715 node);
0716 list_del(&segment->node);
0717 }
0718 spin_unlock_irqrestore(&chan->lock, flags);
0719
0720 return segment;
0721 }
0722
0723 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
0724 {
0725 u32 next_desc = hw->next_desc;
0726 u32 next_desc_msb = hw->next_desc_msb;
0727
0728 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
0729
0730 hw->next_desc = next_desc;
0731 hw->next_desc_msb = next_desc_msb;
0732 }
0733
0734 static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
0735 {
0736 u32 next_desc = hw->next_desc;
0737 u32 next_desc_msb = hw->next_desc_msb;
0738
0739 memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
0740
0741 hw->next_desc = next_desc;
0742 hw->next_desc_msb = next_desc_msb;
0743 }
0744
0745
0746
0747
0748
0749
0750 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
0751 struct xilinx_axidma_tx_segment *segment)
0752 {
0753 xilinx_dma_clean_hw_desc(&segment->hw);
0754
0755 list_add_tail(&segment->node, &chan->free_seg_list);
0756 }
0757
0758
0759
0760
0761
0762
0763 static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
0764 struct xilinx_aximcdma_tx_segment *
0765 segment)
0766 {
0767 xilinx_mcdma_clean_hw_desc(&segment->hw);
0768
0769 list_add_tail(&segment->node, &chan->free_seg_list);
0770 }
0771
0772
0773
0774
0775
0776
0777 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
0778 struct xilinx_cdma_tx_segment *segment)
0779 {
0780 dma_pool_free(chan->desc_pool, segment, segment->phys);
0781 }
0782
0783
0784
0785
0786
0787
0788 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
0789 struct xilinx_vdma_tx_segment *segment)
0790 {
0791 dma_pool_free(chan->desc_pool, segment, segment->phys);
0792 }
0793
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
0800 static struct xilinx_dma_tx_descriptor *
0801 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
0802 {
0803 struct xilinx_dma_tx_descriptor *desc;
0804
0805 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
0806 if (!desc)
0807 return NULL;
0808
0809 INIT_LIST_HEAD(&desc->segments);
0810
0811 return desc;
0812 }
0813
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
0819 static void
0820 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
0821 struct xilinx_dma_tx_descriptor *desc)
0822 {
0823 struct xilinx_vdma_tx_segment *segment, *next;
0824 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
0825 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
0826 struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
0827
0828 if (!desc)
0829 return;
0830
0831 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
0832 list_for_each_entry_safe(segment, next, &desc->segments, node) {
0833 list_del(&segment->node);
0834 xilinx_vdma_free_tx_segment(chan, segment);
0835 }
0836 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
0837 list_for_each_entry_safe(cdma_segment, cdma_next,
0838 &desc->segments, node) {
0839 list_del(&cdma_segment->node);
0840 xilinx_cdma_free_tx_segment(chan, cdma_segment);
0841 }
0842 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
0843 list_for_each_entry_safe(axidma_segment, axidma_next,
0844 &desc->segments, node) {
0845 list_del(&axidma_segment->node);
0846 xilinx_dma_free_tx_segment(chan, axidma_segment);
0847 }
0848 } else {
0849 list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
0850 &desc->segments, node) {
0851 list_del(&aximcdma_segment->node);
0852 xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
0853 }
0854 }
0855
0856 kfree(desc);
0857 }
0858
/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptors
 */
0866 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
0867 struct list_head *list)
0868 {
0869 struct xilinx_dma_tx_descriptor *desc, *next;
0870
0871 list_for_each_entry_safe(desc, next, list, node) {
0872 list_del(&desc->node);
0873 xilinx_dma_free_tx_descriptor(chan, desc);
0874 }
0875 }
0876
/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
0881 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
0882 {
0883 unsigned long flags;
0884
0885 spin_lock_irqsave(&chan->lock, flags);
0886
0887 xilinx_dma_free_desc_list(chan, &chan->pending_list);
0888 xilinx_dma_free_desc_list(chan, &chan->done_list);
0889 xilinx_dma_free_desc_list(chan, &chan->active_list);
0890
0891 spin_unlock_irqrestore(&chan->lock, flags);
0892 }
0893
/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
0898 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
0899 {
0900 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
0901 unsigned long flags;
0902
0903 dev_dbg(chan->dev, "Free all channel resources.\n");
0904
0905 xilinx_dma_free_descriptors(chan);
0906
0907 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
0908 spin_lock_irqsave(&chan->lock, flags);
0909 INIT_LIST_HEAD(&chan->free_seg_list);
0910 spin_unlock_irqrestore(&chan->lock, flags);
0911
0912
0913 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
0914 XILINX_DMA_NUM_DESCS, chan->seg_v,
0915 chan->seg_p);
0916
0917
0918 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
0919 chan->cyclic_seg_v, chan->cyclic_seg_p);
0920 }
0921
0922 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
0923 spin_lock_irqsave(&chan->lock, flags);
0924 INIT_LIST_HEAD(&chan->free_seg_list);
0925 spin_unlock_irqrestore(&chan->lock, flags);
0926
0927
0928 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
0929 XILINX_DMA_NUM_DESCS, chan->seg_mv,
0930 chan->seg_p);
0931 }
0932
0933 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
0934 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
0935 dma_pool_destroy(chan->desc_pool);
0936 chan->desc_pool = NULL;
0937 }
0938
0939 }
0940
/**
 * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Return: The number of residue bytes for the descriptor.
 */
0948 static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
0949 struct xilinx_dma_tx_descriptor *desc)
0950 {
0951 struct xilinx_cdma_tx_segment *cdma_seg;
0952 struct xilinx_axidma_tx_segment *axidma_seg;
0953 struct xilinx_aximcdma_tx_segment *aximcdma_seg;
0954 struct xilinx_cdma_desc_hw *cdma_hw;
0955 struct xilinx_axidma_desc_hw *axidma_hw;
0956 struct xilinx_aximcdma_desc_hw *aximcdma_hw;
0957 struct list_head *entry;
0958 u32 residue = 0;
0959
0960 list_for_each(entry, &desc->segments) {
0961 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
0962 cdma_seg = list_entry(entry,
0963 struct xilinx_cdma_tx_segment,
0964 node);
0965 cdma_hw = &cdma_seg->hw;
0966 residue += (cdma_hw->control - cdma_hw->status) &
0967 chan->xdev->max_buffer_len;
0968 } else if (chan->xdev->dma_config->dmatype ==
0969 XDMA_TYPE_AXIDMA) {
0970 axidma_seg = list_entry(entry,
0971 struct xilinx_axidma_tx_segment,
0972 node);
0973 axidma_hw = &axidma_seg->hw;
0974 residue += (axidma_hw->control - axidma_hw->status) &
0975 chan->xdev->max_buffer_len;
0976 } else {
0977 aximcdma_seg =
0978 list_entry(entry,
0979 struct xilinx_aximcdma_tx_segment,
0980 node);
0981 aximcdma_hw = &aximcdma_seg->hw;
0982 residue +=
0983 (aximcdma_hw->control - aximcdma_hw->status) &
0984 chan->xdev->max_buffer_len;
0985 }
0986 }
0987
0988 return residue;
0989 }
0990
0991
0992
0993
0994
0995
0996
0997 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
0998 struct xilinx_dma_tx_descriptor *desc,
0999 unsigned long *flags)
1000 {
1001 struct dmaengine_desc_callback cb;
1002
1003 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1004 if (dmaengine_desc_callback_valid(&cb)) {
1005 spin_unlock_irqrestore(&chan->lock, *flags);
1006 dmaengine_desc_callback_invoke(&cb, NULL);
1007 spin_lock_irqsave(&chan->lock, *flags);
1008 }
1009 }
1010
/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
1015 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1016 {
1017 struct xilinx_dma_tx_descriptor *desc, *next;
1018 unsigned long flags;
1019
1020 spin_lock_irqsave(&chan->lock, flags);
1021
1022 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1023 struct dmaengine_result result;
1024
1025 if (desc->cyclic) {
1026 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1027 break;
1028 }
1029
1030
1031 list_del(&desc->node);
1032
1033 if (unlikely(desc->err)) {
1034 if (chan->direction == DMA_DEV_TO_MEM)
1035 result.result = DMA_TRANS_READ_FAILED;
1036 else
1037 result.result = DMA_TRANS_WRITE_FAILED;
1038 } else {
1039 result.result = DMA_TRANS_NOERROR;
1040 }
1041
1042 result.residue = desc->residue;
1043
1044
1045 spin_unlock_irqrestore(&chan->lock, flags);
1046 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1047 spin_lock_irqsave(&chan->lock, flags);
1048
1049
1050 dma_run_dependencies(&desc->async_tx);
1051 xilinx_dma_free_tx_descriptor(chan, desc);
1052
1053
1054
1055
1056
1057 if (chan->terminating)
1058 break;
1059 }
1060
1061 spin_unlock_irqrestore(&chan->lock, flags);
1062 }
1063
1064
1065
1066
1067
1068 static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
1069 {
1070 struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
1071
1072 xilinx_dma_chan_desc_cleanup(chan);
1073 }
1074
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1081 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1082 {
1083 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1084 int i;
1085
1086
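/* Has this channel already been allocated? */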
1087 if (chan->desc_pool)
1088 return 0;
1089
1090
1091
1092
1093
1094 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1095
1096 chan->seg_v = dma_alloc_coherent(chan->dev,
1097 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1098 &chan->seg_p, GFP_KERNEL);
1099 if (!chan->seg_v) {
1100 dev_err(chan->dev,
1101 "unable to allocate channel %d descriptors\n",
1102 chan->id);
1103 return -ENOMEM;
1104 }
1105
/*
 * For cyclic DMA mode we need to program the tail descriptor
 * register with a value which is not part of the BD chain, so a
 * dedicated segment is allocated here for that purpose.
 */
1111 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1112 sizeof(*chan->cyclic_seg_v),
1113 &chan->cyclic_seg_p,
1114 GFP_KERNEL);
1115 if (!chan->cyclic_seg_v) {
1116 dev_err(chan->dev,
1117 "unable to allocate desc segment for cyclic DMA\n");
1118 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1119 XILINX_DMA_NUM_DESCS, chan->seg_v,
1120 chan->seg_p);
1121 return -ENOMEM;
1122 }
1123 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
1124
1125 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1126 chan->seg_v[i].hw.next_desc =
1127 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1128 ((i + 1) % XILINX_DMA_NUM_DESCS));
1129 chan->seg_v[i].hw.next_desc_msb =
1130 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1131 ((i + 1) % XILINX_DMA_NUM_DESCS));
1132 chan->seg_v[i].phys = chan->seg_p +
1133 sizeof(*chan->seg_v) * i;
1134 list_add_tail(&chan->seg_v[i].node,
1135 &chan->free_seg_list);
1136 }
1137 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1138
1139 chan->seg_mv = dma_alloc_coherent(chan->dev,
1140 sizeof(*chan->seg_mv) *
1141 XILINX_DMA_NUM_DESCS,
1142 &chan->seg_p, GFP_KERNEL);
1143 if (!chan->seg_mv) {
1144 dev_err(chan->dev,
1145 "unable to allocate channel %d descriptors\n",
1146 chan->id);
1147 return -ENOMEM;
1148 }
1149 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1150 chan->seg_mv[i].hw.next_desc =
1151 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1152 ((i + 1) % XILINX_DMA_NUM_DESCS));
1153 chan->seg_mv[i].hw.next_desc_msb =
1154 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1155 ((i + 1) % XILINX_DMA_NUM_DESCS));
1156 chan->seg_mv[i].phys = chan->seg_p +
1157 sizeof(*chan->seg_mv) * i;
1158 list_add_tail(&chan->seg_mv[i].node,
1159 &chan->free_seg_list);
1160 }
1161 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1162 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1163 chan->dev,
1164 sizeof(struct xilinx_cdma_tx_segment),
1165 __alignof__(struct xilinx_cdma_tx_segment),
1166 0);
1167 } else {
1168 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1169 chan->dev,
1170 sizeof(struct xilinx_vdma_tx_segment),
1171 __alignof__(struct xilinx_vdma_tx_segment),
1172 0);
1173 }
1174
1175 if (!chan->desc_pool &&
1176 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1177 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1178 dev_err(chan->dev,
1179 "unable to allocate channel %d descriptor pool\n",
1180 chan->id);
1181 return -ENOMEM;
1182 }
1183
1184 dma_cookie_init(dchan);
1185
1186 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/*
 * For AXI DMA, resetting one channel resets the other channel as
 * well, so only (re)enable the interrupts here.
 */
1190 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1191 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1192 }
1193
1194 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1195 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1196 XILINX_CDMA_CR_SGMODE);
1197
1198 return 0;
1199 }
1200
/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has already been copied
 *
 * Return: Amount of data that has to be copied
 */
1209 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1210 int size, int done)
1211 {
1212 size_t copy;
1213
1214 copy = min_t(size_t, size - done,
1215 chan->xdev->max_buffer_len);
1216
1217 if ((copy + done < size) &&
1218 chan->xdev->common.copy_align) {
/*
 * If this is not the last descriptor, round the transfer length
 * down to the copy alignment so that the final descriptor still
 * performs an aligned, full copy.
 */
1223 copy = rounddown(copy,
1224 (1 << chan->xdev->common.copy_align));
1225 }
1226 return copy;
1227 }
1228
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
1237 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1238 dma_cookie_t cookie,
1239 struct dma_tx_state *txstate)
1240 {
1241 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1242 struct xilinx_dma_tx_descriptor *desc;
1243 enum dma_status ret;
1244 unsigned long flags;
1245 u32 residue = 0;
1246
1247 ret = dma_cookie_status(dchan, cookie, txstate);
1248 if (ret == DMA_COMPLETE || !txstate)
1249 return ret;
1250
1251 spin_lock_irqsave(&chan->lock, flags);
1252 if (!list_empty(&chan->active_list)) {
1253 desc = list_last_entry(&chan->active_list,
1254 struct xilinx_dma_tx_descriptor, node);
1255
1256
1257
1258
1259 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1260 residue = xilinx_dma_get_residue(chan, desc);
1261 }
1262 spin_unlock_irqrestore(&chan->lock, flags);
1263
1264 dma_set_residue(txstate, residue);
1265
1266 return ret;
1267 }
1268
1269
1270
1271
1272
1273
1274
1275 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1276 {
1277 u32 val;
1278
1279 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1280
1281
1282 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1283 val & XILINX_DMA_DMASR_HALTED, 0,
1284 XILINX_DMA_LOOP_COUNT);
1285 }
1286
1287
1288
1289
1290
1291
1292
1293 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1294 {
1295 u32 val;
1296
1297 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1298 val & XILINX_DMA_DMASR_IDLE, 0,
1299 XILINX_DMA_LOOP_COUNT);
1300 }
1301
/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
1306 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1307 {
1308 int err;
1309 u32 val;
1310
1311 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1312
1313
1314 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1315 !(val & XILINX_DMA_DMASR_HALTED), 0,
1316 XILINX_DMA_LOOP_COUNT);
1317
1318 if (err) {
1319 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1320 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1321
1322 chan->err = true;
1323 }
1324 }
1325
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
1330 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1331 {
1332 struct xilinx_vdma_config *config = &chan->config;
1333 struct xilinx_dma_tx_descriptor *desc;
1334 u32 reg, j;
1335 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1336 int i = 0;
1337
1338
1339 if (chan->err)
1340 return;
1341
1342 if (!chan->idle)
1343 return;
1344
1345 if (list_empty(&chan->pending_list))
1346 return;
1347
1348 desc = list_first_entry(&chan->pending_list,
1349 struct xilinx_dma_tx_descriptor, node);
1350
1351
1352 if (chan->has_vflip) {
1353 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1354 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1355 reg |= config->vflip_en;
1356 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1357 reg);
1358 }
1359
1360 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1361
1362 if (config->frm_cnt_en)
1363 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1364 else
1365 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1366
1367
1368 if (config->park)
1369 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1370 else
1371 reg |= XILINX_DMA_DMACR_CIRC_EN;
1372
1373 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1374
1375 j = chan->desc_submitcount;
1376 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1377 if (chan->direction == DMA_MEM_TO_DEV) {
1378 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1379 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1380 } else {
1381 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1382 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1383 }
1384 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1385
1386
1387 xilinx_dma_start(chan);
1388
1389 if (chan->err)
1390 return;
1391
1392
1393 if (chan->desc_submitcount < chan->num_frms)
1394 i = chan->desc_submitcount;
1395
1396 list_for_each_entry(segment, &desc->segments, node) {
1397 if (chan->ext_addr)
1398 vdma_desc_write_64(chan,
1399 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1400 segment->hw.buf_addr,
1401 segment->hw.buf_addr_msb);
1402 else
1403 vdma_desc_write(chan,
1404 XILINX_VDMA_REG_START_ADDRESS(i++),
1405 segment->hw.buf_addr);
1406
1407 last = segment;
1408 }
1409
1410 if (!last)
1411 return;
1412
1413
1414 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1415 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1416 last->hw.stride);
1417 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1418
1419 chan->desc_submitcount++;
1420 chan->desc_pendingcount--;
1421 list_move_tail(&desc->node, &chan->active_list);
1422 if (chan->desc_submitcount == chan->num_frms)
1423 chan->desc_submitcount = 0;
1424
1425 chan->idle = false;
1426 }
1427
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
1432 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1433 {
1434 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1435 struct xilinx_cdma_tx_segment *tail_segment;
1436 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1437
1438 if (chan->err)
1439 return;
1440
1441 if (!chan->idle)
1442 return;
1443
1444 if (list_empty(&chan->pending_list))
1445 return;
1446
1447 head_desc = list_first_entry(&chan->pending_list,
1448 struct xilinx_dma_tx_descriptor, node);
1449 tail_desc = list_last_entry(&chan->pending_list,
1450 struct xilinx_dma_tx_descriptor, node);
1451 tail_segment = list_last_entry(&tail_desc->segments,
1452 struct xilinx_cdma_tx_segment, node);
1453
1454 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1455 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1456 ctrl_reg |= chan->desc_pendingcount <<
1457 XILINX_DMA_CR_COALESCE_SHIFT;
1458 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1459 }
1460
1461 if (chan->has_sg) {
1462 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1463 XILINX_CDMA_CR_SGMODE);
1464
1465 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1466 XILINX_CDMA_CR_SGMODE);
1467
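/* Update current descriptor */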
1468 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1469 head_desc->async_tx.phys);
1470
1471
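/* Update tail ptr register which will start the transfer */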
1472 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1473 tail_segment->phys);
1474 } else {
1475
1476 struct xilinx_cdma_tx_segment *segment;
1477 struct xilinx_cdma_desc_hw *hw;
1478
1479 segment = list_first_entry(&head_desc->segments,
1480 struct xilinx_cdma_tx_segment,
1481 node);
1482
1483 hw = &segment->hw;
1484
1485 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1486 xilinx_prep_dma_addr_t(hw->src_addr));
1487 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1488 xilinx_prep_dma_addr_t(hw->dest_addr));
1489
1490
1491 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1492 hw->control & chan->xdev->max_buffer_len);
1493 }
1494
1495 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1496 chan->desc_pendingcount = 0;
1497 chan->idle = false;
1498 }
1499
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
1504 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1505 {
1506 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1507 struct xilinx_axidma_tx_segment *tail_segment;
1508 u32 reg;
1509
1510 if (chan->err)
1511 return;
1512
1513 if (list_empty(&chan->pending_list))
1514 return;
1515
1516 if (!chan->idle)
1517 return;
1518
1519 head_desc = list_first_entry(&chan->pending_list,
1520 struct xilinx_dma_tx_descriptor, node);
1521 tail_desc = list_last_entry(&chan->pending_list,
1522 struct xilinx_dma_tx_descriptor, node);
1523 tail_segment = list_last_entry(&tail_desc->segments,
1524 struct xilinx_axidma_tx_segment, node);
1525
1526 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1527
1528 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1529 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1530 reg |= chan->desc_pendingcount <<
1531 XILINX_DMA_CR_COALESCE_SHIFT;
1532 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1533 }
1534
1535 if (chan->has_sg)
1536 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1537 head_desc->async_tx.phys);
1538
1539 xilinx_dma_start(chan);
1540
1541 if (chan->err)
1542 return;
1543
1544
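/* Start the transfer by programming the tail descriptor/buffer */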
1545 if (chan->has_sg) {
1546 if (chan->cyclic)
1547 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1548 chan->cyclic_seg_v->phys);
1549 else
1550 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1551 tail_segment->phys);
1552 } else {
1553 struct xilinx_axidma_tx_segment *segment;
1554 struct xilinx_axidma_desc_hw *hw;
1555
1556 segment = list_first_entry(&head_desc->segments,
1557 struct xilinx_axidma_tx_segment,
1558 node);
1559 hw = &segment->hw;
1560
1561 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1562 xilinx_prep_dma_addr_t(hw->buf_addr));
1563
1564
1565 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1566 hw->control & chan->xdev->max_buffer_len);
1567 }
1568
1569 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1570 chan->desc_pendingcount = 0;
1571 chan->idle = false;
1572 }
1573
/**
 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
 * @chan: Driver specific channel struct pointer
 */
1578 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1579 {
1580 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1581 struct xilinx_aximcdma_tx_segment *tail_segment;
1582 u32 reg;
1583
1584
1585
1586
1587
1588
1589 if (chan->err)
1590 return;
1591
1592 if (!chan->idle)
1593 return;
1594
1595 if (list_empty(&chan->pending_list))
1596 return;
1597
1598 head_desc = list_first_entry(&chan->pending_list,
1599 struct xilinx_dma_tx_descriptor, node);
1600 tail_desc = list_last_entry(&chan->pending_list,
1601 struct xilinx_dma_tx_descriptor, node);
1602 tail_segment = list_last_entry(&tail_desc->segments,
1603 struct xilinx_aximcdma_tx_segment, node);
1604
1605 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1606
1607 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1608 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1609 reg |= chan->desc_pendingcount <<
1610 XILINX_MCDMA_COALESCE_SHIFT;
1611 }
1612
1613 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1614 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1615
1616
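/* Program current descriptor */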
1617 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1618 head_desc->async_tx.phys);
1619
1620
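/* Program the channel enable register */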
1621 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1622 reg |= BIT(chan->tdest);
1623 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1624
1625
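/* Start the fetch of BDs for the channel */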
1626 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1627 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1628 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1629
1630 xilinx_dma_start(chan);
1631
1632 if (chan->err)
1633 return;
1634
1635
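/* Start the transfer by programming the tail descriptor */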
1636 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1637 tail_segment->phys);
1638
1639 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1640 chan->desc_pendingcount = 0;
1641 chan->idle = false;
1642 }
1643
/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
1648 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1649 {
1650 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1651 unsigned long flags;
1652
1653 spin_lock_irqsave(&chan->lock, flags);
1654 chan->start_transfer(chan);
1655 spin_unlock_irqrestore(&chan->lock, flags);
1656 }
1657
1658
1659
1660
1661
1662
1663 static int xilinx_dma_device_config(struct dma_chan *dchan,
1664 struct dma_slave_config *config)
1665 {
1666 return 0;
1667 }
1668
1669
1670
1671
1672
1673
1674
1675 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1676 {
1677 struct xilinx_dma_tx_descriptor *desc, *next;
1678
1679
1680 if (list_empty(&chan->active_list))
1681 return;
1682
1683 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1684 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1685 XDMA_TYPE_VDMA)
1686 desc->residue = xilinx_dma_get_residue(chan, desc);
1687 else
1688 desc->residue = 0;
1689 desc->err = chan->err;
1690
1691 list_del(&desc->node);
1692 if (!desc->cyclic)
1693 dma_cookie_complete(&desc->async_tx);
1694 list_add_tail(&desc->node, &chan->done_list);
1695 }
1696 }
1697
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1704 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1705 {
1706 int err;
1707 u32 tmp;
1708
1709 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1710
1711
1712 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1713 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1714 XILINX_DMA_LOOP_COUNT);
1715
1716 if (err) {
1717 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1718 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1719 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1720 return -ETIMEDOUT;
1721 }
1722
1723 chan->err = false;
1724 chan->idle = true;
1725 chan->desc_pendingcount = 0;
1726 chan->desc_submitcount = 0;
1727
1728 return err;
1729 }
1730
1731
1732
1733
1734
1735
1736
1737 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1738 {
1739 int err;
1740
1741
1742 err = xilinx_dma_reset(chan);
1743 if (err)
1744 return err;
1745
1746
1747 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1748 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1749
1750 return 0;
1751 }
1752
/**
 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx MCDMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
1760 static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1761 {
1762 struct xilinx_dma_chan *chan = data;
1763 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1764
1765 if (chan->direction == DMA_DEV_TO_MEM)
1766 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1767 else
1768 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1769
1770
1771 chan_sermask = dma_ctrl_read(chan, ser_offset);
1772 chan_id = ffs(chan_sermask);
1773
1774 if (!chan_id)
1775 return IRQ_NONE;
1776
1777 if (chan->direction == DMA_DEV_TO_MEM)
1778 chan_offset = chan->xdev->dma_config->max_channels / 2;
1779
1780 chan_offset = chan_offset + (chan_id - 1);
1781 chan = chan->xdev->chan[chan_offset];
1782
1783 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1784 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1785 return IRQ_NONE;
1786
1787 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1788 status & XILINX_MCDMA_IRQ_ALL_MASK);
1789
1790 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1791 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1792 chan,
1793 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1794 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1795 (chan->tdest)),
1796 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1797 (chan->tdest)));
1798 chan->err = true;
1799 }
1800
1801 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
1802
1803
1804
1805
1806 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1807 }
1808
1809 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1810 spin_lock(&chan->lock);
1811 xilinx_dma_complete_descriptor(chan);
1812 chan->idle = true;
1813 chan->start_transfer(chan);
1814 spin_unlock(&chan->lock);
1815 }
1816
1817 tasklet_schedule(&chan->tasklet);
1818 return IRQ_HANDLED;
1819 }
1820
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
1828 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1829 {
1830 struct xilinx_dma_chan *chan = data;
1831 u32 status;
1832
1833
1834 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1835 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1836 return IRQ_NONE;
1837
1838 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1839 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1840
1841 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
/*
 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
 * error is recoverable, ignore it. Otherwise flag the error.
 *
 * Only recoverable errors can be cleared in the DMASR register,
 * so make sure not to write 1 to the other error bits.
 */
1849 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1850
1851 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1852 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1853
1854 if (!chan->flush_on_fsync ||
1855 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1856 dev_err(chan->dev,
1857 "Channel %p has errors %x, cdr %x tdr %x\n",
1858 chan, errors,
1859 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1860 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1861 chan->err = true;
1862 }
1863 }
1864
1865 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1866
1867
1868
1869
1870 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1871 }
1872
1873 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1874 spin_lock(&chan->lock);
1875 xilinx_dma_complete_descriptor(chan);
1876 chan->idle = true;
1877 chan->start_transfer(chan);
1878 spin_unlock(&chan->lock);
1879 }
1880
1881 tasklet_schedule(&chan->tasklet);
1882 return IRQ_HANDLED;
1883 }
1884
/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
1890 static void append_desc_queue(struct xilinx_dma_chan *chan,
1891 struct xilinx_dma_tx_descriptor *desc)
1892 {
1893 struct xilinx_vdma_tx_segment *tail_segment;
1894 struct xilinx_dma_tx_descriptor *tail_desc;
1895 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1896 struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
1897 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1898
1899 if (list_empty(&chan->pending_list))
1900 goto append;
1901
1902
1903
1904
1905
1906 tail_desc = list_last_entry(&chan->pending_list,
1907 struct xilinx_dma_tx_descriptor, node);
1908 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1909 tail_segment = list_last_entry(&tail_desc->segments,
1910 struct xilinx_vdma_tx_segment,
1911 node);
1912 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1913 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1914 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1915 struct xilinx_cdma_tx_segment,
1916 node);
1917 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1918 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1919 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1920 struct xilinx_axidma_tx_segment,
1921 node);
1922 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1923 } else {
1924 aximcdma_tail_segment =
1925 list_last_entry(&tail_desc->segments,
1926 struct xilinx_aximcdma_tx_segment,
1927 node);
1928 aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1929 }
1930
/*
 * Add the software descriptor and all children to the list
 * of pending transactions
 */
1935 append:
1936 list_add_tail(&desc->node, &chan->pending_list);
1937 chan->desc_pendingcount++;
1938
1939 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1940 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1941 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1942 chan->desc_pendingcount = chan->num_frms;
1943 }
1944 }
1945
/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
1952 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1953 {
1954 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1955 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1956 dma_cookie_t cookie;
1957 unsigned long flags;
1958 int err;
1959
1960 if (chan->cyclic) {
1961 xilinx_dma_free_tx_descriptor(chan, desc);
1962 return -EBUSY;
1963 }
1964
1965 if (chan->err) {
1966
1967
1968
1969
1970 err = xilinx_dma_chan_reset(chan);
1971 if (err < 0)
1972 return err;
1973 }
1974
1975 spin_lock_irqsave(&chan->lock, flags);
1976
1977 cookie = dma_cookie_assign(tx);
1978
1979
1980 append_desc_queue(chan, desc);
1981
1982 if (desc->cyclic)
1983 chan->cyclic = true;
1984
1985 chan->terminating = false;
1986
1987 spin_unlock_irqrestore(&chan->lock, flags);
1988
1989 return cookie;
1990 }
1991
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2001 static struct dma_async_tx_descriptor *
2002 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
2003 struct dma_interleaved_template *xt,
2004 unsigned long flags)
2005 {
2006 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2007 struct xilinx_dma_tx_descriptor *desc;
2008 struct xilinx_vdma_tx_segment *segment;
2009 struct xilinx_vdma_desc_hw *hw;
2010
2011 if (!is_slave_direction(xt->dir))
2012 return NULL;
2013
2014 if (!xt->numf || !xt->sgl[0].size)
2015 return NULL;
2016
2017 if (xt->frame_size != 1)
2018 return NULL;
2019
2020
2021 desc = xilinx_dma_alloc_tx_descriptor(chan);
2022 if (!desc)
2023 return NULL;
2024
2025 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2026 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2027 async_tx_ack(&desc->async_tx);
2028
2029
2030 segment = xilinx_vdma_alloc_tx_segment(chan);
2031 if (!segment)
2032 goto error;
2033
2034
2035 hw = &segment->hw;
2036 hw->vsize = xt->numf;
2037 hw->hsize = xt->sgl[0].size;
2038 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2039 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2040 hw->stride |= chan->config.frm_dly <<
2041 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
2042
2043 if (xt->dir != DMA_MEM_TO_DEV) {
2044 if (chan->ext_addr) {
2045 hw->buf_addr = lower_32_bits(xt->dst_start);
2046 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2047 } else {
2048 hw->buf_addr = xt->dst_start;
2049 }
2050 } else {
2051 if (chan->ext_addr) {
2052 hw->buf_addr = lower_32_bits(xt->src_start);
2053 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2054 } else {
2055 hw->buf_addr = xt->src_start;
2056 }
2057 }
2058
2059
2060 list_add_tail(&segment->node, &desc->segments);
2061
2062
2063 segment = list_first_entry(&desc->segments,
2064 struct xilinx_vdma_tx_segment, node);
2065 desc->async_tx.phys = segment->phys;
2066
2067 return &desc->async_tx;
2068
2069 error:
2070 xilinx_dma_free_tx_descriptor(chan, desc);
2071 return NULL;
2072 }
2073
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2084 static struct dma_async_tx_descriptor *
2085 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2086 dma_addr_t dma_src, size_t len, unsigned long flags)
2087 {
2088 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2089 struct xilinx_dma_tx_descriptor *desc;
2090 struct xilinx_cdma_tx_segment *segment;
2091 struct xilinx_cdma_desc_hw *hw;
2092
2093 if (!len || len > chan->xdev->max_buffer_len)
2094 return NULL;
2095
2096 desc = xilinx_dma_alloc_tx_descriptor(chan);
2097 if (!desc)
2098 return NULL;
2099
2100 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2101 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2102
2103
2104 segment = xilinx_cdma_alloc_tx_segment(chan);
2105 if (!segment)
2106 goto error;
2107
2108 hw = &segment->hw;
2109 hw->control = len;
2110 hw->src_addr = dma_src;
2111 hw->dest_addr = dma_dst;
2112 if (chan->ext_addr) {
2113 hw->src_addr_msb = upper_32_bits(dma_src);
2114 hw->dest_addr_msb = upper_32_bits(dma_dst);
2115 }
2116
2117
2118 list_add_tail(&segment->node, &desc->segments);
2119
2120 desc->async_tx.phys = segment->phys;
2121 hw->next_desc = segment->phys;
2122
2123 return &desc->async_tx;
2124
2125 error:
2126 xilinx_dma_free_tx_descriptor(chan, desc);
2127 return NULL;
2128 }
2129
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2141 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2142 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2143 enum dma_transfer_direction direction, unsigned long flags,
2144 void *context)
2145 {
2146 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2147 struct xilinx_dma_tx_descriptor *desc;
2148 struct xilinx_axidma_tx_segment *segment = NULL;
2149 u32 *app_w = (u32 *)context;
2150 struct scatterlist *sg;
2151 size_t copy;
2152 size_t sg_used;
2153 unsigned int i;
2154
2155 if (!is_slave_direction(direction))
2156 return NULL;
2157
2158
2159 desc = xilinx_dma_alloc_tx_descriptor(chan);
2160 if (!desc)
2161 return NULL;
2162
2163 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2164 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2165
2166
2167 for_each_sg(sgl, sg, sg_len, i) {
2168 sg_used = 0;
2169
2170
2171 while (sg_used < sg_dma_len(sg)) {
2172 struct xilinx_axidma_desc_hw *hw;
2173
2174
2175 segment = xilinx_axidma_alloc_tx_segment(chan);
2176 if (!segment)
2177 goto error;
2178
2179
2180
2181
2182
2183 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2184 sg_used);
2185 hw = &segment->hw;
2186
2187
2188 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2189 sg_used, 0);
2190
2191 hw->control = copy;
2192
2193 if (chan->direction == DMA_MEM_TO_DEV) {
2194 if (app_w)
2195 memcpy(hw->app, app_w, sizeof(u32) *
2196 XILINX_DMA_NUM_APP_WORDS);
2197 }
2198
2199 sg_used += copy;
2200
2201
2202
2203
2204
2205 list_add_tail(&segment->node, &desc->segments);
2206 }
2207 }
2208
2209 segment = list_first_entry(&desc->segments,
2210 struct xilinx_axidma_tx_segment, node);
2211 desc->async_tx.phys = segment->phys;
2212
2213
2214 if (chan->direction == DMA_MEM_TO_DEV) {
2215 segment->hw.control |= XILINX_DMA_BD_SOP;
2216 segment = list_last_entry(&desc->segments,
2217 struct xilinx_axidma_tx_segment,
2218 node);
2219 segment->hw.control |= XILINX_DMA_BD_EOP;
2220 }
2221
2222 return &desc->async_tx;
2223
2224 error:
2225 xilinx_dma_free_tx_descriptor(chan, desc);
2226 return NULL;
2227 }
2228
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2240 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2241 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2242 size_t period_len, enum dma_transfer_direction direction,
2243 unsigned long flags)
2244 {
2245 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2246 struct xilinx_dma_tx_descriptor *desc;
2247 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2248 size_t copy, sg_used;
2249 unsigned int num_periods;
2250 int i;
2251 u32 reg;
2252
2253 if (!period_len)
2254 return NULL;
2255
2256 num_periods = buf_len / period_len;
2257
2258 if (!num_periods)
2259 return NULL;
2260
2261 if (!is_slave_direction(direction))
2262 return NULL;
2263
2264
2265 desc = xilinx_dma_alloc_tx_descriptor(chan);
2266 if (!desc)
2267 return NULL;
2268
2269 chan->direction = direction;
2270 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2271 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2272
2273 for (i = 0; i < num_periods; ++i) {
2274 sg_used = 0;
2275
2276 while (sg_used < period_len) {
2277 struct xilinx_axidma_desc_hw *hw;
2278
2279
2280 segment = xilinx_axidma_alloc_tx_segment(chan);
2281 if (!segment)
2282 goto error;
2283
2284
2285
2286
2287
2288 copy = xilinx_dma_calc_copysize(chan, period_len,
2289 sg_used);
2290 hw = &segment->hw;
2291 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2292 period_len * i);
2293 hw->control = copy;
2294
2295 if (prev)
2296 prev->hw.next_desc = segment->phys;
2297
2298 prev = segment;
2299 sg_used += copy;
2300
2301
2302
2303
2304
2305 list_add_tail(&segment->node, &desc->segments);
2306 }
2307 }
2308
2309 head_segment = list_first_entry(&desc->segments,
2310 struct xilinx_axidma_tx_segment, node);
2311 desc->async_tx.phys = head_segment->phys;
2312
2313 desc->cyclic = true;
2314 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2315 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2316 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2317
2318 segment = list_last_entry(&desc->segments,
2319 struct xilinx_axidma_tx_segment,
2320 node);
2321 segment->hw.next_desc = (u32) head_segment->phys;
2322
2323
2324 if (direction == DMA_MEM_TO_DEV) {
2325 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2326 segment->hw.control |= XILINX_DMA_BD_EOP;
2327 }
2328
2329 return &desc->async_tx;
2330
2331 error:
2332 xilinx_dma_free_tx_descriptor(chan, desc);
2333 return NULL;
2334 }
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347 static struct dma_async_tx_descriptor *
2348 xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2349 unsigned int sg_len,
2350 enum dma_transfer_direction direction,
2351 unsigned long flags, void *context)
2352 {
2353 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2354 struct xilinx_dma_tx_descriptor *desc;
2355 struct xilinx_aximcdma_tx_segment *segment = NULL;
2356 u32 *app_w = (u32 *)context;
2357 struct scatterlist *sg;
2358 size_t copy;
2359 size_t sg_used;
2360 unsigned int i;
2361
2362 if (!is_slave_direction(direction))
2363 return NULL;
2364
2365
2366 desc = xilinx_dma_alloc_tx_descriptor(chan);
2367 if (!desc)
2368 return NULL;
2369
2370 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2371 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2372
2373
2374 for_each_sg(sgl, sg, sg_len, i) {
2375 sg_used = 0;
2376
2377 /* Loop until the entire scatterlist entry is used */
2378 while (sg_used < sg_dma_len(sg)) {
2379 struct xilinx_aximcdma_desc_hw *hw;
2380
2381 /* Get a free segment */
2382 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2383 if (!segment)
2384 goto error;
2385
2386 /*
2387 * Calculate the maximum number of bytes to transfer,
2388 * making sure it is less than the hw limit
2389 */
2390 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2391 chan->xdev->max_buffer_len);
2392 hw = &segment->hw;
2393
2394 /* Fill in the descriptor */
2395 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2396 sg_used);
2397 hw->control = copy;
2398
2399 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2400 memcpy(hw->app, app_w, sizeof(u32) *
2401 XILINX_DMA_NUM_APP_WORDS);
2402 }
2403
2404 sg_used += copy;
2405 /*
2406 * Insert the segment into the descriptor segments
2407 * list.
2408 */
2409 list_add_tail(&segment->node, &desc->segments);
2410 }
2411 }
2412
2413 segment = list_first_entry(&desc->segments,
2414 struct xilinx_aximcdma_tx_segment, node);
2415 desc->async_tx.phys = segment->phys;
2416
2417
2418 if (chan->direction == DMA_MEM_TO_DEV) {
2419 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2420 segment = list_last_entry(&desc->segments,
2421 struct xilinx_aximcdma_tx_segment,
2422 node);
2423 segment->hw.control |= XILINX_MCDMA_BD_EOP;
2424 }
2425
2426 return &desc->async_tx;
2427
2428 error:
2429 xilinx_dma_free_tx_descriptor(chan, desc);
2430
2431 return NULL;
2432 }
2433
2434 /**
2435 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2436 * @dchan: Driver specific DMA channel pointer
2437 *
2438 * Return: '0' always.
2439 */
2440 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2441 {
2442 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2443 u32 reg;
2444 int err;
2445
2446 if (!chan->cyclic) {
2447 err = chan->stop_transfer(chan);
2448 if (err) {
2449 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2450 chan, dma_ctrl_read(chan,
2451 XILINX_DMA_REG_DMASR));
2452 chan->err = true;
2453 }
2454 }
2455
2456 xilinx_dma_chan_reset(chan);
2457
2458 chan->terminating = true;
2459 xilinx_dma_free_descriptors(chan);
2460 chan->idle = true;
2461
2462 if (chan->cyclic) {
2463 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2464 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2465 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2466 chan->cyclic = false;
2467 }
2468
2469 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2470 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2471 XILINX_CDMA_CR_SGMODE);
2472
2473 return 0;
2474 }
2475
2476 static void xilinx_dma_synchronize(struct dma_chan *dchan)
2477 {
2478 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2479
2480 tasklet_kill(&chan->tasklet);
2481 }
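/*
 * Hypothetical consumer-side sketch (not part of this driver): the two
 * callbacks above back the generic dmaengine termination helpers, so a
 * client that wants every descriptor dropped and all completion callbacks
 * finished before reusing its buffers would typically call:
 *
 *	dmaengine_terminate_async(chan);	-> xilinx_dma_terminate_all()
 *	dmaengine_synchronize(chan);		-> xilinx_dma_synchronize()
 *
 * or simply dmaengine_terminate_sync(chan), which combines both steps.
 */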
2482
2483 /**
2484 * xilinx_vdma_channel_set_config - Configure VDMA channel
2485 * Run-time configuration for Axi VDMA, supports:
2486 * . halt the channel
2487 * . configure interrupt coalescing and inter-packet delay threshold
2488 * . start/stop parking
2489 * . enable genlock
2490 *
2491 * @dchan: DMA channel
2492 * @cfg: VDMA device configuration pointer
2493 *
2494 * Return: '0' on success and failure value on error
2495 */
2496 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2497 struct xilinx_vdma_config *cfg)
2498 {
2499 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2500 u32 dmacr;
2501
2502 if (cfg->reset)
2503 return xilinx_dma_chan_reset(chan);
2504
2505 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2506
2507 chan->config.frm_dly = cfg->frm_dly;
2508 chan->config.park = cfg->park;
2509
2510 /* genlock settings */
2511 chan->config.gen_lock = cfg->gen_lock;
2512 chan->config.master = cfg->master;
2513
2514 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2515 if (cfg->gen_lock && chan->genlock) {
2516 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2517 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2518 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2519 }
2520
2521 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2522 chan->config.vflip_en = cfg->vflip_en;
2523
2524 if (cfg->park)
2525 chan->config.park_frm = cfg->park_frm;
2526 else
2527 chan->config.park_frm = -1;
2528
2529 chan->config.coalesc = cfg->coalesc;
2530 chan->config.delay = cfg->delay;
2531
2532 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2533 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2534 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2535 chan->config.coalesc = cfg->coalesc;
2536 }
2537
2538 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2539 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2540 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2541 chan->config.delay = cfg->delay;
2542 }
2543
2544 /* FSync Source selection */
2545 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2546 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2547
2548 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2549
2550 return 0;
2551 }
2552 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
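/*
 * Hypothetical caller-side sketch (not part of this driver): a video
 * client holding a VDMA channel can apply the run-time parameters handled
 * above through this exported helper; the field values are illustrative
 * only.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.gen_lock = 1,
 *		.master = 0,
 *		.coalesc = 1,
 *		.delay = 0,
 *		.park = 0,
 *	};
 *	int err;
 *
 *	err = xilinx_vdma_channel_set_config(dchan, &cfg);
 *	if (err)
 *		return err;
 */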
2553
2554 /* -----------------------------------------------------------------------------
2555 * Probe and remove
2556 */
2557
2558 /**
2559 * xilinx_dma_chan_remove - Per Channel remove function
2560 * @chan: Driver specific DMA channel
2561 */
2562 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2563 {
2564 /* Disable all interrupts */
2565 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2566 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2567
2568 if (chan->irq > 0)
2569 free_irq(chan->irq, chan);
2570
2571 tasklet_kill(&chan->tasklet);
2572
2573 list_del(&chan->common.device_node);
2574 }
2575
2576 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2577 struct clk **tx_clk, struct clk **rx_clk,
2578 struct clk **sg_clk, struct clk **tmp_clk)
2579 {
2580 int err;
2581
2582 *tmp_clk = NULL;
2583
2584 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2585 if (IS_ERR(*axi_clk))
2586 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2587
2588 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2589 if (IS_ERR(*tx_clk))
2590 *tx_clk = NULL;
2591
2592 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2593 if (IS_ERR(*rx_clk))
2594 *rx_clk = NULL;
2595
2596 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2597 if (IS_ERR(*sg_clk))
2598 *sg_clk = NULL;
2599
2600 err = clk_prepare_enable(*axi_clk);
2601 if (err) {
2602 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2603 return err;
2604 }
2605
2606 err = clk_prepare_enable(*tx_clk);
2607 if (err) {
2608 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2609 goto err_disable_axiclk;
2610 }
2611
2612 err = clk_prepare_enable(*rx_clk);
2613 if (err) {
2614 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2615 goto err_disable_txclk;
2616 }
2617
2618 err = clk_prepare_enable(*sg_clk);
2619 if (err) {
2620 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2621 goto err_disable_rxclk;
2622 }
2623
2624 return 0;
2625
2626 err_disable_rxclk:
2627 clk_disable_unprepare(*rx_clk);
2628 err_disable_txclk:
2629 clk_disable_unprepare(*tx_clk);
2630 err_disable_axiclk:
2631 clk_disable_unprepare(*axi_clk);
2632
2633 return err;
2634 }
2635
2636 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2637 struct clk **dev_clk, struct clk **tmp_clk,
2638 struct clk **tmp1_clk, struct clk **tmp2_clk)
2639 {
2640 int err;
2641
2642 *tmp_clk = NULL;
2643 *tmp1_clk = NULL;
2644 *tmp2_clk = NULL;
2645
2646 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2647 if (IS_ERR(*axi_clk))
2648 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2649
2650 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2651 if (IS_ERR(*dev_clk))
2652 return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
2653
2654 err = clk_prepare_enable(*axi_clk);
2655 if (err) {
2656 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2657 return err;
2658 }
2659
2660 err = clk_prepare_enable(*dev_clk);
2661 if (err) {
2662 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2663 goto err_disable_axiclk;
2664 }
2665
2666 return 0;
2667
2668 err_disable_axiclk:
2669 clk_disable_unprepare(*axi_clk);
2670
2671 return err;
2672 }
2673
2674 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2675 struct clk **tx_clk, struct clk **txs_clk,
2676 struct clk **rx_clk, struct clk **rxs_clk)
2677 {
2678 int err;
2679
2680 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2681 if (IS_ERR(*axi_clk))
2682 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2683
2684 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2685 if (IS_ERR(*tx_clk))
2686 *tx_clk = NULL;
2687
2688 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2689 if (IS_ERR(*txs_clk))
2690 *txs_clk = NULL;
2691
2692 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2693 if (IS_ERR(*rx_clk))
2694 *rx_clk = NULL;
2695
2696 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2697 if (IS_ERR(*rxs_clk))
2698 *rxs_clk = NULL;
2699
2700 err = clk_prepare_enable(*axi_clk);
2701 if (err) {
2702 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2703 err);
2704 return err;
2705 }
2706
2707 err = clk_prepare_enable(*tx_clk);
2708 if (err) {
2709 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2710 goto err_disable_axiclk;
2711 }
2712
2713 err = clk_prepare_enable(*txs_clk);
2714 if (err) {
2715 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2716 goto err_disable_txclk;
2717 }
2718
2719 err = clk_prepare_enable(*rx_clk);
2720 if (err) {
2721 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2722 goto err_disable_txsclk;
2723 }
2724
2725 err = clk_prepare_enable(*rxs_clk);
2726 if (err) {
2727 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2728 goto err_disable_rxclk;
2729 }
2730
2731 return 0;
2732
2733 err_disable_rxclk:
2734 clk_disable_unprepare(*rx_clk);
2735 err_disable_txsclk:
2736 clk_disable_unprepare(*txs_clk);
2737 err_disable_txclk:
2738 clk_disable_unprepare(*tx_clk);
2739 err_disable_axiclk:
2740 clk_disable_unprepare(*axi_clk);
2741
2742 return err;
2743 }
2744
2745 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2746 {
2747 clk_disable_unprepare(xdev->rxs_clk);
2748 clk_disable_unprepare(xdev->rx_clk);
2749 clk_disable_unprepare(xdev->txs_clk);
2750 clk_disable_unprepare(xdev->tx_clk);
2751 clk_disable_unprepare(xdev->axi_clk);
2752 }
2753
2754 /**
2755 * xilinx_dma_chan_probe - Per Channel Probing
2756 * Reads the channel features from the device tree entry and
2757 * initializes the channel-specific handling routines.
2758 *
2759 * @xdev: Driver specific device structure
2760 * @node: Device node
2761 *
2762 * Return: '0' on success and failure value on error
2763 */
2764 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2765 struct device_node *node)
2766 {
2767 struct xilinx_dma_chan *chan;
2768 bool has_dre = false;
2769 u32 value, width;
2770 int err;
2771
2772 /* Allocate and initialize the channel structure */
2773 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2774 if (!chan)
2775 return -ENOMEM;
2776
2777 chan->dev = xdev->dev;
2778 chan->xdev = xdev;
2779 chan->desc_pendingcount = 0x0;
2780 chan->ext_addr = xdev->ext_addr;
2781 /* This variable ensures that descriptors are not
2782 * submitted while the DMA engine is in progress; it avoids
2783 * polling a status register bit in the driver hot path to
2784 * learn the DMA state.
2785 */
2786 chan->idle = true;
2787
2788 spin_lock_init(&chan->lock);
2789 INIT_LIST_HEAD(&chan->pending_list);
2790 INIT_LIST_HEAD(&chan->done_list);
2791 INIT_LIST_HEAD(&chan->active_list);
2792 INIT_LIST_HEAD(&chan->free_seg_list);
2793
2794 /* Retrieve the channel properties from the device tree */
2795 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2796
2797 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2798
2799 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2800 if (err) {
2801 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2802 return err;
2803 }
2804 width = value >> 3; /* convert bits to bytes */
2805
2806 /* If data width is greater than 8 bytes, DRE is not in hw */
2807 if (width > 8)
2808 has_dre = false;
2809
2810 if (!has_dre)
2811 xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2812
2813 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2814 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2815 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2816 chan->direction = DMA_MEM_TO_DEV;
2817 chan->id = xdev->mm2s_chan_id++;
2818 chan->tdest = chan->id;
2819
2820 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2821 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2822 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2823 chan->config.park = 1;
2824
2825 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2826 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2827 chan->flush_on_fsync = true;
2828 }
2829 } else if (of_device_is_compatible(node,
2830 "xlnx,axi-vdma-s2mm-channel") ||
2831 of_device_is_compatible(node,
2832 "xlnx,axi-dma-s2mm-channel")) {
2833 chan->direction = DMA_DEV_TO_MEM;
2834 chan->id = xdev->s2mm_chan_id++;
2835 chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2836 chan->has_vflip = of_property_read_bool(node,
2837 "xlnx,enable-vert-flip");
2838 if (chan->has_vflip) {
2839 chan->config.vflip_en = dma_read(chan,
2840 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2841 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2842 }
2843
2844 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2845 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2846 else
2847 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2848
2849 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2850 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2851 chan->config.park = 1;
2852
2853 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2854 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2855 chan->flush_on_fsync = true;
2856 }
2857 } else {
2858 dev_err(xdev->dev, "Invalid channel compatible node\n");
2859 return -EINVAL;
2860 }
2861
2862 /* Request the interrupt */
2863 chan->irq = of_irq_get(node, chan->tdest);
2864 if (chan->irq < 0)
2865 return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
2866 err = request_irq(chan->irq, xdev->dma_config->irq_handler,
2867 IRQF_SHARED, "xilinx-dma-controller", chan);
2868 if (err) {
2869 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2870 return err;
2871 }
2872
2873 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2874 chan->start_transfer = xilinx_dma_start_transfer;
2875 chan->stop_transfer = xilinx_dma_stop_transfer;
2876 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2877 chan->start_transfer = xilinx_mcdma_start_transfer;
2878 chan->stop_transfer = xilinx_dma_stop_transfer;
2879 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2880 chan->start_transfer = xilinx_cdma_start_transfer;
2881 chan->stop_transfer = xilinx_cdma_stop_transfer;
2882 } else {
2883 chan->start_transfer = xilinx_vdma_start_transfer;
2884 chan->stop_transfer = xilinx_dma_stop_transfer;
2885 }
2886
2887 /* Check if SG is enabled (only for AXIDMA, AXIMCDMA and CDMA) */
2888 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2889 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
2890 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2891 XILINX_DMA_DMASR_SG_MASK)
2892 chan->has_sg = true;
2893 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2894 chan->has_sg ? "enabled" : "disabled");
2895 }
2896
2897 /* Initialize the tasklet */
2898 tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
2899
2900 /*
2901 * Initialize the DMA channel and add it into the DMA engine
2902 * channels list.
2903 */
2904 chan->common.device = &xdev->common;
2905
2906 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2907 xdev->chan[chan->id] = chan;
2908
2909 /* Reset the channel */
2910 err = xilinx_dma_chan_reset(chan);
2911 if (err < 0) {
2912 dev_err(xdev->dev, "Reset channel failed\n");
2913 return err;
2914 }
2915
2916 return 0;
2917 }
2918
2919 /**
2920 * xilinx_dma_child_probe - Per child node probe
2921 * Reads the dma-channels property of the child node and probes
2922 * each channel that the node describes.
2923 *
2924 * @xdev: Driver specific device structure
2925 * @node: Device node
2926 *
2927 * Return: '0' on success and failure value on error
2928 */
2929 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2930 struct device_node *node)
2931 {
2932 int ret, i;
2933 u32 nr_channels = 1;
2934
2935 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2936 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
2937 dev_warn(xdev->dev, "missing dma-channels property\n");
2938
2939 for (i = 0; i < nr_channels; i++) {
2940 ret = xilinx_dma_chan_probe(xdev, node);
2941 if (ret)
2942 return ret;
2943 }
2944
2945 return 0;
2946 }
2947
2948 /**
2949 * of_dma_xilinx_xlate - Translation function
2950 * @dma_spec: Pointer to DMA specifier as found in the device tree
2951 * @ofdma: Pointer to DMA controller data
2952 *
2953 * Return: DMA channel pointer on success or NULL on error
2954 */
2955 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2956 struct of_dma *ofdma)
2957 {
2958 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2959 int chan_id = dma_spec->args[0];
2960
2961 if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
2962 return NULL;
2963
2964 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2965 }
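/*
 * Hypothetical consumer-side sketch (not part of this driver): with
 * #dma-cells = <1> the single specifier cell is the channel index, so a
 * client node carrying e.g. dmas = <&axi_dma 0>; dma-names = "tx"; is
 * routed through this xlate when its driver calls:
 *
 *	chan = dma_request_chan(&client_pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */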
2966
2967 static const struct xilinx_dma_config axidma_config = {
2968 .dmatype = XDMA_TYPE_AXIDMA,
2969 .clk_init = axidma_clk_init,
2970 .irq_handler = xilinx_dma_irq_handler,
2971 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2972 };
2973
2974 static const struct xilinx_dma_config aximcdma_config = {
2975 .dmatype = XDMA_TYPE_AXIMCDMA,
2976 .clk_init = axidma_clk_init,
2977 .irq_handler = xilinx_mcdma_irq_handler,
2978 .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
2979 };
2980 static const struct xilinx_dma_config axicdma_config = {
2981 .dmatype = XDMA_TYPE_CDMA,
2982 .clk_init = axicdma_clk_init,
2983 .irq_handler = xilinx_dma_irq_handler,
2984 .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
2985 };
2986
2987 static const struct xilinx_dma_config axivdma_config = {
2988 .dmatype = XDMA_TYPE_VDMA,
2989 .clk_init = axivdma_clk_init,
2990 .irq_handler = xilinx_dma_irq_handler,
2991 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2992 };
2993
2994 static const struct of_device_id xilinx_dma_of_ids[] = {
2995 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2996 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2997 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2998 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
2999 {}
3000 };
3001 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
3002
3003 /**
3004 * xilinx_dma_probe - Driver probe function
3005 * @pdev: Pointer to the platform_device structure
3006 *
3007 * Return: '0' on success and failure value on error
3008 */
3009 static int xilinx_dma_probe(struct platform_device *pdev)
3010 {
3011 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
3012 struct clk **, struct clk **, struct clk **)
3013 = axivdma_clk_init;
3014 struct device_node *node = pdev->dev.of_node;
3015 struct xilinx_dma_device *xdev;
3016 struct device_node *child, *np = pdev->dev.of_node;
3017 u32 num_frames, addr_width, len_width;
3018 int i, err;
3019
3020 /* Allocate and initialize the DMA engine structure */
3021 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
3022 if (!xdev)
3023 return -ENOMEM;
3024
3025 xdev->dev = &pdev->dev;
3026 if (np) {
3027 const struct of_device_id *match;
3028
3029 match = of_match_node(xilinx_dma_of_ids, np);
3030 if (match && match->data) {
3031 xdev->dma_config = match->data;
3032 clk_init = xdev->dma_config->clk_init;
3033 }
3034 }
3035
3036 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
3037 &xdev->rx_clk, &xdev->rxs_clk);
3038 if (err)
3039 return err;
3040
3041 /* Request and map I/O memory */
3042 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3043 if (IS_ERR(xdev->regs)) {
3044 err = PTR_ERR(xdev->regs);
3045 goto disable_clks;
3046 }
3047
3048 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
3049 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3050
3051 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3052 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3053 if (!of_property_read_u32(node, "xlnx,sg-length-width",
3054 &len_width)) {
3055 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3056 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3057 dev_warn(xdev->dev,
3058 "invalid xlnx,sg-length-width property value. Using default width\n");
3059 } else {
3060 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3061 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3062 xdev->max_buffer_len =
3063 GENMASK(len_width - 1, 0);
3064 }
3065 }
3066 }
3067
3068 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3069 err = of_property_read_u32(node, "xlnx,num-fstores",
3070 &num_frames);
3071 if (err < 0) {
3072 dev_err(xdev->dev,
3073 "missing xlnx,num-fstores property\n");
3074 goto disable_clks;
3075 }
3076
3077 err = of_property_read_u32(node, "xlnx,flush-fsync",
3078 &xdev->flush_on_fsync);
3079 if (err < 0)
3080 dev_warn(xdev->dev,
3081 "missing xlnx,flush-fsync property\n");
3082 }
3083
3084 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
3085 if (err < 0)
3086 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
3087
3088 if (addr_width > 32)
3089 xdev->ext_addr = true;
3090 else
3091 xdev->ext_addr = false;
3092
3093 /* Set the dma mask bits */
3094 err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
3095 if (err < 0) {
3096 dev_err(xdev->dev, "DMA mask error %d\n", err);
3097 goto disable_clks;
3098 }
3099
3100 /* Initialize the DMA engine */
3101 xdev->common.dev = &pdev->dev;
3102
3103 INIT_LIST_HEAD(&xdev->common.channels);
3104 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
3105 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3106 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3107 }
3108
3109 xdev->common.device_alloc_chan_resources =
3110 xilinx_dma_alloc_chan_resources;
3111 xdev->common.device_free_chan_resources =
3112 xilinx_dma_free_chan_resources;
3113 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3114 xdev->common.device_synchronize = xilinx_dma_synchronize;
3115 xdev->common.device_tx_status = xilinx_dma_tx_status;
3116 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3117 xdev->common.device_config = xilinx_dma_device_config;
3118 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3119 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3120 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3121 xdev->common.device_prep_dma_cyclic =
3122 xilinx_dma_prep_dma_cyclic;
3123
3124 xdev->common.residue_granularity =
3125 DMA_RESIDUE_GRANULARITY_SEGMENT;
3126 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3127 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3128 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
3129
3130 xdev->common.residue_granularity =
3131 DMA_RESIDUE_GRANULARITY_SEGMENT;
3132 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3133 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
3134 } else {
3135 xdev->common.device_prep_interleaved_dma =
3136 xilinx_vdma_dma_prep_interleaved;
3137 }
3138
3139 platform_set_drvdata(pdev, xdev);
3140
3141 /* Initialize the channels */
3142 for_each_child_of_node(node, child) {
3143 err = xilinx_dma_child_probe(xdev, child);
3144 if (err < 0)
3145 goto error;
3146 }
3147
3148 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3149 for (i = 0; i < xdev->dma_config->max_channels; i++)
3150 if (xdev->chan[i])
3151 xdev->chan[i]->num_frms = num_frames;
3152 }
3153
3154 /* Register the DMA engine with the core */
3155 err = dma_async_device_register(&xdev->common);
3156 if (err) {
3157 dev_err(xdev->dev, "failed to register the dma device\n");
3158 goto error;
3159 }
3160
3161 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3162 xdev);
3163 if (err < 0) {
3164 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3165 dma_async_device_unregister(&xdev->common);
3166 goto error;
3167 }
3168
3169 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3170 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3171 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3172 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3173 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3174 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
3175 else
3176 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
3177
3178 return 0;
3179
3180 error:
3181 for (i = 0; i < xdev->dma_config->max_channels; i++)
3182 if (xdev->chan[i])
3183 xilinx_dma_chan_remove(xdev->chan[i]);
3184 disable_clks:
3185 xdma_disable_allclks(xdev);
3186
3187 return err;
3188 }
3189
3190 /**
3191 * xilinx_dma_remove - Driver remove function
3192 * @pdev: Pointer to the platform_device structure
3193 *
3194 * Return: Always '0'
3195 */
3196 static int xilinx_dma_remove(struct platform_device *pdev)
3197 {
3198 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
3199 int i;
3200
3201 of_dma_controller_free(pdev->dev.of_node);
3202
3203 dma_async_device_unregister(&xdev->common);
3204
3205 for (i = 0; i < xdev->dma_config->max_channels; i++)
3206 if (xdev->chan[i])
3207 xilinx_dma_chan_remove(xdev->chan[i]);
3208
3209 xdma_disable_allclks(xdev);
3210
3211 return 0;
3212 }
3213
3214 static struct platform_driver xilinx_vdma_driver = {
3215 .driver = {
3216 .name = "xilinx-vdma",
3217 .of_match_table = xilinx_dma_of_ids,
3218 },
3219 .probe = xilinx_dma_probe,
3220 .remove = xilinx_dma_remove,
3221 };
3222
3223 module_platform_driver(xilinx_vdma_driver);
3224
3225 MODULE_AUTHOR("Xilinx, Inc.");
3226 MODULE_DESCRIPTION("Xilinx VDMA driver");
3227 MODULE_LICENSE("GPL v2");