// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 */

0010 #include <linux/bitfield.h>
0011 #include <linux/bits.h>
0012 #include <linux/clk.h>
0013 #include <linux/debugfs.h>
0014 #include <linux/delay.h>
0015 #include <linux/dma/xilinx_dpdma.h>
0016 #include <linux/dmaengine.h>
0017 #include <linux/dmapool.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/module.h>
0020 #include <linux/of.h>
0021 #include <linux/of_dma.h>
0022 #include <linux/platform_device.h>
0023 #include <linux/sched.h>
0024 #include <linux/slab.h>
0025 #include <linux/spinlock.h>
0026 #include <linux/wait.h>
0027
0028 #include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>
0029
0030 #include "../dmaengine.h"
0031 #include "../virt-dma.h"
0032
0033
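/* DPDMA registers */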
0034 #define XILINX_DPDMA_ERR_CTRL 0x000
0035 #define XILINX_DPDMA_ISR 0x004
0036 #define XILINX_DPDMA_IMR 0x008
0037 #define XILINX_DPDMA_IEN 0x00c
0038 #define XILINX_DPDMA_IDS 0x010
0039 #define XILINX_DPDMA_INTR_DESC_DONE(n) BIT((n) + 0)
0040 #define XILINX_DPDMA_INTR_DESC_DONE_MASK GENMASK(5, 0)
0041 #define XILINX_DPDMA_INTR_NO_OSTAND(n) BIT((n) + 6)
0042 #define XILINX_DPDMA_INTR_NO_OSTAND_MASK GENMASK(11, 6)
0043 #define XILINX_DPDMA_INTR_AXI_ERR(n) BIT((n) + 12)
0044 #define XILINX_DPDMA_INTR_AXI_ERR_MASK GENMASK(17, 12)
0045 #define XILINX_DPDMA_INTR_DESC_ERR(n) BIT((n) + 16)
0046 #define XILINX_DPDMA_INTR_DESC_ERR_MASK GENMASK(23, 18)
0047 #define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
0048 #define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
0049 #define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
0050 #define XILINX_DPDMA_INTR_VSYNC BIT(27)
0051 #define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x00041000
0052 #define XILINX_DPDMA_INTR_CHAN_ERR 0x00fff000
0053 #define XILINX_DPDMA_INTR_GLOBAL_ERR 0x07000000
0054 #define XILINX_DPDMA_INTR_ERR_ALL 0x07fff000
0055 #define XILINX_DPDMA_INTR_CHAN_MASK 0x00041041
0056 #define XILINX_DPDMA_INTR_GLOBAL_MASK 0x0f000000
0057 #define XILINX_DPDMA_INTR_ALL 0x0fffffff
0058 #define XILINX_DPDMA_EISR 0x014
0059 #define XILINX_DPDMA_EIMR 0x018
0060 #define XILINX_DPDMA_EIEN 0x01c
0061 #define XILINX_DPDMA_EIDS 0x020
0062 #define XILINX_DPDMA_EINTR_INV_APB BIT(0)
0063 #define XILINX_DPDMA_EINTR_RD_AXI_ERR(n) BIT((n) + 1)
0064 #define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK GENMASK(6, 1)
0065 #define XILINX_DPDMA_EINTR_PRE_ERR(n) BIT((n) + 7)
0066 #define XILINX_DPDMA_EINTR_PRE_ERR_MASK GENMASK(12, 7)
0067 #define XILINX_DPDMA_EINTR_CRC_ERR(n) BIT((n) + 13)
0068 #define XILINX_DPDMA_EINTR_CRC_ERR_MASK GENMASK(18, 13)
0069 #define XILINX_DPDMA_EINTR_WR_AXI_ERR(n) BIT((n) + 19)
0070 #define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK GENMASK(24, 19)
0071 #define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n) BIT((n) + 25)
0072 #define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(31)
0074 #define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x02082082
0075 #define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
0076 #define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
0077 #define XILINX_DPDMA_EINTR_ALL 0xffffffff
0078 #define XILINX_DPDMA_CNTL 0x100
0079 #define XILINX_DPDMA_GBL 0x104
0080 #define XILINX_DPDMA_GBL_TRIG_MASK(n) ((n) << 0)
0081 #define XILINX_DPDMA_GBL_RETRIG_MASK(n) ((n) << 6)
0082 #define XILINX_DPDMA_ALC0_CNTL 0x108
0083 #define XILINX_DPDMA_ALC0_STATUS 0x10c
0084 #define XILINX_DPDMA_ALC0_MAX 0x110
0085 #define XILINX_DPDMA_ALC0_MIN 0x114
0086 #define XILINX_DPDMA_ALC0_ACC 0x118
0087 #define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
0088 #define XILINX_DPDMA_ALC1_CNTL 0x120
0089 #define XILINX_DPDMA_ALC1_STATUS 0x124
0090 #define XILINX_DPDMA_ALC1_MAX 0x128
0091 #define XILINX_DPDMA_ALC1_MIN 0x12c
0092 #define XILINX_DPDMA_ALC1_ACC 0x130
0093 #define XILINX_DPDMA_ALC1_ACC_TRAN 0x134
0094
0095
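/* Channel register */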
0096 #define XILINX_DPDMA_CH_BASE 0x200
0097 #define XILINX_DPDMA_CH_OFFSET 0x100
0098 #define XILINX_DPDMA_CH_DESC_START_ADDRE 0x000
0099 #define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK GENMASK(15, 0)
0100 #define XILINX_DPDMA_CH_DESC_START_ADDR 0x004
0101 #define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x008
0102 #define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0x00c
0103 #define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x010
0104 #define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x014
0105 #define XILINX_DPDMA_CH_CNTL 0x018
0106 #define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
0107 #define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
0108 #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK GENMASK(5, 2)
0109 #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK GENMASK(9, 6)
0110 #define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK GENMASK(13, 10)
0111 #define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
0112 #define XILINX_DPDMA_CH_STATUS 0x01c
0113 #define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK GENMASK(24, 21)
0114 #define XILINX_DPDMA_CH_VDO 0x020
0115 #define XILINX_DPDMA_CH_PYLD_SZ 0x024
0116 #define XILINX_DPDMA_CH_DESC_ID 0x028
0117 #define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
0118
0119
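/* DPDMA descriptor fields */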
0120 #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
0121 #define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
0122 #define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
0123 #define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
0124 #define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
0125 #define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
0126 #define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
0127 #define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
0128 #define XILINX_DPDMA_DESC_ID_MASK GENMASK(15, 0)
0129 #define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK GENMASK(17, 0)
0130 #define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK GENMASK(31, 18)
0131 #define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK GENMASK(15, 0)
0132 #define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK GENMASK(31, 16)
0133
0134 #define XILINX_DPDMA_ALIGN_BYTES 256
0135 #define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128
0136
0137 #define XILINX_DPDMA_NUM_CHAN 6
0138
0139 struct xilinx_dpdma_chan;
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
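/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of timestamp
 * @timestamp_msb: MSB of timestamp
 * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
 * @next_desc: next descriptor address (32 LSB)
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: upper 16 bits of the 48-bit addresses (src_addr2 and src_addr3)
 * @addr_ext_45: upper 16 bits of the 48-bit addresses (src_addr4 and src_addr5)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */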
0160 struct xilinx_dpdma_hw_desc {
0161 u32 control;
0162 u32 desc_id;
0163 u32 xfer_size;
0164 u32 hsize_stride;
0165 u32 timestamp_lsb;
0166 u32 timestamp_msb;
0167 u32 addr_ext;
0168 u32 next_desc;
0169 u32 src_addr;
0170 u32 addr_ext_23;
0171 u32 addr_ext_45;
0172 u32 src_addr2;
0173 u32 src_addr3;
0174 u32 src_addr4;
0175 u32 src_addr5;
0176 u32 crc;
0177 } __aligned(XILINX_DPDMA_ALIGN_BYTES);
0178
0179
0180
0181
0182
0183
0184
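/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */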
0185 struct xilinx_dpdma_sw_desc {
0186 struct xilinx_dpdma_hw_desc hw;
0187 struct list_head node;
0188 dma_addr_t dma_addr;
0189 };
0190
0191
0192
0193
0194
0195
0196
0197
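/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DPDMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */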
0198 struct xilinx_dpdma_tx_desc {
0199 struct virt_dma_desc vdesc;
0200 struct xilinx_dpdma_chan *chan;
0201 struct list_head descriptors;
0202 bool error;
0203 };
0204
0205 #define to_dpdma_tx_desc(_desc) \
0206 container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
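/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan; must be taken before
 *        @vchan.lock if both are to be held
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc: descriptor management
 * @desc.pending: descriptor submitted to hardware, awaiting the next VSYNC
 * @desc.active: descriptor being processed by the hardware
 * @xdev: DPDMA device
 */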
0225 struct xilinx_dpdma_chan {
0226 struct virt_dma_chan vchan;
0227 void __iomem *reg;
0228 unsigned int id;
0229
0230 wait_queue_head_t wait_to_stop;
0231 bool running;
0232 bool first_frame;
0233 bool video_group;
0234
0235 spinlock_t lock;
0236 struct dma_pool *desc_pool;
0237 struct tasklet_struct err_task;
0238
0239 struct {
0240 struct xilinx_dpdma_tx_desc *pending;
0241 struct xilinx_dpdma_tx_desc *active;
0242 } desc;
0243
0244 struct xilinx_dpdma_device *xdev;
0245 };
0246
0247 #define to_xilinx_chan(_chan) \
0248 container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
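/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64-bit extended addressing
 */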
0260 struct xilinx_dpdma_device {
0261 struct dma_device common;
0262 void __iomem *reg;
0263 struct device *dev;
0264 int irq;
0265
0266 struct clk *axi_clk;
0267 struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
0268
0269 bool ext_addr;
0270 };
0271
0272
0273
0274
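/* -----------------------------------------------------------------------------
 * DebugFS
 */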
0275 #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
0276 #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
0277
0278
0279 enum xilinx_dpdma_testcases {
0280 DPDMA_TC_INTR_DONE,
0281 DPDMA_TC_NONE
0282 };
0283
0284 struct xilinx_dpdma_debugfs {
0285 enum xilinx_dpdma_testcases testcase;
0286 u16 xilinx_dpdma_irq_done_count;
0287 unsigned int chan_id;
0288 };
0289
0290 static struct xilinx_dpdma_debugfs dpdma_debugfs;
0291 struct xilinx_dpdma_debugfs_request {
0292 const char *name;
0293 enum xilinx_dpdma_testcases tc;
0294 ssize_t (*read)(char *buf);
0295 int (*write)(char *args);
0296 };
0297
0298 static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
0299 {
0300 if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id)
0301 dpdma_debugfs.xilinx_dpdma_irq_done_count++;
0302 }
0303
0304 static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
0305 {
0306 size_t out_str_len;
0307
0308 dpdma_debugfs.testcase = DPDMA_TC_NONE;
0309
0310 out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
0311 out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
0312 out_str_len);
0313 snprintf(buf, out_str_len, "%d",
0314 dpdma_debugfs.xilinx_dpdma_irq_done_count);
0315
0316 return 0;
0317 }
0318
0319 static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
0320 {
0321 char *arg;
0322 int ret;
0323 u32 id;
0324
0325 arg = strsep(&args, " ");
0326 if (!arg || strncasecmp(arg, "start", 5))
0327 return -EINVAL;
0328
0329 arg = strsep(&args, " ");
0330 if (!arg)
0331 return -EINVAL;
0332
0333 ret = kstrtou32(arg, 0, &id);
0334 if (ret < 0)
0335 return ret;
0336
0337 if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
0338 return -EINVAL;
0339
0340 dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
0341 dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
0342 dpdma_debugfs.chan_id = id;
0343
0344 return 0;
0345 }
0346
0347
0348 static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
0349 {
0350 .name = "DESCRIPTOR_DONE_INTR",
0351 .tc = DPDMA_TC_INTR_DONE,
0352 .read = xilinx_dpdma_debugfs_desc_done_irq_read,
0353 .write = xilinx_dpdma_debugfs_desc_done_irq_write,
0354 },
0355 };
0356
0357 static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
0358 size_t size, loff_t *pos)
0359 {
0360 enum xilinx_dpdma_testcases testcase;
0361 char *kern_buff;
0362 int ret = 0;
0363
0364 if (*pos != 0 || size <= 0)
0365 return -EINVAL;
0366
0367 kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
0368 if (!kern_buff) {
0369 dpdma_debugfs.testcase = DPDMA_TC_NONE;
0370 return -ENOMEM;
0371 }
0372
0373 testcase = READ_ONCE(dpdma_debugfs.testcase);
0374 if (testcase != DPDMA_TC_NONE) {
0375 ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
0376 if (ret < 0)
0377 goto done;
0378 } else {
0379 strscpy(kern_buff, "No testcase executed",
0380 XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
0381 }
0382
0383 size = min(size, strlen(kern_buff));
0384 if (copy_to_user(buf, kern_buff, size))
0385 ret = -EFAULT;
0386
0387 done:
0388 kfree(kern_buff);
0389 if (ret)
0390 return ret;
0391
0392 *pos = size + 1;
0393 return size;
0394 }
0395
0396 static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
0397 const char __user *buf, size_t size,
0398 loff_t *pos)
0399 {
0400 char *kern_buff, *kern_buff_start;
0401 char *testcase;
0402 unsigned int i;
0403 int ret;
0404
0405 if (*pos != 0 || size <= 0)
0406 return -EINVAL;
0407
0408
0409 if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
0410 return -EBUSY;
0411
0412 kern_buff = kzalloc(size, GFP_KERNEL);
0413 if (!kern_buff)
0414 return -ENOMEM;
0415 kern_buff_start = kern_buff;
0416
0417 ret = strncpy_from_user(kern_buff, buf, size);
0418 if (ret < 0)
0419 goto done;
0420
0421
0422 testcase = strsep(&kern_buff, " ");
0423
0424 for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
0425 if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
0426 break;
0427 }
0428
0429 if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
0430 ret = -EINVAL;
0431 goto done;
0432 }
0433
0434 ret = dpdma_debugfs_reqs[i].write(kern_buff);
0435 if (ret < 0)
0436 goto done;
0437
0438 ret = size;
0439
0440 done:
0441 kfree(kern_buff_start);
0442 return ret;
0443 }
0444
0445 static const struct file_operations fops_xilinx_dpdma_dbgfs = {
0446 .owner = THIS_MODULE,
0447 .read = xilinx_dpdma_debugfs_read,
0448 .write = xilinx_dpdma_debugfs_write,
0449 };
0450
0451 static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
0452 {
0453 struct dentry *dent;
0454
0455 dpdma_debugfs.testcase = DPDMA_TC_NONE;
0456
0457 dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
0458 NULL, &fops_xilinx_dpdma_dbgfs);
0459 if (IS_ERR(dent))
0460 dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
0461 }
0462
0463
0464
0465
0466
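/* -----------------------------------------------------------------------------
 * I/O Accessors
 */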
0467 static inline u32 dpdma_read(void __iomem *base, u32 offset)
0468 {
0469 return ioread32(base + offset);
0470 }
0471
0472 static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
0473 {
0474 iowrite32(val, base + offset);
0475 }
0476
0477 static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
0478 {
0479 dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
0480 }
0481
0482 static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
0483 {
0484 dpdma_write(base, offset, dpdma_read(base, offset) | set);
0485 }
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
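/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor corresponding to
 * @sw_desc to the @num_src_addr addresses in the @dma_addr array. Also set
 * the address of the next descriptor in the @prev descriptor, if @prev isn't
 * NULL.
 */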
0504 static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
0505 struct xilinx_dpdma_sw_desc *sw_desc,
0506 struct xilinx_dpdma_sw_desc *prev,
0507 dma_addr_t dma_addr[],
0508 unsigned int num_src_addr)
0509 {
0510 struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
0511 unsigned int i;
0512
0513 hw_desc->src_addr = lower_32_bits(dma_addr[0]);
0514 if (xdev->ext_addr)
0515 hw_desc->addr_ext |=
0516 FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
0517 upper_32_bits(dma_addr[0]));
0518
0519 for (i = 1; i < num_src_addr; i++) {
0520 u32 *addr = &hw_desc->src_addr2;
0521
0522 addr[i - 1] = lower_32_bits(dma_addr[i]);
0523
0524 if (xdev->ext_addr) {
0525 u32 *addr_ext = &hw_desc->addr_ext_23;
0526 u32 addr_msb;
0527
0528 addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
0529 addr_msb <<= 16 * ((i - 1) % 2);
0530 addr_ext[(i - 1) / 2] |= addr_msb;
0531 }
0532 }
0533
0534 if (!prev)
0535 return;
0536
0537 prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
0538 if (xdev->ext_addr)
0539 prev->hw.addr_ext |=
0540 FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
0541 upper_32_bits(sw_desc->dma_addr));
0542 }
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552 static struct xilinx_dpdma_sw_desc *
0553 xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
0554 {
0555 struct xilinx_dpdma_sw_desc *sw_desc;
0556 dma_addr_t dma_addr;
0557
0558 sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
0559 if (!sw_desc)
0560 return NULL;
0561
0562 sw_desc->dma_addr = dma_addr;
0563
0564 return sw_desc;
0565 }
0566
0567
0568
0569
0570
0571
0572
0573
0574 static void
0575 xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
0576 struct xilinx_dpdma_sw_desc *sw_desc)
0577 {
0578 dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
0579 }
0580
0581
0582
0583
0584
0585
0586
0587
0588 static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
0589 struct xilinx_dpdma_tx_desc *tx_desc)
0590 {
0591 struct xilinx_dpdma_sw_desc *sw_desc;
0592 struct device *dev = chan->xdev->dev;
0593 unsigned int i = 0;
0594
0595 dev_dbg(dev, "------- TX descriptor dump start -------\n");
0596 dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
0597
0598 list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
0599 struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
0600
0601 dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
0602 dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
0603 dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
0604 dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
0605 dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
0606 dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
0607 dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
0608 dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
0609 dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
0610 dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
0611 dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
0612 dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
0613 dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
0614 dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
0615 dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
0616 dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
0617 dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
0618 dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
0619 }
0620
0621 dev_dbg(dev, "------- TX descriptor dump end -------\n");
0622 }
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632 static struct xilinx_dpdma_tx_desc *
0633 xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
0634 {
0635 struct xilinx_dpdma_tx_desc *tx_desc;
0636
0637 tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
0638 if (!tx_desc)
0639 return NULL;
0640
0641 INIT_LIST_HEAD(&tx_desc->descriptors);
0642 tx_desc->chan = chan;
0643 tx_desc->error = false;
0644
0645 return tx_desc;
0646 }
0647
0648
0649
0650
0651
0652
0653
0654 static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
0655 {
0656 struct xilinx_dpdma_sw_desc *sw_desc, *next;
0657 struct xilinx_dpdma_tx_desc *desc;
0658
0659 if (!vdesc)
0660 return;
0661
0662 desc = to_dpdma_tx_desc(vdesc);
0663
0664 list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
0665 list_del(&sw_desc->node);
0666 xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
0667 }
0668
0669 kfree(desc);
0670 }
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683 static struct xilinx_dpdma_tx_desc *
0684 xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
0685 struct dma_interleaved_template *xt)
0686 {
0687 struct xilinx_dpdma_tx_desc *tx_desc;
0688 struct xilinx_dpdma_sw_desc *sw_desc;
0689 struct xilinx_dpdma_hw_desc *hw_desc;
0690 size_t hsize = xt->sgl[0].size;
0691 size_t stride = hsize + xt->sgl[0].icg;
0692
0693 if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
0694 dev_err(chan->xdev->dev,
0695 "chan%u: buffer should be aligned at %d B\n",
0696 chan->id, XILINX_DPDMA_ALIGN_BYTES);
0697 return NULL;
0698 }
0699
0700 tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
0701 if (!tx_desc)
0702 return NULL;
0703
0704 sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
0705 if (!sw_desc) {
0706 xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
0707 return NULL;
0708 }
0709
0710 xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
0711 &xt->src_start, 1);
0712
0713 hw_desc = &sw_desc->hw;
0714 hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
0715 hw_desc->xfer_size = hsize * xt->numf;
0716 hw_desc->hsize_stride =
0717 FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
0718 FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
0719 stride / 16);
0720 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
0721 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
0722 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
0723 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
0724
0725 list_add_tail(&sw_desc->node, &tx_desc->descriptors);
0726
0727 return tx_desc;
0728 }
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740 static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
0741 {
0742 u32 reg;
0743
0744 reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
0745 | XILINX_DPDMA_INTR_GLOBAL_MASK;
0746 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
0747 reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
0748 | XILINX_DPDMA_INTR_GLOBAL_ERR;
0749 dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
0750
0751 reg = XILINX_DPDMA_CH_CNTL_ENABLE
0752 | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
0753 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
0754 | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
0755 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
0756 | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
0757 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
0758 dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
0759 }
0760
0761
0762
0763
0764
0765
0766
0767 static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
0768 {
0769 u32 reg;
0770
0771 reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
0772 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
0773 reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
0774 dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
0775
0776 dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
0777 }
0778
0779
0780
0781
0782
0783
0784
0785 static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
0786 {
0787 dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
0788 }
0789
0790
0791
0792
0793
0794
0795
0796 static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
0797 {
0798 dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
0799 }
0800
0801 static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
0802 {
0803 struct xilinx_dpdma_device *xdev = chan->xdev;
0804 u32 channels = 0;
0805 unsigned int i;
0806
0807 for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
0808 if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
0809 return 0;
0810
0811 if (xdev->chan[i]->video_group)
0812 channels |= BIT(i);
0813 }
0814
0815 return channels;
0816 }
0817
0818
0819
0820
0821
0822
0823
0824
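/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next issued descriptor, if any, to the hardware. If the channel
 * is stopped, enable and start it first. Must be called with @chan->lock
 * held.
 */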
0825 static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
0826 {
0827 struct xilinx_dpdma_device *xdev = chan->xdev;
0828 struct xilinx_dpdma_sw_desc *sw_desc;
0829 struct xilinx_dpdma_tx_desc *desc;
0830 struct virt_dma_desc *vdesc;
0831 u32 reg, channels;
0832 bool first_frame;
0833
0834 lockdep_assert_held(&chan->lock);
0835
0836 if (chan->desc.pending)
0837 return;
0838
0839 if (!chan->running) {
0840 xilinx_dpdma_chan_unpause(chan);
0841 xilinx_dpdma_chan_enable(chan);
0842 chan->first_frame = true;
0843 chan->running = true;
0844 }
0845
0846 vdesc = vchan_next_desc(&chan->vchan);
0847 if (!vdesc)
0848 return;
0849
0850 desc = to_dpdma_tx_desc(vdesc);
0851 chan->desc.pending = desc;
0852 list_del(&desc->vdesc.node);
0853
0854
0855
0856
0857
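	/*
	 * Assign the cookie to descriptors in this transaction. Only 16 bits
	 * of the cookie are used, as the hardware descriptor ID field is 16
	 * bits wide.
	 */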
0858 list_for_each_entry(sw_desc, &desc->descriptors, node)
0859 sw_desc->hw.desc_id = desc->vdesc.tx.cookie
0860 & XILINX_DPDMA_CH_DESC_ID_MASK;
0861
0862 sw_desc = list_first_entry(&desc->descriptors,
0863 struct xilinx_dpdma_sw_desc, node);
0864 dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
0865 lower_32_bits(sw_desc->dma_addr));
0866 if (xdev->ext_addr)
0867 dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
0868 FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
0869 upper_32_bits(sw_desc->dma_addr)));
0870
0871 first_frame = chan->first_frame;
0872 chan->first_frame = false;
0873
0874 if (chan->video_group) {
0875 channels = xilinx_dpdma_chan_video_group_ready(chan);
0876
0877
0878
0879
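		/*
		 * Trigger the transfer only when all channels of the video
		 * group are ready. Otherwise wait for the remaining channels
		 * to be queued before (re)triggering.
		 */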
0880 if (!channels)
0881 return;
0882 } else {
0883 channels = BIT(chan->id);
0884 }
0885
0886 if (first_frame)
0887 reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
0888 else
0889 reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
0890
0891 dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
0892 }
0893
0894
0895
0896
0897
0898
0899
0900
0901
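/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Return: Number of outstanding transactions from the channel status
 * register.
 */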
0902 static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
0903 {
0904 return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
0905 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
0906 }
0907
0908
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921
0922 static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
0923 {
0924 u32 cnt;
0925
0926 cnt = xilinx_dpdma_chan_ostand(chan);
0927 if (cnt) {
0928 dev_dbg(chan->xdev->dev,
0929 "chan%u: %d outstanding transactions\n",
0930 chan->id, cnt);
0931 return -EWOULDBLOCK;
0932 }
0933
0934
0935 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
0936 XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
0937 wake_up(&chan->wait_to_stop);
0938
0939 return 0;
0940 }
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
0951
0952 static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
0953 {
0954 int ret;
0955
0956
0957 ret = wait_event_interruptible_timeout(chan->wait_to_stop,
0958 !xilinx_dpdma_chan_ostand(chan),
0959 msecs_to_jiffies(50));
0960 if (ret > 0) {
0961 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
0962 XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
0963 return 0;
0964 }
0965
0966 dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
0967 chan->id, xilinx_dpdma_chan_ostand(chan));
0968
0969 if (ret == 0)
0970 return -ETIMEDOUT;
0971
0972 return ret;
0973 }
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984
0985 static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
0986 {
0987 u32 cnt, loop = 50000;
0988
0989
0990 do {
0991 cnt = xilinx_dpdma_chan_ostand(chan);
0992 udelay(1);
0993 } while (loop-- > 0 && cnt);
0994
0995 if (loop) {
0996 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
0997 XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
0998 return 0;
0999 }
1000
1001 dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
1002 chan->id, xilinx_dpdma_chan_ostand(chan));
1003
1004 return -ETIMEDOUT;
1005 }
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
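/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of any
 * outstanding transaction and then disabling the channel.
 *
 * Return: 0 on success, or an error code if the channel failed to stop.
 */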
1016 static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
1017 {
1018 unsigned long flags;
1019 int ret;
1020
1021 ret = xilinx_dpdma_chan_wait_no_ostand(chan);
1022 if (ret)
1023 return ret;
1024
1025 spin_lock_irqsave(&chan->lock, flags);
1026 xilinx_dpdma_chan_disable(chan);
1027 chan->running = false;
1028 spin_unlock_irqrestore(&chan->lock, flags);
1029
1030 return 0;
1031 }
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
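/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor. As the transfers are
 * cyclic, completion is signalled through the descriptor's cyclic callback.
 */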
1042 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
1043 {
1044 struct xilinx_dpdma_tx_desc *active;
1045 unsigned long flags;
1046
1047 spin_lock_irqsave(&chan->lock, flags);
1048
1049 xilinx_dpdma_debugfs_desc_done_irq(chan);
1050
1051 active = chan->desc.active;
1052 if (active)
1053 vchan_cyclic_callback(&active->vdesc);
1054 else
1055 dev_warn(chan->xdev->dev,
1056 "chan%u: DONE IRQ with no active descriptor!\n",
1057 chan->id);
1058
1059 spin_unlock_irqrestore(&chan->lock, flags);
1060 }
1061
1062
1063
1064
1065
1066
1067
1068
1069
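/**
 * xilinx_dpdma_chan_vsync_irq - Handle the VSYNC interrupt
 * @chan: DPDMA channel
 *
 * If the pending descriptor has been picked up by the hardware (its
 * descriptor ID matches the ID reported by the channel), complete the
 * previously active descriptor, promote the pending descriptor to active and
 * queue the next transfer. Otherwise retry at the next frame.
 */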
1070 static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
1071 {
1072 struct xilinx_dpdma_tx_desc *pending;
1073 struct xilinx_dpdma_sw_desc *sw_desc;
1074 unsigned long flags;
1075 u32 desc_id;
1076
1077 spin_lock_irqsave(&chan->lock, flags);
1078
1079 pending = chan->desc.pending;
1080 if (!chan->running || !pending)
1081 goto out;
1082
1083 desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
1084 & XILINX_DPDMA_CH_DESC_ID_MASK;
1085
1086
1087 sw_desc = list_first_entry(&pending->descriptors,
1088 struct xilinx_dpdma_sw_desc, node);
1089 if (sw_desc->hw.desc_id != desc_id) {
1090 dev_dbg(chan->xdev->dev,
1091 "chan%u: vsync race lost (%u != %u), retrying\n",
1092 chan->id, sw_desc->hw.desc_id, desc_id);
1093 goto out;
1094 }
1095
1096
1097
1098
1099
	spin_lock(&chan->vchan.lock);
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);
1106
1107 out:
1108 spin_unlock_irqrestore(&chan->lock, flags);
1109 }
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119 static bool
1120 xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
1121 {
1122 if (!chan)
1123 return false;
1124
1125 if (chan->running &&
1126 ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
1127 (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
1128 return true;
1129
1130 return false;
1131 }
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142 static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
1143 {
1144 struct xilinx_dpdma_device *xdev = chan->xdev;
1145 struct xilinx_dpdma_tx_desc *active;
1146 unsigned long flags;
1147
1148 spin_lock_irqsave(&chan->lock, flags);
1149
1150 dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
1151 chan->id,
1152 dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
1153 dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
1154 dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
1155 chan->id,
1156 dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
1157 dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
1158
1159 xilinx_dpdma_chan_disable(chan);
1160 chan->running = false;
1161
1162 if (!chan->desc.active)
1163 goto out_unlock;
1164
1165 active = chan->desc.active;
1166 chan->desc.active = NULL;
1167
1168 xilinx_dpdma_chan_dump_tx_desc(chan, active);
1169
1170 if (active->error)
1171 dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
1172 chan->id);
1173
1174
1175 if (!chan->desc.pending &&
1176 list_empty(&chan->vchan.desc_issued)) {
1177 active->error = true;
1178 list_add_tail(&active->vdesc.node,
1179 &chan->vchan.desc_issued);
1180 } else {
1181 xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
1182 }
1183
1184 out_unlock:
1185 spin_unlock_irqrestore(&chan->lock, flags);
1186 }
1187
1188
1189
1190
1191
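/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */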
1192 static struct dma_async_tx_descriptor *
1193 xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
1194 struct dma_interleaved_template *xt,
1195 unsigned long flags)
1196 {
1197 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1198 struct xilinx_dpdma_tx_desc *desc;
1199
1200 if (xt->dir != DMA_MEM_TO_DEV)
1201 return NULL;
1202
1203 if (!xt->numf || !xt->sgl[0].size)
1204 return NULL;
1205
1206 if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
1207 return NULL;
1208
1209 desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
1210 if (!desc)
1211 return NULL;
1212
1213 vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);
1214
1215 return &desc->vdesc.tx;
1216 }
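/*
 * Usage sketch (illustrative only, not part of this driver): a display client
 * such as the ZynqMP DisplayPort driver would typically configure and start a
 * channel along these lines. The names dchan, frame_addr, height, line_bytes
 * and stride are hypothetical placeholders; frame_addr must be aligned to
 * XILINX_DPDMA_ALIGN_BYTES, and the transfer must be submitted with both
 * DMA_PREP_REPEAT and DMA_PREP_LOAD_EOT, as enforced above.
 *
 *	struct xilinx_dpdma_peripheral_config pconfig = { .video_group = false };
 *	struct dma_slave_config config = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.peripheral_config = &pconfig,
 *		.peripheral_size = sizeof(pconfig),
 *	};
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(dchan, &config);
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = frame_addr;
 *	xt->src_sgl = true;
 *	xt->frame_size = 1;
 *	xt->numf = height;
 *	xt->sgl[0].size = line_bytes;
 *	xt->sgl[0].icg = stride - line_bytes;
 *
 *	tx = dmaengine_prep_interleaved_dma(dchan, xt,
 *					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(dchan);
 *	kfree(xt);
 */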
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226 static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
1227 {
1228 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1229 size_t align = __alignof__(struct xilinx_dpdma_sw_desc);
1230
1231 chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
1232 chan->xdev->dev,
1233 sizeof(struct xilinx_dpdma_sw_desc),
1234 align, 0);
1235 if (!chan->desc_pool) {
1236 dev_err(chan->xdev->dev,
1237 "chan%u: failed to allocate a descriptor pool\n",
1238 chan->id);
1239 return -ENOMEM;
1240 }
1241
1242 return 0;
1243 }
1244
1245
1246
1247
1248
1249
1250
1251
1252 static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
1253 {
1254 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1255
1256 vchan_free_chan_resources(&chan->vchan);
1257
1258 dma_pool_destroy(chan->desc_pool);
1259 chan->desc_pool = NULL;
1260 }
1261
static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}
1272
1273 static int xilinx_dpdma_config(struct dma_chan *dchan,
1274 struct dma_slave_config *config)
1275 {
1276 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1277 struct xilinx_dpdma_peripheral_config *pconfig;
1278 unsigned long flags;
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
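	/*
	 * The destination is hardwired to the DisplayPort controller, so none
	 * of the standard dma_slave_config addressing fields are used. Only
	 * the vendor-specific peripheral_config, which selects video group
	 * mode, is relevant here.
	 */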
1292 pconfig = config->peripheral_config;
1293 if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
1294 return -EINVAL;
1295
1296 spin_lock_irqsave(&chan->lock, flags);
1297 if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
1298 chan->video_group = pconfig->video_group;
1299 spin_unlock_irqrestore(&chan->lock, flags);
1300
1301 return 0;
1302 }
1303
1304 static int xilinx_dpdma_pause(struct dma_chan *dchan)
1305 {
1306 xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
1307
1308 return 0;
1309 }
1310
1311 static int xilinx_dpdma_resume(struct dma_chan *dchan)
1312 {
1313 xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
1314
1315 return 0;
1316 }
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333 static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
1334 {
1335 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1336 struct xilinx_dpdma_device *xdev = chan->xdev;
1337 LIST_HEAD(descriptors);
1338 unsigned long flags;
1339 unsigned int i;
1340
1341
1342 if (chan->video_group) {
1343 for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
1344 if (xdev->chan[i]->video_group &&
1345 xdev->chan[i]->running) {
1346 xilinx_dpdma_chan_pause(xdev->chan[i]);
1347 xdev->chan[i]->video_group = false;
1348 }
1349 }
1350 } else {
1351 xilinx_dpdma_chan_pause(chan);
1352 }
1353
1354
1355 spin_lock_irqsave(&chan->vchan.lock, flags);
1356 vchan_get_all_descriptors(&chan->vchan, &descriptors);
1357 spin_unlock_irqrestore(&chan->vchan.lock, flags);
1358
1359 vchan_dma_desc_free_list(&chan->vchan, &descriptors);
1360
1361 return 0;
1362 }
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377 static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
1378 {
1379 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1380 unsigned long flags;
1381
1382 xilinx_dpdma_chan_stop(chan);
1383
1384 spin_lock_irqsave(&chan->vchan.lock, flags);
1385 if (chan->desc.pending) {
1386 vchan_terminate_vdesc(&chan->desc.pending->vdesc);
1387 chan->desc.pending = NULL;
1388 }
1389 if (chan->desc.active) {
1390 vchan_terminate_vdesc(&chan->desc.active->vdesc);
1391 chan->desc.active = NULL;
1392 }
1393 spin_unlock_irqrestore(&chan->vchan.lock, flags);
1394
1395 vchan_synchronize(&chan->vchan);
1396 }
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409 static bool xilinx_dpdma_err(u32 isr, u32 eisr)
1410 {
1411 if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
1412 eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
1413 return true;
1414
1415 return false;
1416 }
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428 static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
1429 u32 isr, u32 eisr)
1430 {
1431 bool err = xilinx_dpdma_err(isr, eisr);
1432 unsigned int i;
1433
1434 dev_dbg_ratelimited(xdev->dev,
1435 "error irq: isr = 0x%08x, eisr = 0x%08x\n",
1436 isr, eisr);
1437
1438
1439 dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
1440 isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
1441 dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
1442 eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
1443
1444 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1445 if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
1446 tasklet_schedule(&xdev->chan[i]->err_task);
1447 }
1448
1449
1450
1451
1452
1453
1454
1455 static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
1456 {
1457 dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
1458 dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
1459 }
1460
1461
1462
1463
1464
1465
1466
1467 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
1468 {
1469 dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
1470 dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
1471 }
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481 static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
1482 {
1483 struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
1484 struct xilinx_dpdma_device *xdev = chan->xdev;
1485 unsigned long flags;
1486
1487
1488 xilinx_dpdma_chan_poll_no_ostand(chan);
1489
1490 xilinx_dpdma_chan_handle_err(chan);
1491
1492 dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
1493 XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
1494 dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
1495 XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
1496
	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
1500 }
1501
1502 static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
1503 {
1504 struct xilinx_dpdma_device *xdev = data;
1505 unsigned long mask;
1506 unsigned int i;
1507 u32 status;
1508 u32 error;
1509
1510 status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
1511 error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
1512 if (!status && !error)
1513 return IRQ_NONE;
1514
1515 dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
1516 dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
1517
1518 if (status & XILINX_DPDMA_INTR_VSYNC) {
1519
1520
1521
1522
1523 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
1524 struct xilinx_dpdma_chan *chan = xdev->chan[i];
1525
1526 if (chan)
1527 xilinx_dpdma_chan_vsync_irq(chan);
1528 }
1529 }
1530
1531 mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
1532 if (mask) {
1533 for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
1534 xilinx_dpdma_chan_done_irq(xdev->chan[i]);
1535 }
1536
1537 mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
1538 if (mask) {
1539 for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
1540 xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
1541 }
1542
1543 mask = status & XILINX_DPDMA_INTR_ERR_ALL;
1544 if (mask || error)
1545 xilinx_dpdma_handle_err_irq(xdev, mask, error);
1546
1547 return IRQ_HANDLED;
1548 }
1549
1550
1551
1552
1553
1554 static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
1555 unsigned int chan_id)
1556 {
1557 struct xilinx_dpdma_chan *chan;
1558
1559 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
1560 if (!chan)
1561 return -ENOMEM;
1562
1563 chan->id = chan_id;
1564 chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
1565 + XILINX_DPDMA_CH_OFFSET * chan->id;
1566 chan->running = false;
1567 chan->xdev = xdev;
1568
1569 spin_lock_init(&chan->lock);
1570 init_waitqueue_head(&chan->wait_to_stop);
1571
1572 tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);
1573
1574 chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
1575 vchan_init(&chan->vchan, &xdev->common);
1576
1577 xdev->chan[chan->id] = chan;
1578
1579 return 0;
1580 }
1581
1582 static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
1583 {
1584 if (!chan)
1585 return;
1586
1587 tasklet_kill(&chan->err_task);
1588 list_del(&chan->vchan.chan.device_node);
1589 }
1590
1591 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1592 struct of_dma *ofdma)
1593 {
1594 struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
1595 u32 chan_id = dma_spec->args[0];
1596
1597 if (chan_id >= ARRAY_SIZE(xdev->chan))
1598 return NULL;
1599
1600 if (!xdev->chan[chan_id])
1601 return NULL;
1602
1603 return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
1604 }
1605
1606 static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
1607 {
1608 unsigned int i;
1609 void __iomem *reg;
1610
1611
1612 xilinx_dpdma_disable_irq(xdev);
1613
1614
1615 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
1616 reg = xdev->reg + XILINX_DPDMA_CH_BASE
1617 + XILINX_DPDMA_CH_OFFSET * i;
1618 dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
1619 }
1620
1621
1622 dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
1623 dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
1624 }
1625
1626 static int xilinx_dpdma_probe(struct platform_device *pdev)
1627 {
1628 struct xilinx_dpdma_device *xdev;
1629 struct dma_device *ddev;
1630 unsigned int i;
1631 int ret;
1632
1633 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1634 if (!xdev)
1635 return -ENOMEM;
1636
1637 xdev->dev = &pdev->dev;
1638 xdev->ext_addr = sizeof(dma_addr_t) > 4;
1639
1640 INIT_LIST_HEAD(&xdev->common.channels);
1641
1642 platform_set_drvdata(pdev, xdev);
1643
1644 xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
1645 if (IS_ERR(xdev->axi_clk))
1646 return PTR_ERR(xdev->axi_clk);
1647
1648 xdev->reg = devm_platform_ioremap_resource(pdev, 0);
1649 if (IS_ERR(xdev->reg))
1650 return PTR_ERR(xdev->reg);
1651
1652 dpdma_hw_init(xdev);
1653
1654 xdev->irq = platform_get_irq(pdev, 0);
1655 if (xdev->irq < 0)
1656 return xdev->irq;
1657
1658 ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
1659 dev_name(xdev->dev), xdev);
1660 if (ret) {
1661 dev_err(xdev->dev, "failed to request IRQ\n");
1662 return ret;
1663 }
1664
1665 ddev = &xdev->common;
1666 ddev->dev = &pdev->dev;
1667
1668 dma_cap_set(DMA_SLAVE, ddev->cap_mask);
1669 dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
1670 dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
1671 dma_cap_set(DMA_REPEAT, ddev->cap_mask);
1672 dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
1673 ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
1674
1675 ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
1676 ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
1677 ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
1678
1679 ddev->device_tx_status = dma_cookie_status;
1680 ddev->device_issue_pending = xilinx_dpdma_issue_pending;
1681 ddev->device_config = xilinx_dpdma_config;
1682 ddev->device_pause = xilinx_dpdma_pause;
1683 ddev->device_resume = xilinx_dpdma_resume;
1684 ddev->device_terminate_all = xilinx_dpdma_terminate_all;
1685 ddev->device_synchronize = xilinx_dpdma_synchronize;
1686 ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
1687 ddev->directions = BIT(DMA_MEM_TO_DEV);
1688 ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1689
1690 for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
1691 ret = xilinx_dpdma_chan_init(xdev, i);
1692 if (ret < 0) {
1693 dev_err(xdev->dev, "failed to initialize channel %u\n",
1694 i);
1695 goto error;
1696 }
1697 }
1698
1699 ret = clk_prepare_enable(xdev->axi_clk);
1700 if (ret) {
1701 dev_err(xdev->dev, "failed to enable the axi clock\n");
1702 goto error;
1703 }
1704
1705 ret = dma_async_device_register(ddev);
1706 if (ret) {
1707 dev_err(xdev->dev, "failed to register the dma device\n");
1708 goto error_dma_async;
1709 }
1710
1711 ret = of_dma_controller_register(xdev->dev->of_node,
1712 of_dma_xilinx_xlate, ddev);
1713 if (ret) {
1714 dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
1715 goto error_of_dma;
1716 }
1717
1718 xilinx_dpdma_enable_irq(xdev);
1719
1720 xilinx_dpdma_debugfs_init(xdev);
1721
1722 dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
1723
1724 return 0;
1725
1726 error_of_dma:
1727 dma_async_device_unregister(ddev);
1728 error_dma_async:
1729 clk_disable_unprepare(xdev->axi_clk);
1730 error:
1731 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1732 xilinx_dpdma_chan_remove(xdev->chan[i]);
1733
1734 free_irq(xdev->irq, xdev);
1735
1736 return ret;
1737 }
1738
1739 static int xilinx_dpdma_remove(struct platform_device *pdev)
1740 {
1741 struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
1742 unsigned int i;
1743
1744
1745 free_irq(xdev->irq, xdev);
1746
1747 xilinx_dpdma_disable_irq(xdev);
1748 of_dma_controller_free(pdev->dev.of_node);
1749 dma_async_device_unregister(&xdev->common);
1750 clk_disable_unprepare(xdev->axi_clk);
1751
1752 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1753 xilinx_dpdma_chan_remove(xdev->chan[i]);
1754
1755 return 0;
1756 }
1757
1758 static const struct of_device_id xilinx_dpdma_of_match[] = {
1759 { .compatible = "xlnx,zynqmp-dpdma",},
1760 { },
1761 };
1762 MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
1763
1764 static struct platform_driver xilinx_dpdma_driver = {
1765 .probe = xilinx_dpdma_probe,
1766 .remove = xilinx_dpdma_remove,
1767 .driver = {
1768 .name = "xilinx-zynqmp-dpdma",
1769 .of_match_table = xilinx_dpdma_of_match,
1770 },
1771 };
1772
1773 module_platform_driver(xilinx_dpdma_driver);
1774
1775 MODULE_AUTHOR("Xilinx, Inc.");
1776 MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
1777 MODULE_LICENSE("GPL v2");