0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/clk.h>
0010 #include <linux/dmapool.h>
0011 #include <linux/dma-mapping.h>
0012 #include <linux/init.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/module.h>
0015 #include <linux/of.h>
0016 #include <linux/of_device.h>
0017 #include <linux/of_dma.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/slab.h>
0020
0021 #include "dmaengine.h"
0022 #include "virt-dma.h"
0023
0024
0025 #define JZ_DMA_REG_DMAC 0x00
0026 #define JZ_DMA_REG_DIRQP 0x04
0027 #define JZ_DMA_REG_DDR 0x08
0028 #define JZ_DMA_REG_DDRS 0x0c
0029 #define JZ_DMA_REG_DCKE 0x10
0030 #define JZ_DMA_REG_DCKES 0x14
0031 #define JZ_DMA_REG_DCKEC 0x18
0032 #define JZ_DMA_REG_DMACP 0x1c
0033 #define JZ_DMA_REG_DSIRQP 0x20
0034 #define JZ_DMA_REG_DSIRQM 0x24
0035 #define JZ_DMA_REG_DCIRQP 0x28
0036 #define JZ_DMA_REG_DCIRQM 0x2c
0037
0038
/*
 * Per-channel register block offset. The parameter must be parenthesized:
 * the previous definition (n * 0x20) mis-expanded for expression arguments
 * such as JZ_DMA_REG_CHAN(i + 1).
 */
#define JZ_DMA_REG_CHAN(n)	((n) * 0x20)
0040 #define JZ_DMA_REG_DSA 0x00
0041 #define JZ_DMA_REG_DTA 0x04
0042 #define JZ_DMA_REG_DTC 0x08
0043 #define JZ_DMA_REG_DRT 0x0c
0044 #define JZ_DMA_REG_DCS 0x10
0045 #define JZ_DMA_REG_DCM 0x14
0046 #define JZ_DMA_REG_DDA 0x18
0047 #define JZ_DMA_REG_DSD 0x1c
0048
0049 #define JZ_DMA_DMAC_DMAE BIT(0)
0050 #define JZ_DMA_DMAC_AR BIT(2)
0051 #define JZ_DMA_DMAC_HLT BIT(3)
0052 #define JZ_DMA_DMAC_FAIC BIT(27)
0053 #define JZ_DMA_DMAC_FMSC BIT(31)
0054
0055 #define JZ_DMA_DRT_AUTO 0x8
0056
0057 #define JZ_DMA_DCS_CTE BIT(0)
0058 #define JZ_DMA_DCS_HLT BIT(2)
0059 #define JZ_DMA_DCS_TT BIT(3)
0060 #define JZ_DMA_DCS_AR BIT(4)
0061 #define JZ_DMA_DCS_DES8 BIT(30)
0062
0063 #define JZ_DMA_DCM_LINK BIT(0)
0064 #define JZ_DMA_DCM_TIE BIT(1)
0065 #define JZ_DMA_DCM_STDE BIT(2)
0066 #define JZ_DMA_DCM_TSZ_SHIFT 8
0067 #define JZ_DMA_DCM_TSZ_MASK (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
0068 #define JZ_DMA_DCM_DP_SHIFT 12
0069 #define JZ_DMA_DCM_SP_SHIFT 14
0070 #define JZ_DMA_DCM_DAI BIT(22)
0071 #define JZ_DMA_DCM_SAI BIT(23)
0072
0073 #define JZ_DMA_SIZE_4_BYTE 0x0
0074 #define JZ_DMA_SIZE_1_BYTE 0x1
0075 #define JZ_DMA_SIZE_2_BYTE 0x2
0076 #define JZ_DMA_SIZE_16_BYTE 0x3
0077 #define JZ_DMA_SIZE_32_BYTE 0x4
0078 #define JZ_DMA_SIZE_64_BYTE 0x5
0079 #define JZ_DMA_SIZE_128_BYTE 0x6
0080
0081 #define JZ_DMA_WIDTH_32_BIT 0x0
0082 #define JZ_DMA_WIDTH_8_BIT 0x1
0083 #define JZ_DMA_WIDTH_16_BIT 0x2
0084
0085 #define JZ_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
0086 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
0087 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
0088
0089 #define JZ4780_DMA_CTRL_OFFSET 0x1000
0090
0091
0092 #define JZ_SOC_DATA_ALLOW_LEGACY_DT BIT(0)
0093 #define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
0094 #define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
0095 #define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
0096 #define JZ_SOC_DATA_BREAK_LINKS BIT(4)
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
/*
 * struct jz4780_dma_hwdesc - descriptor fetched by the DMA controller.
 * @dcm: channel command word (transfer size, port widths, TIE/LINK flags).
 * @dsa: source address.
 * @dta: target (destination) address.
 * @dtc: transfer count in units of the transfer size; when DCM has the
 *       LINK bit set, bits 31:24 hold the offset of the next descriptor
 *       from the descriptor base, in units of 16 bytes (see the prep
 *       functions, which shift the byte offset right by 4).
 */
struct jz4780_dma_hwdesc {
	u32 dcm;
	u32 dsa;
	u32 dta;
	u32 dtc;
};
0113
0114
0115 #define JZ_DMA_DESC_BLOCK_SIZE PAGE_SIZE
0116 #define JZ_DMA_MAX_DESC \
0117 (JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
0118
/*
 * struct jz4780_dma_desc - software descriptor wrapping a hwdesc block.
 * @vdesc: virt-dma bookkeeping (cookie, list node, callback).
 * @desc: array of hardware descriptors allocated from the channel's pool.
 * @desc_phys: DMA (bus) address of @desc.
 * @count: number of hardware descriptors in @desc.
 * @type: transaction type (DMA_SLAVE, DMA_CYCLIC or DMA_MEMCPY).
 * @transfer_type: request type value written to the channel's DRT register.
 * @status: last DCS value recorded by the IRQ handler for this descriptor.
 */
struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	u32 transfer_type;
	u32 status;
};
0129
/*
 * struct jz4780_dma_chan - per-channel driver state.
 * @vchan: virt-dma channel.
 * @id: hardware channel index.
 * @desc_pool: DMA pool from which hwdesc blocks are allocated.
 * @transfer_type_tx: DRT request type used for mem-to-dev transfers.
 * @transfer_type_rx: DRT request type used for dev-to-mem transfers.
 * @transfer_shift: log2 of the most recently programmed transfer size,
 *                  used to convert DTC counts to bytes for residue.
 * @config: current slave configuration (set via device_config).
 * @desc: descriptor currently being processed, or NULL when idle.
 * @curr_hwdesc: index of the hardware descriptor currently in flight.
 */
struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	u32 transfer_type_tx, transfer_type_rx;
	u32 transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};
0142
/*
 * struct jz4780_dma_soc_data - SoC-specific controller parameters.
 * @nb_channels: number of DMA channels this controller provides.
 * @transfer_ord_max: log2 of the largest supported transfer size in bytes.
 * @flags: JZ_SOC_DATA_* feature bits.
 */
struct jz4780_dma_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
	unsigned long flags;
};
0148
/*
 * struct jz4780_dma_dev - controller-wide driver state.
 * @dma_device: dmaengine device.
 * @chn_base: base of the per-channel register region.
 * @ctrl_base: base of the global control register region.
 * @clk: controller clock.
 * @irq: controller interrupt number.
 * @soc_data: SoC-specific parameters (matched from devicetree).
 * @chan_reserved: bitmap of channels reserved via "ingenic,reserved-channels".
 * @chan: per-channel state; @soc_data->nb_channels entries.
 */
struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *chn_base;
	void __iomem *ctrl_base;
	struct clk *clk;
	unsigned int irq;
	const struct jz4780_dma_soc_data *soc_data;

	u32 chan_reserved;
	struct jz4780_dma_chan chan[];
};
0160
/*
 * struct jz4780_dma_filter_data - parameters passed to the channel filter.
 * @transfer_type_tx: requested DRT type for mem-to-dev.
 * @transfer_type_rx: requested DRT type for dev-to-mem.
 * @channel: requested channel index, or -1 for "any unreserved channel".
 */
struct jz4780_dma_filter_data {
	u32 transfer_type_tx, transfer_type_rx;
	int channel;
};
0165
/* Convert a generic dma_chan into the driver's per-channel state. */
static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}
0170
/* Convert a virt-dma descriptor into the driver's software descriptor. */
static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}
0176
/* Get the controller state a channel belongs to. */
static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}
0183
/* Read a per-channel register (JZ_DMA_REG_DSA..DSD) of channel @chn. */
static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg)
{
	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
0189
/* Write a per-channel register (JZ_DMA_REG_DSA..DSD) of channel @chn. */
static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg, u32 val)
{
	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
0195
/* Read a global control register (JZ_DMA_REG_DMAC..DCIRQM). */
static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->ctrl_base + reg);
}
0201
/* Write a global control register (JZ_DMA_REG_DMAC..DCIRQM). */
static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, u32 val)
{
	writel(val, jzdma->ctrl_base + reg);
}
0207
0208 static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
0209 unsigned int chn)
0210 {
0211 if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
0212 unsigned int reg;
0213
0214 if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
0215 reg = JZ_DMA_REG_DCKE;
0216 else
0217 reg = JZ_DMA_REG_DCKES;
0218
0219 jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
0220 }
0221 }
0222
/*
 * Gate the clock of channel @chn on SoCs with per-channel power management.
 * SoCs without the dedicated clear register (JZ_SOC_DATA_NO_DCKES_DCKEC)
 * have no way to clear a single bit with a plain write, so this is a
 * no-op for them.
 */
static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
0230
0231 static struct jz4780_dma_desc *
0232 jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
0233 enum dma_transaction_type type,
0234 enum dma_transfer_direction direction)
0235 {
0236 struct jz4780_dma_desc *desc;
0237
0238 if (count > JZ_DMA_MAX_DESC)
0239 return NULL;
0240
0241 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
0242 if (!desc)
0243 return NULL;
0244
0245 desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
0246 &desc->desc_phys);
0247 if (!desc->desc) {
0248 kfree(desc);
0249 return NULL;
0250 }
0251
0252 desc->count = count;
0253 desc->type = type;
0254
0255 if (direction == DMA_DEV_TO_MEM)
0256 desc->transfer_type = jzchan->transfer_type_rx;
0257 else
0258 desc->transfer_type = jzchan->transfer_type_tx;
0259
0260 return desc;
0261 }
0262
/*
 * virt-dma desc_free callback: return the hwdesc block to the channel's
 * pool and free the software descriptor. Only valid for descriptors that
 * have been through vchan_tx_prep() (which sets vdesc->tx.chan).
 */
static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}
0271
/*
 * Pick the largest usable transfer size for a transfer whose address,
 * length and (width * burst) constraints are OR'd together into @val:
 * the position of the lowest set bit bounds the common alignment of all
 * of them. Stores log2 of the chosen size in @shift and returns the
 * corresponding JZ_DMA_SIZE_* register encoding.
 */
static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
	unsigned long val, u32 *shift)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	int ord = ffs(val) - 1;

	/*
	 * An 8-byte transfer size is not supported by the hardware (there is
	 * no JZ_DMA_SIZE_8_BYTE encoding), so fall back to 4 bytes. Sizes
	 * larger than the SoC's maximum are simply clamped; this only costs
	 * efficiency, never correctness, since a smaller unit still divides
	 * the address and length evenly.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > jzdma->soc_data->transfer_ord_max)
		ord = jzdma->soc_data->transfer_ord_max;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}
0309
/*
 * Fill in one hardware descriptor for a slave transfer of @len bytes at
 * memory address @addr in the given @direction, using the channel's
 * current slave configuration for the device-side address, width and
 * burst. Returns 0 on success or -EINVAL for an unsupported bus width.
 */
static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	u32 width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		/* Memory side increments; device side is a fixed FIFO. */
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * OR'ing address, length and (width * burst) together lets
	 * jz4780_dma_transfer_size() pick the largest transfer unit that the
	 * address alignment and total length both allow without exceeding the
	 * burst requested by the user.
	 */
	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		/*
		 * These dmaengine enum values (1 and 2) coincide with the
		 * JZ_DMA_WIDTH_8_BIT/JZ_DMA_WIDTH_16_BIT encodings, so no
		 * conversion is needed.
		 */
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	/* Transfer count is expressed in units of the transfer size. */
	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}
0361
0362 static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
0363 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
0364 enum dma_transfer_direction direction, unsigned long flags,
0365 void *context)
0366 {
0367 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
0368 struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
0369 struct jz4780_dma_desc *desc;
0370 unsigned int i;
0371 int err;
0372
0373 desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
0374 if (!desc)
0375 return NULL;
0376
0377 for (i = 0; i < sg_len; i++) {
0378 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
0379 sg_dma_address(&sgl[i]),
0380 sg_dma_len(&sgl[i]),
0381 direction);
0382 if (err < 0) {
0383 jz4780_dma_desc_free(&jzchan->desc->vdesc);
0384 return NULL;
0385 }
0386
0387 desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
0388
0389 if (i != (sg_len - 1) &&
0390 !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
0391
0392 desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
0393
0394
0395
0396
0397
0398
0399 desc->desc[i].dtc |=
0400 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
0401 }
0402 }
0403
0404 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
0405 }
0406
0407 static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
0408 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
0409 size_t period_len, enum dma_transfer_direction direction,
0410 unsigned long flags)
0411 {
0412 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
0413 struct jz4780_dma_desc *desc;
0414 unsigned int periods, i;
0415 int err;
0416
0417 if (buf_len % period_len)
0418 return NULL;
0419
0420 periods = buf_len / period_len;
0421
0422 desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
0423 if (!desc)
0424 return NULL;
0425
0426 for (i = 0; i < periods; i++) {
0427 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
0428 period_len, direction);
0429 if (err < 0) {
0430 jz4780_dma_desc_free(&jzchan->desc->vdesc);
0431 return NULL;
0432 }
0433
0434 buf_addr += period_len;
0435
0436
0437
0438
0439
0440
0441
0442 desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;
0443
0444
0445
0446
0447
0448
0449
0450 if (i != (periods - 1)) {
0451 desc->desc[i].dtc |=
0452 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
0453 }
0454 }
0455
0456 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
0457 }
0458
/*
 * dmaengine device_prep_dma_memcpy: single-descriptor memory-to-memory
 * copy using the controller's auto-request mode (JZ_DMA_DRT_AUTO).
 */
static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	u32 tsz;

	/* Direction is meaningless here; transfer_type is overridden below. */
	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
	if (!desc)
		return NULL;

	/* Transfer unit bounded by the alignment of both addresses and len. */
	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
				       &jzchan->transfer_shift);

	desc->transfer_type = JZ_DMA_DRT_AUTO;

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	/* Interrupt on completion, increment both addresses, 32-bit ports. */
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
0486
/*
 * Start (or continue) processing on a channel. Called with the channel's
 * vchan lock held, from issue_pending and from the IRQ handler. Picks the
 * next queued descriptor when the channel is idle, or advances to the next
 * hardware descriptor of the current (unlinked) transfer, then programs
 * the channel registers and enables it.
 */
static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The controller only raises an interrupt at the end
			 * of a terminated descriptor list, never per linked
			 * descriptor. To get a callback after every period of
			 * a cyclic transfer, degrade the chain to unlinked
			 * descriptors and resubmit them one by one from the
			 * IRQ handler (the "else" branch below).
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, so this must be one for
		 * which the LINK bits were cleared above. Advance to the
		 * next hardware descriptor, wrapping around for cyclic.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Ungate the channel's clock (per-channel PM SoCs only). */
	jz4780_dma_chan_enable(jzdma, jzchan->id);

	/* Clear status; DES8 cleared selects 4-word descriptors. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	/* Set the request (transfer) type. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
			      jzchan->desc->transfer_type);

	/*
	 * Pre-load the transfer count. The descriptor fetch will load it
	 * anyway, but writing it here ensures the DTC register holds a
	 * meaningful value for residue calculation even before the fetch
	 * completes.
	 */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

	/* Point DDA at the current hwdesc and trigger a descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
			      JZ_DMA_DCS_CTE);
}
0561
0562 static void jz4780_dma_issue_pending(struct dma_chan *chan)
0563 {
0564 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
0565 unsigned long flags;
0566
0567 spin_lock_irqsave(&jzchan->vchan.lock, flags);
0568
0569 if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
0570 jz4780_dma_begin(jzchan);
0571
0572 spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
0573 }
0574
/*
 * dmaengine device_terminate_all: immediately stop the channel (clear DCS),
 * detach the in-flight descriptor (freed later through
 * vchan_terminate_vdesc/synchronize), gate the channel clock and free all
 * still-queued descriptors. The descriptor list is freed outside the lock.
 */
static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
	if (jzchan->desc) {
		vchan_terminate_vdesc(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	jz4780_dma_chan_disable(jzdma, jzchan->id);

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);
	return 0;
}
0600
/*
 * dmaengine device_synchronize: wait for in-flight callbacks and the
 * terminated descriptor to be released, then gate the channel's clock.
 */
static void jz4780_dma_synchronize(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

	vchan_synchronize(&jzchan->vchan);
	jz4780_dma_chan_disable(jzdma, jzchan->id);
}
0609
0610 static int jz4780_dma_config(struct dma_chan *chan,
0611 struct dma_slave_config *config)
0612 {
0613 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
0614
0615 if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
0616 || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
0617 return -EINVAL;
0618
0619
0620 memcpy(&jzchan->config, config, sizeof(jzchan->config));
0621
0622 return 0;
0623 }
0624
/*
 * Compute the residue (bytes not yet transferred) for @desc starting from
 * hardware descriptor @next_sg: the sum of the 24-bit transfer counts of
 * all not-yet-started descriptors, plus the hardware's live DTC count for
 * the in-flight descriptor when the transfer has started (next_sg != 0).
 * Counts are converted to bytes with the last programmed transfer shift.
 */
static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int count = 0;
	unsigned int i;

	/* Mask off the link-offset bits stored in dtc[31:24]. */
	for (i = next_sg; i < desc->count; i++)
		count += desc->desc[i].dtc & GENMASK(23, 0);

	if (next_sg != 0)
		count += jz4780_dma_chn_readl(jzdma, jzchan->id,
					      JZ_DMA_REG_DTC);

	return count << jzchan->transfer_shift;
}
0641
0642 static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
0643 dma_cookie_t cookie, struct dma_tx_state *txstate)
0644 {
0645 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
0646 struct virt_dma_desc *vdesc;
0647 enum dma_status status;
0648 unsigned long flags;
0649 unsigned long residue = 0;
0650
0651 spin_lock_irqsave(&jzchan->vchan.lock, flags);
0652
0653 status = dma_cookie_status(chan, cookie, txstate);
0654 if ((status == DMA_COMPLETE) || (txstate == NULL))
0655 goto out_unlock_irqrestore;
0656
0657 vdesc = vchan_find_desc(&jzchan->vchan, cookie);
0658 if (vdesc) {
0659
0660 residue = jz4780_dma_desc_residue(jzchan,
0661 to_jz4780_dma_desc(vdesc), 0);
0662 } else if (cookie == jzchan->desc->vdesc.tx.cookie) {
0663 residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
0664 jzchan->curr_hwdesc + 1);
0665 }
0666 dma_set_residue(txstate, residue);
0667
0668 if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
0669 && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
0670 status = DMA_ERROR;
0671
0672 out_unlock_irqrestore:
0673 spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
0674 return status;
0675 }
0676
/*
 * Handle the interrupt for one channel: read and clear the channel status,
 * record it in the active descriptor, then either run the cyclic callback,
 * complete the descriptor and start the next transfer, or re-enable the
 * channel on a spurious interrupt. Returns true if the caller may ack the
 * controller-level pending bit for this channel.
 */
static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	const unsigned int soc_flags = jzdma->soc_data->flags;
	struct jz4780_dma_desc *desc = jzchan->desc;
	u32 dcs;
	bool ack = true;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
	/* Clear all status bits (this also clears CTE, stopping the channel). */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		/* Remembered so tx_status can report DMA_ERROR. */
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);

				/* Resubmit the next period's hwdesc. */
				jz4780_dma_begin(jzchan);
			} else if (dcs & JZ_DMA_DCS_TT) {
				/*
				 * With BREAK_LINKS, each hwdesc completes
				 * separately; only finish the cookie on the
				 * last one. Otherwise the whole linked list
				 * is done now.
				 */
				if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
				    (jzchan->curr_hwdesc + 1 == desc->count)) {
					vchan_cookie_complete(&desc->vdesc);
					jzchan->desc = NULL;
				}

				jz4780_dma_begin(jzchan);
			} else {
				/* False positive - continue the transfer. */
				ack = false;
				jz4780_dma_chn_writel(jzdma, jzchan->id,
						      JZ_DMA_REG_DCS,
						      JZ_DMA_DCS_CTE);
			}
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);

	return ack;
}
0733
/*
 * Top-level interrupt handler: dispatch every channel with a pending
 * interrupt, clear the global halt/address-error flags, then write back
 * the pending register with the bits of acked channels removed (channels
 * that returned false from jz4780_dma_chan_irq stay pending).
 */
static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	unsigned int nb_channels = jzdma->soc_data->nb_channels;
	unsigned long pending;
	u32 dmac;
	int i;

	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

	for_each_set_bit(i, &pending, nb_channels) {
		if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
			pending &= ~BIT(i);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);

	return IRQ_HANDLED;
}
0759
0760 static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
0761 {
0762 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
0763
0764 jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
0765 chan->device->dev,
0766 JZ_DMA_DESC_BLOCK_SIZE,
0767 PAGE_SIZE, 0);
0768 if (!jzchan->desc_pool) {
0769 dev_err(&chan->dev->device,
0770 "failed to allocate descriptor pool\n");
0771 return -ENOMEM;
0772 }
0773
0774 return 0;
0775 }
0776
/*
 * dmaengine device_free_chan_resources: free all remaining descriptors,
 * then destroy the channel's descriptor pool.
 */
static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}
0785
0786 static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
0787 {
0788 struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
0789 struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
0790 struct jz4780_dma_filter_data *data = param;
0791
0792
0793 if (data->channel > -1) {
0794 if (data->channel != jzchan->id)
0795 return false;
0796 } else if (jzdma->chan_reserved & BIT(jzchan->id)) {
0797 return false;
0798 }
0799
0800 jzchan->transfer_type_tx = data->transfer_type_tx;
0801 jzchan->transfer_type_rx = data->transfer_type_rx;
0802
0803 return true;
0804 }
0805
/*
 * Translate a devicetree DMA specifier into a channel. Two forms exist:
 * two cells <type channel> use the same request type for both directions,
 * three cells <type_tx type_rx channel> give per-direction request types.
 * A channel index >= 0 requests that exact channel (which must have been
 * reserved); -1 lets the generic filter pick any unreserved channel.
 */
static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count == 2) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[0];
		data.channel = dma_spec->args[1];
	} else if (dma_spec->args_count == 3) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[1];
		data.channel = dma_spec->args[2];
	} else {
		return NULL;
	}

	if (data.channel > -1) {
		if (data.channel >= jzdma->soc_data->nb_channels) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
		jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	} else {
		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
					     ofdma->of_node);
	}
}
0851
0852 static int jz4780_dma_probe(struct platform_device *pdev)
0853 {
0854 struct device *dev = &pdev->dev;
0855 const struct jz4780_dma_soc_data *soc_data;
0856 struct jz4780_dma_dev *jzdma;
0857 struct jz4780_dma_chan *jzchan;
0858 struct dma_device *dd;
0859 struct resource *res;
0860 int i, ret;
0861
0862 if (!dev->of_node) {
0863 dev_err(dev, "This driver must be probed from devicetree\n");
0864 return -EINVAL;
0865 }
0866
0867 soc_data = device_get_match_data(dev);
0868 if (!soc_data)
0869 return -EINVAL;
0870
0871 jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
0872 soc_data->nb_channels), GFP_KERNEL);
0873 if (!jzdma)
0874 return -ENOMEM;
0875
0876 jzdma->soc_data = soc_data;
0877 platform_set_drvdata(pdev, jzdma);
0878
0879 jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
0880 if (IS_ERR(jzdma->chn_base))
0881 return PTR_ERR(jzdma->chn_base);
0882
0883 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
0884 if (res) {
0885 jzdma->ctrl_base = devm_ioremap_resource(dev, res);
0886 if (IS_ERR(jzdma->ctrl_base))
0887 return PTR_ERR(jzdma->ctrl_base);
0888 } else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
0889
0890
0891
0892
0893
0894 jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
0895 } else {
0896 dev_err(dev, "failed to get I/O memory\n");
0897 return -EINVAL;
0898 }
0899
0900 jzdma->clk = devm_clk_get(dev, NULL);
0901 if (IS_ERR(jzdma->clk)) {
0902 dev_err(dev, "failed to get clock\n");
0903 ret = PTR_ERR(jzdma->clk);
0904 return ret;
0905 }
0906
0907 clk_prepare_enable(jzdma->clk);
0908
0909
0910 of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
0911 0, &jzdma->chan_reserved);
0912
0913 dd = &jzdma->dma_device;
0914
0915
0916
0917
0918
0919
0920
0921 dma_set_max_seg_size(dev, 0xffffff);
0922
0923 dma_cap_set(DMA_MEMCPY, dd->cap_mask);
0924 dma_cap_set(DMA_SLAVE, dd->cap_mask);
0925 dma_cap_set(DMA_CYCLIC, dd->cap_mask);
0926
0927 dd->dev = dev;
0928 dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
0929 dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
0930 dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
0931 dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
0932 dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
0933 dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
0934 dd->device_config = jz4780_dma_config;
0935 dd->device_terminate_all = jz4780_dma_terminate_all;
0936 dd->device_synchronize = jz4780_dma_synchronize;
0937 dd->device_tx_status = jz4780_dma_tx_status;
0938 dd->device_issue_pending = jz4780_dma_issue_pending;
0939 dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
0940 dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
0941 dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
0942 dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
0943 dd->max_sg_burst = JZ_DMA_MAX_DESC;
0944
0945
0946
0947
0948
0949
0950 jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
0951 JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);
0952
0953 if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
0954 jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);
0955
0956 INIT_LIST_HEAD(&dd->channels);
0957
0958 for (i = 0; i < soc_data->nb_channels; i++) {
0959 jzchan = &jzdma->chan[i];
0960 jzchan->id = i;
0961
0962 vchan_init(&jzchan->vchan, dd);
0963 jzchan->vchan.desc_free = jz4780_dma_desc_free;
0964 }
0965
0966
0967
0968
0969
0970
0971 jz4780_dma_chan_enable(jzdma, 1);
0972 jz4780_dma_chan_disable(jzdma, 1);
0973
0974 ret = platform_get_irq(pdev, 0);
0975 if (ret < 0)
0976 goto err_disable_clk;
0977
0978 jzdma->irq = ret;
0979
0980 ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
0981 jzdma);
0982 if (ret) {
0983 dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
0984 goto err_disable_clk;
0985 }
0986
0987 ret = dmaenginem_async_device_register(dd);
0988 if (ret) {
0989 dev_err(dev, "failed to register device\n");
0990 goto err_free_irq;
0991 }
0992
0993
0994 ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
0995 jzdma);
0996 if (ret) {
0997 dev_err(dev, "failed to register OF DMA controller\n");
0998 goto err_free_irq;
0999 }
1000
1001 dev_info(dev, "JZ4780 DMA controller initialised\n");
1002 return 0;
1003
1004 err_free_irq:
1005 free_irq(jzdma->irq, jzdma);
1006
1007 err_disable_clk:
1008 clk_disable_unprepare(jzdma->clk);
1009 return ret;
1010 }
1011
/*
 * Platform driver remove: unregister the OF lookup first so no new
 * channels can be handed out, shut down clock and IRQ, and kill the
 * per-channel virt-dma tasklets. The dmaengine device itself was
 * registered with the managed dmaenginem_* helper and is unregistered
 * automatically.
 */
static int jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(jzdma->clk);
	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);

	return 0;
}
1027
/*
 * Per-SoC configuration tables. transfer_ord_max is log2 of the largest
 * transfer unit supported (see jz4780_dma_transfer_size()); flags are
 * the JZ_SOC_DATA_* bits defined at the top of this file.
 */
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
	.nb_channels = 8,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
1100
/* Devicetree compatible strings mapped to their SoC configuration. */
static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
	{ .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
	{ .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
	{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
	{ .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
	{ .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
	{ .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
1117
/* Platform driver glue. */
static struct platform_driver jz4780_dma_driver = {
	.probe = jz4780_dma_probe,
	.remove = jz4780_dma_remove,
	.driver = {
		.name = "jz4780-dma",
		.of_match_table = jz4780_dma_dt_match,
	},
};
1126
static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
/*
 * Registered at subsys_initcall time (earlier than module_init) so the
 * DMA controller is available before client drivers probe.
 */
subsys_initcall(jz4780_dma_init);
1132
/* Module unload: unregister the platform driver. */
static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);
1138
1139 MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
1140 MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
1141 MODULE_LICENSE("GPL");