0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/bitfield.h>
0010
0011 #include "dw-edma-core.h"
0012 #include "dw-edma-v0-core.h"
0013 #include "dw-edma-v0-regs.h"
0014 #include "dw-edma-v0-debugfs.h"
0015
/* Control-word bit flags used in linked-list elements and ch_control1. */
enum dw_edma_control {
	DW_EDMA_V0_CB		= BIT(0),	/* Cycle Bit */
	DW_EDMA_V0_TCB		= BIT(1),	/* Toggle Cycle Bit */
	DW_EDMA_V0_LLP		= BIT(2),	/* Linked List Pointer element */
	DW_EDMA_V0_LIE		= BIT(3),	/* Local Interrupt Enable */
	DW_EDMA_V0_RIE		= BIT(4),	/* Remote Interrupt Enable */
	DW_EDMA_V0_CCS		= BIT(8),	/* Consumer Cycle State */
	DW_EDMA_V0_LLE		= BIT(9),	/* Linked List Enable */
};
0025
/* Return the controller's memory-mapped v0 register file. */
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return dw->chip->reg_base;
}
0030
/* Raw 32-bit accessors to the global (non-per-channel) register file. */
#define SET_32(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET_32(dw, name)				\
	readl(&(__dw_regs(dw)->name))

/* Direction-dispatched accessors: pick the wr_##name or rd_##name copy. */
#define SET_RW_32(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_32(dw, wr_##name, value);	\
		else					\
			SET_32(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_32(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	 ? GET_32(dw, wr_##name)			\
	 : GET_32(dw, rd_##name))

/* Write the same value to both the write- and read-direction registers. */
#define SET_BOTH_32(dw, name, value)			\
	do {						\
		SET_32(dw, wr_##name, value);		\
		SET_32(dw, rd_##name, value);		\
	} while (0)
0055
#ifdef CONFIG_64BIT

/* 64-bit counterparts of the accessors above; only built on 64-bit kernels
 * where readq()/writeq() are available. */
#define SET_64(dw, name, value)				\
	writeq(value, &(__dw_regs(dw)->name))

#define GET_64(dw, name)				\
	readq(&(__dw_regs(dw)->name))

#define SET_RW_64(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_64(dw, wr_##name, value);	\
		else					\
			SET_64(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_64(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	 ? GET_64(dw, wr_##name)			\
	 : GET_64(dw, rd_##name))

#define SET_BOTH_64(dw, name, value)			\
	do {						\
		SET_64(dw, wr_##name, value);		\
		SET_64(dw, rd_##name, value);		\
	} while (0)

#endif
0084
/* 32-bit write into the unrolled register layout; used for the HDMA-compat
 * per-channel power-enable registers (see dw_edma_v0_core_start()). */
#define SET_COMPAT(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->type.unroll.name))

#define SET_RW_COMPAT(dw, dir, name, value)			\
	do {							\
		if ((dir) == EDMA_DIR_WRITE)			\
			SET_COMPAT(dw, wr_##name, value);	\
		else						\
			SET_COMPAT(dw, rd_##name, value);	\
	} while (0)
0095
/*
 * Locate the per-channel register block for @dir/@ch.  Legacy hardware
 * exposes one shared channel window (callers must route accesses through
 * writel_ch()/readl_ch() so the viewport is selected first); unrolled
 * hardware maps a separate block per channel and direction.
 */
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
		return &(__dw_regs(dw)->type.legacy.ch);

	if (dir == EDMA_DIR_WRITE)
		return &__dw_regs(dw)->type.unroll.ch[ch].wr;

	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}
0107
/*
 * 32-bit write to a channel register.  On legacy hardware the shared
 * channel window must first be pointed at @ch (BIT(31) appears to select
 * the read-direction bank — per the viewport register layout); the lock
 * keeps the select+write pair atomic against concurrent channel accesses.
 */
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		/* Unrolled mapping: every channel has its own registers. */
		writel(value, addr);
	}
}
0130
/*
 * 32-bit read of a channel register; same viewport-selection and locking
 * scheme as writel_ch() for legacy hardware.
 */
static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}
0157
/* Viewport-aware 32-bit accessors for per-channel registers. */
#define SET_CH_32(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_32(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

/* 32-bit store into the DMA linked-list region (no viewport involved). */
#define SET_LL_32(ll, value) \
	writel(value, ll)
0166
0167 #ifdef CONFIG_64BIT
0168
/*
 * 64-bit write to a channel register; same viewport-selection and locking
 * scheme as writel_ch() for legacy hardware.
 */
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u64 value, void __iomem *addr)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writeq(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writeq(value, addr);
	}
}
0191
0192 static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
0193 const void __iomem *addr)
0194 {
0195 u32 value;
0196
0197 if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
0198 u32 viewport_sel;
0199 unsigned long flags;
0200
0201 raw_spin_lock_irqsave(&dw->lock, flags);
0202
0203 viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
0204 if (dir == EDMA_DIR_READ)
0205 viewport_sel |= BIT(31);
0206
0207 writel(viewport_sel,
0208 &(__dw_regs(dw)->type.legacy.viewport_sel));
0209 value = readq(addr);
0210
0211 raw_spin_unlock_irqrestore(&dw->lock, flags);
0212 } else {
0213 value = readq(addr);
0214 }
0215
0216 return value;
0217 }
0218
/* Viewport-aware 64-bit accessors for per-channel registers. */
#define SET_CH_64(dw, dir, ch, name, value) \
	writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_64(dw, dir, ch, name) \
	readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

/* 64-bit store into the DMA linked-list region. */
#define SET_LL_64(ll, value) \
	writeq(value, ll)
0227
0228 #endif
0229
0230
/* Quiesce the controller: mask and ack all done/abort interrupts in both
 * directions, then disable both DMA engines. */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH_32(dw, int_mask,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, int_clear,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, engine_en, 0);
}
0239
0240 u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
0241 {
0242 u32 num_ch;
0243
0244 if (dir == EDMA_DIR_WRITE)
0245 num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
0246 GET_32(dw, ctrl));
0247 else
0248 num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
0249 GET_32(dw, ctrl));
0250
0251 if (num_ch > EDMA_V0_MAX_NR_CH)
0252 num_ch = EDMA_V0_MAX_NR_CH;
0253
0254 return (u16)num_ch;
0255 }
0256
0257 enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
0258 {
0259 struct dw_edma *dw = chan->dw;
0260 u32 tmp;
0261
0262 tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
0263 GET_CH_32(dw, chan->dir, chan->id, ch_control1));
0264
0265 if (tmp == 1)
0266 return DMA_IN_PROGRESS;
0267 else if (tmp == 3)
0268 return DMA_COMPLETE;
0269 else
0270 return DMA_ERROR;
0271 }
0272
/* Acknowledge (clear) this channel's DONE interrupt. */
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
0280
/* Acknowledge (clear) this channel's ABORT interrupt. */
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
0288
/* Return the per-channel DONE interrupt status bitmap for @dir. */
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}
0294
/* Return the per-channel ABORT interrupt status bitmap for @dir. */
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}
0300
/*
 * Populate the chunk's linked-list region: one LLI element per burst,
 * followed by an LLP element that points back to the start of the region.
 */
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma_v0_lli __iomem *lli;
	struct dw_edma_v0_llp __iomem *llp;
	u32 control = 0, i = 0;
	int j;

	lli = chunk->ll_region.vaddr;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j) {
			/* Last burst: enable the local interrupt and, when
			 * the eDMA is not local to the CPU, the remote one. */
			control |= DW_EDMA_V0_LIE;
			if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
				control |= DW_EDMA_V0_RIE;
		}

		/* Channel control */
		SET_LL_32(&lli[i].control, control);
		/* Transfer size */
		SET_LL_32(&lli[i].transfer_size, child->sz);
		/* Source address */
#ifdef CONFIG_64BIT
		SET_LL_64(&lli[i].sar.reg, child->sar);
#else
		SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
		SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
#endif
		/* Destination address */
#ifdef CONFIG_64BIT
		SET_LL_64(&lli[i].dar.reg, child->dar);
#else
		SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
		SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
#endif
		i++;
	}

	/* Terminating element: chain back to the head of this LL region.
	 * The cycle bit is the inverse of the data elements' so the producer
	 * and consumer cycle states stay in step. */
	llp = (void __iomem *)&lli[i];
	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;

	/* Channel control */
	SET_LL_32(&llp->control, control);
	/* Linked list pointer */
#ifdef CONFIG_64BIT
	SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
#else
	SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
	SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
#endif
}
0359
/*
 * Write the chunk's linked list and kick the transfer.  On the first chunk
 * of a transfer the engine, interrupts and linked-list pointer must be
 * programmed before the doorbell; subsequent chunks only need the doorbell.
 */
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW_32(dw, chan->dir, engine_en, BIT(0));
		if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
			/* HDMA-compat mode needs a per-channel power enable.
			 * The register name is token-pasted into the macro,
			 * so each channel id must be spelled out. */
			switch (chan->id) {
			case 0:
				SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
					      BIT(0));
				break;
			case 1:
				SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
					      BIT(0));
				break;
			case 2:
				SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
					      BIT(0));
				break;
			case 3:
				SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
					      BIT(0));
				break;
			case 4:
				SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
					      BIT(0));
				break;
			case 5:
				SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
					      BIT(0));
				break;
			case 6:
				SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
					      BIT(0));
				break;
			case 7:
				SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
					      BIT(0));
				break;
			}
		}
		/* Unmask this channel's done and abort interrupts
		 * (read-modify-write: other channels' bits are preserved). */
		tmp = GET_RW_32(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, int_mask, tmp);
		/* Enable linked-list error reporting for this channel. */
		tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control: consumer cycle state + linked-list mode. */
		SET_CH_32(dw, chan->dir, chan->id, ch_control1,
			  (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked-list pointer: physical address of the LL region. */
		SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
			  lower_32_bits(chunk->ll_region.paddr));
		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
			  upper_32_bits(chunk->ll_region.paddr));
	}
	/* Doorbell: start (or resume) the channel. */
	SET_RW_32(dw, chan->dir, doorbell,
		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
0430
/*
 * Program the interrupt message (IMWR) registers from the channel's MSI
 * descriptor so done/abort completions raise the right MSI.  Each imwr_data
 * register is shared by a pair of channels, so the sibling channel's data
 * field must be preserved across the read-modify-write.
 *
 * Always returns 0.
 */
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	u32 tmp = 0;

	/* MSI done address (low, high) */
	SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);
	/* MSI abort address (low, high) */
	SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);
	/* MSI data: fetch the register shared with the sibling channel. */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Odd channel: keep the even channel's data field. */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Even channel: keep the odd channel's data field. */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	/* Write the merged value back to the shared register. */
	switch (chan->id) {
	case 0:
	case 1:
		SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}
0501
0502
/* eDMA debugfs callbacks: thin wrappers around the v0 debugfs helpers. */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
	dw_edma_v0_debugfs_on(dw);
}

void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
{
	dw_edma_v0_debugfs_off(dw);
}