0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/dma-mapping.h>
0010 #include <linux/kernel.h>
0011 #include <linux/slab.h>
0012 #include <linux/export.h>
0013 #include <linux/dmaengine.h>
0014 #include <linux/platform_device.h>
0015 #include <linux/clk.h>
0016 #include <linux/delay.h>
0017 #include <linux/log2.h>
0018 #include <linux/pm.h>
0019 #include <linux/pm_runtime.h>
0020 #include <linux/err.h>
0021 #include <linux/of.h>
0022 #include <linux/of_dma.h>
0023 #include <linux/amba/bus.h>
0024 #include <linux/regulator/consumer.h>
0025 #include <linux/platform_data/dma-ste-dma40.h>
0026
0027 #include "dmaengine.h"
0028 #include "ste_dma40_ll.h"
0029
0030 #define D40_NAME "dma40"
0031
0032 #define D40_PHY_CHAN -1
0033
0034
0035 #define D40_CHAN_POS(chan) (2 * (chan / 2))
0036 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
0037
0038
0039 #define D40_SUSPEND_MAX_IT 500
0040
0041
0042 #define DMA40_AUTOSUSPEND_DELAY 100
0043
0044
0045 #define LCLA_ALIGNMENT 0x40000
0046
0047
0048 #define D40_LCLA_LINK_PER_EVENT_GRP 128
0049 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
0050
0051
0052 #define D40_MAX_LOG_CHAN_PER_PHY 32
0053
0054
0055 #define MAX_LCLA_ALLOC_ATTEMPTS 256
0056
0057
0058 #define D40_ALLOC_FREE BIT(31)
0059 #define D40_ALLOC_PHY BIT(30)
0060 #define D40_ALLOC_LOG_FREE 0
0061
0062 #define D40_MEMCPY_MAX_CHANS 8
0063
0064
0065 #define DB8500_DMA_MEMCPY_EV_0 51
0066 #define DB8500_DMA_MEMCPY_EV_1 56
0067 #define DB8500_DMA_MEMCPY_EV_2 57
0068 #define DB8500_DMA_MEMCPY_EV_3 58
0069 #define DB8500_DMA_MEMCPY_EV_4 59
0070 #define DB8500_DMA_MEMCPY_EV_5 60
0071
0072 static int dma40_memcpy_channels[] = {
0073 DB8500_DMA_MEMCPY_EV_0,
0074 DB8500_DMA_MEMCPY_EV_1,
0075 DB8500_DMA_MEMCPY_EV_2,
0076 DB8500_DMA_MEMCPY_EV_3,
0077 DB8500_DMA_MEMCPY_EV_4,
0078 DB8500_DMA_MEMCPY_EV_5,
0079 };
0080
0081
0082 static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
0083 .mode = STEDMA40_MODE_PHYSICAL,
0084 .dir = DMA_MEM_TO_MEM,
0085
0086 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
0087 .src_info.psize = STEDMA40_PSIZE_PHY_1,
0088 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
0089
0090 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
0091 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
0092 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
0093 };
0094
0095
0096 static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
0097 .mode = STEDMA40_MODE_LOGICAL,
0098 .dir = DMA_MEM_TO_MEM,
0099
0100 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
0101 .src_info.psize = STEDMA40_PSIZE_LOG_1,
0102 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
0103
0104 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
0105 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
0106 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
0107 };
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117 enum d40_command {
0118 D40_DMA_STOP = 0,
0119 D40_DMA_RUN = 1,
0120 D40_DMA_SUSPEND_REQ = 2,
0121 D40_DMA_SUSPENDED = 3
0122 };
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133 enum d40_events {
0134 D40_DEACTIVATE_EVENTLINE = 0,
0135 D40_ACTIVATE_EVENTLINE = 1,
0136 D40_SUSPEND_REQ_EVENTLINE = 2,
0137 D40_ROUND_EVENTLINE = 3
0138 };
0139
0140
0141
0142
0143
0144
0145 static __maybe_unused u32 d40_backup_regs[] = {
0146 D40_DREG_LCPA,
0147 D40_DREG_LCLA,
0148 D40_DREG_PRMSE,
0149 D40_DREG_PRMSO,
0150 D40_DREG_PRMOE,
0151 D40_DREG_PRMOO,
0152 };
0153
0154 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168 static u32 d40_backup_regs_v4a[] = {
0169 D40_DREG_PSEG1,
0170 D40_DREG_PSEG2,
0171 D40_DREG_PSEG3,
0172 D40_DREG_PSEG4,
0173 D40_DREG_PCEG1,
0174 D40_DREG_PCEG2,
0175 D40_DREG_PCEG3,
0176 D40_DREG_PCEG4,
0177 D40_DREG_RSEG1,
0178 D40_DREG_RSEG2,
0179 D40_DREG_RSEG3,
0180 D40_DREG_RSEG4,
0181 D40_DREG_RCEG1,
0182 D40_DREG_RCEG2,
0183 D40_DREG_RCEG3,
0184 D40_DREG_RCEG4,
0185 };
0186
0187 #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
0188
0189 static u32 d40_backup_regs_v4b[] = {
0190 D40_DREG_CPSEG1,
0191 D40_DREG_CPSEG2,
0192 D40_DREG_CPSEG3,
0193 D40_DREG_CPSEG4,
0194 D40_DREG_CPSEG5,
0195 D40_DREG_CPCEG1,
0196 D40_DREG_CPCEG2,
0197 D40_DREG_CPCEG3,
0198 D40_DREG_CPCEG4,
0199 D40_DREG_CPCEG5,
0200 D40_DREG_CRSEG1,
0201 D40_DREG_CRSEG2,
0202 D40_DREG_CRSEG3,
0203 D40_DREG_CRSEG4,
0204 D40_DREG_CRSEG5,
0205 D40_DREG_CRCEG1,
0206 D40_DREG_CRCEG2,
0207 D40_DREG_CRCEG3,
0208 D40_DREG_CRCEG4,
0209 D40_DREG_CRCEG5,
0210 };
0211
0212 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
0213
0214 static __maybe_unused u32 d40_backup_regs_chan[] = {
0215 D40_CHAN_REG_SSCFG,
0216 D40_CHAN_REG_SSELT,
0217 D40_CHAN_REG_SSPTR,
0218 D40_CHAN_REG_SSLNK,
0219 D40_CHAN_REG_SDCFG,
0220 D40_CHAN_REG_SDELT,
0221 D40_CHAN_REG_SDPTR,
0222 D40_CHAN_REG_SDLNK,
0223 };
0224
0225 #define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
0226 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237 struct d40_interrupt_lookup {
0238 u32 src;
0239 u32 clr;
0240 bool is_error;
0241 int offset;
0242 };
0243
0244
0245 static struct d40_interrupt_lookup il_v4a[] = {
0246 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
0247 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
0248 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
0249 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
0250 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
0251 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
0252 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
0253 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
0254 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
0255 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
0256 };
0257
0258 static struct d40_interrupt_lookup il_v4b[] = {
0259 {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
0260 {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
0261 {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
0262 {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
0263 {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
0264 {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
0265 {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
0266 {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
0267 {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
0268 {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
0269 {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
0270 {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
0271 };
0272
0273
0274
0275
0276
0277
0278
0279 struct d40_reg_val {
0280 unsigned int reg;
0281 unsigned int val;
0282 };
0283
0284 static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
0285
0286 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
0287
0288
0289 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
0290 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
0291 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
0292 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
0293 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
0294 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
0295 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
0296 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
0297 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
0298 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
0299 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
0300 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
0301 };
0302 static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
0303
0304 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
0305
0306
0307 { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
0308 { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
0309 { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
0310 { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
0311 { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
0312 { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
0313 { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
0314 { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
0315 { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
0316 { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
0317 { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
0318 { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
0319 { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
0320 { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
0321 { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
0322 };
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335 struct d40_lli_pool {
0336 void *base;
0337 int size;
0338 dma_addr_t dma_addr;
0339
0340 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
0341 };
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362 struct d40_desc {
0363
0364 struct d40_phy_lli_bidir lli_phy;
0365
0366 struct d40_log_lli_bidir lli_log;
0367
0368 struct d40_lli_pool lli_pool;
0369 int lli_len;
0370 int lli_current;
0371 int lcla_alloc;
0372
0373 struct dma_async_tx_descriptor txd;
0374 struct list_head node;
0375
0376 bool is_in_client_list;
0377 bool cyclic;
0378 };
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392 struct d40_lcla_pool {
0393 void *base;
0394 dma_addr_t dma_addr;
0395 void *base_unaligned;
0396 int pages;
0397 spinlock_t lock;
0398 struct d40_desc **alloc_map;
0399 };
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415 struct d40_phy_res {
0416 spinlock_t lock;
0417 bool reserved;
0418 int num;
0419 u32 allocated_src;
0420 u32 allocated_dst;
0421 bool use_soft_lli;
0422 };
0423
0424 struct d40_base;
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458 struct d40_chan {
0459 spinlock_t lock;
0460 int log_num;
0461 int pending_tx;
0462 bool busy;
0463 struct d40_phy_res *phy_chan;
0464 struct dma_chan chan;
0465 struct tasklet_struct tasklet;
0466 struct list_head client;
0467 struct list_head pending_queue;
0468 struct list_head active;
0469 struct list_head done;
0470 struct list_head queue;
0471 struct list_head prepare_queue;
0472 struct stedma40_chan_cfg dma_cfg;
0473 struct dma_slave_config slave_config;
0474 bool configured;
0475 struct d40_base *base;
0476
0477 u32 src_def_cfg;
0478 u32 dst_def_cfg;
0479 struct d40_def_lcsp log_def;
0480 struct d40_log_lli_full *lcpa;
0481
0482 dma_addr_t runtime_addr;
0483 enum dma_transfer_direction runtime_direction;
0484 };
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503 struct d40_gen_dmac {
0504 u32 *backup;
0505 u32 backup_size;
0506 u32 realtime_en;
0507 u32 realtime_clear;
0508 u32 high_prio_en;
0509 u32 high_prio_clear;
0510 u32 interrupt_en;
0511 u32 interrupt_clear;
0512 struct d40_interrupt_lookup *il;
0513 u32 il_size;
0514 struct d40_reg_val *init_reg;
0515 u32 init_reg_size;
0516 };
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566 struct d40_base {
0567 spinlock_t interrupt_lock;
0568 spinlock_t execmd_lock;
0569 struct device *dev;
0570 void __iomem *virtbase;
0571 u8 rev:4;
0572 struct clk *clk;
0573 phys_addr_t phy_start;
0574 resource_size_t phy_size;
0575 int irq;
0576 int num_memcpy_chans;
0577 int num_phy_chans;
0578 int num_log_chans;
0579 struct dma_device dma_both;
0580 struct dma_device dma_slave;
0581 struct dma_device dma_memcpy;
0582 struct d40_chan *phy_chans;
0583 struct d40_chan *log_chans;
0584 struct d40_chan **lookup_log_chans;
0585 struct d40_chan **lookup_phy_chans;
0586 struct stedma40_platform_data *plat_data;
0587 struct regulator *lcpa_regulator;
0588
0589 struct d40_phy_res *phy_res;
0590 struct d40_lcla_pool lcla_pool;
0591 void *lcpa_base;
0592 dma_addr_t phy_lcpa;
0593 resource_size_t lcpa_size;
0594 struct kmem_cache *desc_slab;
0595 u32 reg_val_backup[BACKUP_REGS_SZ];
0596 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
0597 u32 *reg_val_backup_chan;
0598 u32 *regs_interrupt;
0599 u16 gcc_pwr_off_mask;
0600 struct d40_gen_dmac gen_dmac;
0601 };
0602
0603 static struct device *chan2dev(struct d40_chan *d40c)
0604 {
0605 return &d40c->chan.dev->device;
0606 }
0607
0608 static bool chan_is_physical(struct d40_chan *chan)
0609 {
0610 return chan->log_num == D40_PHY_CHAN;
0611 }
0612
0613 static bool chan_is_logical(struct d40_chan *chan)
0614 {
0615 return !chan_is_physical(chan);
0616 }
0617
0618 static void __iomem *chan_base(struct d40_chan *chan)
0619 {
0620 return chan->base->virtbase + D40_DREG_PCBASE +
0621 chan->phy_chan->num * D40_DREG_PCDELTA;
0622 }
0623
0624 #define d40_err(dev, format, arg...) \
0625 dev_err(dev, "[%s] " format, __func__, ## arg)
0626
0627 #define chan_err(d40c, format, arg...) \
0628 d40_err(chan2dev(d40c), format, ## arg)
0629
0630 static int d40_set_runtime_config_write(struct dma_chan *chan,
0631 struct dma_slave_config *config,
0632 enum dma_transfer_direction direction);
0633
0634 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
0635 int lli_len)
0636 {
0637 bool is_log = chan_is_logical(d40c);
0638 u32 align;
0639 void *base;
0640
0641 if (is_log)
0642 align = sizeof(struct d40_log_lli);
0643 else
0644 align = sizeof(struct d40_phy_lli);
0645
0646 if (lli_len == 1) {
0647 base = d40d->lli_pool.pre_alloc_lli;
0648 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
0649 d40d->lli_pool.base = NULL;
0650 } else {
0651 d40d->lli_pool.size = lli_len * 2 * align;
0652
0653 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
0654 d40d->lli_pool.base = base;
0655
0656 if (d40d->lli_pool.base == NULL)
0657 return -ENOMEM;
0658 }
0659
0660 if (is_log) {
0661 d40d->lli_log.src = PTR_ALIGN(base, align);
0662 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
0663
0664 d40d->lli_pool.dma_addr = 0;
0665 } else {
0666 d40d->lli_phy.src = PTR_ALIGN(base, align);
0667 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
0668
0669 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
0670 d40d->lli_phy.src,
0671 d40d->lli_pool.size,
0672 DMA_TO_DEVICE);
0673
0674 if (dma_mapping_error(d40c->base->dev,
0675 d40d->lli_pool.dma_addr)) {
0676 kfree(d40d->lli_pool.base);
0677 d40d->lli_pool.base = NULL;
0678 d40d->lli_pool.dma_addr = 0;
0679 return -ENOMEM;
0680 }
0681 }
0682
0683 return 0;
0684 }
0685
0686 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
0687 {
0688 if (d40d->lli_pool.dma_addr)
0689 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
0690 d40d->lli_pool.size, DMA_TO_DEVICE);
0691
0692 kfree(d40d->lli_pool.base);
0693 d40d->lli_pool.base = NULL;
0694 d40d->lli_pool.size = 0;
0695 d40d->lli_log.src = NULL;
0696 d40d->lli_log.dst = NULL;
0697 d40d->lli_phy.src = NULL;
0698 d40d->lli_phy.dst = NULL;
0699 }
0700
0701 static int d40_lcla_alloc_one(struct d40_chan *d40c,
0702 struct d40_desc *d40d)
0703 {
0704 unsigned long flags;
0705 int i;
0706 int ret = -EINVAL;
0707
0708 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
0709
0710
0711
0712
0713
0714 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
0715 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
0716
0717 if (!d40c->base->lcla_pool.alloc_map[idx]) {
0718 d40c->base->lcla_pool.alloc_map[idx] = d40d;
0719 d40d->lcla_alloc++;
0720 ret = i;
0721 break;
0722 }
0723 }
0724
0725 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
0726
0727 return ret;
0728 }
0729
0730 static int d40_lcla_free_all(struct d40_chan *d40c,
0731 struct d40_desc *d40d)
0732 {
0733 unsigned long flags;
0734 int i;
0735 int ret = -EINVAL;
0736
0737 if (chan_is_physical(d40c))
0738 return 0;
0739
0740 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
0741
0742 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
0743 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
0744
0745 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
0746 d40c->base->lcla_pool.alloc_map[idx] = NULL;
0747 d40d->lcla_alloc--;
0748 if (d40d->lcla_alloc == 0) {
0749 ret = 0;
0750 break;
0751 }
0752 }
0753 }
0754
0755 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
0756
0757 return ret;
0758
0759 }
0760
0761 static void d40_desc_remove(struct d40_desc *d40d)
0762 {
0763 list_del(&d40d->node);
0764 }
0765
0766 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
0767 {
0768 struct d40_desc *desc = NULL;
0769
0770 if (!list_empty(&d40c->client)) {
0771 struct d40_desc *d;
0772 struct d40_desc *_d;
0773
0774 list_for_each_entry_safe(d, _d, &d40c->client, node) {
0775 if (async_tx_test_ack(&d->txd)) {
0776 d40_desc_remove(d);
0777 desc = d;
0778 memset(desc, 0, sizeof(*desc));
0779 break;
0780 }
0781 }
0782 }
0783
0784 if (!desc)
0785 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
0786
0787 if (desc)
0788 INIT_LIST_HEAD(&desc->node);
0789
0790 return desc;
0791 }
0792
0793 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
0794 {
0795
0796 d40_pool_lli_free(d40c, d40d);
0797 d40_lcla_free_all(d40c, d40d);
0798 kmem_cache_free(d40c->base->desc_slab, d40d);
0799 }
0800
0801 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
0802 {
0803 list_add_tail(&desc->node, &d40c->active);
0804 }
0805
0806 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
0807 {
0808 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
0809 struct d40_phy_lli *lli_src = desc->lli_phy.src;
0810 void __iomem *base = chan_base(chan);
0811
0812 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
0813 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
0814 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
0815 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
0816
0817 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
0818 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
0819 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
0820 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
0821 }
0822
0823 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
0824 {
0825 list_add_tail(&desc->node, &d40c->done);
0826 }
0827
0828 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
0829 {
0830 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
0831 struct d40_log_lli_bidir *lli = &desc->lli_log;
0832 int lli_current = desc->lli_current;
0833 int lli_len = desc->lli_len;
0834 bool cyclic = desc->cyclic;
0835 int curr_lcla = -EINVAL;
0836 int first_lcla = 0;
0837 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
0838 bool linkback;
0839
0840
0841
0842
0843
0844 linkback = cyclic && lli_current == 0;
0845
0846
0847
0848
0849
0850 if (linkback || (lli_len - lli_current > 1)) {
0851
0852
0853
0854
0855
0856
0857 if (!(chan->phy_chan->use_soft_lli &&
0858 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
0859 curr_lcla = d40_lcla_alloc_one(chan, desc);
0860
0861 first_lcla = curr_lcla;
0862 }
0863
0864
0865
0866
0867
0868
0869
0870 if (!linkback || curr_lcla == -EINVAL) {
0871 unsigned int flags = 0;
0872
0873 if (curr_lcla == -EINVAL)
0874 flags |= LLI_TERM_INT;
0875
0876 d40_log_lli_lcpa_write(chan->lcpa,
0877 &lli->dst[lli_current],
0878 &lli->src[lli_current],
0879 curr_lcla,
0880 flags);
0881 lli_current++;
0882 }
0883
0884 if (curr_lcla < 0)
0885 goto set_current;
0886
0887 for (; lli_current < lli_len; lli_current++) {
0888 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
0889 8 * curr_lcla * 2;
0890 struct d40_log_lli *lcla = pool->base + lcla_offset;
0891 unsigned int flags = 0;
0892 int next_lcla;
0893
0894 if (lli_current + 1 < lli_len)
0895 next_lcla = d40_lcla_alloc_one(chan, desc);
0896 else
0897 next_lcla = linkback ? first_lcla : -EINVAL;
0898
0899 if (cyclic || next_lcla == -EINVAL)
0900 flags |= LLI_TERM_INT;
0901
0902 if (linkback && curr_lcla == first_lcla) {
0903
0904 d40_log_lli_lcpa_write(chan->lcpa,
0905 &lli->dst[lli_current],
0906 &lli->src[lli_current],
0907 next_lcla, flags);
0908 }
0909
0910
0911
0912
0913
0914 d40_log_lli_lcla_write(lcla,
0915 &lli->dst[lli_current],
0916 &lli->src[lli_current],
0917 next_lcla, flags);
0918
0919
0920
0921
0922
0923 if (!use_esram_lcla) {
0924 dma_sync_single_range_for_device(chan->base->dev,
0925 pool->dma_addr, lcla_offset,
0926 2 * sizeof(struct d40_log_lli),
0927 DMA_TO_DEVICE);
0928 }
0929 curr_lcla = next_lcla;
0930
0931 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
0932 lli_current++;
0933 break;
0934 }
0935 }
0936 set_current:
0937 desc->lli_current = lli_current;
0938 }
0939
0940 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
0941 {
0942 if (chan_is_physical(d40c)) {
0943 d40_phy_lli_load(d40c, d40d);
0944 d40d->lli_current = d40d->lli_len;
0945 } else
0946 d40_log_lli_to_lcxa(d40c, d40d);
0947 }
0948
0949 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
0950 {
0951 return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
0952 }
0953
0954
0955 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
0956 {
0957 d40_desc_remove(desc);
0958 desc->is_in_client_list = false;
0959 list_add_tail(&desc->node, &d40c->pending_queue);
0960 }
0961
0962 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
0963 {
0964 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
0965 node);
0966 }
0967
0968 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
0969 {
0970 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
0971 }
0972
0973 static struct d40_desc *d40_first_done(struct d40_chan *d40c)
0974 {
0975 return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
0976 }
0977
0978 static int d40_psize_2_burst_size(bool is_log, int psize)
0979 {
0980 if (is_log) {
0981 if (psize == STEDMA40_PSIZE_LOG_1)
0982 return 1;
0983 } else {
0984 if (psize == STEDMA40_PSIZE_PHY_1)
0985 return 1;
0986 }
0987
0988 return 2 << psize;
0989 }
0990
0991
0992
0993
0994
0995
0996
0997 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
0998 {
0999 int dmalen;
1000 u32 max_w = max(data_width1, data_width2);
1001 u32 min_w = min(data_width1, data_width2);
1002 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
1003
1004 if (seg_max > STEDMA40_MAX_SEG_SIZE)
1005 seg_max -= max_w;
1006
1007 if (!IS_ALIGNED(size, max_w))
1008 return -EINVAL;
1009
1010 if (size <= seg_max)
1011 dmalen = 1;
1012 else {
1013 dmalen = size / seg_max;
1014 if (dmalen * seg_max < size)
1015 dmalen++;
1016 }
1017 return dmalen;
1018 }
1019
1020 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1021 u32 data_width1, u32 data_width2)
1022 {
1023 struct scatterlist *sg;
1024 int i;
1025 int len = 0;
1026 int ret;
1027
1028 for_each_sg(sgl, sg, sg_len, i) {
1029 ret = d40_size_2_dmalen(sg_dma_len(sg),
1030 data_width1, data_width2);
1031 if (ret < 0)
1032 return ret;
1033 len += ret;
1034 }
1035 return len;
1036 }
1037
1038 static int __d40_execute_command_phy(struct d40_chan *d40c,
1039 enum d40_command command)
1040 {
1041 u32 status;
1042 int i;
1043 void __iomem *active_reg;
1044 int ret = 0;
1045 unsigned long flags;
1046 u32 wmask;
1047
1048 if (command == D40_DMA_STOP) {
1049 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1050 if (ret)
1051 return ret;
1052 }
1053
1054 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1055
1056 if (d40c->phy_chan->num % 2 == 0)
1057 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1058 else
1059 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1060
1061 if (command == D40_DMA_SUSPEND_REQ) {
1062 status = (readl(active_reg) &
1063 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1064 D40_CHAN_POS(d40c->phy_chan->num);
1065
1066 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1067 goto unlock;
1068 }
1069
1070 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1071 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1072 active_reg);
1073
1074 if (command == D40_DMA_SUSPEND_REQ) {
1075
1076 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1077 status = (readl(active_reg) &
1078 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1079 D40_CHAN_POS(d40c->phy_chan->num);
1080
1081 cpu_relax();
1082
1083
1084
1085
1086 udelay(3);
1087
1088 if (status == D40_DMA_STOP ||
1089 status == D40_DMA_SUSPENDED)
1090 break;
1091 }
1092
1093 if (i == D40_SUSPEND_MAX_IT) {
1094 chan_err(d40c,
1095 "unable to suspend the chl %d (log: %d) status %x\n",
1096 d40c->phy_chan->num, d40c->log_num,
1097 status);
1098 dump_stack();
1099 ret = -EBUSY;
1100 }
1101
1102 }
1103 unlock:
1104 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1105 return ret;
1106 }
1107
1108 static void d40_term_all(struct d40_chan *d40c)
1109 {
1110 struct d40_desc *d40d;
1111 struct d40_desc *_d;
1112
1113
1114 while ((d40d = d40_first_done(d40c))) {
1115 d40_desc_remove(d40d);
1116 d40_desc_free(d40c, d40d);
1117 }
1118
1119
1120 while ((d40d = d40_first_active_get(d40c))) {
1121 d40_desc_remove(d40d);
1122 d40_desc_free(d40c, d40d);
1123 }
1124
1125
1126 while ((d40d = d40_first_queued(d40c))) {
1127 d40_desc_remove(d40d);
1128 d40_desc_free(d40c, d40d);
1129 }
1130
1131
1132 while ((d40d = d40_first_pending(d40c))) {
1133 d40_desc_remove(d40d);
1134 d40_desc_free(d40c, d40d);
1135 }
1136
1137
1138 if (!list_empty(&d40c->client))
1139 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1140 d40_desc_remove(d40d);
1141 d40_desc_free(d40c, d40d);
1142 }
1143
1144
1145 if (!list_empty(&d40c->prepare_queue))
1146 list_for_each_entry_safe(d40d, _d,
1147 &d40c->prepare_queue, node) {
1148 d40_desc_remove(d40d);
1149 d40_desc_free(d40c, d40d);
1150 }
1151
1152 d40c->pending_tx = 0;
1153 }
1154
1155 static void __d40_config_set_event(struct d40_chan *d40c,
1156 enum d40_events event_type, u32 event,
1157 int reg)
1158 {
1159 void __iomem *addr = chan_base(d40c) + reg;
1160 int tries;
1161 u32 status;
1162
1163 switch (event_type) {
1164
1165 case D40_DEACTIVATE_EVENTLINE:
1166
1167 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1168 | ~D40_EVENTLINE_MASK(event), addr);
1169 break;
1170
1171 case D40_SUSPEND_REQ_EVENTLINE:
1172 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1173 D40_EVENTLINE_POS(event);
1174
1175 if (status == D40_DEACTIVATE_EVENTLINE ||
1176 status == D40_SUSPEND_REQ_EVENTLINE)
1177 break;
1178
1179 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1180 | ~D40_EVENTLINE_MASK(event), addr);
1181
1182 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1183
1184 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1185 D40_EVENTLINE_POS(event);
1186
1187 cpu_relax();
1188
1189
1190
1191
1192 udelay(3);
1193
1194 if (status == D40_DEACTIVATE_EVENTLINE)
1195 break;
1196 }
1197
1198 if (tries == D40_SUSPEND_MAX_IT) {
1199 chan_err(d40c,
1200 "unable to stop the event_line chl %d (log: %d)"
1201 "status %x\n", d40c->phy_chan->num,
1202 d40c->log_num, status);
1203 }
1204 break;
1205
1206 case D40_ACTIVATE_EVENTLINE:
1207
1208
1209
1210
1211
1212 tries = 100;
1213 while (--tries) {
1214 writel((D40_ACTIVATE_EVENTLINE <<
1215 D40_EVENTLINE_POS(event)) |
1216 ~D40_EVENTLINE_MASK(event), addr);
1217
1218 if (readl(addr) & D40_EVENTLINE_MASK(event))
1219 break;
1220 }
1221
1222 if (tries != 99)
1223 dev_dbg(chan2dev(d40c),
1224 "[%s] workaround enable S%cLNK (%d tries)\n",
1225 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1226 100 - tries);
1227
1228 WARN_ON(!tries);
1229 break;
1230
1231 case D40_ROUND_EVENTLINE:
1232 BUG();
1233 break;
1234
1235 }
1236 }
1237
1238 static void d40_config_set_event(struct d40_chan *d40c,
1239 enum d40_events event_type)
1240 {
1241 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1242
1243
1244 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1245 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1246 __d40_config_set_event(d40c, event_type, event,
1247 D40_CHAN_REG_SSLNK);
1248
1249 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1250 __d40_config_set_event(d40c, event_type, event,
1251 D40_CHAN_REG_SDLNK);
1252 }
1253
1254 static u32 d40_chan_has_events(struct d40_chan *d40c)
1255 {
1256 void __iomem *chanbase = chan_base(d40c);
1257 u32 val;
1258
1259 val = readl(chanbase + D40_CHAN_REG_SSLNK);
1260 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1261
1262 return val;
1263 }
1264
1265 static int
1266 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1267 {
1268 unsigned long flags;
1269 int ret = 0;
1270 u32 active_status;
1271 void __iomem *active_reg;
1272
1273 if (d40c->phy_chan->num % 2 == 0)
1274 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1275 else
1276 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1277
1278
1279 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1280
1281 switch (command) {
1282 case D40_DMA_STOP:
1283 case D40_DMA_SUSPEND_REQ:
1284
1285 active_status = (readl(active_reg) &
1286 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1287 D40_CHAN_POS(d40c->phy_chan->num);
1288
1289 if (active_status == D40_DMA_RUN)
1290 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1291 else
1292 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1293
1294 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1295 ret = __d40_execute_command_phy(d40c, command);
1296
1297 break;
1298
1299 case D40_DMA_RUN:
1300
1301 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1302 ret = __d40_execute_command_phy(d40c, command);
1303 break;
1304
1305 case D40_DMA_SUSPENDED:
1306 BUG();
1307 break;
1308 }
1309
1310 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1311 return ret;
1312 }
1313
1314 static int d40_channel_execute_command(struct d40_chan *d40c,
1315 enum d40_command command)
1316 {
1317 if (chan_is_logical(d40c))
1318 return __d40_execute_command_log(d40c, command);
1319 else
1320 return __d40_execute_command_phy(d40c, command);
1321 }
1322
1323 static u32 d40_get_prmo(struct d40_chan *d40c)
1324 {
1325 static const unsigned int phy_map[] = {
1326 [STEDMA40_PCHAN_BASIC_MODE]
1327 = D40_DREG_PRMO_PCHAN_BASIC,
1328 [STEDMA40_PCHAN_MODULO_MODE]
1329 = D40_DREG_PRMO_PCHAN_MODULO,
1330 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
1331 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1332 };
1333 static const unsigned int log_map[] = {
1334 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1335 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1336 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1337 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1338 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1339 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1340 };
1341
1342 if (chan_is_physical(d40c))
1343 return phy_map[d40c->dma_cfg.mode_opt];
1344 else
1345 return log_map[d40c->dma_cfg.mode_opt];
1346 }
1347
1348 static void d40_config_write(struct d40_chan *d40c)
1349 {
1350 u32 addr_base;
1351 u32 var;
1352
1353
1354 addr_base = (d40c->phy_chan->num % 2) * 4;
1355
1356 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1357 D40_CHAN_POS(d40c->phy_chan->num);
1358 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1359
1360
1361 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1362
1363 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1364
1365 if (chan_is_logical(d40c)) {
1366 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1367 & D40_SREG_ELEM_LOG_LIDX_MASK;
1368 void __iomem *chanbase = chan_base(d40c);
1369
1370
1371 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1372 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1373
1374
1375 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1376 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1377
1378
1379 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1380 writel(0, chanbase + D40_CHAN_REG_SDLNK);
1381 }
1382 }
1383
1384 static u32 d40_residue(struct d40_chan *d40c)
1385 {
1386 u32 num_elt;
1387
1388 if (chan_is_logical(d40c))
1389 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1390 >> D40_MEM_LCSP2_ECNT_POS;
1391 else {
1392 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1393 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1394 >> D40_SREG_ELEM_PHY_ECNT_POS;
1395 }
1396
1397 return num_elt * d40c->dma_cfg.dst_info.data_width;
1398 }
1399
1400 static bool d40_tx_is_linked(struct d40_chan *d40c)
1401 {
1402 bool is_link;
1403
1404 if (chan_is_logical(d40c))
1405 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1406 else
1407 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1408 & D40_SREG_LNK_PHYS_LNK_MASK;
1409
1410 return is_link;
1411 }
1412
1413 static int d40_pause(struct dma_chan *chan)
1414 {
1415 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1416 int res = 0;
1417 unsigned long flags;
1418
1419 if (d40c->phy_chan == NULL) {
1420 chan_err(d40c, "Channel is not allocated!\n");
1421 return -EINVAL;
1422 }
1423
1424 if (!d40c->busy)
1425 return 0;
1426
1427 spin_lock_irqsave(&d40c->lock, flags);
1428 pm_runtime_get_sync(d40c->base->dev);
1429
1430 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1431
1432 pm_runtime_mark_last_busy(d40c->base->dev);
1433 pm_runtime_put_autosuspend(d40c->base->dev);
1434 spin_unlock_irqrestore(&d40c->lock, flags);
1435 return res;
1436 }
1437
1438 static int d40_resume(struct dma_chan *chan)
1439 {
1440 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1441 int res = 0;
1442 unsigned long flags;
1443
1444 if (d40c->phy_chan == NULL) {
1445 chan_err(d40c, "Channel is not allocated!\n");
1446 return -EINVAL;
1447 }
1448
1449 if (!d40c->busy)
1450 return 0;
1451
1452 spin_lock_irqsave(&d40c->lock, flags);
1453 pm_runtime_get_sync(d40c->base->dev);
1454
1455
1456 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1457 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1458
1459 pm_runtime_mark_last_busy(d40c->base->dev);
1460 pm_runtime_put_autosuspend(d40c->base->dev);
1461 spin_unlock_irqrestore(&d40c->lock, flags);
1462 return res;
1463 }
1464
1465 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1466 {
1467 struct d40_chan *d40c = container_of(tx->chan,
1468 struct d40_chan,
1469 chan);
1470 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1471 unsigned long flags;
1472 dma_cookie_t cookie;
1473
1474 spin_lock_irqsave(&d40c->lock, flags);
1475 cookie = dma_cookie_assign(tx);
1476 d40_desc_queue(d40c, d40d);
1477 spin_unlock_irqrestore(&d40c->lock, flags);
1478
1479 return cookie;
1480 }
1481
1482 static int d40_start(struct d40_chan *d40c)
1483 {
1484 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1485 }
1486
1487 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1488 {
1489 struct d40_desc *d40d;
1490 int err;
1491
1492
1493 d40d = d40_first_queued(d40c);
1494
1495 if (d40d != NULL) {
1496 if (!d40c->busy) {
1497 d40c->busy = true;
1498 pm_runtime_get_sync(d40c->base->dev);
1499 }
1500
1501
1502 d40_desc_remove(d40d);
1503
1504
1505 d40_desc_submit(d40c, d40d);
1506
1507
1508 d40_desc_load(d40c, d40d);
1509
1510
1511 err = d40_start(d40c);
1512
1513 if (err)
1514 return NULL;
1515 }
1516
1517 return d40d;
1518 }
1519
1520
1521 static void dma_tc_handle(struct d40_chan *d40c)
1522 {
1523 struct d40_desc *d40d;
1524
1525
1526 d40d = d40_first_active_get(d40c);
1527
1528 if (d40d == NULL)
1529 return;
1530
1531 if (d40d->cyclic) {
1532
1533
1534
1535
1536
1537
1538 if (d40d->lli_current < d40d->lli_len
1539 && !d40_tx_is_linked(d40c)
1540 && !d40_residue(d40c)) {
1541 d40_lcla_free_all(d40c, d40d);
1542 d40_desc_load(d40c, d40d);
1543 (void) d40_start(d40c);
1544
1545 if (d40d->lli_current == d40d->lli_len)
1546 d40d->lli_current = 0;
1547 }
1548 } else {
1549 d40_lcla_free_all(d40c, d40d);
1550
1551 if (d40d->lli_current < d40d->lli_len) {
1552 d40_desc_load(d40c, d40d);
1553
1554 (void) d40_start(d40c);
1555 return;
1556 }
1557
1558 if (d40_queue_start(d40c) == NULL) {
1559 d40c->busy = false;
1560
1561 pm_runtime_mark_last_busy(d40c->base->dev);
1562 pm_runtime_put_autosuspend(d40c->base->dev);
1563 }
1564
1565 d40_desc_remove(d40d);
1566 d40_desc_done(d40c, d40d);
1567 }
1568
1569 d40c->pending_tx++;
1570 tasklet_schedule(&d40c->tasklet);
1571
1572 }
1573
1574 static void dma_tasklet(struct tasklet_struct *t)
1575 {
1576 struct d40_chan *d40c = from_tasklet(d40c, t, tasklet);
1577 struct d40_desc *d40d;
1578 unsigned long flags;
1579 bool callback_active;
1580 struct dmaengine_desc_callback cb;
1581
1582 spin_lock_irqsave(&d40c->lock, flags);
1583
1584
1585 d40d = d40_first_done(d40c);
1586 if (d40d == NULL) {
1587
1588 d40d = d40_first_active_get(d40c);
1589 if (d40d == NULL || !d40d->cyclic)
1590 goto check_pending_tx;
1591 }
1592
1593 if (!d40d->cyclic)
1594 dma_cookie_complete(&d40d->txd);
1595
1596
1597
1598
1599
1600 if (d40c->pending_tx == 0) {
1601 spin_unlock_irqrestore(&d40c->lock, flags);
1602 return;
1603 }
1604
1605
1606 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1607 dmaengine_desc_get_callback(&d40d->txd, &cb);
1608
1609 if (!d40d->cyclic) {
1610 if (async_tx_test_ack(&d40d->txd)) {
1611 d40_desc_remove(d40d);
1612 d40_desc_free(d40c, d40d);
1613 } else if (!d40d->is_in_client_list) {
1614 d40_desc_remove(d40d);
1615 d40_lcla_free_all(d40c, d40d);
1616 list_add_tail(&d40d->node, &d40c->client);
1617 d40d->is_in_client_list = true;
1618 }
1619 }
1620
1621 d40c->pending_tx--;
1622
1623 if (d40c->pending_tx)
1624 tasklet_schedule(&d40c->tasklet);
1625
1626 spin_unlock_irqrestore(&d40c->lock, flags);
1627
1628 if (callback_active)
1629 dmaengine_desc_callback_invoke(&cb, NULL);
1630
1631 return;
1632 check_pending_tx:
1633
1634 if (d40c->pending_tx > 0)
1635 d40c->pending_tx--;
1636 spin_unlock_irqrestore(&d40c->lock, flags);
1637 }
1638
1639 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1640 {
1641 int i;
1642 u32 idx;
1643 u32 row;
1644 long chan = -1;
1645 struct d40_chan *d40c;
1646 struct d40_base *base = data;
1647 u32 *regs = base->regs_interrupt;
1648 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1649 u32 il_size = base->gen_dmac.il_size;
1650
1651 spin_lock(&base->interrupt_lock);
1652
1653
1654 for (i = 0; i < il_size; i++)
1655 regs[i] = readl(base->virtbase + il[i].src);
1656
1657 for (;;) {
1658
1659 chan = find_next_bit((unsigned long *)regs,
1660 BITS_PER_LONG * il_size, chan + 1);
1661
1662
1663 if (chan == BITS_PER_LONG * il_size)
1664 break;
1665
1666 row = chan / BITS_PER_LONG;
1667 idx = chan & (BITS_PER_LONG - 1);
1668
1669 if (il[row].offset == D40_PHY_CHAN)
1670 d40c = base->lookup_phy_chans[idx];
1671 else
1672 d40c = base->lookup_log_chans[il[row].offset + idx];
1673
1674 if (!d40c) {
1675
1676
1677
1678
1679 continue;
1680 }
1681
1682
1683 writel(BIT(idx), base->virtbase + il[row].clr);
1684
1685 spin_lock(&d40c->lock);
1686
1687 if (!il[row].is_error)
1688 dma_tc_handle(d40c);
1689 else
1690 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1691 chan, il[row].offset, idx);
1692
1693 spin_unlock(&d40c->lock);
1694 }
1695
1696 spin_unlock(&base->interrupt_lock);
1697
1698 return IRQ_HANDLED;
1699 }
1700
1701 static int d40_validate_conf(struct d40_chan *d40c,
1702 struct stedma40_chan_cfg *conf)
1703 {
1704 int res = 0;
1705 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1706
1707 if (!conf->dir) {
1708 chan_err(d40c, "Invalid direction.\n");
1709 res = -EINVAL;
1710 }
1711
1712 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1713 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1714 (conf->dev_type < 0)) {
1715 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1716 res = -EINVAL;
1717 }
1718
1719 if (conf->dir == DMA_DEV_TO_DEV) {
1720
1721
1722
1723
1724 chan_err(d40c, "periph to periph not supported\n");
1725 res = -EINVAL;
1726 }
1727
1728 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1729 conf->src_info.data_width !=
1730 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1731 conf->dst_info.data_width) {
1732
1733
1734
1735
1736
1737 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1738 res = -EINVAL;
1739 }
1740
1741 return res;
1742 }
1743
1744 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1745 bool is_src, int log_event_line, bool is_log,
1746 bool *first_user)
1747 {
1748 unsigned long flags;
1749 spin_lock_irqsave(&phy->lock, flags);
1750
1751 *first_user = ((phy->allocated_src | phy->allocated_dst)
1752 == D40_ALLOC_FREE);
1753
1754 if (!is_log) {
1755
1756 if (phy->allocated_src == D40_ALLOC_FREE &&
1757 phy->allocated_dst == D40_ALLOC_FREE) {
1758 phy->allocated_dst = D40_ALLOC_PHY;
1759 phy->allocated_src = D40_ALLOC_PHY;
1760 goto found_unlock;
1761 } else
1762 goto not_found_unlock;
1763 }
1764
1765
1766 if (is_src) {
1767 if (phy->allocated_src == D40_ALLOC_PHY)
1768 goto not_found_unlock;
1769
1770 if (phy->allocated_src == D40_ALLOC_FREE)
1771 phy->allocated_src = D40_ALLOC_LOG_FREE;
1772
1773 if (!(phy->allocated_src & BIT(log_event_line))) {
1774 phy->allocated_src |= BIT(log_event_line);
1775 goto found_unlock;
1776 } else
1777 goto not_found_unlock;
1778 } else {
1779 if (phy->allocated_dst == D40_ALLOC_PHY)
1780 goto not_found_unlock;
1781
1782 if (phy->allocated_dst == D40_ALLOC_FREE)
1783 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1784
1785 if (!(phy->allocated_dst & BIT(log_event_line))) {
1786 phy->allocated_dst |= BIT(log_event_line);
1787 goto found_unlock;
1788 }
1789 }
1790 not_found_unlock:
1791 spin_unlock_irqrestore(&phy->lock, flags);
1792 return false;
1793 found_unlock:
1794 spin_unlock_irqrestore(&phy->lock, flags);
1795 return true;
1796 }
1797
1798 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1799 int log_event_line)
1800 {
1801 unsigned long flags;
1802 bool is_free = false;
1803
1804 spin_lock_irqsave(&phy->lock, flags);
1805 if (!log_event_line) {
1806 phy->allocated_dst = D40_ALLOC_FREE;
1807 phy->allocated_src = D40_ALLOC_FREE;
1808 is_free = true;
1809 goto unlock;
1810 }
1811
1812
1813 if (is_src) {
1814 phy->allocated_src &= ~BIT(log_event_line);
1815 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1816 phy->allocated_src = D40_ALLOC_FREE;
1817 } else {
1818 phy->allocated_dst &= ~BIT(log_event_line);
1819 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1820 phy->allocated_dst = D40_ALLOC_FREE;
1821 }
1822
1823 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1824 D40_ALLOC_FREE);
1825 unlock:
1826 spin_unlock_irqrestore(&phy->lock, flags);
1827
1828 return is_free;
1829 }
1830
1831 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1832 {
1833 int dev_type = d40c->dma_cfg.dev_type;
1834 int event_group;
1835 int event_line;
1836 struct d40_phy_res *phys;
1837 int i;
1838 int j;
1839 int log_num;
1840 int num_phy_chans;
1841 bool is_src;
1842 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1843
1844 phys = d40c->base->phy_res;
1845 num_phy_chans = d40c->base->num_phy_chans;
1846
1847 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1848 log_num = 2 * dev_type;
1849 is_src = true;
1850 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1851 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1852
1853 log_num = 2 * dev_type + 1;
1854 is_src = false;
1855 } else
1856 return -EINVAL;
1857
1858 event_group = D40_TYPE_TO_GROUP(dev_type);
1859 event_line = D40_TYPE_TO_EVENT(dev_type);
1860
1861 if (!is_log) {
1862 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1863
1864 if (d40c->dma_cfg.use_fixed_channel) {
1865 i = d40c->dma_cfg.phy_channel;
1866 if (d40_alloc_mask_set(&phys[i], is_src,
1867 0, is_log,
1868 first_phy_user))
1869 goto found_phy;
1870 } else {
1871 for (i = 0; i < num_phy_chans; i++) {
1872 if (d40_alloc_mask_set(&phys[i], is_src,
1873 0, is_log,
1874 first_phy_user))
1875 goto found_phy;
1876 }
1877 }
1878 } else
1879 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1880 int phy_num = j + event_group * 2;
1881 for (i = phy_num; i < phy_num + 2; i++) {
1882 if (d40_alloc_mask_set(&phys[i],
1883 is_src,
1884 0,
1885 is_log,
1886 first_phy_user))
1887 goto found_phy;
1888 }
1889 }
1890 return -EINVAL;
1891 found_phy:
1892 d40c->phy_chan = &phys[i];
1893 d40c->log_num = D40_PHY_CHAN;
1894 goto out;
1895 }
1896 if (dev_type == -1)
1897 return -EINVAL;
1898
1899
1900 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1901 int phy_num = j + event_group * 2;
1902
1903 if (d40c->dma_cfg.use_fixed_channel) {
1904 i = d40c->dma_cfg.phy_channel;
1905
1906 if ((i != phy_num) && (i != phy_num + 1)) {
1907 dev_err(chan2dev(d40c),
1908 "invalid fixed phy channel %d\n", i);
1909 return -EINVAL;
1910 }
1911
1912 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1913 is_log, first_phy_user))
1914 goto found_log;
1915
1916 dev_err(chan2dev(d40c),
1917 "could not allocate fixed phy channel %d\n", i);
1918 return -EINVAL;
1919 }
1920
1921
1922
1923
1924
1925
1926 if (is_src) {
1927 for (i = phy_num; i < phy_num + 2; i++) {
1928 if (d40_alloc_mask_set(&phys[i], is_src,
1929 event_line, is_log,
1930 first_phy_user))
1931 goto found_log;
1932 }
1933 } else {
1934 for (i = phy_num + 1; i >= phy_num; i--) {
1935 if (d40_alloc_mask_set(&phys[i], is_src,
1936 event_line, is_log,
1937 first_phy_user))
1938 goto found_log;
1939 }
1940 }
1941 }
1942 return -EINVAL;
1943
1944 found_log:
1945 d40c->phy_chan = &phys[i];
1946 d40c->log_num = log_num;
1947 out:
1948
1949 if (is_log)
1950 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1951 else
1952 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1953
1954 return 0;
1955
1956 }
1957
1958 static int d40_config_memcpy(struct d40_chan *d40c)
1959 {
1960 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1961
1962 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1963 d40c->dma_cfg = dma40_memcpy_conf_log;
1964 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1965
1966 d40_log_cfg(&d40c->dma_cfg,
1967 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1968
1969 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1970 dma_has_cap(DMA_SLAVE, cap)) {
1971 d40c->dma_cfg = dma40_memcpy_conf_phy;
1972
1973
1974 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1975
1976
1977 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1978 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1979
1980 } else {
1981 chan_err(d40c, "No memcpy\n");
1982 return -EINVAL;
1983 }
1984
1985 return 0;
1986 }
1987
1988 static int d40_free_dma(struct d40_chan *d40c)
1989 {
1990
1991 int res = 0;
1992 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1993 struct d40_phy_res *phy = d40c->phy_chan;
1994 bool is_src;
1995
1996
1997 d40_term_all(d40c);
1998
1999 if (phy == NULL) {
2000 chan_err(d40c, "phy == null\n");
2001 return -EINVAL;
2002 }
2003
2004 if (phy->allocated_src == D40_ALLOC_FREE &&
2005 phy->allocated_dst == D40_ALLOC_FREE) {
2006 chan_err(d40c, "channel already free\n");
2007 return -EINVAL;
2008 }
2009
2010 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2011 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2012 is_src = false;
2013 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2014 is_src = true;
2015 else {
2016 chan_err(d40c, "Unknown direction\n");
2017 return -EINVAL;
2018 }
2019
2020 pm_runtime_get_sync(d40c->base->dev);
2021 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2022 if (res) {
2023 chan_err(d40c, "stop failed\n");
2024 goto mark_last_busy;
2025 }
2026
2027 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2028
2029 if (chan_is_logical(d40c))
2030 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2031 else
2032 d40c->base->lookup_phy_chans[phy->num] = NULL;
2033
2034 if (d40c->busy) {
2035 pm_runtime_mark_last_busy(d40c->base->dev);
2036 pm_runtime_put_autosuspend(d40c->base->dev);
2037 }
2038
2039 d40c->busy = false;
2040 d40c->phy_chan = NULL;
2041 d40c->configured = false;
2042 mark_last_busy:
2043 pm_runtime_mark_last_busy(d40c->base->dev);
2044 pm_runtime_put_autosuspend(d40c->base->dev);
2045 return res;
2046 }
2047
2048 static bool d40_is_paused(struct d40_chan *d40c)
2049 {
2050 void __iomem *chanbase = chan_base(d40c);
2051 bool is_paused = false;
2052 unsigned long flags;
2053 void __iomem *active_reg;
2054 u32 status;
2055 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2056
2057 spin_lock_irqsave(&d40c->lock, flags);
2058
2059 if (chan_is_physical(d40c)) {
2060 if (d40c->phy_chan->num % 2 == 0)
2061 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2062 else
2063 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2064
2065 status = (readl(active_reg) &
2066 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2067 D40_CHAN_POS(d40c->phy_chan->num);
2068 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2069 is_paused = true;
2070 goto unlock;
2071 }
2072
2073 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2074 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2075 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2076 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2077 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2078 } else {
2079 chan_err(d40c, "Unknown direction\n");
2080 goto unlock;
2081 }
2082
2083 status = (status & D40_EVENTLINE_MASK(event)) >>
2084 D40_EVENTLINE_POS(event);
2085
2086 if (status != D40_DMA_RUN)
2087 is_paused = true;
2088 unlock:
2089 spin_unlock_irqrestore(&d40c->lock, flags);
2090 return is_paused;
2091
2092 }
2093
2094 static u32 stedma40_residue(struct dma_chan *chan)
2095 {
2096 struct d40_chan *d40c =
2097 container_of(chan, struct d40_chan, chan);
2098 u32 bytes_left;
2099 unsigned long flags;
2100
2101 spin_lock_irqsave(&d40c->lock, flags);
2102 bytes_left = d40_residue(d40c);
2103 spin_unlock_irqrestore(&d40c->lock, flags);
2104
2105 return bytes_left;
2106 }
2107
2108 static int
2109 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2110 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2111 unsigned int sg_len, dma_addr_t src_dev_addr,
2112 dma_addr_t dst_dev_addr)
2113 {
2114 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2115 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2116 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2117 int ret;
2118
2119 ret = d40_log_sg_to_lli(sg_src, sg_len,
2120 src_dev_addr,
2121 desc->lli_log.src,
2122 chan->log_def.lcsp1,
2123 src_info->data_width,
2124 dst_info->data_width);
2125
2126 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2127 dst_dev_addr,
2128 desc->lli_log.dst,
2129 chan->log_def.lcsp3,
2130 dst_info->data_width,
2131 src_info->data_width);
2132
2133 return ret < 0 ? ret : 0;
2134 }
2135
2136 static int
2137 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2138 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2139 unsigned int sg_len, dma_addr_t src_dev_addr,
2140 dma_addr_t dst_dev_addr)
2141 {
2142 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2143 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2144 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2145 unsigned long flags = 0;
2146 int ret;
2147
2148 if (desc->cyclic)
2149 flags |= LLI_CYCLIC | LLI_TERM_INT;
2150
2151 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2152 desc->lli_phy.src,
2153 virt_to_phys(desc->lli_phy.src),
2154 chan->src_def_cfg,
2155 src_info, dst_info, flags);
2156
2157 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2158 desc->lli_phy.dst,
2159 virt_to_phys(desc->lli_phy.dst),
2160 chan->dst_def_cfg,
2161 dst_info, src_info, flags);
2162
2163 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2164 desc->lli_pool.size, DMA_TO_DEVICE);
2165
2166 return ret < 0 ? ret : 0;
2167 }
2168
2169 static struct d40_desc *
2170 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2171 unsigned int sg_len, unsigned long dma_flags)
2172 {
2173 struct stedma40_chan_cfg *cfg;
2174 struct d40_desc *desc;
2175 int ret;
2176
2177 desc = d40_desc_get(chan);
2178 if (!desc)
2179 return NULL;
2180
2181 cfg = &chan->dma_cfg;
2182 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2183 cfg->dst_info.data_width);
2184 if (desc->lli_len < 0) {
2185 chan_err(chan, "Unaligned size\n");
2186 goto free_desc;
2187 }
2188
2189 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2190 if (ret < 0) {
2191 chan_err(chan, "Could not allocate lli\n");
2192 goto free_desc;
2193 }
2194
2195 desc->lli_current = 0;
2196 desc->txd.flags = dma_flags;
2197 desc->txd.tx_submit = d40_tx_submit;
2198
2199 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2200
2201 return desc;
2202 free_desc:
2203 d40_desc_free(chan, desc);
2204 return NULL;
2205 }
2206
2207 static struct dma_async_tx_descriptor *
2208 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2209 struct scatterlist *sg_dst, unsigned int sg_len,
2210 enum dma_transfer_direction direction, unsigned long dma_flags)
2211 {
2212 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2213 dma_addr_t src_dev_addr;
2214 dma_addr_t dst_dev_addr;
2215 struct d40_desc *desc;
2216 unsigned long flags;
2217 int ret;
2218
2219 if (!chan->phy_chan) {
2220 chan_err(chan, "Cannot prepare unallocated channel\n");
2221 return NULL;
2222 }
2223
2224 d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
2225
2226 spin_lock_irqsave(&chan->lock, flags);
2227
2228 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2229 if (desc == NULL)
2230 goto unlock;
2231
2232 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2233 desc->cyclic = true;
2234
2235 src_dev_addr = 0;
2236 dst_dev_addr = 0;
2237 if (direction == DMA_DEV_TO_MEM)
2238 src_dev_addr = chan->runtime_addr;
2239 else if (direction == DMA_MEM_TO_DEV)
2240 dst_dev_addr = chan->runtime_addr;
2241
2242 if (chan_is_logical(chan))
2243 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2244 sg_len, src_dev_addr, dst_dev_addr);
2245 else
2246 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2247 sg_len, src_dev_addr, dst_dev_addr);
2248
2249 if (ret) {
2250 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2251 chan_is_logical(chan) ? "log" : "phy", ret);
2252 goto free_desc;
2253 }
2254
2255
2256
2257
2258
2259 list_add_tail(&desc->node, &chan->prepare_queue);
2260
2261 spin_unlock_irqrestore(&chan->lock, flags);
2262
2263 return &desc->txd;
2264 free_desc:
2265 d40_desc_free(chan, desc);
2266 unlock:
2267 spin_unlock_irqrestore(&chan->lock, flags);
2268 return NULL;
2269 }
2270
2271 bool stedma40_filter(struct dma_chan *chan, void *data)
2272 {
2273 struct stedma40_chan_cfg *info = data;
2274 struct d40_chan *d40c =
2275 container_of(chan, struct d40_chan, chan);
2276 int err;
2277
2278 if (data) {
2279 err = d40_validate_conf(d40c, info);
2280 if (!err)
2281 d40c->dma_cfg = *info;
2282 } else
2283 err = d40_config_memcpy(d40c);
2284
2285 if (!err)
2286 d40c->configured = true;
2287
2288 return err == 0;
2289 }
2290 EXPORT_SYMBOL(stedma40_filter);
2291
2292 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2293 {
2294 bool realtime = d40c->dma_cfg.realtime;
2295 bool highprio = d40c->dma_cfg.high_priority;
2296 u32 rtreg;
2297 u32 event = D40_TYPE_TO_EVENT(dev_type);
2298 u32 group = D40_TYPE_TO_GROUP(dev_type);
2299 u32 bit = BIT(event);
2300 u32 prioreg;
2301 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2302
2303 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2304
2305
2306
2307
2308
2309
2310
2311
2312 if (!src && chan_is_logical(d40c))
2313 highprio = false;
2314
2315 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2316
2317
2318 if (!src)
2319 bit <<= 16;
2320
2321 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2322 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2323 }
2324
2325 static void d40_set_prio_realtime(struct d40_chan *d40c)
2326 {
2327 if (d40c->base->rev < 3)
2328 return;
2329
2330 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2331 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2332 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2333
2334 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2335 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2336 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2337 }
2338
2339 #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2340 #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2341 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2342 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2343 #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
2344
2345 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2346 struct of_dma *ofdma)
2347 {
2348 struct stedma40_chan_cfg cfg;
2349 dma_cap_mask_t cap;
2350 u32 flags;
2351
2352 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2353
2354 dma_cap_zero(cap);
2355 dma_cap_set(DMA_SLAVE, cap);
2356
2357 cfg.dev_type = dma_spec->args[0];
2358 flags = dma_spec->args[2];
2359
2360 switch (D40_DT_FLAGS_MODE(flags)) {
2361 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2362 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2363 }
2364
2365 switch (D40_DT_FLAGS_DIR(flags)) {
2366 case 0:
2367 cfg.dir = DMA_MEM_TO_DEV;
2368 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2369 break;
2370 case 1:
2371 cfg.dir = DMA_DEV_TO_MEM;
2372 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2373 break;
2374 }
2375
2376 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2377 cfg.phy_channel = dma_spec->args[1];
2378 cfg.use_fixed_channel = true;
2379 }
2380
2381 if (D40_DT_FLAGS_HIGH_PRIO(flags))
2382 cfg.high_priority = true;
2383
2384 return dma_request_channel(cap, stedma40_filter, &cfg);
2385 }
2386
2387 /* DMA ENGINE functions */
2388 static int d40_alloc_chan_resources(struct dma_chan *chan)
2389 {
2390 int err;
2391 unsigned long flags;
2392 struct d40_chan *d40c =
2393 container_of(chan, struct d40_chan, chan);
2394 bool is_free_phy;
2395 spin_lock_irqsave(&d40c->lock, flags);
2396
2397 dma_cookie_init(chan);
2398
2399 /* If no DMA configuration is set, use the default memcpy configuration */
2400 if (!d40c->configured) {
2401 err = d40_config_memcpy(d40c);
2402 if (err) {
2403 chan_err(d40c, "Failed to configure memcpy channel\n");
2404 goto mark_last_busy;
2405 }
2406 }
2407
2408 err = d40_allocate_channel(d40c, &is_free_phy);
2409 if (err) {
2410 chan_err(d40c, "Failed to allocate channel\n");
2411 d40c->configured = false;
2412 goto mark_last_busy;
2413 }
2414
2415 pm_runtime_get_sync(d40c->base->dev);
2416
2417 d40_set_prio_realtime(d40c);
2418
2419 if (chan_is_logical(d40c)) {
2420 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2421 d40c->lcpa = d40c->base->lcpa_base +
2422 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2423 else
2424 d40c->lcpa = d40c->base->lcpa_base +
2425 d40c->dma_cfg.dev_type *
2426 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2427
2428 /* Unmask the Global Interrupt Mask (GIM) for the logical channel */
2429 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2430 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2431 }
2432
2433 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2434 chan_is_logical(d40c) ? "logical" : "physical",
2435 d40c->phy_chan->num,
2436 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2437
2438
2439 /*
2440  * Only write the channel configuration to the hardware if the
2441  * physical channel was free at allocation time.
2442  */
2443
2444 if (is_free_phy)
2445 d40_config_write(d40c);
2446 mark_last_busy:
2447 pm_runtime_mark_last_busy(d40c->base->dev);
2448 pm_runtime_put_autosuspend(d40c->base->dev);
2449 spin_unlock_irqrestore(&d40c->lock, flags);
2450 return err;
2451 }
2452
2453 static void d40_free_chan_resources(struct dma_chan *chan)
2454 {
2455 struct d40_chan *d40c =
2456 container_of(chan, struct d40_chan, chan);
2457 int err;
2458 unsigned long flags;
2459
2460 if (d40c->phy_chan == NULL) {
2461 chan_err(d40c, "Cannot free unallocated channel\n");
2462 return;
2463 }
2464
2465 spin_lock_irqsave(&d40c->lock, flags);
2466
2467 err = d40_free_dma(d40c);
2468
2469 if (err)
2470 chan_err(d40c, "Failed to free channel\n");
2471 spin_unlock_irqrestore(&d40c->lock, flags);
2472 }
2473
2474 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2475 dma_addr_t dst,
2476 dma_addr_t src,
2477 size_t size,
2478 unsigned long dma_flags)
2479 {
2480 struct scatterlist dst_sg;
2481 struct scatterlist src_sg;
2482
2483 sg_init_table(&dst_sg, 1);
2484 sg_init_table(&src_sg, 1);
2485
2486 sg_dma_address(&dst_sg) = dst;
2487 sg_dma_address(&src_sg) = src;
2488
2489 sg_dma_len(&dst_sg) = size;
2490 sg_dma_len(&src_sg) = size;
2491
2492 return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2493 DMA_MEM_TO_MEM, dma_flags);
2494 }
2495
2496 static struct dma_async_tx_descriptor *
2497 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2498 unsigned int sg_len, enum dma_transfer_direction direction,
2499 unsigned long dma_flags, void *context)
2500 {
2501 if (!is_slave_direction(direction))
2502 return NULL;
2503
2504 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2505 }
2506
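/*
 * Prepare a cyclic transfer by building a scatterlist with one entry per
 * period and chaining its end back to its start, so the transfer wraps
 * around indefinitely.
 */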
2507 static struct dma_async_tx_descriptor *
2508 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2509 size_t buf_len, size_t period_len,
2510 enum dma_transfer_direction direction, unsigned long flags)
2511 {
2512 unsigned int periods = buf_len / period_len;
2513 struct dma_async_tx_descriptor *txd;
2514 struct scatterlist *sg;
2515 int i;
2516
2517 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2518 if (!sg)
2519 return NULL;
2520
2521 for (i = 0; i < periods; i++) {
2522 sg_dma_address(&sg[i]) = dma_addr;
2523 sg_dma_len(&sg[i]) = period_len;
2524 dma_addr += period_len;
2525 }
2526
2527 sg_chain(sg, periods + 1, sg);
2528
2529 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2530 DMA_PREP_INTERRUPT);
2531
2532 kfree(sg);
2533
2534 return txd;
2535 }
2536
2537 static enum dma_status d40_tx_status(struct dma_chan *chan,
2538 dma_cookie_t cookie,
2539 struct dma_tx_state *txstate)
2540 {
2541 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2542 enum dma_status ret;
2543
2544 if (d40c->phy_chan == NULL) {
2545 chan_err(d40c, "Cannot read status of unallocated channel\n");
2546 return DMA_ERROR;
2547 }
2548
2549 ret = dma_cookie_status(chan, cookie, txstate);
2550 if (ret != DMA_COMPLETE && txstate)
2551 dma_set_residue(txstate, stedma40_residue(chan));
2552
2553 if (d40_is_paused(d40c))
2554 ret = DMA_PAUSED;
2555
2556 return ret;
2557 }
2558
2559 static void d40_issue_pending(struct dma_chan *chan)
2560 {
2561 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2562 unsigned long flags;
2563
2564 if (d40c->phy_chan == NULL) {
2565 chan_err(d40c, "Channel is not allocated!\n");
2566 return;
2567 }
2568
2569 spin_lock_irqsave(&d40c->lock, flags);
2570
2571 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2572
2573 /* Busy means that queued jobs are already being processed by the hardware */
2574 if (!d40c->busy)
2575 (void) d40_queue_start(d40c);
2576
2577 spin_unlock_irqrestore(&d40c->lock, flags);
2578 }
2579
2580 static int d40_terminate_all(struct dma_chan *chan)
2581 {
2582 unsigned long flags;
2583 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2584 int ret;
2585
2586 if (d40c->phy_chan == NULL) {
2587 chan_err(d40c, "Channel is not allocated!\n");
2588 return -EINVAL;
2589 }
2590
2591 spin_lock_irqsave(&d40c->lock, flags);
2592
2593 pm_runtime_get_sync(d40c->base->dev);
2594 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2595 if (ret)
2596 chan_err(d40c, "Failed to stop channel\n");
2597
2598 d40_term_all(d40c);
2599 pm_runtime_mark_last_busy(d40c->base->dev);
2600 pm_runtime_put_autosuspend(d40c->base->dev);
2601 if (d40c->busy) {
2602 pm_runtime_mark_last_busy(d40c->base->dev);
2603 pm_runtime_put_autosuspend(d40c->base->dev);
2604 }
2605 d40c->busy = false;
2606
2607 spin_unlock_irqrestore(&d40c->lock, flags);
2608 return 0;
2609 }
2610
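/*
 * Translate a maxburst value from dma_slave_config into the closest
 * supported packet size (PSIZE) for a logical or physical channel.
 */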
2611 static int
2612 dma40_config_to_halfchannel(struct d40_chan *d40c,
2613 struct stedma40_half_channel_info *info,
2614 u32 maxburst)
2615 {
2616 int psize;
2617
2618 if (chan_is_logical(d40c)) {
2619 if (maxburst >= 16)
2620 psize = STEDMA40_PSIZE_LOG_16;
2621 else if (maxburst >= 8)
2622 psize = STEDMA40_PSIZE_LOG_8;
2623 else if (maxburst >= 4)
2624 psize = STEDMA40_PSIZE_LOG_4;
2625 else
2626 psize = STEDMA40_PSIZE_LOG_1;
2627 } else {
2628 if (maxburst >= 16)
2629 psize = STEDMA40_PSIZE_PHY_16;
2630 else if (maxburst >= 8)
2631 psize = STEDMA40_PSIZE_PHY_8;
2632 else if (maxburst >= 4)
2633 psize = STEDMA40_PSIZE_PHY_4;
2634 else
2635 psize = STEDMA40_PSIZE_PHY_1;
2636 }
2637
2638 info->psize = psize;
2639 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2640
2641 return 0;
2642 }
2643
2644 static int d40_set_runtime_config(struct dma_chan *chan,
2645 struct dma_slave_config *config)
2646 {
2647 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2648
2649 memcpy(&d40c->slave_config, config, sizeof(*config));
2650
2651 return 0;
2652 }
2653
2654
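/*
 * Apply the dma_slave_config cached by d40_set_runtime_config() for the
 * given transfer direction: pick the device address, balance the source
 * and destination widths/bursts, validate them and fill in the default
 * channel register values.
 */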
2655 static int d40_set_runtime_config_write(struct dma_chan *chan,
2656 struct dma_slave_config *config,
2657 enum dma_transfer_direction direction)
2658 {
2659 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2660 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2661 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2662 dma_addr_t config_addr;
2663 u32 src_maxburst, dst_maxburst;
2664 int ret;
2665
2666 if (d40c->phy_chan == NULL) {
2667 chan_err(d40c, "Channel is not allocated!\n");
2668 return -EINVAL;
2669 }
2670
2671 src_addr_width = config->src_addr_width;
2672 src_maxburst = config->src_maxburst;
2673 dst_addr_width = config->dst_addr_width;
2674 dst_maxburst = config->dst_maxburst;
2675
2676 if (direction == DMA_DEV_TO_MEM) {
2677 config_addr = config->src_addr;
2678
2679 if (cfg->dir != DMA_DEV_TO_MEM)
2680 dev_dbg(d40c->base->dev,
2681 "channel was not configured for peripheral "
2682 "to memory transfer (%d) overriding\n",
2683 cfg->dir);
2684 cfg->dir = DMA_DEV_TO_MEM;
2685
2686 /* Configure the memory side: if not set, copy the device side settings */
2687 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2688 dst_addr_width = src_addr_width;
2689 if (dst_maxburst == 0)
2690 dst_maxburst = src_maxburst;
2691
2692 } else if (direction == DMA_MEM_TO_DEV) {
2693 config_addr = config->dst_addr;
2694
2695 if (cfg->dir != DMA_MEM_TO_DEV)
2696 dev_dbg(d40c->base->dev,
2697 "channel was not configured for memory "
2698 "to peripheral transfer (%d) overriding\n",
2699 cfg->dir);
2700 cfg->dir = DMA_MEM_TO_DEV;
2701
2702 /* Configure the memory side: if not set, copy the device side settings */
2703 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2704 src_addr_width = dst_addr_width;
2705 if (src_maxburst == 0)
2706 src_maxburst = dst_maxburst;
2707 } else {
2708 dev_err(d40c->base->dev,
2709 "unrecognized channel direction %d\n",
2710 direction);
2711 return -EINVAL;
2712 }
2713
2714 if (config_addr == 0) {
2715 dev_err(d40c->base->dev, "no address supplied\n");
2716 return -EINVAL;
2717 }
2718
2719 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2720 dev_err(d40c->base->dev,
2721 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2722 src_maxburst,
2723 src_addr_width,
2724 dst_maxburst,
2725 dst_addr_width);
2726 return -EINVAL;
2727 }
2728
2729 if (src_maxburst > 16) {
2730 src_maxburst = 16;
2731 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2732 } else if (dst_maxburst > 16) {
2733 dst_maxburst = 16;
2734 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2735 }
2736
2737 /* Only 1, 2, 4 and 8 byte bus widths are valid */
2738 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2739 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2740 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2741 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2742 !is_power_of_2(src_addr_width) ||
2743 !is_power_of_2(dst_addr_width))
2744 return -EINVAL;
2745
2746 cfg->src_info.data_width = src_addr_width;
2747 cfg->dst_info.data_width = dst_addr_width;
2748
2749 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2750 src_maxburst);
2751 if (ret)
2752 return ret;
2753
2754 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2755 dst_maxburst);
2756 if (ret)
2757 return ret;
2758
2759 /* Fill in the default register values for this configuration */
2760 if (chan_is_logical(d40c))
2761 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2762 else
2763 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2764
2765 /* These settings take precedence for subsequent transfers */
2766 d40c->runtime_addr = config_addr;
2767 d40c->runtime_direction = direction;
2768 dev_dbg(d40c->base->dev,
2769 "configured channel %s for %s, data width %d/%d, "
2770 "maxburst %d/%d elements, LE, no flow control\n",
2771 dma_chan_name(chan),
2772 (direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2773 src_addr_width, dst_addr_width,
2774 src_maxburst, dst_maxburst);
2775
2776 return 0;
2777 }
2778
2779
2780 /* Initialization functions */
2781 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2782 struct d40_chan *chans, int offset,
2783 int num_chans)
2784 {
2785 int i = 0;
2786 struct d40_chan *d40c;
2787
2788 INIT_LIST_HEAD(&dma->channels);
2789
2790 for (i = offset; i < offset + num_chans; i++) {
2791 d40c = &chans[i];
2792 d40c->base = base;
2793 d40c->chan.device = dma;
2794
2795 spin_lock_init(&d40c->lock);
2796
2797 d40c->log_num = D40_PHY_CHAN;
2798
2799 INIT_LIST_HEAD(&d40c->done);
2800 INIT_LIST_HEAD(&d40c->active);
2801 INIT_LIST_HEAD(&d40c->queue);
2802 INIT_LIST_HEAD(&d40c->pending_queue);
2803 INIT_LIST_HEAD(&d40c->client);
2804 INIT_LIST_HEAD(&d40c->prepare_queue);
2805
2806 tasklet_setup(&d40c->tasklet, dma_tasklet);
2807
2808 list_add_tail(&d40c->chan.device_node,
2809 &dma->channels);
2810 }
2811 }
2812
2813 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2814 {
2815 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
2816 dev->device_prep_slave_sg = d40_prep_slave_sg;
2817 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2818 }
2819
2820 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2821 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2822 dev->directions = BIT(DMA_MEM_TO_MEM);
2823 /*
2824  * This controller can only access addresses on even 32 bit
2825  * boundaries, i.e. 2^2 alignment.
2826  */
2827 dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2828 }
2829
2830 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2831 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2832
2833 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2834 dev->device_free_chan_resources = d40_free_chan_resources;
2835 dev->device_issue_pending = d40_issue_pending;
2836 dev->device_tx_status = d40_tx_status;
2837 dev->device_config = d40_set_runtime_config;
2838 dev->device_pause = d40_pause;
2839 dev->device_resume = d40_resume;
2840 dev->device_terminate_all = d40_terminate_all;
2841 dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2842 dev->dev = base->dev;
2843 }
2844
2845 static int __init d40_dmaengine_init(struct d40_base *base,
2846 int num_reserved_chans)
2847 {
2848 int err;
2849
2850 d40_chan_init(base, &base->dma_slave, base->log_chans,
2851 0, base->num_log_chans);
2852
2853 dma_cap_zero(base->dma_slave.cap_mask);
2854 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2855 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2856
2857 d40_ops_init(base, &base->dma_slave);
2858
2859 err = dmaenginem_async_device_register(&base->dma_slave);
2860
2861 if (err) {
2862 d40_err(base->dev, "Failed to register slave channels\n");
2863 goto exit;
2864 }
2865
2866 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2867 base->num_log_chans, base->num_memcpy_chans);
2868
2869 dma_cap_zero(base->dma_memcpy.cap_mask);
2870 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2871
2872 d40_ops_init(base, &base->dma_memcpy);
2873
2874 err = dmaenginem_async_device_register(&base->dma_memcpy);
2875
2876 if (err) {
2877 d40_err(base->dev,
2878 "Failed to register memcpy only channels\n");
2879 goto exit;
2880 }
2881
2882 d40_chan_init(base, &base->dma_both, base->phy_chans,
2883 0, num_reserved_chans);
2884
2885 dma_cap_zero(base->dma_both.cap_mask);
2886 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2887 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2888 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2889
2890 d40_ops_init(base, &base->dma_both);
2891 err = dmaenginem_async_device_register(&base->dma_both);
2892
2893 if (err) {
2894 d40_err(base->dev,
2895 "Failed to register logical and physical capable channels\n");
2896 goto exit;
2897 }
2898 return 0;
2899 exit:
2900 return err;
2901 }
2902
2903 /* Suspend/resume functionality */
2904 #ifdef CONFIG_PM_SLEEP
2905 static int dma40_suspend(struct device *dev)
2906 {
2907 struct d40_base *base = dev_get_drvdata(dev);
2908 int ret;
2909
2910 ret = pm_runtime_force_suspend(dev);
2911 if (ret)
2912 return ret;
2913
2914 if (base->lcpa_regulator)
2915 ret = regulator_disable(base->lcpa_regulator);
2916 return ret;
2917 }
2918
2919 static int dma40_resume(struct device *dev)
2920 {
2921 struct d40_base *base = dev_get_drvdata(dev);
2922 int ret = 0;
2923
2924 if (base->lcpa_regulator) {
2925 ret = regulator_enable(base->lcpa_regulator);
2926 if (ret)
2927 return ret;
2928 }
2929
2930 return pm_runtime_force_resume(dev);
2931 }
2932 #endif
2933
2934 #ifdef CONFIG_PM
2935 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2936 u32 *regaddr, int num, bool save)
2937 {
2938 int i;
2939
2940 for (i = 0; i < num; i++) {
2941 void __iomem *addr = baseaddr + regaddr[i];
2942
2943 if (save)
2944 backup[i] = readl_relaxed(addr);
2945 else
2946 writel_relaxed(backup[i], addr);
2947 }
2948 }
2949
2950 static void d40_save_restore_registers(struct d40_base *base, bool save)
2951 {
2952 int i;
2953
2954 /* Save/restore the channel specific registers */
2955 for (i = 0; i < base->num_phy_chans; i++) {
2956 void __iomem *addr;
2957 int idx;
2958
2959 if (base->phy_res[i].reserved)
2960 continue;
2961
2962 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2963 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2964
2965 dma40_backup(addr, &base->reg_val_backup_chan[idx],
2966 d40_backup_regs_chan,
2967 ARRAY_SIZE(d40_backup_regs_chan),
2968 save);
2969 }
2970
2971 /* Save/restore the global registers */
2972 dma40_backup(base->virtbase, base->reg_val_backup,
2973 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2974 save);
2975
2976 /* Save/restore registers that only exist on DMA40 v3 and later */
2977 if (base->gen_dmac.backup)
2978 dma40_backup(base->virtbase, base->reg_val_backup_v4,
2979 base->gen_dmac.backup,
2980 base->gen_dmac.backup_size,
2981 save);
2982 }
2983
2984 static int dma40_runtime_suspend(struct device *dev)
2985 {
2986 struct d40_base *base = dev_get_drvdata(dev);
2987
2988 d40_save_restore_registers(base, true);
2989
2990 /* Do not gate the clocks on revision 1 hardware */
2991 if (base->rev != 1)
2992 writel_relaxed(base->gcc_pwr_off_mask,
2993 base->virtbase + D40_DREG_GCC);
2994
2995 return 0;
2996 }
2997
2998 static int dma40_runtime_resume(struct device *dev)
2999 {
3000 struct d40_base *base = dev_get_drvdata(dev);
3001
3002 d40_save_restore_registers(base, false);
3003
3004 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3005 base->virtbase + D40_DREG_GCC);
3006 return 0;
3007 }
3008 #endif
3009
3010 static const struct dev_pm_ops dma40_pm_ops = {
3011 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3012 SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3013 dma40_runtime_resume,
3014 NULL)
3015 };
3016
3017
3018
3019 static int __init d40_phy_res_init(struct d40_base *base)
3020 {
3021 int i;
3022 int num_phy_chans_avail = 0;
3023 u32 val[2];
3024 int odd_even_bit = -2;
3025 int gcc = D40_DREG_GCC_ENA;
3026
3027 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3028 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3029
3030 for (i = 0; i < base->num_phy_chans; i++) {
3031 base->phy_res[i].num = i;
3032 odd_even_bit += 2 * ((i % 2) == 0);
3033 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3034 /* Mark secure-only channels as occupied */
3035 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3036 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3037 base->phy_res[i].reserved = true;
3038 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3039 D40_DREG_GCC_SRC);
3040 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3041 D40_DREG_GCC_DST);
3042
3043
3044 } else {
3045 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3046 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3047 base->phy_res[i].reserved = false;
3048 num_phy_chans_avail++;
3049 }
3050 spin_lock_init(&base->phy_res[i].lock);
3051 }
3052
3053 /* Mark disabled channels as occupied */
3054 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3055 int chan = base->plat_data->disabled_channels[i];
3056
3057 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3058 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3059 base->phy_res[chan].reserved = true;
3060 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3061 D40_DREG_GCC_SRC);
3062 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3063 D40_DREG_GCC_DST);
3064 num_phy_chans_avail--;
3065 }
3066
3067 /* Mark channels that should use soft LLI */
3068 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3069 int chan = base->plat_data->soft_lli_chans[i];
3070
3071 base->phy_res[chan].use_soft_lli = true;
3072 }
3073
3074 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3075 num_phy_chans_avail, base->num_phy_chans);
3076
3077 /* Verify the configured resource type of each channel */
3078 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3079
3080 for (i = 0; i < base->num_phy_chans; i++) {
3081
3082 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3083 (val[0] & 0x3) != 1)
3084 dev_info(base->dev,
3085 "[%s] INFO: channel %d is misconfigured (%d)\n",
3086 __func__, i, val[0] & 0x3);
3087
3088 val[0] = val[0] >> 2;
3089 }
3090
3091 /*
3092  * To keep things simple, enable all clocks initially.  The clocks are
3093  * managed later, after channel allocation; event groups that contain
3094  * reserved channels are excluded from the power-off mask.
3095  */
3096
3097 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3098 base->gcc_pwr_off_mask = gcc;
3099
3100 return num_phy_chans_avail;
3101 }
3102
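/*
 * Map the controller registers, verify the AMBA PrimeCell ID, read the
 * hardware revision and channel counts, and allocate and initialize the
 * struct d40_base that describes this DMA controller instance.
 */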
3103 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3104 {
3105 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3106 struct clk *clk;
3107 void __iomem *virtbase;
3108 struct resource *res;
3109 struct d40_base *base;
3110 int num_log_chans;
3111 int num_phy_chans;
3112 int num_memcpy_chans;
3113 int clk_ret = -EINVAL;
3114 int i;
3115 u32 pid;
3116 u32 cid;
3117 u8 rev;
3118
3119 clk = clk_get(&pdev->dev, NULL);
3120 if (IS_ERR(clk)) {
3121 d40_err(&pdev->dev, "No matching clock found\n");
3122 goto check_prepare_enabled;
3123 }
3124
3125 clk_ret = clk_prepare_enable(clk);
3126 if (clk_ret) {
3127 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3128 goto disable_unprepare;
3129 }
3130
3131 /* Get IO for the DMAC base address */
3132 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3133 if (!res)
3134 goto disable_unprepare;
3135
3136 if (request_mem_region(res->start, resource_size(res),
3137 D40_NAME " I/O base") == NULL)
3138 goto disable_unprepare;
3139
3140 virtbase = ioremap(res->start, resource_size(res));
3141 if (!virtbase)
3142 goto release_region;
3143
3144 /* Read the AMBA PrimeCell peripheral and cell IDs */
3145 for (pid = 0, i = 0; i < 4; i++)
3146 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3147 & 255) << (i * 8);
3148 for (cid = 0, i = 0; i < 4; i++)
3149 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3150 & 255) << (i * 8);
3151
3152 if (cid != AMBA_CID) {
3153 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3154 goto unmap_io;
3155 }
3156 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3157 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3158 AMBA_MANF_BITS(pid),
3159 AMBA_VENDOR_ST);
3160 goto unmap_io;
3161 }
3162
3163 /*
3164  * The hardware revision is encoded in the PrimeCell PID revision
3165  * field.  Revisions below 2 (early DB8500 silicon) are not supported
3166  * by this driver.
3167  */
3168
3169
3170
3171 rev = AMBA_REV_BITS(pid);
3172 if (rev < 2) {
3173 d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3174 goto unmap_io;
3175 }
3176
3177 /* The number of physical channels on this hardware */
3178 if (plat_data->num_of_phy_chans)
3179 num_phy_chans = plat_data->num_of_phy_chans;
3180 else
3181 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3182
3183 /* The number of channels used for memcpy */
3184 if (plat_data->num_of_memcpy_chans)
3185 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3186 else
3187 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3188
3189 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3190
3191 dev_info(&pdev->dev,
3192 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3193 rev, &res->start, num_phy_chans, num_log_chans);
3194
3195 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3196 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3197 sizeof(struct d40_chan), GFP_KERNEL);
3198
3199 if (base == NULL)
3200 goto unmap_io;
3201
3202 base->rev = rev;
3203 base->clk = clk;
3204 base->num_memcpy_chans = num_memcpy_chans;
3205 base->num_phy_chans = num_phy_chans;
3206 base->num_log_chans = num_log_chans;
3207 base->phy_start = res->start;
3208 base->phy_size = resource_size(res);
3209 base->virtbase = virtbase;
3210 base->plat_data = plat_data;
3211 base->dev = &pdev->dev;
3212 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3213 base->log_chans = &base->phy_chans[num_phy_chans];
3214
3215 if (base->plat_data->num_of_phy_chans == 14) {
3216 base->gen_dmac.backup = d40_backup_regs_v4b;
3217 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3218 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3219 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3220 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3221 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3222 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3223 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3224 base->gen_dmac.il = il_v4b;
3225 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3226 base->gen_dmac.init_reg = dma_init_reg_v4b;
3227 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3228 } else {
3229 if (base->rev >= 3) {
3230 base->gen_dmac.backup = d40_backup_regs_v4a;
3231 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3232 }
3233 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3234 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3235 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3236 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3237 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3238 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3239 base->gen_dmac.il = il_v4a;
3240 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3241 base->gen_dmac.init_reg = dma_init_reg_v4a;
3242 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3243 }
3244
3245 base->phy_res = kcalloc(num_phy_chans,
3246 sizeof(*base->phy_res),
3247 GFP_KERNEL);
3248 if (!base->phy_res)
3249 goto free_base;
3250
3251 base->lookup_phy_chans = kcalloc(num_phy_chans,
3252 sizeof(*base->lookup_phy_chans),
3253 GFP_KERNEL);
3254 if (!base->lookup_phy_chans)
3255 goto free_phy_res;
3256
3257 base->lookup_log_chans = kcalloc(num_log_chans,
3258 sizeof(*base->lookup_log_chans),
3259 GFP_KERNEL);
3260 if (!base->lookup_log_chans)
3261 goto free_phy_chans;
3262
3263 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3264 sizeof(d40_backup_regs_chan),
3265 GFP_KERNEL);
3266 if (!base->reg_val_backup_chan)
3267 goto free_log_chans;
3268
3269 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3270 * D40_LCLA_LINK_PER_EVENT_GRP,
3271 sizeof(*base->lcla_pool.alloc_map),
3272 GFP_KERNEL);
3273 if (!base->lcla_pool.alloc_map)
3274 goto free_backup_chan;
3275
3276 base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3277 sizeof(*base->regs_interrupt),
3278 GFP_KERNEL);
3279 if (!base->regs_interrupt)
3280 goto free_map;
3281
3282 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3283 0, SLAB_HWCACHE_ALIGN,
3284 NULL);
3285 if (base->desc_slab == NULL)
3286 goto free_regs;
3287
3288
3289 return base;
3290 free_regs:
3291 kfree(base->regs_interrupt);
3292 free_map:
3293 kfree(base->lcla_pool.alloc_map);
3294 free_backup_chan:
3295 kfree(base->reg_val_backup_chan);
3296 free_log_chans:
3297 kfree(base->lookup_log_chans);
3298 free_phy_chans:
3299 kfree(base->lookup_phy_chans);
3300 free_phy_res:
3301 kfree(base->phy_res);
3302 free_base:
3303 kfree(base);
3304 unmap_io:
3305 iounmap(virtbase);
3306 release_region:
3307 release_mem_region(res->start, resource_size(res));
3308 check_prepare_enabled:
3309 if (!clk_ret)
3310 disable_unprepare:
3311 clk_disable_unprepare(clk);
3312 if (!IS_ERR(clk))
3313 clk_put(clk);
3314 return NULL;
3315 }
3316
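/*
 * Program the initial hardware state: write the init register table,
 * configure every non-reserved channel to its default mode and enable
 * and clear its interrupt.
 */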
3317 static void __init d40_hw_init(struct d40_base *base)
3318 {
3319
3320 int i;
3321 u32 prmseo[2] = {0, 0};
3322 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3323 u32 pcmis = 0;
3324 u32 pcicr = 0;
3325 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3326 u32 reg_size = base->gen_dmac.init_reg_size;
3327
3328 for (i = 0; i < reg_size; i++)
3329 writel(dma_init_reg[i].val,
3330 base->virtbase + dma_init_reg[i].reg);
3331
3332 /* Configure all DMA channels to default settings */
3333 for (i = 0; i < base->num_phy_chans; i++) {
3334
3335 activeo[i % 2] = activeo[i % 2] << 2;
3336
3337 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3338 == D40_ALLOC_PHY) {
3339 activeo[i % 2] |= 3;
3340 continue;
3341 }
3342
3343 /* Enable the interrupt for this channel */
3344 pcmis = (pcmis << 1) | 1;
3345
3346 /* Clear any pending interrupt for this channel */
3347 pcicr = (pcicr << 1) | 1;
3348
3349 /* Set the channel to its default (physical) mode */
3350 prmseo[i % 2] = prmseo[i % 2] << 2;
3351 prmseo[i % 2] |= 1;
3352
3353 }
3354
3355 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3356 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3357 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3358 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3359
3360 /* Write which interrupts to enable */
3361 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3362
3363 /* Write which interrupts to clear */
3364 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3365
3366 /* The init register table is init-only data; drop the references */
3367 base->gen_dmac.init_reg = NULL;
3368 base->gen_dmac.init_reg_size = 0;
3369 }
3370
3371 static int __init d40_lcla_allocate(struct d40_base *base)
3372 {
3373 struct d40_lcla_pool *pool = &base->lcla_pool;
3374 unsigned long *page_list;
3375 int i, j;
3376 int ret;
3377
3378 /*
3379  * The LCLA area must be aligned to LCLA_ALIGNMENT (256 KiB).  To
3380  * avoid wasting a full 256 KiB, keep allocating page blocks until
3381  * one happens to be correctly aligned.
3382  */
3383 page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3384 sizeof(*page_list),
3385 GFP_KERNEL);
3386 if (!page_list)
3387 return -ENOMEM;
3388
3389 /* Calculate how many pages are needed for the LCLA area */
3390 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3391
3392 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3393 page_list[i] = __get_free_pages(GFP_KERNEL,
3394 base->lcla_pool.pages);
3395 if (!page_list[i]) {
3396
3397 d40_err(base->dev, "Failed to allocate %d pages.\n",
3398 base->lcla_pool.pages);
3399 ret = -ENOMEM;
3400
3401 for (j = 0; j < i; j++)
3402 free_pages(page_list[j], base->lcla_pool.pages);
3403 goto free_page_list;
3404 }
3405
3406 if ((virt_to_phys((void *)page_list[i]) &
3407 (LCLA_ALIGNMENT - 1)) == 0)
3408 break;
3409 }
3410
3411 for (j = 0; j < i; j++)
3412 free_pages(page_list[j], base->lcla_pool.pages);
3413
3414 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3415 base->lcla_pool.base = (void *)page_list[i];
3416 } else {
3417 /*
3418  * No correctly aligned block was found within the maximum number of
3419  * attempts; fall back to an oversized buffer and align it manually.
3420  */
3421 dev_warn(base->dev,
3422 "[%s] Failed to get %d pages @ 18 bit align.\n",
3423 __func__, base->lcla_pool.pages);
3424 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3425 base->num_phy_chans +
3426 LCLA_ALIGNMENT,
3427 GFP_KERNEL);
3428 if (!base->lcla_pool.base_unaligned) {
3429 ret = -ENOMEM;
3430 goto free_page_list;
3431 }
3432
3433 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3434 LCLA_ALIGNMENT);
3435 }
3436
3437 pool->dma_addr = dma_map_single(base->dev, pool->base,
3438 SZ_1K * base->num_phy_chans,
3439 DMA_TO_DEVICE);
3440 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3441 pool->dma_addr = 0;
3442 ret = -ENOMEM;
3443 goto free_page_list;
3444 }
3445
3446 writel(virt_to_phys(base->lcla_pool.base),
3447 base->virtbase + D40_DREG_LCLA);
3448 ret = 0;
3449 free_page_list:
3450 kfree(page_list);
3451 return ret;
3452 }
3453
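/*
 * Build platform data from the device tree properties "dma-channels",
 * "memcpy-channels" and "disabled-channels".
 */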
3454 static int __init d40_of_probe(struct platform_device *pdev,
3455 struct device_node *np)
3456 {
3457 struct stedma40_platform_data *pdata;
3458 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3459 const __be32 *list;
3460
3461 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3462 if (!pdata)
3463 return -ENOMEM;
3464
3465 /* If absent, this value is read from the hardware instead */
3466 of_property_read_u32(np, "dma-channels", &num_phy);
3467 if (num_phy > 0)
3468 pdata->num_of_phy_chans = num_phy;
3469
3470 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3471 num_memcpy /= sizeof(*list);
3472
3473 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3474 d40_err(&pdev->dev,
3475 "Invalid number of memcpy channels specified (%d)\n",
3476 num_memcpy);
3477 return -EINVAL;
3478 }
3479 pdata->num_of_memcpy_chans = num_memcpy;
3480
3481 of_property_read_u32_array(np, "memcpy-channels",
3482 dma40_memcpy_channels,
3483 num_memcpy);
3484
3485 list = of_get_property(np, "disabled-channels", &num_disabled);
3486 num_disabled /= sizeof(*list);
3487
3488 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3489 d40_err(&pdev->dev,
3490 "Invalid number of disabled channels specified (%d)\n",
3491 num_disabled);
3492 return -EINVAL;
3493 }
3494
3495 of_property_read_u32_array(np, "disabled-channels",
3496 pdata->disabled_channels,
3497 num_disabled);
3498 pdata->disabled_channels[num_disabled] = -1;
3499
3500 pdev->dev.platform_data = pdata;
3501
3502 return 0;
3503 }
3504
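/*
 * Probe: detect and map the hardware, set up the LCPA/LCLA memory areas,
 * request the interrupt, enable runtime PM and register the dmaengine
 * devices and the OF DMA controller.
 */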
3505 static int __init d40_probe(struct platform_device *pdev)
3506 {
3507 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3508 struct device_node *np = pdev->dev.of_node;
3509 int ret = -ENOENT;
3510 struct d40_base *base;
3511 struct resource *res;
3512 int num_reserved_chans;
3513 u32 val;
3514
3515 if (!plat_data) {
3516 if (np) {
3517 if (d40_of_probe(pdev, np)) {
3518 ret = -ENOMEM;
3519 goto report_failure;
3520 }
3521 } else {
3522 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3523 goto report_failure;
3524 }
3525 }
3526
3527 base = d40_hw_detect_init(pdev);
3528 if (!base)
3529 goto report_failure;
3530
3531 num_reserved_chans = d40_phy_res_init(base);
3532
3533 platform_set_drvdata(pdev, base);
3534
3535 spin_lock_init(&base->interrupt_lock);
3536 spin_lock_init(&base->execmd_lock);
3537
3538 /* Get IO for the logical channel parameter address (LCPA) */
3539 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3540 if (!res) {
3541 ret = -ENOENT;
3542 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3543 goto destroy_cache;
3544 }
3545 base->lcpa_size = resource_size(res);
3546 base->phy_lcpa = res->start;
3547
3548 if (request_mem_region(res->start, resource_size(res),
3549 D40_NAME " I/O lcpa") == NULL) {
3550 ret = -EBUSY;
3551 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3552 goto destroy_cache;
3553 }
3554
3555 /* Only write the LCPA base address if the hardware has not already been configured with a different one */
3556 val = readl(base->virtbase + D40_DREG_LCPA);
3557 if (res->start != val && val != 0) {
3558 dev_warn(&pdev->dev,
3559 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3560 __func__, val, &res->start);
3561 } else
3562 writel(res->start, base->virtbase + D40_DREG_LCPA);
3563
3564 base->lcpa_base = ioremap(res->start, resource_size(res));
3565 if (!base->lcpa_base) {
3566 ret = -ENOMEM;
3567 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3568 goto destroy_cache;
3569 }
3570
3571 if (base->plat_data->use_esram_lcla) {
3572 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3573 "lcla_esram");
3574 if (!res) {
3575 ret = -ENOENT;
3576 d40_err(&pdev->dev,
3577 "No \"lcla_esram\" memory resource\n");
3578 goto destroy_cache;
3579 }
3580 base->lcla_pool.base = ioremap(res->start,
3581 resource_size(res));
3582 if (!base->lcla_pool.base) {
3583 ret = -ENOMEM;
3584 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3585 goto destroy_cache;
3586 }
3587 writel(res->start, base->virtbase + D40_DREG_LCLA);
3588
3589 } else {
3590 ret = d40_lcla_allocate(base);
3591 if (ret) {
3592 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3593 goto destroy_cache;
3594 }
3595 }
3596
3597 spin_lock_init(&base->lcla_pool.lock);
3598
3599 base->irq = platform_get_irq(pdev, 0);
3600
3601 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3602 if (ret) {
3603 d40_err(&pdev->dev, "Failed to request IRQ\n");
3604 goto destroy_cache;
3605 }
3606
3607 if (base->plat_data->use_esram_lcla) {
3608
3609 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3610 if (IS_ERR(base->lcpa_regulator)) {
3611 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3612 ret = PTR_ERR(base->lcpa_regulator);
3613 base->lcpa_regulator = NULL;
3614 goto destroy_cache;
3615 }
3616
3617 ret = regulator_enable(base->lcpa_regulator);
3618 if (ret) {
3619 d40_err(&pdev->dev,
3620 "Failed to enable lcpa_regulator\n");
3621 regulator_put(base->lcpa_regulator);
3622 base->lcpa_regulator = NULL;
3623 goto destroy_cache;
3624 }
3625 }
3626
3627 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3628
3629 pm_runtime_irq_safe(base->dev);
3630 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3631 pm_runtime_use_autosuspend(base->dev);
3632 pm_runtime_mark_last_busy(base->dev);
3633 pm_runtime_set_active(base->dev);
3634 pm_runtime_enable(base->dev);
3635
3636 ret = d40_dmaengine_init(base, num_reserved_chans);
3637 if (ret)
3638 goto destroy_cache;
3639
3640 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3641 if (ret) {
3642 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3643 goto destroy_cache;
3644 }
3645
3646 d40_hw_init(base);
3647
3648 if (np) {
3649 ret = of_dma_controller_register(np, d40_xlate, NULL);
3650 if (ret)
3651 dev_err(&pdev->dev,
3652 "could not register of_dma_controller\n");
3653 }
3654
3655 dev_info(base->dev, "initialized\n");
3656 return 0;
3657 destroy_cache:
3658 kmem_cache_destroy(base->desc_slab);
3659 if (base->virtbase)
3660 iounmap(base->virtbase);
3661
3662 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3663 iounmap(base->lcla_pool.base);
3664 base->lcla_pool.base = NULL;
3665 }
3666
3667 if (base->lcla_pool.dma_addr)
3668 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3669 SZ_1K * base->num_phy_chans,
3670 DMA_TO_DEVICE);
3671
3672 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3673 free_pages((unsigned long)base->lcla_pool.base,
3674 base->lcla_pool.pages);
3675
3676 kfree(base->lcla_pool.base_unaligned);
3677
3678 if (base->lcpa_base)
3679 iounmap(base->lcpa_base);
3680
3681 if (base->phy_lcpa)
3682 release_mem_region(base->phy_lcpa,
3683 base->lcpa_size);
3684 if (base->phy_start)
3685 release_mem_region(base->phy_start,
3686 base->phy_size);
3687 if (base->clk) {
3688 clk_disable_unprepare(base->clk);
3689 clk_put(base->clk);
3690 }
3691
3692 if (base->lcpa_regulator) {
3693 regulator_disable(base->lcpa_regulator);
3694 regulator_put(base->lcpa_regulator);
3695 }
3696
3697 kfree(base->lcla_pool.alloc_map);
3698 kfree(base->lookup_log_chans);
3699 kfree(base->lookup_phy_chans);
3700 kfree(base->phy_res);
3701 kfree(base);
3702 report_failure:
3703 d40_err(&pdev->dev, "probe failed\n");
3704 return ret;
3705 }
3706
3707 static const struct of_device_id d40_match[] = {
3708 { .compatible = "stericsson,dma40", },
3709 {}
3710 };
3711
3712 static struct platform_driver d40_driver = {
3713 .driver = {
3714 .name = D40_NAME,
3715 .pm = &dma40_pm_ops,
3716 .of_match_table = d40_match,
3717 },
3718 };
3719
3720 static int __init stedma40_init(void)
3721 {
3722 return platform_driver_probe(&d40_driver, d40_probe);
3723 }
3724 subsys_initcall(stedma40_init);