// SPDX-License-Identifier: GPL-2.0+
//
// imx-sdma.c - Freescale/NXP i.MX SDMA (Smart DMA) engine support
//
// Author: Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// (see the MODULE_* tags at the end of this file)
//

0013 #include <linux/init.h>
0014 #include <linux/iopoll.h>
0015 #include <linux/module.h>
0016 #include <linux/types.h>
0017 #include <linux/bitfield.h>
0018 #include <linux/bitops.h>
0019 #include <linux/mm.h>
0020 #include <linux/interrupt.h>
0021 #include <linux/clk.h>
0022 #include <linux/delay.h>
0023 #include <linux/sched.h>
0024 #include <linux/semaphore.h>
0025 #include <linux/spinlock.h>
0026 #include <linux/device.h>
0027 #include <linux/dma-mapping.h>
0028 #include <linux/firmware.h>
0029 #include <linux/slab.h>
0030 #include <linux/platform_device.h>
0031 #include <linux/dmaengine.h>
0032 #include <linux/of.h>
0033 #include <linux/of_address.h>
0034 #include <linux/of_device.h>
0035 #include <linux/of_dma.h>
0036 #include <linux/workqueue.h>
0037
0038 #include <asm/irq.h>
0039 #include <linux/dma/imx-dma.h>
0040 #include <linux/regmap.h>
0041 #include <linux/mfd/syscon.h>
0042 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
0043
0044 #include "dmaengine.h"
0045 #include "virt-dma.h"
0046
0047
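/* SDMA engine (AP domain) register offsets */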
0048 #define SDMA_H_C0PTR 0x000
0049 #define SDMA_H_INTR 0x004
0050 #define SDMA_H_STATSTOP 0x008
0051 #define SDMA_H_START 0x00c
0052 #define SDMA_H_EVTOVR 0x010
0053 #define SDMA_H_DSPOVR 0x014
0054 #define SDMA_H_HOSTOVR 0x018
0055 #define SDMA_H_EVTPEND 0x01c
0056 #define SDMA_H_DSPENBL 0x020
0057 #define SDMA_H_RESET 0x024
0058 #define SDMA_H_EVTERR 0x028
0059 #define SDMA_H_INTRMSK 0x02c
0060 #define SDMA_H_PSW 0x030
0061 #define SDMA_H_EVTERRDBG 0x034
0062 #define SDMA_H_CONFIG 0x038
0063 #define SDMA_ONCE_ENB 0x040
0064 #define SDMA_ONCE_DATA 0x044
0065 #define SDMA_ONCE_INSTR 0x048
0066 #define SDMA_ONCE_STAT 0x04c
0067 #define SDMA_ONCE_CMD 0x050
0068 #define SDMA_EVT_MIRROR 0x054
0069 #define SDMA_ILLINSTADDR 0x058
0070 #define SDMA_CHN0ADDR 0x05c
0071 #define SDMA_ONCE_RTB 0x060
0072 #define SDMA_XTRIG_CONF1 0x070
0073 #define SDMA_XTRIG_CONF2 0x074
0074 #define SDMA_CHNENBL0_IMX35 0x200
0075 #define SDMA_CHNENBL0_IMX31 0x080
0076 #define SDMA_CHNPRI_0 0x100
0077 #define SDMA_DONE0_CONFIG 0x1000
0078
0079
0080
0081
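/* Buffer descriptor (BD) status flags, kept in sdma_mode_count.status */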
0082 #define BD_DONE 0x01
0083 #define BD_WRAP 0x02
0084 #define BD_CONT 0x04
0085 #define BD_INTR 0x08
0086 #define BD_RROR 0x10
0087 #define BD_LAST 0x20
0088 #define BD_EXTD 0x80
0089
0090
0091
0092
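/* Data Node descriptor status flags */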
0093 #define DND_END_OF_FRAME 0x80
0094 #define DND_END_OF_XFER 0x40
0095 #define DND_DONE 0x20
0096 #define DND_UNUSED 0x01
0097
0098
0099
0100
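/* IPCv2 buffer descriptor status flags */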
0101 #define BD_IPCV2_END_OF_FRAME 0x40
0102
0103 #define IPCV2_MAX_NODES 50
0104
0105
0106
0107
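/* Error value reported by the SDMA in case of a transfer error */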
0108 #define DATA_ERROR 0x10000000
0109
0110
0111
0112
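/* Channel 0 (bootload channel) command codes, written to bd->mode.command */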
0113 #define C0_ADDR 0x01
0114 #define C0_LOAD 0x02
0115 #define C0_DUMP 0x03
0116 #define C0_SETCTX 0x07
0117 #define C0_GETCTX 0x03
0118 #define C0_SETDM 0x01
0119 #define C0_SETPM 0x04
0120 #define C0_GETDM 0x02
0121 #define C0_GETPM 0x08
0122
0123
0124
0125 #define CHANGE_ENDIANNESS 0x80
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
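/*
 * Layout of the watermark level word for peripheral-to-peripheral (p2p)
 * transfers:
 *	bits  0-7	lower watermark level (LWML)
 *	bit   8		PS: pad swallowing enable
 *	bit   9		PA: pad adding enable
 *	bit  10		SPDIF: source and destination are both on SPBA
 *	bit  11		SP: source is on SPBA
 *	bit  12		DP: destination is on SPBA
 *	bits 16-23	higher watermark level (HWML)
 *	bit  28		LWE: lower event is in the EVENTS2 register
 *	bit  29		HWE: higher event is in the EVENTS2 register
 *	bit  31		CONT: transfer length unknown, keep transferring
 *			until the channel is stopped by software
 */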
0166 #define SDMA_WATERMARK_LEVEL_LWML 0xFF
0167 #define SDMA_WATERMARK_LEVEL_PS BIT(8)
0168 #define SDMA_WATERMARK_LEVEL_PA BIT(9)
0169 #define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
0170 #define SDMA_WATERMARK_LEVEL_SP BIT(11)
0171 #define SDMA_WATERMARK_LEVEL_DP BIT(12)
0172 #define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
0173 #define SDMA_WATERMARK_LEVEL_LWE BIT(28)
0174 #define SDMA_WATERMARK_LEVEL_HWE BIT(29)
0175 #define SDMA_WATERMARK_LEVEL_CONT BIT(31)
0176
0177 #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
0178 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
0179 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
0180
0181 #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
0182 BIT(DMA_MEM_TO_DEV) | \
0183 BIT(DMA_DEV_TO_DEV))
0184
0185 #define SDMA_WATERMARK_LEVEL_N_FIFOS GENMASK(15, 12)
0186 #define SDMA_WATERMARK_LEVEL_OFF_FIFOS GENMASK(19, 16)
0187 #define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO GENMASK(31, 28)
0188 #define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
0189
0190 #define SDMA_DONE0_CONFIG_DONE_SEL BIT(7)
0191 #define SDMA_DONE0_CONFIG_DONE_DIS BIT(6)
0192
0193
0194
0195
0196
0197
0198
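/*
 * struct sdma_script_start_addrs - SDMA script start addresses
 *
 * Start addresses, in SDMA program space, of the scripts provided either by
 * the on-chip ROM or by an external RAM firmware image.  Entries the
 * firmware does not provide remain at -EINVAL (see sdma_probe() and
 * sdma_add_scripts()).
 */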
0199 struct sdma_script_start_addrs {
0200 s32 ap_2_ap_addr;
0201 s32 ap_2_bp_addr;
0202 s32 ap_2_ap_fixed_addr;
0203 s32 bp_2_ap_addr;
0204 s32 loopback_on_dsp_side_addr;
0205 s32 mcu_interrupt_only_addr;
0206 s32 firi_2_per_addr;
0207 s32 firi_2_mcu_addr;
0208 s32 per_2_firi_addr;
0209 s32 mcu_2_firi_addr;
0210 s32 uart_2_per_addr;
0211 s32 uart_2_mcu_addr;
0212 s32 per_2_app_addr;
0213 s32 mcu_2_app_addr;
0214 s32 per_2_per_addr;
0215 s32 uartsh_2_per_addr;
0216 s32 uartsh_2_mcu_addr;
0217 s32 per_2_shp_addr;
0218 s32 mcu_2_shp_addr;
0219 s32 ata_2_mcu_addr;
0220 s32 mcu_2_ata_addr;
0221 s32 app_2_per_addr;
0222 s32 app_2_mcu_addr;
0223 s32 shp_2_per_addr;
0224 s32 shp_2_mcu_addr;
0225 s32 mshc_2_mcu_addr;
0226 s32 mcu_2_mshc_addr;
0227 s32 spdif_2_mcu_addr;
0228 s32 mcu_2_spdif_addr;
0229 s32 asrc_2_mcu_addr;
0230 s32 ext_mem_2_ipu_addr;
0231 s32 descrambler_addr;
0232 s32 dptc_dvfs_addr;
0233 s32 utra_addr;
0234 s32 ram_code_start_addr;
0235
0236 s32 mcu_2_ssish_addr;
0237 s32 ssish_2_mcu_addr;
0238 s32 hdmi_dma_addr;
0239
0240 s32 zcanfd_2_mcu_addr;
0241 s32 zqspi_2_mcu_addr;
0242 s32 mcu_2_ecspi_addr;
0243 s32 mcu_2_sai_addr;
0244 s32 sai_2_mcu_addr;
0245 s32 uart_2_mcu_rom_addr;
0246 s32 uartsh_2_mcu_rom_addr;
0247
0248 s32 mcu_2_zqspi_addr;
0249
0250 };
0251
0252
0253
0254
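/*
 * struct sdma_mode_count - mode/count word of a buffer descriptor
 * @count:   transfer count for this descriptor (bytes for slave/memcpy
 *           transfers, 16/32-bit words for channel 0 commands)
 * @status:  BD_* status flags
 * @command: C0_* command, or the transfer width for slave transfers
 */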
0255 struct sdma_mode_count {
0256 #define SDMA_BD_MAX_CNT 0xffff
0257 u32 count : 16;
0258 u32 status : 8;
0259 u32 command : 8;
0260 };
0261
0262
0263
0264
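/*
 * struct sdma_buffer_descriptor - hardware buffer descriptor
 * @mode:            mode/count word, see struct sdma_mode_count
 * @buffer_addr:     DMA address of the data buffer
 * @ext_buffer_addr: extended buffer address (destination for memcpy,
 *                   target address for channel 0 commands)
 */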
0265 struct sdma_buffer_descriptor {
0266 struct sdma_mode_count mode;
0267 u32 buffer_addr;
0268 u32 ext_buffer_addr;
0269 } __attribute__ ((packed));
0270
0271
0272
0273
0274
0275
0276
0277
0278
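/*
 * struct sdma_channel_control - channel control block (CCB)
 *
 * One entry per channel in the array whose DMA address is written to
 * SDMA_H_C0PTR.
 * @current_bd_ptr: DMA address of the buffer descriptor currently in use
 * @base_bd_ptr:    DMA address of the first buffer descriptor of the channel
 */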
0279 struct sdma_channel_control {
0280 u32 current_bd_ptr;
0281 u32 base_bd_ptr;
0282 u32 unused[2];
0283 } __attribute__ ((packed));
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298
0299
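/*
 * struct sdma_state_registers - per-channel state of the SDMA RISC core
 *
 * Program counter, return/loop program counters and flag bits that are
 * saved and restored as part of struct sdma_context_data.
 */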
0300 struct sdma_state_registers {
0301 u32 pc :14;
0302 u32 unused1: 1;
0303 u32 t : 1;
0304 u32 rpc :14;
0305 u32 unused0: 1;
0306 u32 sf : 1;
0307 u32 spc :14;
0308 u32 unused2: 1;
0309 u32 df : 1;
0310 u32 epc :14;
0311 u32 lm : 2;
0312 } __attribute__ ((packed));
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
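/*
 * struct sdma_context_data - complete SDMA channel context
 *
 * Core state, general registers gReg[0..7], functional unit registers and
 * scratch words.  sdma_load_context() fills this in and copies it into the
 * SDMA context RAM with a C0_SETDM command on channel 0.
 */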
0342 struct sdma_context_data {
0343 struct sdma_state_registers channel_state;
0344 u32 gReg[8];
0345 u32 mda;
0346 u32 msa;
0347 u32 ms;
0348 u32 md;
0349 u32 pda;
0350 u32 psa;
0351 u32 ps;
0352 u32 pd;
0353 u32 ca;
0354 u32 cs;
0355 u32 dda;
0356 u32 dsa;
0357 u32 ds;
0358 u32 dd;
0359 u32 scratch0;
0360 u32 scratch1;
0361 u32 scratch2;
0362 u32 scratch3;
0363 u32 scratch4;
0364 u32 scratch5;
0365 u32 scratch6;
0366 u32 scratch7;
0367 } __attribute__ ((packed));
0368
0369
0370 struct sdma_engine;
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382
0383
0384
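/*
 * struct sdma_desc - SDMA transfer descriptor
 * @vd:             virt-dma descriptor
 * @num_bd:         number of hardware buffer descriptors
 * @bd_phys:        DMA address of the buffer descriptor array
 * @buf_tail:       index of the next buffer to complete (cyclic mode)
 * @buf_ptail:      index of the previously completed buffer (cyclic mode)
 * @period_len:     period length of a cyclic transfer
 * @chn_real_count: bytes actually transferred so far
 * @chn_count:      total bytes requested for the transfer
 * @sdmac:          channel this descriptor belongs to
 * @bd:             virtual address of the buffer descriptor array
 */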
0385 struct sdma_desc {
0386 struct virt_dma_desc vd;
0387 unsigned int num_bd;
0388 dma_addr_t bd_phys;
0389 unsigned int buf_tail;
0390 unsigned int buf_ptail;
0391 unsigned int period_len;
0392 unsigned int chn_real_count;
0393 unsigned int chn_count;
0394 struct sdma_channel *sdmac;
0395 struct sdma_buffer_descriptor *bd;
0396 };
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
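/*
 * struct sdma_channel - per-channel driver state
 *
 * Wraps the virt-dma channel and keeps everything needed to program the
 * channel context: the selected script start addresses (pc_*), the DMA
 * request (event) ids and masks, watermark level, peripheral addresses and
 * the FIFO layout used by multi-FIFO peripherals, plus the deferred-free
 * machinery used by terminate_all.
 */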
0438 struct sdma_channel {
0439 struct virt_dma_chan vc;
0440 struct sdma_desc *desc;
0441 struct sdma_engine *sdma;
0442 unsigned int channel;
0443 enum dma_transfer_direction direction;
0444 struct dma_slave_config slave_config;
0445 enum sdma_peripheral_type peripheral_type;
0446 unsigned int event_id0;
0447 unsigned int event_id1;
0448 enum dma_slave_buswidth word_size;
0449 unsigned int pc_from_device, pc_to_device;
0450 unsigned int device_to_device;
0451 unsigned int pc_to_pc;
0452 unsigned long flags;
0453 dma_addr_t per_address, per_address2;
0454 unsigned long event_mask[2];
0455 unsigned long watermark_level;
0456 u32 shp_addr, per_addr;
0457 enum dma_status status;
0458 struct imx_dma_data data;
0459 struct work_struct terminate_worker;
0460 struct list_head terminated;
0461 bool is_ram_script;
0462 unsigned int n_fifos_src;
0463 unsigned int n_fifos_dst;
0464 unsigned int stride_fifos_src;
0465 unsigned int stride_fifos_dst;
0466 unsigned int words_per_fifo;
0467 bool sw_done;
0468 };
0469
0470 #define IMX_DMA_SG_LOOP BIT(0)
0471
0472 #define MAX_DMA_CHANNELS 32
0473 #define MXC_SDMA_DEFAULT_PRIORITY 1
0474 #define MXC_SDMA_MIN_PRIORITY 1
0475 #define MXC_SDMA_MAX_PRIORITY 7
0476
0477 #define SDMA_FIRMWARE_MAGIC 0x414d4453
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
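/*
 * struct sdma_firmware_header - layout of an external SDMA firmware image
 *
 * @magic must be SDMA_FIRMWARE_MAGIC (0x414d4453, "SDMA" in little-endian
 * ASCII).  A struct sdma_script_start_addrs follows at @script_addrs_start
 * and the RAM code (@ram_code_size bytes) at @ram_code_start.
 */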
0493 struct sdma_firmware_header {
0494 u32 magic;
0495 u32 version_major;
0496 u32 version_minor;
0497 u32 script_addrs_start;
0498 u32 num_script_addrs;
0499 u32 ram_code_start;
0500 u32 ram_code_size;
0501 };
0502
0503 struct sdma_driver_data {
0504 int chnenbl0;
0505 int num_events;
0506 struct sdma_script_start_addrs *script_addrs;
0507 bool check_ratio;
0508
0509
0510
0511
0512
0513
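/*
 * true when the ROM scripts already contain the eCSPI fix, so the RAM
 * script mcu_2_ecspi is not needed (i.MX6UL and later); see sdma_get_pc().
 */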
0514 bool ecspi_fixed;
0515 };
0516
0517 struct sdma_engine {
0518 struct device *dev;
0519 struct sdma_channel channel[MAX_DMA_CHANNELS];
0520 struct sdma_channel_control *channel_control;
0521 void __iomem *regs;
0522 struct sdma_context_data *context;
0523 dma_addr_t context_phys;
0524 struct dma_device dma_device;
0525 struct clk *clk_ipg;
0526 struct clk *clk_ahb;
0527 spinlock_t channel_0_lock;
0528 u32 script_number;
0529 struct sdma_script_start_addrs *script_addrs;
0530 const struct sdma_driver_data *drvdata;
0531 u32 spba_start_addr;
0532 u32 spba_end_addr;
0533 unsigned int irq;
0534 dma_addr_t bd0_phys;
0535 struct sdma_buffer_descriptor *bd0;
0536
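/* true when the AHB and IPG clocks run at the same rate (ACR mode) */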
0537 bool clk_ratio;
0538 bool fw_loaded;
0539 };
0540
0541 static int sdma_config_write(struct dma_chan *chan,
0542 struct dma_slave_config *dmaengine_cfg,
0543 enum dma_transfer_direction direction);
0544
0545 static struct sdma_driver_data sdma_imx31 = {
0546 .chnenbl0 = SDMA_CHNENBL0_IMX31,
0547 .num_events = 32,
0548 };
0549
0550 static struct sdma_script_start_addrs sdma_script_imx25 = {
0551 .ap_2_ap_addr = 729,
0552 .uart_2_mcu_addr = 904,
0553 .per_2_app_addr = 1255,
0554 .mcu_2_app_addr = 834,
0555 .uartsh_2_mcu_addr = 1120,
0556 .per_2_shp_addr = 1329,
0557 .mcu_2_shp_addr = 1048,
0558 .ata_2_mcu_addr = 1560,
0559 .mcu_2_ata_addr = 1479,
0560 .app_2_per_addr = 1189,
0561 .app_2_mcu_addr = 770,
0562 .shp_2_per_addr = 1407,
0563 .shp_2_mcu_addr = 979,
0564 };
0565
0566 static struct sdma_driver_data sdma_imx25 = {
0567 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0568 .num_events = 48,
0569 .script_addrs = &sdma_script_imx25,
0570 };
0571
0572 static struct sdma_driver_data sdma_imx35 = {
0573 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0574 .num_events = 48,
0575 };
0576
0577 static struct sdma_script_start_addrs sdma_script_imx51 = {
0578 .ap_2_ap_addr = 642,
0579 .uart_2_mcu_addr = 817,
0580 .mcu_2_app_addr = 747,
0581 .mcu_2_shp_addr = 961,
0582 .ata_2_mcu_addr = 1473,
0583 .mcu_2_ata_addr = 1392,
0584 .app_2_per_addr = 1033,
0585 .app_2_mcu_addr = 683,
0586 .shp_2_per_addr = 1251,
0587 .shp_2_mcu_addr = 892,
0588 };
0589
0590 static struct sdma_driver_data sdma_imx51 = {
0591 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0592 .num_events = 48,
0593 .script_addrs = &sdma_script_imx51,
0594 };
0595
0596 static struct sdma_script_start_addrs sdma_script_imx53 = {
0597 .ap_2_ap_addr = 642,
0598 .app_2_mcu_addr = 683,
0599 .mcu_2_app_addr = 747,
0600 .uart_2_mcu_addr = 817,
0601 .shp_2_mcu_addr = 891,
0602 .mcu_2_shp_addr = 960,
0603 .uartsh_2_mcu_addr = 1032,
0604 .spdif_2_mcu_addr = 1100,
0605 .mcu_2_spdif_addr = 1134,
0606 .firi_2_mcu_addr = 1193,
0607 .mcu_2_firi_addr = 1290,
0608 };
0609
0610 static struct sdma_driver_data sdma_imx53 = {
0611 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0612 .num_events = 48,
0613 .script_addrs = &sdma_script_imx53,
0614 };
0615
0616 static struct sdma_script_start_addrs sdma_script_imx6q = {
0617 .ap_2_ap_addr = 642,
0618 .uart_2_mcu_addr = 817,
0619 .mcu_2_app_addr = 747,
0620 .per_2_per_addr = 6331,
0621 .uartsh_2_mcu_addr = 1032,
0622 .mcu_2_shp_addr = 960,
0623 .app_2_mcu_addr = 683,
0624 .shp_2_mcu_addr = 891,
0625 .spdif_2_mcu_addr = 1100,
0626 .mcu_2_spdif_addr = 1134,
0627 };
0628
0629 static struct sdma_driver_data sdma_imx6q = {
0630 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0631 .num_events = 48,
0632 .script_addrs = &sdma_script_imx6q,
0633 };
0634
0635 static struct sdma_driver_data sdma_imx6ul = {
0636 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0637 .num_events = 48,
0638 .script_addrs = &sdma_script_imx6q,
0639 .ecspi_fixed = true,
0640 };
0641
0642 static struct sdma_script_start_addrs sdma_script_imx7d = {
0643 .ap_2_ap_addr = 644,
0644 .uart_2_mcu_addr = 819,
0645 .mcu_2_app_addr = 749,
0646 .uartsh_2_mcu_addr = 1034,
0647 .mcu_2_shp_addr = 962,
0648 .app_2_mcu_addr = 685,
0649 .shp_2_mcu_addr = 893,
0650 .spdif_2_mcu_addr = 1102,
0651 .mcu_2_spdif_addr = 1136,
0652 };
0653
0654 static struct sdma_driver_data sdma_imx7d = {
0655 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0656 .num_events = 48,
0657 .script_addrs = &sdma_script_imx7d,
0658 };
0659
0660 static struct sdma_driver_data sdma_imx8mq = {
0661 .chnenbl0 = SDMA_CHNENBL0_IMX35,
0662 .num_events = 48,
0663 .script_addrs = &sdma_script_imx7d,
0664 .check_ratio = 1,
0665 };
0666
0667 static const struct of_device_id sdma_dt_ids[] = {
0668 { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
0669 { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
0670 { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
0671 { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
0672 { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
0673 { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
0674 { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
0675 { .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
0676 { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
0677 { }
0678 };
0679 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
0680
0681 #define SDMA_H_CONFIG_DSPDMA BIT(12)
0682 #define SDMA_H_CONFIG_RTD_PINS BIT(11)
0683 #define SDMA_H_CONFIG_ACR BIT(4)
0684 #define SDMA_H_CONFIG_CSM (3)
0685
0686 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
0687 {
0688 u32 chnenbl0 = sdma->drvdata->chnenbl0;
0689 return chnenbl0 + event * 4;
0690 }
0691
0692 static int sdma_config_ownership(struct sdma_channel *sdmac,
0693 bool event_override, bool mcu_override, bool dsp_override)
0694 {
0695 struct sdma_engine *sdma = sdmac->sdma;
0696 int channel = sdmac->channel;
0697 unsigned long evt, mcu, dsp;
0698
0699 if (event_override && mcu_override && dsp_override)
0700 return -EINVAL;
0701
0702 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
0703 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
0704 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
0705
0706 if (dsp_override)
0707 __clear_bit(channel, &dsp);
0708 else
0709 __set_bit(channel, &dsp);
0710
0711 if (event_override)
0712 __clear_bit(channel, &evt);
0713 else
0714 __set_bit(channel, &evt);
0715
0716 if (mcu_override)
0717 __clear_bit(channel, &mcu);
0718 else
0719 __set_bit(channel, &mcu);
0720
0721 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
0722 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
0723 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
0724
0725 return 0;
0726 }
0727
0728 static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
0729 {
0730 return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
0731 }
0732
0733 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
0734 {
0735 writel(BIT(channel), sdma->regs + SDMA_H_START);
0736 }
0737
0738
0739
0740
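/*
 * Run a command on channel 0 (the bootload channel) and busy-wait, for at
 * most 500 microseconds, until the channel has finished.  Called with
 * channel_0_lock held and channel 0's buffer descriptor already set up.
 */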
0741 static int sdma_run_channel0(struct sdma_engine *sdma)
0742 {
0743 int ret;
0744 u32 reg;
0745
0746 sdma_enable_channel(sdma, 0);
0747
0748 ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
0749 reg, !(reg & 1), 1, 500);
0750 if (ret)
0751 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
0752
0753
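/* Switch the CONFIG register to dynamic context switching if not done yet */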
0754 reg = readl(sdma->regs + SDMA_H_CONFIG);
0755 if ((reg & SDMA_H_CONFIG_CSM) == 0) {
0756 reg |= SDMA_H_CONFIG_CSM;
0757 writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
0758 }
0759
0760 return ret;
0761 }
0762
0763 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
0764 u32 address)
0765 {
0766 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
0767 void *buf_virt;
0768 dma_addr_t buf_phys;
0769 int ret;
0770 unsigned long flags;
0771
0772 buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
0773 if (!buf_virt)
0774 return -ENOMEM;
0775
0776 spin_lock_irqsave(&sdma->channel_0_lock, flags);
0777
0778 bd0->mode.command = C0_SETPM;
0779 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
0780 bd0->mode.count = size / 2;
0781 bd0->buffer_addr = buf_phys;
0782 bd0->ext_buffer_addr = address;
0783
0784 memcpy(buf_virt, buf, size);
0785
0786 ret = sdma_run_channel0(sdma);
0787
0788 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
0789
0790 dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
0791
0792 return ret;
0793 }
0794
0795 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
0796 {
0797 struct sdma_engine *sdma = sdmac->sdma;
0798 int channel = sdmac->channel;
0799 unsigned long val;
0800 u32 chnenbl = chnenbl_ofs(sdma, event);
0801
0802 val = readl_relaxed(sdma->regs + chnenbl);
0803 __set_bit(channel, &val);
0804 writel_relaxed(val, sdma->regs + chnenbl);
0805
0806
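/* Configure the DONE0 register for software done if the channel uses it */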
0807 if (sdmac->sw_done) {
0808 val = readl_relaxed(sdma->regs + SDMA_DONE0_CONFIG);
0809 val |= SDMA_DONE0_CONFIG_DONE_SEL;
0810 val &= ~SDMA_DONE0_CONFIG_DONE_DIS;
0811 writel_relaxed(val, sdma->regs + SDMA_DONE0_CONFIG);
0812 }
0813 }
0814
0815 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
0816 {
0817 struct sdma_engine *sdma = sdmac->sdma;
0818 int channel = sdmac->channel;
0819 u32 chnenbl = chnenbl_ofs(sdma, event);
0820 unsigned long val;
0821
0822 val = readl_relaxed(sdma->regs + chnenbl);
0823 __clear_bit(channel, &val);
0824 writel_relaxed(val, sdma->regs + chnenbl);
0825 }
0826
0827 static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
0828 {
0829 return container_of(t, struct sdma_desc, vd.tx);
0830 }
0831
0832 static void sdma_start_desc(struct sdma_channel *sdmac)
0833 {
0834 struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
0835 struct sdma_desc *desc;
0836 struct sdma_engine *sdma = sdmac->sdma;
0837 int channel = sdmac->channel;
0838
0839 if (!vd) {
0840 sdmac->desc = NULL;
0841 return;
0842 }
0843 sdmac->desc = desc = to_sdma_desc(&vd->tx);
0844
0845 list_del(&vd->node);
0846
0847 sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
0848 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
0849 sdma_enable_channel(sdma, sdmac->channel);
0850 }
0851
0852 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
0853 {
0854 struct sdma_buffer_descriptor *bd;
0855 int error = 0;
0856 enum dma_status old_status = sdmac->status;
0857
0858
0859
0860
0861
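/*
 * Cyclic (loop) mode: walk the buffer descriptors the SDMA has finished,
 * re-arm each one and invoke the period callback.
 */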
0862 while (sdmac->desc) {
0863 struct sdma_desc *desc = sdmac->desc;
0864
0865 bd = &desc->bd[desc->buf_tail];
0866
0867 if (bd->mode.status & BD_DONE)
0868 break;
0869
0870 if (bd->mode.status & BD_RROR) {
0871 bd->mode.status &= ~BD_RROR;
0872 sdmac->status = DMA_ERROR;
0873 error = -EIO;
0874 }
0875
0876
0877
0878
0879
0880
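/*
 * bd->mode.count holds the number of bytes actually transferred for this
 * period; remember it for the residue calculation and re-arm the
 * descriptor with the full period length.
 */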
0881 desc->chn_real_count = bd->mode.count;
0882 bd->mode.count = desc->period_len;
0883 desc->buf_ptail = desc->buf_tail;
0884 desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
0885
0886
0887
0888
0889
0890
0891
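/*
 * Invoke the period callback directly from interrupt context, dropping the
 * channel lock around it, to keep completion latency low.
 */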
0892 spin_unlock(&sdmac->vc.lock);
0893 dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
0894 spin_lock(&sdmac->vc.lock);
0895
0896
0897 bd->mode.status |= BD_DONE;
0898
0899 if (error)
0900 sdmac->status = old_status;
0901 }
0902
0903
0904
0905
0906
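/*
 * The SDMA engine stops a cyclic channel when a DMA request arrives while
 * none of the buffer descriptors is owned by the SDMA; restart the channel
 * if that happened.
 */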
0907 if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
0908 dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
0909 sdma_enable_channel(sdmac->sdma, sdmac->channel);
0910 }
0911 }
0912
0913 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
0914 {
0915 struct sdma_channel *sdmac = (struct sdma_channel *) data;
0916 struct sdma_buffer_descriptor *bd;
0917 int i, error = 0;
0918
0919 sdmac->desc->chn_real_count = 0;
0920
0921
0922
0923
0924 for (i = 0; i < sdmac->desc->num_bd; i++) {
0925 bd = &sdmac->desc->bd[i];
0926
0927 if (bd->mode.status & (BD_DONE | BD_RROR))
0928 error = -EIO;
0929 sdmac->desc->chn_real_count += bd->mode.count;
0930 }
0931
0932 if (error)
0933 sdmac->status = DMA_ERROR;
0934 else
0935 sdmac->status = DMA_COMPLETE;
0936 }
0937
0938 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
0939 {
0940 struct sdma_engine *sdma = dev_id;
0941 unsigned long stat;
0942
0943 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
0944 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
0945
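/* Channel 0 is the command channel and is never handled here */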
0946 stat &= ~1;
0947
0948 while (stat) {
0949 int channel = fls(stat) - 1;
0950 struct sdma_channel *sdmac = &sdma->channel[channel];
0951 struct sdma_desc *desc;
0952
0953 spin_lock(&sdmac->vc.lock);
0954 desc = sdmac->desc;
0955 if (desc) {
0956 if (sdmac->flags & IMX_DMA_SG_LOOP) {
0957 sdma_update_channel_loop(sdmac);
0958 } else {
0959 mxc_sdma_handle_channel_normal(sdmac);
0960 vchan_cookie_complete(&desc->vd);
0961 sdma_start_desc(sdmac);
0962 }
0963 }
0964
0965 spin_unlock(&sdmac->vc.lock);
0966 __clear_bit(channel, &stat);
0967 }
0968
0969 return IRQ_HANDLED;
0970 }
0971
0972
0973
0974
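/*
 * Pick the SDMA script start addresses (program counters) matching the
 * peripheral type and transfer direction.
 */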
0975 static int sdma_get_pc(struct sdma_channel *sdmac,
0976 enum sdma_peripheral_type peripheral_type)
0977 {
0978 struct sdma_engine *sdma = sdmac->sdma;
0979 int per_2_emi = 0, emi_2_per = 0;
0980
0981
0982
0983
0984 int per_2_per = 0, emi_2_emi = 0;
0985
0986 sdmac->pc_from_device = 0;
0987 sdmac->pc_to_device = 0;
0988 sdmac->device_to_device = 0;
0989 sdmac->pc_to_pc = 0;
0990 sdmac->is_ram_script = false;
0991
0992 switch (peripheral_type) {
0993 case IMX_DMATYPE_MEMORY:
0994 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
0995 break;
0996 case IMX_DMATYPE_DSP:
0997 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
0998 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
0999 break;
1000 case IMX_DMATYPE_FIRI:
1001 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
1002 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
1003 break;
1004 case IMX_DMATYPE_UART:
1005 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
1006 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1007 break;
1008 case IMX_DMATYPE_UART_SP:
1009 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
1010 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1011 break;
1012 case IMX_DMATYPE_ATA:
1013 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
1014 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
1015 break;
1016 case IMX_DMATYPE_CSPI:
1017 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1018
1019
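/*
 * Use the ROM script mcu_2_app when the eCSPI issue is fixed in hardware;
 * otherwise fall back to the mcu_2_ecspi RAM script.
 */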
1020 if (sdmac->sdma->drvdata->ecspi_fixed) {
1021 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1022 } else {
1023 emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
1024 sdmac->is_ram_script = true;
1025 }
1026
1027 break;
1028 case IMX_DMATYPE_EXT:
1029 case IMX_DMATYPE_SSI:
1030 case IMX_DMATYPE_SAI:
1031 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1032 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1033 break;
1034 case IMX_DMATYPE_SSI_DUAL:
1035 per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
1036 emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
1037 sdmac->is_ram_script = true;
1038 break;
1039 case IMX_DMATYPE_SSI_SP:
1040 case IMX_DMATYPE_MMC:
1041 case IMX_DMATYPE_SDHC:
1042 case IMX_DMATYPE_CSPI_SP:
1043 case IMX_DMATYPE_ESAI:
1044 case IMX_DMATYPE_MSHC_SP:
1045 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1046 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1047 break;
1048 case IMX_DMATYPE_ASRC:
1049 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
1050 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
1051 per_2_per = sdma->script_addrs->per_2_per_addr;
1052 sdmac->is_ram_script = true;
1053 break;
1054 case IMX_DMATYPE_ASRC_SP:
1055 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1056 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1057 per_2_per = sdma->script_addrs->per_2_per_addr;
1058 break;
1059 case IMX_DMATYPE_MSHC:
1060 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
1061 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
1062 break;
1063 case IMX_DMATYPE_CCM:
1064 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
1065 break;
1066 case IMX_DMATYPE_SPDIF:
1067 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
1068 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
1069 break;
1070 case IMX_DMATYPE_IPU_MEMORY:
1071 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
1072 break;
1073 case IMX_DMATYPE_MULTI_SAI:
1074 per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
1075 emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
1076 break;
1077 default:
1078 dev_err(sdma->dev, "Unsupported transfer type %d\n",
1079 peripheral_type);
1080 return -EINVAL;
1081 }
1082
1083 sdmac->pc_from_device = per_2_emi;
1084 sdmac->pc_to_device = emi_2_per;
1085 sdmac->device_to_device = per_2_per;
1086 sdmac->pc_to_pc = emi_2_emi;
1087
1088 return 0;
1089 }
1090
1091 static int sdma_load_context(struct sdma_channel *sdmac)
1092 {
1093 struct sdma_engine *sdma = sdmac->sdma;
1094 int channel = sdmac->channel;
1095 int load_address;
1096 struct sdma_context_data *context = sdma->context;
1097 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
1098 int ret;
1099 unsigned long flags;
1100
1101 if (sdmac->direction == DMA_DEV_TO_MEM)
1102 load_address = sdmac->pc_from_device;
1103 else if (sdmac->direction == DMA_DEV_TO_DEV)
1104 load_address = sdmac->device_to_device;
1105 else if (sdmac->direction == DMA_MEM_TO_MEM)
1106 load_address = sdmac->pc_to_pc;
1107 else
1108 load_address = sdmac->pc_to_device;
1109
1110 if (load_address < 0)
1111 return load_address;
1112
1113 dev_dbg(sdma->dev, "load_address = %d\n", load_address);
1114 dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1115 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1116 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1117 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1118 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1119
1120 spin_lock_irqsave(&sdma->channel_0_lock, flags);
1121
1122 memset(context, 0, sizeof(*context));
1123 context->channel_state.pc = load_address;
1124
1125
1126
1127
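/*
 * Hand the event masks, peripheral addresses and watermark level to the
 * script through the general registers of the channel context.
 */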
1128 context->gReg[0] = sdmac->event_mask[1];
1129 context->gReg[1] = sdmac->event_mask[0];
1130 context->gReg[2] = sdmac->per_addr;
1131 context->gReg[6] = sdmac->shp_addr;
1132 context->gReg[7] = sdmac->watermark_level;
1133
1134 bd0->mode.command = C0_SETDM;
1135 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1136 bd0->mode.count = sizeof(*context) / 4;
1137 bd0->buffer_addr = sdma->context_phys;
1138 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
1139 ret = sdma_run_channel0(sdma);
1140
1141 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
1142
1143 return ret;
1144 }
1145
1146 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
1147 {
1148 return container_of(chan, struct sdma_channel, vc.chan);
1149 }
1150
1151 static int sdma_disable_channel(struct dma_chan *chan)
1152 {
1153 struct sdma_channel *sdmac = to_sdma_chan(chan);
1154 struct sdma_engine *sdma = sdmac->sdma;
1155 int channel = sdmac->channel;
1156
1157 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
1158 sdmac->status = DMA_ERROR;
1159
1160 return 0;
1161 }
1162 static void sdma_channel_terminate_work(struct work_struct *work)
1163 {
1164 struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1165 terminate_worker);
1166
1167
1168
1169
1170
1171
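/*
 * Give the SDMA core roughly one buffer descriptor's worth of time (worst
 * case about 1 ms) after the channel was stopped, so it has really
 * finished before the descriptors below are freed.
 */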
1172 usleep_range(1000, 2000);
1173
1174 vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
1175 }
1176
1177 static int sdma_terminate_all(struct dma_chan *chan)
1178 {
1179 struct sdma_channel *sdmac = to_sdma_chan(chan);
1180 unsigned long flags;
1181
1182 spin_lock_irqsave(&sdmac->vc.lock, flags);
1183
1184 sdma_disable_channel(chan);
1185
1186 if (sdmac->desc) {
1187 vchan_terminate_vdesc(&sdmac->desc->vd);
1188
1189
1190
1191
1192
1193
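/*
 * Move the remaining descriptors to the terminated list; they are freed
 * later from the terminate worker once the engine is known to be idle.
 */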
1194 vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
1195 sdmac->desc = NULL;
1196 schedule_work(&sdmac->terminate_worker);
1197 }
1198
1199 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1200
1201 return 0;
1202 }
1203
1204 static void sdma_channel_synchronize(struct dma_chan *chan)
1205 {
1206 struct sdma_channel *sdmac = to_sdma_chan(chan);
1207
1208 vchan_synchronize(&sdmac->vc);
1209
1210 flush_work(&sdmac->terminate_worker);
1211 }
1212
1213 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1214 {
1215 struct sdma_engine *sdma = sdmac->sdma;
1216
1217 int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1218 int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1219
1220 set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1221 set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1222
1223 if (sdmac->event_id0 > 31)
1224 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1225
1226 if (sdmac->event_id1 > 31)
1227 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1228
1229
1230
1231
1232
1233
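/*
 * If LWML (source burst) is larger than HWML (destination burst), swap the
 * two watermark fields and the corresponding event masks.
 */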
1234 if (lwml > hwml) {
1235 sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1236 SDMA_WATERMARK_LEVEL_HWML);
1237 sdmac->watermark_level |= hwml;
1238 sdmac->watermark_level |= lwml << 16;
1239 swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1240 }
1241
1242 if (sdmac->per_address2 >= sdma->spba_start_addr &&
1243 sdmac->per_address2 <= sdma->spba_end_addr)
1244 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1245
1246 if (sdmac->per_address >= sdma->spba_start_addr &&
1247 sdmac->per_address <= sdma->spba_end_addr)
1248 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1249
1250 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1251 }
1252
1253 static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
1254 {
1255 unsigned int n_fifos;
1256 unsigned int stride_fifos;
1257 unsigned int words_per_fifo;
1258
1259 if (sdmac->sw_done)
1260 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
1261
1262 if (sdmac->direction == DMA_DEV_TO_MEM) {
1263 n_fifos = sdmac->n_fifos_src;
1264 stride_fifos = sdmac->stride_fifos_src;
1265 } else {
1266 n_fifos = sdmac->n_fifos_dst;
1267 stride_fifos = sdmac->stride_fifos_dst;
1268 }
1269
1270 words_per_fifo = sdmac->words_per_fifo;
1271
1272 sdmac->watermark_level |=
1273 FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
1274 sdmac->watermark_level |=
1275 FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
1276 if (words_per_fifo)
1277 sdmac->watermark_level |=
1278 FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
1279 }
1280
1281 static int sdma_config_channel(struct dma_chan *chan)
1282 {
1283 struct sdma_channel *sdmac = to_sdma_chan(chan);
1284 int ret;
1285
1286 sdma_disable_channel(chan);
1287
1288 sdmac->event_mask[0] = 0;
1289 sdmac->event_mask[1] = 0;
1290 sdmac->shp_addr = 0;
1291 sdmac->per_addr = 0;
1292
1293 switch (sdmac->peripheral_type) {
1294 case IMX_DMATYPE_DSP:
1295 sdma_config_ownership(sdmac, false, true, true);
1296 break;
1297 case IMX_DMATYPE_MEMORY:
1298 sdma_config_ownership(sdmac, false, true, false);
1299 break;
1300 default:
1301 sdma_config_ownership(sdmac, true, true, false);
1302 break;
1303 }
1304
1305 ret = sdma_get_pc(sdmac, sdmac->peripheral_type);
1306 if (ret)
1307 return ret;
1308
1309 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1310 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1311
1312 if (sdmac->event_id1) {
1313 if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1314 sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1315 sdma_set_watermarklevel_for_p2p(sdmac);
1316 } else {
1317 if (sdmac->peripheral_type ==
1318 IMX_DMATYPE_MULTI_SAI)
1319 sdma_set_watermarklevel_for_sais(sdmac);
1320
1321 __set_bit(sdmac->event_id0, sdmac->event_mask);
1322 }
1323
1324
1325 sdmac->shp_addr = sdmac->per_address;
1326 sdmac->per_addr = sdmac->per_address2;
1327 } else {
1328 sdmac->watermark_level = 0;
1329 }
1330
1331 return 0;
1332 }
1333
1334 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1335 unsigned int priority)
1336 {
1337 struct sdma_engine *sdma = sdmac->sdma;
1338 int channel = sdmac->channel;
1339
1340 if (priority < MXC_SDMA_MIN_PRIORITY
1341 || priority > MXC_SDMA_MAX_PRIORITY) {
1342 return -EINVAL;
1343 }
1344
1345 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1346
1347 return 0;
1348 }
1349
1350 static int sdma_request_channel0(struct sdma_engine *sdma)
1351 {
1352 int ret = -EBUSY;
1353
1354 sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
1355 GFP_NOWAIT);
1356 if (!sdma->bd0) {
1357 ret = -ENOMEM;
1358 goto out;
1359 }
1360
1361 sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1362 sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
1363
1364 sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
1365 return 0;
1366 out:
1367
1368 return ret;
1369 }
1370
1371
1372 static int sdma_alloc_bd(struct sdma_desc *desc)
1373 {
1374 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1375 int ret = 0;
1376
1377 desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
1378 &desc->bd_phys, GFP_NOWAIT);
1379 if (!desc->bd) {
1380 ret = -ENOMEM;
1381 goto out;
1382 }
1383 out:
1384 return ret;
1385 }
1386
1387 static void sdma_free_bd(struct sdma_desc *desc)
1388 {
1389 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1390
1391 dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
1392 desc->bd_phys);
1393 }
1394
1395 static void sdma_desc_free(struct virt_dma_desc *vd)
1396 {
1397 struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1398
1399 sdma_free_bd(desc);
1400 kfree(desc);
1401 }
1402
1403 static int sdma_alloc_chan_resources(struct dma_chan *chan)
1404 {
1405 struct sdma_channel *sdmac = to_sdma_chan(chan);
1406 struct imx_dma_data *data = chan->private;
1407 struct imx_dma_data mem_data;
1408 int prio, ret;
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
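/*
 * MEMCPY users (e.g. dmatest) never set chan->private through a filter
 * function, so fabricate memory-to-memory channel data here.  Slave users
 * must provide a struct imx_dma_data.
 */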
1419 if (!data) {
1420 dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1421 mem_data.priority = 2;
1422 mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
1423 mem_data.dma_request = 0;
1424 mem_data.dma_request2 = 0;
1425 data = &mem_data;
1426
1427 ret = sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1428 if (ret)
1429 return ret;
1430 }
1431
1432 switch (data->priority) {
1433 case DMA_PRIO_HIGH:
1434 prio = 3;
1435 break;
1436 case DMA_PRIO_MEDIUM:
1437 prio = 2;
1438 break;
1439 case DMA_PRIO_LOW:
1440 default:
1441 prio = 1;
1442 break;
1443 }
1444
1445 sdmac->peripheral_type = data->peripheral_type;
1446 sdmac->event_id0 = data->dma_request;
1447 sdmac->event_id1 = data->dma_request2;
1448
1449 ret = clk_enable(sdmac->sdma->clk_ipg);
1450 if (ret)
1451 return ret;
1452 ret = clk_enable(sdmac->sdma->clk_ahb);
1453 if (ret)
1454 goto disable_clk_ipg;
1455
1456 ret = sdma_set_channel_priority(sdmac, prio);
1457 if (ret)
1458 goto disable_clk_ahb;
1459
1460 return 0;
1461
1462 disable_clk_ahb:
1463 clk_disable(sdmac->sdma->clk_ahb);
1464 disable_clk_ipg:
1465 clk_disable(sdmac->sdma->clk_ipg);
1466 return ret;
1467 }
1468
1469 static void sdma_free_chan_resources(struct dma_chan *chan)
1470 {
1471 struct sdma_channel *sdmac = to_sdma_chan(chan);
1472 struct sdma_engine *sdma = sdmac->sdma;
1473
1474 sdma_terminate_all(chan);
1475
1476 sdma_channel_synchronize(chan);
1477
1478 sdma_event_disable(sdmac, sdmac->event_id0);
1479 if (sdmac->event_id1)
1480 sdma_event_disable(sdmac, sdmac->event_id1);
1481
1482 sdmac->event_id0 = 0;
1483 sdmac->event_id1 = 0;
1484
1485 sdma_set_channel_priority(sdmac, 0);
1486
1487 clk_disable(sdma->clk_ipg);
1488 clk_disable(sdma->clk_ahb);
1489 }
1490
1491 static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1492 enum dma_transfer_direction direction, u32 bds)
1493 {
1494 struct sdma_desc *desc;
1495
1496 if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
1497 dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
1498 goto err_out;
1499 }
1500
1501 desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1502 if (!desc)
1503 goto err_out;
1504
1505 sdmac->status = DMA_IN_PROGRESS;
1506 sdmac->direction = direction;
1507 sdmac->flags = 0;
1508
1509 desc->chn_count = 0;
1510 desc->chn_real_count = 0;
1511 desc->buf_tail = 0;
1512 desc->buf_ptail = 0;
1513 desc->sdmac = sdmac;
1514 desc->num_bd = bds;
1515
1516 if (sdma_alloc_bd(desc))
1517 goto err_desc_out;
1518
1519
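/* MEMCPY has no slave_config step, so set the channel ownership here */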
1520 if (direction == DMA_MEM_TO_MEM)
1521 sdma_config_ownership(sdmac, false, true, false);
1522
1523 if (sdma_load_context(sdmac))
1524 goto err_desc_out;
1525
1526 return desc;
1527
1528 err_desc_out:
1529 kfree(desc);
1530 err_out:
1531 return NULL;
1532 }
1533
1534 static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1535 struct dma_chan *chan, dma_addr_t dma_dst,
1536 dma_addr_t dma_src, size_t len, unsigned long flags)
1537 {
1538 struct sdma_channel *sdmac = to_sdma_chan(chan);
1539 struct sdma_engine *sdma = sdmac->sdma;
1540 int channel = sdmac->channel;
1541 size_t count;
1542 int i = 0, param;
1543 struct sdma_buffer_descriptor *bd;
1544 struct sdma_desc *desc;
1545
1546 if (!chan || !len)
1547 return NULL;
1548
1549 dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
1550 &dma_src, &dma_dst, len, channel);
1551
1552 desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1553 len / SDMA_BD_MAX_CNT + 1);
1554 if (!desc)
1555 return NULL;
1556
1557 do {
1558 count = min_t(size_t, len, SDMA_BD_MAX_CNT);
1559 bd = &desc->bd[i];
1560 bd->buffer_addr = dma_src;
1561 bd->ext_buffer_addr = dma_dst;
1562 bd->mode.count = count;
1563 desc->chn_count += count;
1564 bd->mode.command = 0;
1565
1566 dma_src += count;
1567 dma_dst += count;
1568 len -= count;
1569 i++;
1570
1571 param = BD_DONE | BD_EXTD | BD_CONT;
1572
1573 if (!len) {
1574 param |= BD_INTR;
1575 param |= BD_LAST;
1576 param &= ~BD_CONT;
1577 }
1578
1579 dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
1580 i, count, bd->buffer_addr,
1581 param & BD_WRAP ? "wrap" : "",
1582 param & BD_INTR ? " intr" : "");
1583
1584 bd->mode.status = param;
1585 } while (len);
1586
1587 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1588 }
1589
1590 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1591 struct dma_chan *chan, struct scatterlist *sgl,
1592 unsigned int sg_len, enum dma_transfer_direction direction,
1593 unsigned long flags, void *context)
1594 {
1595 struct sdma_channel *sdmac = to_sdma_chan(chan);
1596 struct sdma_engine *sdma = sdmac->sdma;
1597 int i, count;
1598 int channel = sdmac->channel;
1599 struct scatterlist *sg;
1600 struct sdma_desc *desc;
1601
1602 sdma_config_write(chan, &sdmac->slave_config, direction);
1603
1604 desc = sdma_transfer_init(sdmac, direction, sg_len);
1605 if (!desc)
1606 goto err_out;
1607
1608 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1609 sg_len, channel);
1610
1611 for_each_sg(sgl, sg, sg_len, i) {
1612 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1613 int param;
1614
1615 bd->buffer_addr = sg->dma_address;
1616
1617 count = sg_dma_len(sg);
1618
1619 if (count > SDMA_BD_MAX_CNT) {
1620 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1621 channel, count, SDMA_BD_MAX_CNT);
1622 goto err_bd_out;
1623 }
1624
1625 bd->mode.count = count;
1626 desc->chn_count += count;
1627
1628 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1629 goto err_bd_out;
1630
1631 switch (sdmac->word_size) {
1632 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1633 bd->mode.command = 0;
1634 if (count & 3 || sg->dma_address & 3)
1635 goto err_bd_out;
1636 break;
1637 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1638 bd->mode.command = 2;
1639 if (count & 1 || sg->dma_address & 1)
1640 goto err_bd_out;
1641 break;
1642 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1643 bd->mode.command = 1;
1644 break;
1645 default:
1646 goto err_bd_out;
1647 }
1648
1649 param = BD_DONE | BD_EXTD | BD_CONT;
1650
1651 if (i + 1 == sg_len) {
1652 param |= BD_INTR;
1653 param |= BD_LAST;
1654 param &= ~BD_CONT;
1655 }
1656
1657 dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1658 i, count, (u64)sg->dma_address,
1659 param & BD_WRAP ? "wrap" : "",
1660 param & BD_INTR ? " intr" : "");
1661
1662 bd->mode.status = param;
1663 }
1664
1665 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1666 err_bd_out:
1667 sdma_free_bd(desc);
1668 kfree(desc);
1669 err_out:
1670 sdmac->status = DMA_ERROR;
1671 return NULL;
1672 }
1673
1674 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1675 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1676 size_t period_len, enum dma_transfer_direction direction,
1677 unsigned long flags)
1678 {
1679 struct sdma_channel *sdmac = to_sdma_chan(chan);
1680 struct sdma_engine *sdma = sdmac->sdma;
1681 int num_periods = buf_len / period_len;
1682 int channel = sdmac->channel;
1683 int i = 0, buf = 0;
1684 struct sdma_desc *desc;
1685
1686 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1687
1688 sdma_config_write(chan, &sdmac->slave_config, direction);
1689
1690 desc = sdma_transfer_init(sdmac, direction, num_periods);
1691 if (!desc)
1692 goto err_out;
1693
1694 desc->period_len = period_len;
1695
1696 sdmac->flags |= IMX_DMA_SG_LOOP;
1697
1698 if (period_len > SDMA_BD_MAX_CNT) {
1699 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1700 channel, period_len, SDMA_BD_MAX_CNT);
1701 goto err_bd_out;
1702 }
1703
1704 while (buf < buf_len) {
1705 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1706 int param;
1707
1708 bd->buffer_addr = dma_addr;
1709
1710 bd->mode.count = period_len;
1711
1712 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1713 goto err_bd_out;
1714 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1715 bd->mode.command = 0;
1716 else
1717 bd->mode.command = sdmac->word_size;
1718
1719 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1720 if (i + 1 == num_periods)
1721 param |= BD_WRAP;
1722
1723 dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
1724 i, period_len, (u64)dma_addr,
1725 param & BD_WRAP ? "wrap" : "",
1726 param & BD_INTR ? " intr" : "");
1727
1728 bd->mode.status = param;
1729
1730 dma_addr += period_len;
1731 buf += period_len;
1732
1733 i++;
1734 }
1735
1736 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1737 err_bd_out:
1738 sdma_free_bd(desc);
1739 kfree(desc);
1740 err_out:
1741 sdmac->status = DMA_ERROR;
1742 return NULL;
1743 }
1744
1745 static int sdma_config_write(struct dma_chan *chan,
1746 struct dma_slave_config *dmaengine_cfg,
1747 enum dma_transfer_direction direction)
1748 {
1749 struct sdma_channel *sdmac = to_sdma_chan(chan);
1750
1751 if (direction == DMA_DEV_TO_MEM) {
1752 sdmac->per_address = dmaengine_cfg->src_addr;
1753 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1754 dmaengine_cfg->src_addr_width;
1755 sdmac->word_size = dmaengine_cfg->src_addr_width;
1756 } else if (direction == DMA_DEV_TO_DEV) {
1757 sdmac->per_address2 = dmaengine_cfg->src_addr;
1758 sdmac->per_address = dmaengine_cfg->dst_addr;
1759 sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1760 SDMA_WATERMARK_LEVEL_LWML;
1761 sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1762 SDMA_WATERMARK_LEVEL_HWML;
1763 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1764 } else {
1765 sdmac->per_address = dmaengine_cfg->dst_addr;
1766 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1767 dmaengine_cfg->dst_addr_width;
1768 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1769 }
1770 sdmac->direction = direction;
1771 return sdma_config_channel(chan);
1772 }
1773
1774 static int sdma_config(struct dma_chan *chan,
1775 struct dma_slave_config *dmaengine_cfg)
1776 {
1777 struct sdma_channel *sdmac = to_sdma_chan(chan);
1778 struct sdma_engine *sdma = sdmac->sdma;
1779
1780 memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1781
1782 if (dmaengine_cfg->peripheral_config) {
1783 struct sdma_peripheral_config *sdmacfg = dmaengine_cfg->peripheral_config;
1784 if (dmaengine_cfg->peripheral_size != sizeof(struct sdma_peripheral_config)) {
1785 dev_err(sdma->dev, "Invalid peripheral size %zu, expected %zu\n",
1786 dmaengine_cfg->peripheral_size,
1787 sizeof(struct sdma_peripheral_config));
1788 return -EINVAL;
1789 }
1790 sdmac->n_fifos_src = sdmacfg->n_fifos_src;
1791 sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
1792 sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
1793 sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
1794 sdmac->words_per_fifo = sdmacfg->words_per_fifo;
1795 sdmac->sw_done = sdmacfg->sw_done;
1796 }
1797
1798
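/*
 * Enable the DMA request events up front, so requests triggered right
 * after configuration are not lost.
 */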
1799 if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1800 return -EINVAL;
1801 sdma_event_enable(sdmac, sdmac->event_id0);
1802
1803 if (sdmac->event_id1) {
1804 if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1805 return -EINVAL;
1806 sdma_event_enable(sdmac, sdmac->event_id1);
1807 }
1808
1809 return 0;
1810 }
1811
1812 static enum dma_status sdma_tx_status(struct dma_chan *chan,
1813 dma_cookie_t cookie,
1814 struct dma_tx_state *txstate)
1815 {
1816 struct sdma_channel *sdmac = to_sdma_chan(chan);
1817 struct sdma_desc *desc = NULL;
1818 u32 residue;
1819 struct virt_dma_desc *vd;
1820 enum dma_status ret;
1821 unsigned long flags;
1822
1823 ret = dma_cookie_status(chan, cookie, txstate);
1824 if (ret == DMA_COMPLETE || !txstate)
1825 return ret;
1826
1827 spin_lock_irqsave(&sdmac->vc.lock, flags);
1828
1829 vd = vchan_find_desc(&sdmac->vc, cookie);
1830 if (vd)
1831 desc = to_sdma_desc(&vd->tx);
1832 else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
1833 desc = sdmac->desc;
1834
1835 if (desc) {
1836 if (sdmac->flags & IMX_DMA_SG_LOOP)
1837 residue = (desc->num_bd - desc->buf_ptail) *
1838 desc->period_len - desc->chn_real_count;
1839 else
1840 residue = desc->chn_count - desc->chn_real_count;
1841 } else {
1842 residue = 0;
1843 }
1844
1845 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1846
1847 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1848 residue);
1849
1850 return sdmac->status;
1851 }
1852
1853 static void sdma_issue_pending(struct dma_chan *chan)
1854 {
1855 struct sdma_channel *sdmac = to_sdma_chan(chan);
1856 unsigned long flags;
1857
1858 spin_lock_irqsave(&sdmac->vc.lock, flags);
1859 if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1860 sdma_start_desc(sdmac);
1861 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1862 }
1863
1864 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
1865 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
1866 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45
1867 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46
1868
1869 static void sdma_add_scripts(struct sdma_engine *sdma,
1870 const struct sdma_script_start_addrs *addr)
1871 {
1872 s32 *addr_arr = (u32 *)addr;
1873 s32 *saddr_arr = (u32 *)sdma->script_addrs;
1874 int i;
1875
1876
1877 if (!sdma->script_number)
1878 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1879
1880 if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
1881 / sizeof(s32)) {
1882 dev_err(sdma->dev,
1883 "SDMA script number %d not match with firmware.\n",
1884 sdma->script_number);
1885 return;
1886 }
1887
1888 for (i = 0; i < sdma->script_number; i++)
1889 if (addr_arr[i] > 0)
1890 saddr_arr[i] = addr_arr[i];
1891
1892
1893
1894
1895
1896
1897
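/*
 * Newer firmware images export the ROM addresses of the UART scripts
 * separately; prefer those over the RAM variants copied above so the UART
 * keeps working with the ROM scripts.
 */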
1898 if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
1899 if (addr->uart_2_mcu_rom_addr)
1900 sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
1901 if (addr->uartsh_2_mcu_rom_addr)
1902 sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
1903 }
1904 }
1905
1906 static void sdma_load_firmware(const struct firmware *fw, void *context)
1907 {
1908 struct sdma_engine *sdma = context;
1909 const struct sdma_firmware_header *header;
1910 const struct sdma_script_start_addrs *addr;
1911 unsigned short *ram_code;
1912
1913 if (!fw) {
1914 dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1915
1916 return;
1917 }
1918
1919 if (fw->size < sizeof(*header))
1920 goto err_firmware;
1921
1922 header = (struct sdma_firmware_header *)fw->data;
1923
1924 if (header->magic != SDMA_FIRMWARE_MAGIC)
1925 goto err_firmware;
1926 if (header->ram_code_start + header->ram_code_size > fw->size)
1927 goto err_firmware;
1928 switch (header->version_major) {
1929 case 1:
1930 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1931 break;
1932 case 2:
1933 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1934 break;
1935 case 3:
1936 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1937 break;
1938 case 4:
1939 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
1940 break;
1941 default:
1942 dev_err(sdma->dev, "unknown firmware version\n");
1943 goto err_firmware;
1944 }
1945
1946 addr = (void *)header + header->script_addrs_start;
1947 ram_code = (void *)header + header->ram_code_start;
1948
1949 clk_enable(sdma->clk_ipg);
1950 clk_enable(sdma->clk_ahb);
1951
1952 sdma_load_script(sdma, ram_code,
1953 header->ram_code_size,
1954 addr->ram_code_start_addr);
1955 clk_disable(sdma->clk_ipg);
1956 clk_disable(sdma->clk_ahb);
1957
1958 sdma_add_scripts(sdma, addr);
1959
1960 sdma->fw_loaded = true;
1961
1962 dev_info(sdma->dev, "loaded firmware %d.%d\n",
1963 header->version_major,
1964 header->version_minor);
1965
1966 err_firmware:
1967 release_firmware(fw);
1968 }
1969
1970 #define EVENT_REMAP_CELLS 3
1971
1972 static int sdma_event_remap(struct sdma_engine *sdma)
1973 {
1974 struct device_node *np = sdma->dev->of_node;
1975 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1976 struct property *event_remap;
1977 struct regmap *gpr;
1978 char propname[] = "fsl,sdma-event-remap";
1979 u32 reg, val, shift, num_map, i;
1980 int ret = 0;
1981
1982 if (IS_ERR(np) || !gpr_np)
1983 goto out;
1984
1985 event_remap = of_find_property(np, propname, NULL);
1986 num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1987 if (!num_map) {
1988 dev_dbg(sdma->dev, "no event needs to be remapped\n");
1989 goto out;
1990 } else if (num_map % EVENT_REMAP_CELLS) {
1991 dev_err(sdma->dev, "the property %s must modulo %d\n",
1992 propname, EVENT_REMAP_CELLS);
1993 ret = -EINVAL;
1994 goto out;
1995 }
1996
1997 gpr = syscon_node_to_regmap(gpr_np);
1998 if (IS_ERR(gpr)) {
1999 dev_err(sdma->dev, "failed to get gpr regmap\n");
2000 ret = PTR_ERR(gpr);
2001 goto out;
2002 }
2003
2004 for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
ret = of_property_read_u32_index(np, propname, i, &reg);
2006 if (ret) {
2007 dev_err(sdma->dev, "failed to read property %s index %d\n",
2008 propname, i);
2009 goto out;
2010 }
2011
2012 ret = of_property_read_u32_index(np, propname, i + 1, &shift);
2013 if (ret) {
2014 dev_err(sdma->dev, "failed to read property %s index %d\n",
2015 propname, i + 1);
2016 goto out;
2017 }
2018
2019 ret = of_property_read_u32_index(np, propname, i + 2, &val);
2020 if (ret) {
2021 dev_err(sdma->dev, "failed to read property %s index %d\n",
2022 propname, i + 2);
2023 goto out;
2024 }
2025
2026 regmap_update_bits(gpr, reg, BIT(shift), val << shift);
2027 }
2028
2029 out:
2030 if (gpr_np)
2031 of_node_put(gpr_np);
2032
2033 return ret;
2034 }
2035
2036 static int sdma_get_firmware(struct sdma_engine *sdma,
2037 const char *fw_name)
2038 {
2039 int ret;
2040
2041 ret = request_firmware_nowait(THIS_MODULE,
2042 FW_ACTION_UEVENT, fw_name, sdma->dev,
2043 GFP_KERNEL, sdma, sdma_load_firmware);
2044
2045 return ret;
2046 }
2047
2048 static int sdma_init(struct sdma_engine *sdma)
2049 {
2050 int i, ret;
2051 dma_addr_t ccb_phys;
2052
2053 ret = clk_enable(sdma->clk_ipg);
2054 if (ret)
2055 return ret;
2056 ret = clk_enable(sdma->clk_ahb);
2057 if (ret)
2058 goto disable_clk_ipg;
2059
2060 if (sdma->drvdata->check_ratio &&
2061 (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
2062 sdma->clk_ratio = 1;
2063
2064
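/* Be sure the SDMA has not started yet */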
2065 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
2066
2067 sdma->channel_control = dma_alloc_coherent(sdma->dev,
2068 MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
2069 sizeof(struct sdma_context_data),
2070 &ccb_phys, GFP_KERNEL);
2071
2072 if (!sdma->channel_control) {
2073 ret = -ENOMEM;
2074 goto err_dma_alloc;
2075 }
2076
2077 sdma->context = (void *)sdma->channel_control +
2078 MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2079 sdma->context_phys = ccb_phys +
2080 MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2081
2082
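/* Disable all DMA request events */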
2083 for (i = 0; i < sdma->drvdata->num_events; i++)
2084 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
2085
2086
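/* All channels start with priority 0 (disabled) */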
2087 for (i = 0; i < MAX_DMA_CHANNELS; i++)
2088 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
2089
2090 ret = sdma_request_channel0(sdma);
2091 if (ret)
2092 goto err_dma_alloc;
2093
2094 sdma_config_ownership(&sdma->channel[0], false, true, false);
2095
2096
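/* Set Command Channel (Channel Zero) */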
2097 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
2098
2099
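/* Set the CONFIG register; use ACR when AHB and IPG run at the same rate */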
2100 if (sdma->clk_ratio)
2101 writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
2102 else
2103 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
2104
2105 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
2106
2107
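/* Give the command channel (channel 0) the highest priority */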
2108 sdma_set_channel_priority(&sdma->channel[0], 7);
2109
2110 clk_disable(sdma->clk_ipg);
2111 clk_disable(sdma->clk_ahb);
2112
2113 return 0;
2114
2115 err_dma_alloc:
2116 clk_disable(sdma->clk_ahb);
2117 disable_clk_ipg:
2118 clk_disable(sdma->clk_ipg);
2119 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
2120 return ret;
2121 }
2122
2123 static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
2124 {
2125 struct sdma_channel *sdmac = to_sdma_chan(chan);
2126 struct imx_dma_data *data = fn_param;
2127
2128 if (!imx_dma_is_general_purpose(chan))
2129 return false;
2130
2131 sdmac->data = *data;
2132 chan->private = &sdmac->data;
2133
2134 return true;
2135 }
2136
2137 static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
2138 struct of_dma *ofdma)
2139 {
2140 struct sdma_engine *sdma = ofdma->of_dma_data;
2141 dma_cap_mask_t mask = sdma->dma_device.cap_mask;
2142 struct imx_dma_data data;
2143
2144 if (dma_spec->args_count != 3)
2145 return NULL;
2146
2147 data.dma_request = dma_spec->args[0];
2148 data.peripheral_type = dma_spec->args[1];
2149 data.priority = dma_spec->args[2];
2150
2151
2152
2153
2154
2155
2156
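/*
 * The devicetree cells only carry one DMA request; make sure dma_request2
 * never contains a stale value.
 */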
2157 data.dma_request2 = 0;
2158
2159 return __dma_request_channel(&mask, sdma_filter_fn, &data,
2160 ofdma->of_node);
2161 }
2162
2163 static int sdma_probe(struct platform_device *pdev)
2164 {
2165 struct device_node *np = pdev->dev.of_node;
2166 struct device_node *spba_bus;
2167 const char *fw_name;
2168 int ret;
2169 int irq;
2170 struct resource *iores;
2171 struct resource spba_res;
2172 int i;
2173 struct sdma_engine *sdma;
2174 s32 *saddr_arr;
2175
2176 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2177 if (ret)
2178 return ret;
2179
2180 sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2181 if (!sdma)
2182 return -ENOMEM;
2183
2184 spin_lock_init(&sdma->channel_0_lock);
2185
2186 sdma->dev = &pdev->dev;
2187 sdma->drvdata = of_device_get_match_data(sdma->dev);
2188
2189 irq = platform_get_irq(pdev, 0);
2190 if (irq < 0)
2191 return irq;
2192
2193 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2194 sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
2195 if (IS_ERR(sdma->regs))
2196 return PTR_ERR(sdma->regs);
2197
2198 sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2199 if (IS_ERR(sdma->clk_ipg))
2200 return PTR_ERR(sdma->clk_ipg);
2201
2202 sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2203 if (IS_ERR(sdma->clk_ahb))
2204 return PTR_ERR(sdma->clk_ahb);
2205
2206 ret = clk_prepare(sdma->clk_ipg);
2207 if (ret)
2208 return ret;
2209
2210 ret = clk_prepare(sdma->clk_ahb);
2211 if (ret)
2212 goto err_clk;
2213
2214 ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
2215 dev_name(&pdev->dev), sdma);
2216 if (ret)
2217 goto err_irq;
2218
2219 sdma->irq = irq;
2220
2221 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2222 if (!sdma->script_addrs) {
2223 ret = -ENOMEM;
2224 goto err_irq;
2225 }
2226
2227
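/* Initially no script addresses are available */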
2228 saddr_arr = (s32 *)sdma->script_addrs;
2229 for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
2230 saddr_arr[i] = -EINVAL;
2231
2232 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2233 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2234 dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2235
2236 INIT_LIST_HEAD(&sdma->dma_device.channels);
2237
2238 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2239 struct sdma_channel *sdmac = &sdma->channel[i];
2240
2241 sdmac->sdma = sdma;
2242
2243 sdmac->channel = i;
2244 sdmac->vc.desc_free = sdma_desc_free;
2245 INIT_LIST_HEAD(&sdmac->terminated);
2246 INIT_WORK(&sdmac->terminate_worker,
2247 sdma_channel_terminate_work);
2248
2249
2250
2251
2252
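/*
 * Do not register channel 0 with dmaengine; it is used internally as the
 * command (bootload) channel.
 */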
2253 if (i)
2254 vchan_init(&sdmac->vc, &sdma->dma_device);
2255 }
2256
2257 ret = sdma_init(sdma);
2258 if (ret)
2259 goto err_init;
2260
2261 ret = sdma_event_remap(sdma);
2262 if (ret)
2263 goto err_init;
2264
2265 if (sdma->drvdata->script_addrs)
2266 sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
2267
2268 sdma->dma_device.dev = &pdev->dev;
2269
2270 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2271 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2272 sdma->dma_device.device_tx_status = sdma_tx_status;
2273 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2274 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2275 sdma->dma_device.device_config = sdma_config;
2276 sdma->dma_device.device_terminate_all = sdma_terminate_all;
2277 sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2278 sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2279 sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2280 sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2281 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2282 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2283 sdma->dma_device.device_issue_pending = sdma_issue_pending;
2284 sdma->dma_device.copy_align = 2;
2285 dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
2286
2287 platform_set_drvdata(pdev, sdma);
2288
2289 ret = dma_async_device_register(&sdma->dma_device);
2290 if (ret) {
2291 dev_err(&pdev->dev, "unable to register\n");
2292 goto err_init;
2293 }
2294
2295 if (np) {
2296 ret = of_dma_controller_register(np, sdma_xlate, sdma);
2297 if (ret) {
2298 dev_err(&pdev->dev, "failed to register controller\n");
2299 goto err_register;
2300 }
2301
2302 spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2303 ret = of_address_to_resource(spba_bus, 0, &spba_res);
2304 if (!ret) {
2305 sdma->spba_start_addr = spba_res.start;
2306 sdma->spba_end_addr = spba_res.end;
2307 }
2308 of_node_put(spba_bus);
2309 }
2310
2311
2312
2313
2314
2315
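/*
 * An external RAM firmware is optional: look up its name in the devicetree
 * and request it asynchronously.  Until it is loaded the ROM scripts are
 * used.
 */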
2316 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2317 &fw_name);
2318 if (ret) {
2319 dev_warn(&pdev->dev, "failed to get firmware name\n");
2320 } else {
2321 ret = sdma_get_firmware(sdma, fw_name);
2322 if (ret)
2323 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2324 }
2325
2326 return 0;
2327
2328 err_register:
2329 dma_async_device_unregister(&sdma->dma_device);
2330 err_init:
2331 kfree(sdma->script_addrs);
2332 err_irq:
2333 clk_unprepare(sdma->clk_ahb);
2334 err_clk:
2335 clk_unprepare(sdma->clk_ipg);
2336 return ret;
2337 }
2338
2339 static int sdma_remove(struct platform_device *pdev)
2340 {
2341 struct sdma_engine *sdma = platform_get_drvdata(pdev);
2342 int i;
2343
2344 devm_free_irq(&pdev->dev, sdma->irq, sdma);
2345 dma_async_device_unregister(&sdma->dma_device);
2346 kfree(sdma->script_addrs);
2347 clk_unprepare(sdma->clk_ahb);
2348 clk_unprepare(sdma->clk_ipg);
2349
2350 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2351 struct sdma_channel *sdmac = &sdma->channel[i];
2352
2353 tasklet_kill(&sdmac->vc.task);
2354 sdma_free_chan_resources(&sdmac->vc.chan);
2355 }
2356
2357 platform_set_drvdata(pdev, NULL);
2358 return 0;
2359 }
2360
2361 static struct platform_driver sdma_driver = {
2362 .driver = {
2363 .name = "imx-sdma",
2364 .of_match_table = sdma_dt_ids,
2365 },
2366 .remove = sdma_remove,
2367 .probe = sdma_probe,
2368 };
2369
2370 module_platform_driver(sdma_driver);
2371
2372 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2373 MODULE_DESCRIPTION("i.MX SDMA driver");
2374 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
2375 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2376 #endif
2377 #if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
2378 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2379 #endif
2380 MODULE_LICENSE("GPL");