0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/bitfield.h>
0009 #include <linux/clk.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/dmapool.h>
0012 #include <linux/err.h>
0013 #include <linux/export.h>
0014 #include <linux/init.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/iopoll.h>
0017 #include <linux/of_dma.h>
0018 #include <linux/of_irq.h>
0019 #include <linux/platform_device.h>
0020 #include <linux/reset.h>
0021
0022 #include "../dmaengine.h"
0023 #include "../virt-dma.h"
0024
0025 #define DRIVER_NAME "lgm-dma"
0026
0027 #define DMA_ID 0x0008
0028 #define DMA_ID_REV GENMASK(7, 0)
0029 #define DMA_ID_PNR GENMASK(19, 16)
0030 #define DMA_ID_CHNR GENMASK(26, 20)
0031 #define DMA_ID_DW_128B BIT(27)
0032 #define DMA_ID_AW_36B BIT(28)
0033 #define DMA_VER32 0x32
0034 #define DMA_VER31 0x31
0035 #define DMA_VER22 0x0A
0036
0037 #define DMA_CTRL 0x0010
0038 #define DMA_CTRL_RST BIT(0)
0039 #define DMA_CTRL_DSRAM_PATH BIT(1)
0040 #define DMA_CTRL_DBURST_WR BIT(3)
0041 #define DMA_CTRL_VLD_DF_ACK BIT(4)
0042 #define DMA_CTRL_CH_FL BIT(6)
0043 #define DMA_CTRL_DS_FOD BIT(7)
0044 #define DMA_CTRL_DRB BIT(8)
0045 #define DMA_CTRL_ENBE BIT(9)
0046 #define DMA_CTRL_DESC_TMOUT_CNT_V31 GENMASK(27, 16)
0047 #define DMA_CTRL_DESC_TMOUT_EN_V31 BIT(30)
0048 #define DMA_CTRL_PKTARB BIT(31)
0049
0050 #define DMA_CPOLL 0x0014
0051 #define DMA_CPOLL_CNT GENMASK(15, 4)
0052 #define DMA_CPOLL_EN BIT(31)
0053
0054 #define DMA_CS 0x0018
0055 #define DMA_CS_MASK GENMASK(5, 0)
0056
0057 #define DMA_CCTRL 0x001C
0058 #define DMA_CCTRL_ON BIT(0)
0059 #define DMA_CCTRL_RST BIT(1)
0060 #define DMA_CCTRL_CH_POLL_EN BIT(2)
0061 #define DMA_CCTRL_CH_ABC BIT(3)
0062 #define DMA_CDBA_MSB GENMASK(7, 4)
0063 #define DMA_CCTRL_DIR_TX BIT(8)
0064 #define DMA_CCTRL_CLASS GENMASK(11, 9)
0065 #define DMA_CCTRL_CLASSH GENMASK(19, 18)
0066 #define DMA_CCTRL_WR_NP_EN BIT(21)
0067 #define DMA_CCTRL_PDEN BIT(23)
0068 #define DMA_MAX_CLASS (SZ_32 - 1)
0069
0070 #define DMA_CDBA 0x0020
0071 #define DMA_CDLEN 0x0024
0072 #define DMA_CIS 0x0028
0073 #define DMA_CIE 0x002C
0074 #define DMA_CI_EOP BIT(1)
0075 #define DMA_CI_DUR BIT(2)
0076 #define DMA_CI_DESCPT BIT(3)
0077 #define DMA_CI_CHOFF BIT(4)
0078 #define DMA_CI_RDERR BIT(5)
0079 #define DMA_CI_ALL \
0080 (DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
0081
0082 #define DMA_PS 0x0040
0083 #define DMA_PCTRL 0x0044
0084 #define DMA_PCTRL_RXBL16 BIT(0)
0085 #define DMA_PCTRL_TXBL16 BIT(1)
0086 #define DMA_PCTRL_RXBL GENMASK(3, 2)
0087 #define DMA_PCTRL_RXBL_8 3
0088 #define DMA_PCTRL_TXBL GENMASK(5, 4)
0089 #define DMA_PCTRL_TXBL_8 3
0090 #define DMA_PCTRL_PDEN BIT(6)
0091 #define DMA_PCTRL_RXBL32 BIT(7)
0092 #define DMA_PCTRL_RXENDI GENMASK(9, 8)
0093 #define DMA_PCTRL_TXENDI GENMASK(11, 10)
0094 #define DMA_PCTRL_TXBL32 BIT(15)
0095 #define DMA_PCTRL_MEM_FLUSH BIT(16)
0096
0097 #define DMA_IRNEN1 0x00E8
0098 #define DMA_IRNCR1 0x00EC
0099 #define DMA_IRNEN 0x00F4
0100 #define DMA_IRNCR 0x00F8
0101 #define DMA_C_DP_TICK 0x100
0102 #define DMA_C_DP_TICK_TIKNARB GENMASK(15, 0)
0103 #define DMA_C_DP_TICK_TIKARB GENMASK(31, 16)
0104
0105 #define DMA_C_HDRM 0x110
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115 #define DMA_C_HDRM_HDR_SUM BIT(30)
0116
0117 #define DMA_C_BOFF 0x120
0118 #define DMA_C_BOFF_BOF_LEN GENMASK(7, 0)
0119 #define DMA_C_BOFF_EN BIT(31)
0120
0121 #define DMA_ORRC 0x190
0122 #define DMA_ORRC_ORRCNT GENMASK(8, 4)
0123 #define DMA_ORRC_EN BIT(31)
0124
0125 #define DMA_C_ENDIAN 0x200
0126 #define DMA_C_END_DATAENDI GENMASK(1, 0)
0127 #define DMA_C_END_DE_EN BIT(7)
0128 #define DMA_C_END_DESENDI GENMASK(9, 8)
0129 #define DMA_C_END_DES_EN BIT(16)
0130
0131
0132 #define DMA_ADDR_36BIT BIT(0)
0133 #define DMA_DATA_128BIT BIT(1)
0134 #define DMA_CHAN_FLOW_CTL BIT(2)
0135 #define DMA_DESC_FOD BIT(3)
0136 #define DMA_DESC_IN_SRAM BIT(4)
0137 #define DMA_EN_BYTE_EN BIT(5)
0138 #define DMA_DBURST_WR BIT(6)
0139 #define DMA_VALID_DESC_FETCH_ACK BIT(7)
0140 #define DMA_DFT_DRB BIT(8)
0141
0142 #define DMA_ORRC_MAX_CNT (SZ_32 - 1)
0143 #define DMA_DFT_POLL_CNT SZ_4
0144 #define DMA_DFT_BURST_V22 SZ_2
0145 #define DMA_BURSTL_8DW SZ_8
0146 #define DMA_BURSTL_16DW SZ_16
0147 #define DMA_BURSTL_32DW SZ_32
0148 #define DMA_DFT_BURST DMA_BURSTL_16DW
0149 #define DMA_MAX_DESC_NUM (SZ_8K - 1)
0150 #define DMA_CHAN_BOFF_MAX (SZ_256 - 1)
0151 #define DMA_DFT_ENDIAN 0
0152
0153 #define DMA_DFT_DESC_TCNT 50
0154 #define DMA_HDR_LEN_MAX (SZ_16K - 1)
0155
0156
0157 #define DMA_TX_CH BIT(0)
0158 #define DMA_RX_CH BIT(1)
0159 #define DEVICE_ALLOC_DESC BIT(2)
0160 #define CHAN_IN_USE BIT(3)
0161 #define DMA_HW_DESC BIT(4)
0162
0163
0164 #define DESC_DATA_LEN GENMASK(15, 0)
0165 #define DESC_BYTE_OFF GENMASK(25, 23)
0166 #define DESC_EOP BIT(28)
0167 #define DESC_SOP BIT(29)
0168 #define DESC_C BIT(30)
0169 #define DESC_OWN BIT(31)
0170
0171 #define DMA_CHAN_RST 1
0172 #define DMA_MAX_SIZE (BIT(16) - 1)
0173 #define MAX_LOWER_CHANS 32
0174 #define MASK_LOWER_CHANS GENMASK(4, 0)
0175 #define DMA_OWN 1
0176 #define HIGH_4_BITS GENMASK(3, 0)
0177 #define DMA_DFT_DESC_NUM 1
0178 #define DMA_PKT_DROP_DIS 0
0179
/* Software-tracked channel run state (mirrors DMA_CCTRL_ON). */
enum ldma_chan_on_off {
	DMA_CH_OFF = 0,	/* channel stopped */
	DMA_CH_ON = 1,	/* channel running */
};
0184
/* Controller instance type, matched against ldma_inst_data::type. */
enum {
	DMA_TYPE_TX = 0,	/* TX-direction controller */
	DMA_TYPE_RX,		/* RX-direction controller */
	DMA_TYPE_MCPY,		/* memory-copy controller */
};
0190
0191 struct ldma_dev;
0192 struct ldma_port;
0193
/* Per-channel driver state. */
struct ldma_chan {
	struct virt_dma_chan vchan;	/* virt-dma base channel */
	struct ldma_port *port;		/* owning port */
	char name[8];			/* channel name */
	int nr;				/* hardware channel number */
	u32 flags;			/* DMA_TX_CH/DMA_RX_CH/DMA_HW_DESC/... */
	enum ldma_chan_on_off onoff;	/* cached on/off state */
	dma_addr_t desc_phys;		/* descriptor ring bus address */
	void *desc_base;		/* descriptor ring CPU address */
	u32 desc_cnt;			/* number of descriptors in the ring */
	int rst;			/* set once the channel was reset */
	u32 hdrm_len;			/* header mode length */
	bool hdrm_csum;			/* header checksum mode enable */
	u32 boff_len;			/* byte offset length */
	u32 data_endian;		/* data endianness type */
	u32 desc_endian;		/* descriptor endianness type */
	bool pden;			/* packet drop enable */
	bool desc_rx_np;		/* RX write non-posted enable */
	bool data_endian_en;		/* data endian conversion enable */
	bool desc_endian_en;		/* descriptor endian conversion enable */
	bool abc_en;			/* DMA_CCTRL_CH_ABC enable — exact HW semantics not visible here */
	bool desc_init;			/* descriptor base programmed into HW */
	struct dma_pool *desc_pool;	/* pool backing HW descriptors */
	u32 desc_num;			/* max descriptors per transfer */
	struct dw2_desc_sw *ds;		/* in-flight SW descriptor (VER22 path) */
	struct work_struct work;	/* deferred interrupt handling */
	struct dma_slave_config config;	/* last slave config from the client */
};
0222
/* Per-port configuration, programmed into DMA_PCTRL via ldma_port_cfg(). */
struct ldma_port {
	struct ldma_dev *ldev;	/* owning controller */
	u32 portid;		/* port index, written to DMA_PS */
	u32 rxbl;		/* RX burst length */
	u32 txbl;		/* TX burst length */
	u32 rxendi;		/* RX endianness setting */
	u32 txendi;		/* TX endianness setting */
	u32 pkt_drop;		/* packet drop enable */
};
0232
0233
/* Static, per-instance controller description. */
struct ldma_inst_data {
	bool desc_in_sram;		/* descriptors use the SRAM path */
	bool chan_fc;			/* channel flow control supported */
	bool desc_fod;			/* descriptor fetch on demand */
	bool valid_desc_fetch_ack;	/* valid descriptor fetch ack */
	u32 orrc;			/* DMA_ORRC count (0 disables) */
	const char *name;		/* instance name used in logging */
	u32 type;			/* DMA_TYPE_TX / DMA_TYPE_RX / DMA_TYPE_MCPY */
};
0243
/* Per-controller driver state. */
struct ldma_dev {
	struct device *dev;		/* backing device */
	void __iomem *base;		/* register base address */
	struct reset_control *rst;	/* reset control line */
	struct clk *core_clk;		/* core clock */
	struct dma_device dma_dev;	/* dmaengine device */
	u32 ver;			/* HW version, compared against DMA_VER* */
	int irq;			/* controller interrupt */
	struct ldma_port *ports;	/* array of port_nrs ports */
	struct ldma_chan *chans;	/* array of chan_nrs channels */
	spinlock_t dev_lock;		/* serializes register access (DMA_CS select) */
	u32 chan_nrs;			/* number of channels */
	u32 port_nrs;			/* number of ports */
	u32 channels_mask;		/* bitmap of usable channels */
	u32 flags;			/* DMA_* feature flags */
	u32 pollcnt;			/* polling counter for DMA_CPOLL */
	const struct ldma_inst_data *inst;	/* static instance data */
	struct workqueue_struct *wq;	/* workqueue for deferred IRQ work */
};
0263
/*
 * Hardware DMA descriptor: one control/status word plus a 32-bit buffer
 * address.  Layout is fixed by hardware, hence packed and 8-byte aligned.
 */
struct dw2_desc {
	u32 field;	/* DESC_* control/status bits */
	u32 addr;	/* lower 32 bits of the buffer address */
} __packed __aligned(8);
0268
/* Software wrapper around a run of hardware descriptors. */
struct dw2_desc_sw {
	struct virt_dma_desc vdesc;	/* virt-dma bookkeeping */
	struct ldma_chan *chan;		/* owning channel */
	dma_addr_t desc_phys;		/* bus address of first HW descriptor */
	size_t desc_cnt;		/* number of HW descriptors */
	size_t size;			/* total payload size in bytes */
	struct dw2_desc *desc_hw;	/* CPU address of HW descriptors */
};
0277
0278 static inline void
0279 ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
0280 {
0281 u32 old_val, new_val;
0282
0283 old_val = readl(d->base + ofs);
0284 new_val = (old_val & ~mask) | (val & mask);
0285
0286 if (new_val != old_val)
0287 writel(new_val, d->base + ofs);
0288 }
0289
/* Convert a dmaengine channel to the driver's channel wrapper. */
static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ldma_chan, vchan.chan);
}
0294
/* Convert a dmaengine device to the driver's controller wrapper. */
static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
{
	return container_of(dma_dev, struct ldma_dev, dma_dev);
}
0299
/* Convert a virt-dma descriptor to the driver's SW descriptor wrapper. */
static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct dw2_desc_sw, vdesc);
}
0304
0305 static inline bool ldma_chan_tx(struct ldma_chan *c)
0306 {
0307 return !!(c->flags & DMA_TX_CH);
0308 }
0309
0310 static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
0311 {
0312 return !!(c->flags & DMA_HW_DESC);
0313 }
0314
0315 static void ldma_dev_reset(struct ldma_dev *d)
0316
0317 {
0318 unsigned long flags;
0319
0320 spin_lock_irqsave(&d->dev_lock, flags);
0321 ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL);
0322 spin_unlock_irqrestore(&d->dev_lock, flags);
0323 }
0324
0325 static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable)
0326 {
0327 unsigned long flags;
0328 u32 mask = DMA_CTRL_PKTARB;
0329 u32 val = enable ? DMA_CTRL_PKTARB : 0;
0330
0331 spin_lock_irqsave(&d->dev_lock, flags);
0332 ldma_update_bits(d, mask, val, DMA_CTRL);
0333 spin_unlock_irqrestore(&d->dev_lock, flags);
0334 }
0335
0336 static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable)
0337 {
0338 unsigned long flags;
0339 u32 mask = DMA_CTRL_DSRAM_PATH;
0340 u32 val = enable ? DMA_CTRL_DSRAM_PATH : 0;
0341
0342 spin_lock_irqsave(&d->dev_lock, flags);
0343 ldma_update_bits(d, mask, val, DMA_CTRL);
0344 spin_unlock_irqrestore(&d->dev_lock, flags);
0345 }
0346
0347 static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable)
0348 {
0349 unsigned long flags;
0350 u32 mask, val;
0351
0352 if (d->inst->type != DMA_TYPE_TX)
0353 return;
0354
0355 mask = DMA_CTRL_CH_FL;
0356 val = enable ? DMA_CTRL_CH_FL : 0;
0357
0358 spin_lock_irqsave(&d->dev_lock, flags);
0359 ldma_update_bits(d, mask, val, DMA_CTRL);
0360 spin_unlock_irqrestore(&d->dev_lock, flags);
0361 }
0362
0363 static void ldma_dev_global_polling_enable(struct ldma_dev *d)
0364 {
0365 unsigned long flags;
0366 u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT;
0367 u32 val = DMA_CPOLL_EN;
0368
0369 val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt);
0370
0371 spin_lock_irqsave(&d->dev_lock, flags);
0372 ldma_update_bits(d, mask, val, DMA_CPOLL);
0373 spin_unlock_irqrestore(&d->dev_lock, flags);
0374 }
0375
0376 static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable)
0377 {
0378 unsigned long flags;
0379 u32 mask, val;
0380
0381 if (d->inst->type == DMA_TYPE_MCPY)
0382 return;
0383
0384 mask = DMA_CTRL_DS_FOD;
0385 val = enable ? DMA_CTRL_DS_FOD : 0;
0386
0387 spin_lock_irqsave(&d->dev_lock, flags);
0388 ldma_update_bits(d, mask, val, DMA_CTRL);
0389 spin_unlock_irqrestore(&d->dev_lock, flags);
0390 }
0391
0392 static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable)
0393 {
0394 unsigned long flags;
0395 u32 mask = DMA_CTRL_ENBE;
0396 u32 val = enable ? DMA_CTRL_ENBE : 0;
0397
0398 spin_lock_irqsave(&d->dev_lock, flags);
0399 ldma_update_bits(d, mask, val, DMA_CTRL);
0400 spin_unlock_irqrestore(&d->dev_lock, flags);
0401 }
0402
0403 static void ldma_dev_orrc_cfg(struct ldma_dev *d)
0404 {
0405 unsigned long flags;
0406 u32 val = 0;
0407 u32 mask;
0408
0409 if (d->inst->type == DMA_TYPE_RX)
0410 return;
0411
0412 mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
0413 if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT)
0414 val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc);
0415
0416 spin_lock_irqsave(&d->dev_lock, flags);
0417 ldma_update_bits(d, mask, val, DMA_ORRC);
0418 spin_unlock_irqrestore(&d->dev_lock, flags);
0419 }
0420
0421 static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt)
0422 {
0423 u32 mask = DMA_CTRL_DESC_TMOUT_CNT_V31;
0424 unsigned long flags;
0425 u32 val;
0426
0427 if (enable)
0428 val = DMA_CTRL_DESC_TMOUT_EN_V31 | FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt);
0429 else
0430 val = 0;
0431
0432 spin_lock_irqsave(&d->dev_lock, flags);
0433 ldma_update_bits(d, mask, val, DMA_CTRL);
0434 spin_unlock_irqrestore(&d->dev_lock, flags);
0435 }
0436
0437 static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable)
0438 {
0439 unsigned long flags;
0440 u32 mask, val;
0441
0442 if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY)
0443 return;
0444
0445 mask = DMA_CTRL_DBURST_WR;
0446 val = enable ? DMA_CTRL_DBURST_WR : 0;
0447
0448 spin_lock_irqsave(&d->dev_lock, flags);
0449 ldma_update_bits(d, mask, val, DMA_CTRL);
0450 spin_unlock_irqrestore(&d->dev_lock, flags);
0451 }
0452
0453 static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable)
0454 {
0455 unsigned long flags;
0456 u32 mask, val;
0457
0458 if (d->inst->type != DMA_TYPE_TX)
0459 return;
0460
0461 mask = DMA_CTRL_VLD_DF_ACK;
0462 val = enable ? DMA_CTRL_VLD_DF_ACK : 0;
0463
0464 spin_lock_irqsave(&d->dev_lock, flags);
0465 ldma_update_bits(d, mask, val, DMA_CTRL);
0466 spin_unlock_irqrestore(&d->dev_lock, flags);
0467 }
0468
0469 static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable)
0470 {
0471 unsigned long flags;
0472 u32 mask = DMA_CTRL_DRB;
0473 u32 val = enable ? DMA_CTRL_DRB : 0;
0474
0475 spin_lock_irqsave(&d->dev_lock, flags);
0476 ldma_update_bits(d, mask, val, DMA_CTRL);
0477 spin_unlock_irqrestore(&d->dev_lock, flags);
0478 }
0479
0480 static int ldma_dev_cfg(struct ldma_dev *d)
0481 {
0482 bool enable;
0483
0484 ldma_dev_pkt_arb_cfg(d, true);
0485 ldma_dev_global_polling_enable(d);
0486
0487 enable = !!(d->flags & DMA_DFT_DRB);
0488 ldma_dev_drb_cfg(d, enable);
0489
0490 enable = !!(d->flags & DMA_EN_BYTE_EN);
0491 ldma_dev_byte_enable_cfg(d, enable);
0492
0493 enable = !!(d->flags & DMA_CHAN_FLOW_CTL);
0494 ldma_dev_chan_flow_ctl_cfg(d, enable);
0495
0496 enable = !!(d->flags & DMA_DESC_FOD);
0497 ldma_dev_desc_fetch_on_demand_cfg(d, enable);
0498
0499 enable = !!(d->flags & DMA_DESC_IN_SRAM);
0500 ldma_dev_sram_desc_cfg(d, enable);
0501
0502 enable = !!(d->flags & DMA_DBURST_WR);
0503 ldma_dev_dburst_wr_cfg(d, enable);
0504
0505 enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK);
0506 ldma_dev_vld_fetch_ack_cfg(d, enable);
0507
0508 if (d->ver > DMA_VER22) {
0509 ldma_dev_orrc_cfg(d);
0510 ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT);
0511 }
0512
0513 dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
0514 d->inst->name, readl(d->base + DMA_CTRL));
0515
0516 return 0;
0517 }
0518
/*
 * Program the channel control register (DMA_CCTRL) for channel @c with
 * @val, while caching the channel direction and preserving the class
 * fields already present in hardware.  Always returns 0.
 */
static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 class_low, class_high;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&d->dev_lock, flags);
	/* Select the channel before accessing per-channel registers. */
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	reg = readl(d->base + DMA_CCTRL);

	/* Derive the channel direction from the current register value. */
	if (reg & DMA_CCTRL_DIR_TX)
		c->flags |= DMA_TX_CH;
	else
		c->flags |= DMA_RX_CH;

	/* Carry over the class fields currently programmed in hardware. */
	class_low = FIELD_GET(DMA_CCTRL_CLASS, reg);
	class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg);
	val &= ~DMA_CCTRL_CLASS;
	val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low);
	val &= ~DMA_CCTRL_CLASSH;
	val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high);
	writel(val, d->base + DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	return 0;
}
0547
/*
 * Put channel @c's interrupts into a known state: all per-channel
 * interrupt sources disabled and acknowledged, the channel's top-level
 * interrupt disabled and any stale pending bit cleared.
 */
static void ldma_chan_irq_init(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 enofs, crofs;
	u32 cn_bit;

	/* Channels >= 32 live in the second enable/status register pair. */
	if (c->nr < MAX_LOWER_CHANS) {
		enofs = DMA_IRNEN;
		crofs = DMA_IRNCR;
	} else {
		enofs = DMA_IRNEN1;
		crofs = DMA_IRNCR1;
	}

	cn_bit = BIT(c->nr & MASK_LOWER_CHANS);
	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);

	/* Disable all channel interrupt sources and ack anything pending. */
	writel(0, d->base + DMA_CIE);
	writel(DMA_CI_ALL, d->base + DMA_CIS);

	/* Disable and clear the channel's top-level interrupt bit. */
	ldma_update_bits(d, cn_bit, 0, enofs);
	writel(cn_bit, d->base + crofs);
	spin_unlock_irqrestore(&d->dev_lock, flags);
}
0575
0576 static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
0577 {
0578 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0579 u32 class_val;
0580
0581 if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
0582 return;
0583
0584
0585 class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7);
0586
0587 class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3);
0588
0589 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0590 ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val,
0591 DMA_CCTRL);
0592 }
0593
0594 static int ldma_chan_on(struct ldma_chan *c)
0595 {
0596 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0597 unsigned long flags;
0598
0599
0600 if (WARN_ON(!c->desc_init))
0601 return -EINVAL;
0602
0603 spin_lock_irqsave(&d->dev_lock, flags);
0604 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0605 ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
0606 spin_unlock_irqrestore(&d->dev_lock, flags);
0607
0608 c->onoff = DMA_CH_ON;
0609
0610 return 0;
0611 }
0612
/*
 * Switch channel @c off and poll (atomically, up to 10 ms) until the
 * hardware confirms by clearing DMA_CCTRL_ON.  Returns 0 on success or
 * the readl_poll_timeout_atomic() error code.
 */
static int ldma_chan_off(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 val;
	int ret;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	/*
	 * NOTE(review): the poll runs outside dev_lock, so DMA_CS could in
	 * principle be re-selected by a concurrent caller — verify callers
	 * serialize channel operations.
	 */
	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
					!(val & DMA_CCTRL_ON), 0, 10000);
	if (ret)
		return ret;

	c->onoff = DMA_CH_OFF;

	return 0;
}
0634
/*
 * Program the descriptor ring base address (@desc_base) and length
 * (@desc_num) for channel @c, then mark the channel descriptor state as
 * initialized so ldma_chan_on() will accept it.
 */
static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
				  int desc_num)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	writel(lower_32_bits(desc_base), d->base + DMA_CDBA);

	/* The upper 4 address bits go into DMA_CCTRL's CDBA_MSB field. */
	if (IS_ENABLED(CONFIG_64BIT)) {
		u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS;

		ldma_update_bits(d, DMA_CDBA_MSB,
				 FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
	}
	writel(desc_num, d->base + DMA_CDLEN);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	c->desc_init = true;
}
0657
/*
 * Configure a client-provided hardware descriptor ring for the channel
 * (used by the >VER22 path of ldma_prep_slave_sg()) and return a bare
 * async TX descriptor for it.  Returns NULL on invalid arguments or
 * allocation failure.
 */
static struct dma_async_tx_descriptor *
ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	struct dma_async_tx_descriptor *tx;
	struct dw2_desc_sw *ds;

	if (!desc_num) {
		dev_err(d->dev, "Channel %d must allocate descriptor first\n",
			c->nr);
		return NULL;
	}

	if (desc_num > DMA_MAX_DESC_NUM) {
		dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
			c->nr, desc_num);
		return NULL;
	}

	ldma_chan_desc_hw_cfg(c, desc_base, desc_num);

	c->flags |= DMA_HW_DESC;
	c->desc_cnt = desc_num;
	c->desc_phys = desc_base;

	/*
	 * NOTE(review): unlike dma_alloc_desc_resource(), ds->chan and
	 * ds->desc_hw are left unset here, and nothing appears to free ds
	 * if the descriptor is never submitted — verify lifetime against
	 * the completion/free paths.
	 */
	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	tx = &ds->vdesc.tx;
	dma_async_tx_descriptor_init(tx, chan);

	return tx;
}
0693
/*
 * Stop channel @c and issue a per-channel reset, polling (atomically, up
 * to 10 ms) for the reset bit to self-clear.  On success the channel's
 * descriptor state is invalidated and must be reprogrammed before the
 * channel can be switched on again.  Returns 0 or a poll/stop error.
 */
static int ldma_chan_reset(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 val;
	int ret;

	/* The channel must be off before it can be reset. */
	ret = ldma_chan_off(c);
	if (ret)
		return ret;

	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
	ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
					!(val & DMA_CCTRL_RST), 0, 10000);
	if (ret)
		return ret;

	c->rst = 1;
	c->desc_init = false;

	return 0;
}
0720
0721 static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
0722 {
0723 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0724 u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN;
0725 u32 val;
0726
0727 if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX)
0728 val = FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len) | DMA_C_BOFF_EN;
0729 else
0730 val = 0;
0731
0732 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0733 ldma_update_bits(d, mask, val, DMA_C_BOFF);
0734 }
0735
0736 static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
0737 u32 endian_type)
0738 {
0739 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0740 u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI;
0741 u32 val;
0742
0743 if (enable)
0744 val = DMA_C_END_DE_EN | FIELD_PREP(DMA_C_END_DATAENDI, endian_type);
0745 else
0746 val = 0;
0747
0748 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0749 ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
0750 }
0751
0752 static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
0753 u32 endian_type)
0754 {
0755 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0756 u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI;
0757 u32 val;
0758
0759 if (enable)
0760 val = DMA_C_END_DES_EN | FIELD_PREP(DMA_C_END_DESENDI, endian_type);
0761 else
0762 val = 0;
0763
0764 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0765 ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
0766 }
0767
0768 static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
0769 {
0770 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0771 u32 mask, val;
0772
0773
0774 if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX))
0775 return;
0776
0777 mask = DMA_C_HDRM_HDR_SUM;
0778 val = DMA_C_HDRM_HDR_SUM;
0779
0780 if (!csum && hdr_len)
0781 val = hdr_len;
0782
0783 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0784 ldma_update_bits(d, mask, val, DMA_C_HDRM);
0785 }
0786
0787 static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
0788 {
0789 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0790 u32 mask, val;
0791
0792
0793 if (ldma_chan_tx(c))
0794 return;
0795
0796 mask = DMA_CCTRL_WR_NP_EN;
0797 val = enable ? DMA_CCTRL_WR_NP_EN : 0;
0798
0799 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0800 ldma_update_bits(d, mask, val, DMA_CCTRL);
0801 }
0802
0803 static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
0804 {
0805 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
0806 u32 mask, val;
0807
0808 if (d->ver < DMA_VER32 || ldma_chan_tx(c))
0809 return;
0810
0811 mask = DMA_CCTRL_CH_ABC;
0812 val = enable ? DMA_CCTRL_CH_ABC : 0;
0813
0814 ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
0815 ldma_update_bits(d, mask, val, DMA_CCTRL);
0816 }
0817
0818 static int ldma_port_cfg(struct ldma_port *p)
0819 {
0820 unsigned long flags;
0821 struct ldma_dev *d;
0822 u32 reg;
0823
0824 d = p->ldev;
0825 reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi);
0826 reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi);
0827
0828 if (d->ver == DMA_VER22) {
0829 reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl);
0830 reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl);
0831 } else {
0832 reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop);
0833
0834 if (p->txbl == DMA_BURSTL_32DW)
0835 reg |= DMA_PCTRL_TXBL32;
0836 else if (p->txbl == DMA_BURSTL_16DW)
0837 reg |= DMA_PCTRL_TXBL16;
0838 else
0839 reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8);
0840
0841 if (p->rxbl == DMA_BURSTL_32DW)
0842 reg |= DMA_PCTRL_RXBL32;
0843 else if (p->rxbl == DMA_BURSTL_16DW)
0844 reg |= DMA_PCTRL_RXBL16;
0845 else
0846 reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8);
0847 }
0848
0849 spin_lock_irqsave(&d->dev_lock, flags);
0850 writel(p->portid, d->base + DMA_PS);
0851 writel(reg, d->base + DMA_PCTRL);
0852 spin_unlock_irqrestore(&d->dev_lock, flags);
0853
0854 reg = readl(d->base + DMA_PCTRL);
0855 dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg);
0856
0857 return 0;
0858 }
0859
/*
 * Apply the cached software state of channel @c to hardware: channel
 * control bits, interrupt setup, and on >VER22 controllers the extended
 * per-channel features and (if present) the client-owned descriptor
 * ring.  Always returns 0.
 */
static int ldma_chan_cfg(struct ldma_chan *c)
{
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;
	u32 reg;

	reg = c->pden ? DMA_CCTRL_PDEN : 0;
	reg |= c->onoff ? DMA_CCTRL_ON : 0;
	reg |= c->rst ? DMA_CCTRL_RST : 0;

	ldma_chan_cctrl_cfg(c, reg);
	ldma_chan_irq_init(c);

	if (d->ver <= DMA_VER22)
		return 0;

	/*
	 * The helpers below perform the DMA_CS select + register update
	 * without locking themselves, so hold dev_lock around the group.
	 */
	spin_lock_irqsave(&d->dev_lock, flags);
	ldma_chan_set_class(c, c->nr);
	ldma_chan_byte_offset_cfg(c, c->boff_len);
	ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian);
	ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian);
	ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum);
	ldma_chan_rxwr_np_cfg(c, c->desc_rx_np);
	ldma_chan_abc_cfg(c, c->abc_en);
	spin_unlock_irqrestore(&d->dev_lock, flags);

	if (ldma_chan_is_hw_desc(c))
		ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);

	return 0;
}
0891
0892 static void ldma_dev_init(struct ldma_dev *d)
0893 {
0894 unsigned long ch_mask = (unsigned long)d->channels_mask;
0895 struct ldma_port *p;
0896 struct ldma_chan *c;
0897 int i;
0898 u32 j;
0899
0900 spin_lock_init(&d->dev_lock);
0901 ldma_dev_reset(d);
0902 ldma_dev_cfg(d);
0903
0904
0905 for (i = 0; i < d->port_nrs; i++) {
0906 p = &d->ports[i];
0907 ldma_port_cfg(p);
0908 }
0909
0910
0911 for_each_set_bit(j, &ch_mask, d->chan_nrs) {
0912 c = &d->chans[j];
0913 ldma_chan_cfg(c);
0914 }
0915 }
0916
0917 static int ldma_cfg_init(struct ldma_dev *d)
0918 {
0919 struct fwnode_handle *fwnode = dev_fwnode(d->dev);
0920 struct ldma_port *p;
0921 int i;
0922
0923 if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en"))
0924 d->flags |= DMA_EN_BYTE_EN;
0925
0926 if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr"))
0927 d->flags |= DMA_DBURST_WR;
0928
0929 if (fwnode_property_read_bool(fwnode, "intel,dma-drb"))
0930 d->flags |= DMA_DFT_DRB;
0931
0932 if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
0933 &d->pollcnt))
0934 d->pollcnt = DMA_DFT_POLL_CNT;
0935
0936 if (d->inst->chan_fc)
0937 d->flags |= DMA_CHAN_FLOW_CTL;
0938
0939 if (d->inst->desc_fod)
0940 d->flags |= DMA_DESC_FOD;
0941
0942 if (d->inst->desc_in_sram)
0943 d->flags |= DMA_DESC_IN_SRAM;
0944
0945 if (d->inst->valid_desc_fetch_ack)
0946 d->flags |= DMA_VALID_DESC_FETCH_ACK;
0947
0948 if (d->ver > DMA_VER22) {
0949 if (!d->port_nrs)
0950 return -EINVAL;
0951
0952 for (i = 0; i < d->port_nrs; i++) {
0953 p = &d->ports[i];
0954 p->rxendi = DMA_DFT_ENDIAN;
0955 p->txendi = DMA_DFT_ENDIAN;
0956 p->rxbl = DMA_DFT_BURST;
0957 p->txbl = DMA_DFT_BURST;
0958 p->pkt_drop = DMA_PKT_DROP_DIS;
0959 }
0960 }
0961
0962 return 0;
0963 }
0964
/*
 * virt-dma desc_free callback: return the hardware descriptors to the
 * channel's DMA pool and free the software descriptor wrapper.
 */
static void dma_free_desc_resource(struct virt_dma_desc *vdesc)
{
	struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc);
	struct ldma_chan *c = ds->chan;

	dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys);
	kfree(ds);
}
0973
0974 static struct dw2_desc_sw *
0975 dma_alloc_desc_resource(int num, struct ldma_chan *c)
0976 {
0977 struct device *dev = c->vchan.chan.device->dev;
0978 struct dw2_desc_sw *ds;
0979
0980 if (num > c->desc_num) {
0981 dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num);
0982 return NULL;
0983 }
0984
0985 ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
0986 if (!ds)
0987 return NULL;
0988
0989 ds->chan = c;
0990 ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC,
0991 &ds->desc_phys);
0992 if (!ds->desc_hw) {
0993 dev_dbg(dev, "out of memory for link descriptor\n");
0994 kfree(ds);
0995 return NULL;
0996 }
0997 ds->desc_cnt = num;
0998
0999 return ds;
1000 }
1001
1002 static void ldma_chan_irq_en(struct ldma_chan *c)
1003 {
1004 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1005 unsigned long flags;
1006
1007 spin_lock_irqsave(&d->dev_lock, flags);
1008 writel(c->nr, d->base + DMA_CS);
1009 writel(DMA_CI_EOP, d->base + DMA_CIE);
1010 writel(BIT(c->nr), d->base + DMA_IRNEN);
1011 spin_unlock_irqrestore(&d->dev_lock, flags);
1012 }
1013
/*
 * dmaengine .device_issue_pending callback.
 *
 * On VER22 controllers the driver owns the descriptor ring: pop the next
 * queued virtual descriptor, program its ring into hardware and enable
 * the channel interrupt.  On newer controllers descriptors are managed
 * by the client, so only the channel is switched on.
 */
static void ldma_issue_pending(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	unsigned long flags;

	if (d->ver == DMA_VER22) {
		spin_lock_irqsave(&c->vchan.lock, flags);
		if (vchan_issue_pending(&c->vchan)) {
			struct virt_dma_desc *vdesc;

			/* Start with the first queued descriptor. */
			vdesc = vchan_next_desc(&c->vchan);
			if (!vdesc) {
				c->ds = NULL;
				spin_unlock_irqrestore(&c->vchan.lock, flags);
				return;
			}
			list_del(&vdesc->node);
			c->ds = to_lgm_dma_desc(vdesc);
			ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
			ldma_chan_irq_en(c);
		}
		spin_unlock_irqrestore(&c->vchan.lock, flags);
	}
	ldma_chan_on(c);
}
1041
1042 static void ldma_synchronize(struct dma_chan *chan)
1043 {
1044 struct ldma_chan *c = to_ldma_chan(chan);
1045
1046
1047
1048
1049
1050 cancel_work_sync(&c->work);
1051 vchan_synchronize(&c->vchan);
1052 if (c->ds)
1053 dma_free_desc_resource(&c->ds->vdesc);
1054 }
1055
/*
 * dmaengine .device_terminate_all callback: discard all queued virtual
 * descriptors and reset the hardware channel.  Returns the channel
 * reset status.
 */
static int ldma_terminate_all(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vchan.lock, flags);
	vchan_get_all_descriptors(&c->vchan, &head);
	spin_unlock_irqrestore(&c->vchan.lock, flags);
	vchan_dma_desc_free_list(&c->vchan, &head);

	return ldma_chan_reset(c);
}
1069
/*
 * dmaengine .device_resume callback: switch the channel back on.
 * NOTE(review): the ldma_chan_on() result is discarded, so a failure
 * (uninitialized descriptors) is not reported — verify this is intended.
 */
static int ldma_resume_chan(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);

	ldma_chan_on(c);

	return 0;
}
1078
/*
 * dmaengine .device_pause callback: switch the channel off, propagating
 * any poll-timeout error from ldma_chan_off().
 */
static int ldma_pause_chan(struct dma_chan *chan)
{
	struct ldma_chan *c = to_ldma_chan(chan);

	return ldma_chan_off(c);
}
1085
1086 static enum dma_status
1087 ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1088 struct dma_tx_state *txstate)
1089 {
1090 struct ldma_chan *c = to_ldma_chan(chan);
1091 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1092 enum dma_status status = DMA_COMPLETE;
1093
1094 if (d->ver == DMA_VER22)
1095 status = dma_cookie_status(chan, cookie, txstate);
1096
1097 return status;
1098 }
1099
/*
 * Per-channel interrupt handling, called from dma_interrupt() in
 * hard-IRQ context: acknowledge the channel status, mask its interrupt
 * sources and defer the real work to the workqueue.
 */
static void dma_chan_irq(int irq, void *data)
{
	struct ldma_chan *c = data;
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	u32 stat;

	/*
	 * Select the channel and read its interrupt status.
	 * NOTE(review): DMA_CS is written without d->dev_lock here —
	 * verify this cannot race with process-context channel selection.
	 */
	writel(c->nr, d->base + DMA_CS);
	stat = readl(d->base + DMA_CIS);
	if (!stat)
		return;

	/* Mask all channel interrupt sources and ack the pending bits. */
	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
	writel(stat, d->base + DMA_CIS);
	queue_work(d->wq, &c->work);
}
1116
/*
 * Top-level interrupt handler: for every channel flagged in DMA_IRNCR,
 * mask and acknowledge its top-level interrupt bit and dispatch to
 * dma_chan_irq().
 * NOTE(review): only DMA_IRNCR (channels 0..31) is consulted here;
 * presumably channels using DMA_IRNCR1 are not routed to this IRQ —
 * verify.
 */
static irqreturn_t dma_interrupt(int irq, void *dev_id)
{
	struct ldma_dev *d = dev_id;
	struct ldma_chan *c;
	unsigned long irncr;
	u32 cid;

	irncr = readl(d->base + DMA_IRNCR);
	if (!irncr) {
		dev_err(d->dev, "dummy interrupt\n");
		return IRQ_NONE;
	}

	for_each_set_bit(cid, &irncr, d->chan_nrs) {
		/* Mask the channel's top-level interrupt ... */
		writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
		/* ... and acknowledge it. */
		writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);

		c = &d->chans[cid];
		dma_chan_irq(irq, c);
	}

	return IRQ_HANDLED;
}
1142
1143 static void prep_slave_burst_len(struct ldma_chan *c)
1144 {
1145 struct ldma_port *p = c->port;
1146 struct dma_slave_config *cfg = &c->config;
1147
1148 if (cfg->dst_maxburst)
1149 cfg->src_maxburst = cfg->dst_maxburst;
1150
1151
1152 p->txbl = ilog2(cfg->src_maxburst);
1153 p->rxbl = p->txbl;
1154 }
1155
/*
 * dmaengine .device_prep_slave_sg callback.
 *
 * On >VER22 controllers the scatterlist start address is treated as a
 * pre-built hardware descriptor ring and handed to ldma_chan_desc_cfg().
 * On VER22 the driver builds the descriptor ring itself, splitting any
 * sg entry larger than DMA_MAX_SIZE into multiple descriptors, and
 * returns a virt-dma descriptor for submission.
 */
static struct dma_async_tx_descriptor *
ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sglen, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct ldma_chan *c = to_ldma_chan(chan);
	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
	size_t len, avail, total = 0;
	struct dw2_desc *hw_ds;
	struct dw2_desc_sw *ds;
	struct scatterlist *sg;
	int num = sglen, i;
	dma_addr_t addr;

	if (!sgl)
		return NULL;

	if (d->ver > DMA_VER22)
		return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);

	/* Count extra descriptors needed for entries above DMA_MAX_SIZE. */
	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = dma_alloc_desc_resource(num, c);
	if (!ds)
		return NULL;

	c->ds = ds;

	num = 0;

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			hw_ds = &ds->desc_hw[num];
			/*
			 * Mark SOP/EOP: a single-entry list gets both on
			 * one descriptor, otherwise SOP on the first and
			 * EOP on the last.
			 * NOTE(review): the EOP test compares the
			 * descriptor index against sglen; when an entry is
			 * split (avail > DMA_MAX_SIZE) the indices exceed
			 * sglen - 1 and EOP placement looks off — verify.
			 */
			switch (sglen) {
			case 1:
				hw_ds->field &= ~DESC_SOP;
				hw_ds->field |= FIELD_PREP(DESC_SOP, 1);

				hw_ds->field &= ~DESC_EOP;
				hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
				break;
			default:
				if (num == 0) {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 1);

					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
				} else if (num == (sglen - 1)) {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
				} else {
					hw_ds->field &= ~DESC_SOP;
					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);

					hw_ds->field &= ~DESC_EOP;
					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
				}
				break;
			}

			/* Only the lower 32 address bits fit in the descriptor. */
			hw_ds->addr = (u32)addr;

			hw_ds->field &= ~DESC_DATA_LEN;
			hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);

			hw_ds->field &= ~DESC_C;
			hw_ds->field |= FIELD_PREP(DESC_C, 0);

			hw_ds->field &= ~DESC_BYTE_OFF;
			hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);

			/* Ensure the descriptor body is visible before OWN. */
			wmb();
			hw_ds->field &= ~DESC_OWN;
			hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);

			/* Ensure OWN is visible before the next descriptor. */
			wmb();
			num++;
			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->size = total;
	prep_slave_burst_len(c);

	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
}
1258
1259 static int
1260 ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
1261 {
1262 struct ldma_chan *c = to_ldma_chan(chan);
1263
1264 memcpy(&c->config, cfg, sizeof(c->config));
1265
1266 return 0;
1267 }
1268
1269 static int ldma_alloc_chan_resources(struct dma_chan *chan)
1270 {
1271 struct ldma_chan *c = to_ldma_chan(chan);
1272 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1273 struct device *dev = c->vchan.chan.device->dev;
1274 size_t desc_sz;
1275
1276 if (d->ver > DMA_VER22) {
1277 c->flags |= CHAN_IN_USE;
1278 return 0;
1279 }
1280
1281 if (c->desc_pool)
1282 return c->desc_num;
1283
1284 desc_sz = c->desc_num * sizeof(struct dw2_desc);
1285 c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
1286 __alignof__(struct dw2_desc), 0);
1287
1288 if (!c->desc_pool) {
1289 dev_err(dev, "unable to allocate descriptor pool\n");
1290 return -ENOMEM;
1291 }
1292
1293 return c->desc_num;
1294 }
1295
1296 static void ldma_free_chan_resources(struct dma_chan *chan)
1297 {
1298 struct ldma_chan *c = to_ldma_chan(chan);
1299 struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
1300
1301 if (d->ver == DMA_VER22) {
1302 dma_pool_destroy(c->desc_pool);
1303 c->desc_pool = NULL;
1304 vchan_free_chan_resources(to_virt_chan(chan));
1305 ldma_chan_reset(c);
1306 } else {
1307 c->flags &= ~CHAN_IN_USE;
1308 }
1309 }
1310
/*
 * Bottom-half completion work for a V2.2 channel (queued from the channel
 * IRQ path): completes the in-flight cookie, invokes the client callback,
 * and frees the descriptors that have moved to the completed list.
 */
static void dma_work(struct work_struct *work)
{
	struct ldma_chan *c = container_of(work, struct ldma_chan, work);
	struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
	struct virt_dma_chan *vc = &c->vchan;
	struct dmaengine_desc_callback cb;
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	/* Detach completed descriptors under the channel lock */
	spin_lock_irqsave(&c->vchan.lock, flags);
	list_splice_tail_init(&vc->desc_completed, &head);
	spin_unlock_irqrestore(&c->vchan.lock, flags);
	dmaengine_desc_get_callback(tx, &cb);
	dma_cookie_complete(tx);
	dmaengine_desc_callback_invoke(&cb, NULL);

	/*
	 * NOTE(review): the same tx callback is re-fetched and re-invoked
	 * for every entry on the list, and dma_cookie_complete() is called
	 * repeatedly on the same tx.  This looks intended for a model with
	 * one descriptor in flight per channel — confirm before relying on
	 * multi-descriptor completion here.
	 */
	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(tx, &cb);
		dma_cookie_complete(tx);
		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, NULL);

		vchan_vdesc_fini(vd);
	}
	/* The channel no longer owns a software descriptor */
	c->ds = NULL;
}
1338
1339 static void
1340 update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
1341 {
1342 if (ldma_chan_tx(c))
1343 p->txbl = ilog2(burst);
1344 else
1345 p->rxbl = ilog2(burst);
1346 }
1347
/*
 * Program the port burst length for V3x hardware, which takes the raw
 * value from the DT cell (no log2 encoding, unlike V2.2).  Direction
 * decides whether the TX or RX field is updated.
 */
static void
update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
{
	if (ldma_chan_tx(c))
		p->txbl = burst;
	else
		p->rxbl = burst;
}
1356
/*
 * Apply per-client settings from a DT dma specifier:
 * args[0] = channel id, args[1] = port id, args[2] = burst length.
 *
 * Binds the channel to the port, programs the burst length (encoding
 * depends on hardware version) and re-writes the port configuration.
 *
 * Returns 1 on success, 0 when either id is out of range.
 */
static int
update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
{
	struct ldma_dev *d = ofdma->of_dma_data;
	u32 chan_id = spec->args[0];
	u32 port_id = spec->args[1];
	/*
	 * NOTE(review): args[2] is read while the caller (ldma_xlate) only
	 * guarantees args_count > 1 — confirm the binding always supplies
	 * three cells, otherwise this reads a stale cell value.
	 */
	u32 burst = spec->args[2];
	struct ldma_port *p;
	struct ldma_chan *c;

	if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
		return 0;

	p = &d->ports[port_id];
	c = &d->chans[chan_id];
	c->port = p;

	if (d->ver == DMA_VER22)
		update_burst_len_v22(c, p, burst);
	else
		update_burst_len_v3X(c, p, burst);

	ldma_port_cfg(p);

	return 1;
}
1383
1384 static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
1385 struct of_dma *ofdma)
1386 {
1387 struct ldma_dev *d = ofdma->of_dma_data;
1388 u32 chan_id = spec->args[0];
1389 int ret;
1390
1391 if (!spec->args_count)
1392 return NULL;
1393
1394
1395 if (spec->args_count > 1) {
1396 ret = update_client_configs(ofdma, spec);
1397 if (!ret)
1398 return NULL;
1399 }
1400
1401 return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
1402 }
1403
1404 static void ldma_dma_init_v22(int i, struct ldma_dev *d)
1405 {
1406 struct ldma_chan *c;
1407
1408 c = &d->chans[i];
1409 c->nr = i;
1410 c->rst = DMA_CHAN_RST;
1411 c->desc_num = DMA_DFT_DESC_NUM;
1412 snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
1413 INIT_WORK(&c->work, dma_work);
1414 c->vchan.desc_free = dma_free_desc_resource;
1415 vchan_init(&c->vchan, &d->dma_dev);
1416 }
1417
1418 static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
1419 {
1420 struct ldma_chan *c;
1421
1422 c = &d->chans[i];
1423 c->data_endian = DMA_DFT_ENDIAN;
1424 c->desc_endian = DMA_DFT_ENDIAN;
1425 c->data_endian_en = false;
1426 c->desc_endian_en = false;
1427 c->desc_rx_np = false;
1428 c->flags |= DEVICE_ALLOC_DESC;
1429 c->onoff = DMA_CH_OFF;
1430 c->rst = DMA_CHAN_RST;
1431 c->abc_en = true;
1432 c->hdrm_csum = false;
1433 c->boff_len = 0;
1434 c->nr = i;
1435 c->vchan.desc_free = dma_free_desc_resource;
1436 vchan_init(&c->vchan, &d->dma_dev);
1437 }
1438
/*
 * V2.2-only probe setup: channel count comes from the "dma-channels"
 * property (not the ID register), and completion needs an IRQ plus an
 * ordered workqueue.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): d->wq is never destroyed on later probe failure or on
 * driver teardown visible in this file — confirm whether a
 * destroy_workqueue() cleanup path is needed.
 */
static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
{
	int ret;

	ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
	if (ret < 0) {
		dev_err(d->dev, "unable to read dma-channels property\n");
		return ret;
	}

	d->irq = platform_get_irq(pdev, 0);
	if (d->irq < 0)
		return d->irq;

	ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
			       DRIVER_NAME, d);
	if (ret)
		return ret;

	/* ordered queue: completions for a channel must not run concurrently */
	d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
					WQ_HIGHPRI);
	if (!d->wq)
		return -ENOMEM;

	return 0;
}
1465
/*
 * devm action: undo clock enable and reset deassert from probe, in
 * reverse order (clock off first, then put the block back in reset).
 */
static void ldma_clk_disable(void *data)
{
	struct ldma_dev *d = data;

	clk_disable_unprepare(d->core_clk);
	reset_control_assert(d->rst);
}
1473
/* Per-instance configuration: general-purpose CDMA, descriptors in DDR */
static const struct ldma_inst_data dma0 = {
	.name = "dma0",
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = false,
	.valid_desc_fetch_ack = false,
};

/* TX instance: flow control, fetch-on-demand, descriptors in SRAM */
static const struct ldma_inst_data dma2tx = {
	.name = "dma2tx",
	.type = DMA_TYPE_TX,
	.orrc = 16,
	.chan_fc = true,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

/* RX instance: no flow control, fetch-on-demand, descriptors in SRAM */
static const struct ldma_inst_data dma1rx = {
	.name = "dma1rx",
	.type = DMA_TYPE_RX,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = false,
};

/* TX instance: same configuration as dma2tx */
static const struct ldma_inst_data dma1tx = {
	.name = "dma1tx",
	.type = DMA_TYPE_TX,
	.orrc = 16,
	.chan_fc = true,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

/* TX instance: same configuration as dma2tx */
static const struct ldma_inst_data dma0tx = {
	.name = "dma0tx",
	.type = DMA_TYPE_TX,
	.orrc = 16,
	.chan_fc = true,
	.desc_fod = true,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

/* Memory-copy instance, descriptors in SRAM */
static const struct ldma_inst_data dma3 = {
	.name = "dma3",
	.type = DMA_TYPE_MCPY,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = false,
};

/* TOE memory-copy instance with descriptor fetch acknowledge */
static const struct ldma_inst_data toe_dma30 = {
	.name = "toe_dma30",
	.type = DMA_TYPE_MCPY,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};

/* TOE memory-copy instance with descriptor fetch acknowledge */
static const struct ldma_inst_data toe_dma31 = {
	.name = "toe_dma31",
	.type = DMA_TYPE_MCPY,
	.orrc = 16,
	.chan_fc = false,
	.desc_fod = false,
	.desc_in_sram = true,
	.valid_desc_fetch_ack = true,
};
1551
/* DT compatible strings, each bound to its per-instance configuration */
static const struct of_device_id intel_ldma_match[] = {
	{ .compatible = "intel,lgm-cdma", .data = &dma0},
	{ .compatible = "intel,lgm-dma2tx", .data = &dma2tx},
	{ .compatible = "intel,lgm-dma1rx", .data = &dma1rx},
	{ .compatible = "intel,lgm-dma1tx", .data = &dma1tx},
	{ .compatible = "intel,lgm-dma0tx", .data = &dma0tx},
	{ .compatible = "intel,lgm-dma3", .data = &dma3},
	{ .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30},
	{ .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31},
	{}
};
1563
1564 static int intel_ldma_probe(struct platform_device *pdev)
1565 {
1566 struct device *dev = &pdev->dev;
1567 struct dma_device *dma_dev;
1568 unsigned long ch_mask;
1569 struct ldma_chan *c;
1570 struct ldma_port *p;
1571 struct ldma_dev *d;
1572 u32 id, bitn = 32, j;
1573 int i, ret;
1574
1575 d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
1576 if (!d)
1577 return -ENOMEM;
1578
1579
1580 d->dev = &pdev->dev;
1581
1582 d->inst = device_get_match_data(dev);
1583 if (!d->inst) {
1584 dev_err(dev, "No device match found\n");
1585 return -ENODEV;
1586 }
1587
1588 d->base = devm_platform_ioremap_resource(pdev, 0);
1589 if (IS_ERR(d->base))
1590 return PTR_ERR(d->base);
1591
1592
1593 d->core_clk = devm_clk_get_optional(dev, NULL);
1594 if (IS_ERR(d->core_clk))
1595 return PTR_ERR(d->core_clk);
1596
1597 d->rst = devm_reset_control_get_optional(dev, NULL);
1598 if (IS_ERR(d->rst))
1599 return PTR_ERR(d->rst);
1600
1601 clk_prepare_enable(d->core_clk);
1602 reset_control_deassert(d->rst);
1603
1604 ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
1605 if (ret) {
1606 dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
1607 return ret;
1608 }
1609
1610 id = readl(d->base + DMA_ID);
1611 d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id);
1612 d->port_nrs = FIELD_GET(DMA_ID_PNR, id);
1613 d->ver = FIELD_GET(DMA_ID_REV, id);
1614
1615 if (id & DMA_ID_AW_36B)
1616 d->flags |= DMA_ADDR_36BIT;
1617
1618 if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B))
1619 bitn = 36;
1620
1621 if (id & DMA_ID_DW_128B)
1622 d->flags |= DMA_DATA_128BIT;
1623
1624 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn));
1625 if (ret) {
1626 dev_err(dev, "No usable DMA configuration\n");
1627 return ret;
1628 }
1629
1630 if (d->ver == DMA_VER22) {
1631 ret = ldma_init_v22(d, pdev);
1632 if (ret)
1633 return ret;
1634 }
1635
1636 ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
1637 if (ret < 0)
1638 d->channels_mask = GENMASK(d->chan_nrs - 1, 0);
1639
1640 dma_dev = &d->dma_dev;
1641
1642 dma_cap_zero(dma_dev->cap_mask);
1643 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1644
1645
1646 INIT_LIST_HEAD(&dma_dev->channels);
1647
1648
1649 d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
1650 if (!d->ports)
1651 return -ENOMEM;
1652
1653
1654 d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
1655 if (!d->chans)
1656 return -ENOMEM;
1657
1658 for (i = 0; i < d->port_nrs; i++) {
1659 p = &d->ports[i];
1660 p->portid = i;
1661 p->ldev = d;
1662 }
1663
1664 ret = ldma_cfg_init(d);
1665 if (ret)
1666 return ret;
1667
1668 dma_dev->dev = &pdev->dev;
1669
1670 ch_mask = (unsigned long)d->channels_mask;
1671 for_each_set_bit(j, &ch_mask, d->chan_nrs) {
1672 if (d->ver == DMA_VER22)
1673 ldma_dma_init_v22(j, d);
1674 else
1675 ldma_dma_init_v3X(j, d);
1676 }
1677
1678 dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
1679 dma_dev->device_free_chan_resources = ldma_free_chan_resources;
1680 dma_dev->device_terminate_all = ldma_terminate_all;
1681 dma_dev->device_issue_pending = ldma_issue_pending;
1682 dma_dev->device_tx_status = ldma_tx_status;
1683 dma_dev->device_resume = ldma_resume_chan;
1684 dma_dev->device_pause = ldma_pause_chan;
1685 dma_dev->device_prep_slave_sg = ldma_prep_slave_sg;
1686
1687 if (d->ver == DMA_VER22) {
1688 dma_dev->device_config = ldma_slave_config;
1689 dma_dev->device_synchronize = ldma_synchronize;
1690 dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1691 dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1692 dma_dev->directions = BIT(DMA_MEM_TO_DEV) |
1693 BIT(DMA_DEV_TO_MEM);
1694 dma_dev->residue_granularity =
1695 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1696 }
1697
1698 platform_set_drvdata(pdev, d);
1699
1700 ldma_dev_init(d);
1701
1702 ret = dma_async_device_register(dma_dev);
1703 if (ret) {
1704 dev_err(dev, "Failed to register slave DMA engine device\n");
1705 return ret;
1706 }
1707
1708 ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d);
1709 if (ret) {
1710 dev_err(dev, "Failed to register of DMA controller\n");
1711 dma_async_device_unregister(dma_dev);
1712 return ret;
1713 }
1714
1715 dev_info(dev, "Init done - rev: %x, ports: %d channels: %d\n", d->ver,
1716 d->port_nrs, d->chan_nrs);
1717
1718 return 0;
1719 }
1720
/* Platform driver glue; no remove callback — the device is not unbound */
static struct platform_driver intel_ldma_driver = {
	.probe = intel_ldma_probe,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = intel_ldma_match,
	},
};
1728
1729
1730
1731
1732
1733
1734
/*
 * Register the driver at device_initcall time (builtin only — no
 * module_exit is provided, so the driver is never unregistered).
 */
static int __init intel_ldma_init(void)
{
	return platform_driver_register(&intel_ldma_driver);
}

device_initcall(intel_ldma_init);