// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for STMicroelectronics STi FDMA controller
 *
 * Copyright (C) 2014 STMicroelectronics
 *
 * Author: Ludovic Barre <Ludovic.barre@st.com>
 *	   Peter Griffin <peter.griffin@linaro.org>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/slab.h>

#include "st_fdma.h"

static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
	return container_of(c, struct st_fdma_chan, vchan.chan);
}

static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct st_fdma_desc, vdesc);
}

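/* Reserve a DMA request line for the channel; paired with st_fdma_dreq_put() */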
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;
	u32 req_line_cfg = fchan->cfg.req_line;
	u32 dreq_line;
	int try = 0;

	/*
	 * dreq_mask is shared by all channels of the FDMA, so all accesses
	 * must be atomic. If the mask changes between the check and
	 * test_and_set_bit(), retry.
	 */
	do {
		if (fdev->dreq_mask == ~0L) {
			dev_err(fdev->dev, "No req lines available\n");
			return -EINVAL;
		}

		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
			dev_err(fdev->dev, "Invalid or used req line\n");
			return -EINVAL;
		} else {
			dreq_line = req_line_cfg;
		}

		try++;
	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
		dreq_line, fdev->dreq_mask);

	return dreq_line;
}

static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;

	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}

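/*
 * Take the next descriptor queued on the virtual channel and start it on
 * the hardware. Must be called with fchan->vchan.lock held.
 */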
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
	struct virt_dma_desc *vdesc;
	unsigned long nbytes, ch_cmd, cmd;

	vdesc = vchan_next_desc(&fchan->vchan);
	if (!vdesc)
		return;

	fchan->fdesc = to_st_fdma_desc(vdesc);
	nbytes = fchan->fdesc->node[0].desc->nbytes;
	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

	/* start the channel for the descriptor */
	fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
	fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
	writel(cmd,
	       fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}

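/*
 * Refresh fchan->status from the channel command register; an error flagged
 * in the interrupt status takes precedence over the channel state.
 */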
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
				  unsigned long int_sta)
{
	unsigned long ch_sta, ch_err;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
	ch_sta &= FDMA_CH_CMD_STA_MASK;

	if (int_sta & FDMA_INT_STA_ERR) {
		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
		fchan->status = DMA_ERROR;
		return;
	}

	switch (ch_sta) {
	case FDMA_CH_CMD_STA_PAUSED:
		fchan->status = DMA_PAUSED;
		break;

	case FDMA_CH_CMD_STA_RUNNING:
		fchan->status = DMA_IN_PROGRESS;
		break;
	}
}

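/*
 * The interrupt status register carries two bits per channel (completion
 * and error), hence the shift by two per channel when scanning it.
 */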
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
	struct st_fdma_dev *fdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	struct st_fdma_chan *fchan = &fdev->chans[0];
	unsigned long int_sta, clr;

	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
	clr = int_sta;

	for (; int_sta != 0; int_sta >>= 2, fchan++) {
		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
			continue;

		spin_lock(&fchan->vchan.lock);
		st_fdma_ch_sta_update(fchan, int_sta);

		if (fchan->fdesc) {
			if (!fchan->fdesc->iscyclic) {
				list_del(&fchan->fdesc->vdesc.node);
				vchan_cookie_complete(&fchan->fdesc->vdesc);
				fchan->fdesc = NULL;
				fchan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fchan->fdesc->vdesc);
			}

			/* Start the next descriptor (if available) */
			if (!fchan->fdesc)
				st_fdma_xfer_desc(fchan);
		}

		spin_unlock(&fchan->vchan.lock);
		ret = IRQ_HANDLED;
	}

	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

	return ret;
}

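/*
 * Translate a DT phandle into a channel: boot the SLIM core (deferring
 * probe while its firmware is unavailable), grab any free channel and
 * apply the request line, request control and transfer type DT arguments.
 */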
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct st_fdma_dev *fdev = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct st_fdma_chan *fchan;
	int ret;

	if (dma_spec->args_count < 1)
		return ERR_PTR(-EINVAL);

	if (fdev->dma_device.dev->of_node != dma_spec->np)
		return ERR_PTR(-EINVAL);

	ret = rproc_boot(fdev->slim_rproc->rproc);
	if (ret == -ENOENT)
		return ERR_PTR(-EPROBE_DEFER);
	else if (ret)
		return ERR_PTR(ret);

	chan = dma_get_any_slave_channel(&fdev->dma_device);
	if (!chan)
		goto err_chan;

	fchan = to_st_fdma_chan(chan);

	fchan->cfg.of_node = dma_spec->np;
	fchan->cfg.req_line = dma_spec->args[0];
	fchan->cfg.req_ctrl = 0;
	fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

	if (dma_spec->args_count > 1)
		fchan->cfg.req_ctrl = dma_spec->args[1]
			& FDMA_REQ_CTRL_CFG_MASK;

	if (dma_spec->args_count > 2)
		fchan->cfg.type = dma_spec->args[2];

	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
		fchan->dreq_line = 0;
	} else {
		fchan->dreq_line = st_fdma_dreq_get(fchan);
		if (IS_ERR_VALUE(fchan->dreq_line)) {
			chan = ERR_PTR(fchan->dreq_line);
			goto err_chan;
		}
	}

	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
		fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

	return chan;

err_chan:
	rproc_shutdown(fdev->slim_rproc->rproc);
	return chan;
}

static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = to_st_fdma_desc(vdesc);
	for (i = 0; i < fdesc->n_nodes; i++)
		dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
}

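/*
 * Allocate a descriptor with sg_len hardware nodes from the channel's DMA
 * pool, unwinding the partial allocation on failure.
 */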
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
					       int sg_len)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
	if (!fdesc)
		return NULL;

	fdesc->fchan = fchan;
	fdesc->n_nodes = sg_len;
	for (i = 0; i < sg_len; i++) {
		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
				GFP_NOWAIT, &fdesc->node[i].pdesc);
		if (!fdesc->node[i].desc)
			goto err;
	}
	return fdesc;

err:
	while (--i >= 0)
		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
	return NULL;
}

static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	/* Create the dma pool for descriptor allocation */
	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
					   fchan->fdev->dev,
					   sizeof(struct st_fdma_hw_node),
					   __alignof__(struct st_fdma_hw_node),
					   0);

	if (!fchan->node_pool) {
		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
		return -ENOMEM;
	}

	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
		fchan->vchan.chan.chan_id, fchan->cfg.type);

	return 0;
}

static void st_fdma_free_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
	unsigned long flags;

	dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
		__func__, fchan->vchan.chan.chan_id);

	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
		st_fdma_dreq_put(fchan);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fchan->fdesc = NULL;
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	dma_pool_destroy(fchan->node_pool);
	fchan->node_pool = NULL;
	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));

	rproc_shutdown(rproc);
}

static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;

	if (!len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	/* We only require a single descriptor */
	fdesc = st_fdma_alloc_desc(fchan, 1);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	hw_node = fdesc->node[0].desc;
	hw_node->next = 0;
	hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
	hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
	hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;
	hw_node->nbytes = len;
	hw_node->saddr = src;
	hw_node->daddr = dst;
	hw_node->generic.length = len;
	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

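/*
 * Program the request control register for a slave transfer: the
 * write-not-read bit from the direction, the load/store opcode from the
 * bus width and the number of operations from maxburst.
 */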
static int config_reqctrl(struct st_fdma_chan *fchan,
			  enum dma_transfer_direction direction)
{
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.src_maxburst;
		width = fchan->scfg.src_addr_width;
		addr = fchan->scfg.src_addr;
		break;

	case DMA_MEM_TO_DEV:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.dst_maxburst;
		width = fchan->scfg.dst_addr_width;
		addr = fchan->scfg.dst_addr;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
		break;

	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
	fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst - 1);
	dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

	fchan->cfg.dev_addr = addr;
	fchan->cfg.dir = direction;

	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
		ch_id, addr, fchan->cfg.req_ctrl);

	return 0;
}

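/*
 * Point the static side of a node at the device address and mark the other
 * side as incrementing, according to the transfer direction.
 */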
static void fill_hw_node(struct st_fdma_hw_node *hw_node,
			 struct st_fdma_chan *fchan,
			 enum dma_transfer_direction direction)
{
	if (direction == DMA_MEM_TO_DEV) {
		hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
		hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
		hw_node->daddr = fchan->cfg.dev_addr;
	} else {
		hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
		hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
		hw_node->saddr = fchan->cfg.dev_addr;
	}

	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;
}

static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
		size_t len, enum dma_transfer_direction direction)
{
	struct st_fdma_chan *fchan;

	if (!chan || !len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	if (!is_slave_direction(direction)) {
		dev_err(fchan->fdev->dev, "bad direction?\n");
		return NULL;
	}

	return fchan;
}

static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	int sg_len, i;

	fchan = st_fdma_prep_common(chan, len, direction);
	if (!fchan)
		return NULL;

	if (!period_len)
		return NULL;

	if (config_reqctrl(fchan, direction)) {
		dev_err(fchan->fdev->dev, "bad width or direction\n");
		return NULL;
	}

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0) {
		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
		return NULL;
	}

	sg_len = len / period_len;
	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = true;

	for (i = 0; i < sg_len; i++) {
		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

		hw_node->control =
			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
		hw_node->control |= FDMA_NODE_CTRL_INT_EON;

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = buf_addr + (i * period_len);
		else
			hw_node->daddr = buf_addr + (i * period_len);

		hw_node->nbytes = period_len;
		hw_node->generic.length = period_len;
	}

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;
	struct scatterlist *sg;
	int i;

	fchan = st_fdma_prep_common(chan, sg_len, direction);
	if (!fchan)
		return NULL;

	if (!sgl)
		return NULL;

	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = false;

	for_each_sg(sgl, sg, sg_len, i) {
		hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
		hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = sg_dma_address(sg);
		else
			hw_node->daddr = sg_dma_address(sg);

		hw_node->nbytes = sg_dma_len(sg);
		hw_node->generic.length = sg_dma_len(sg);
	}

	/* interrupt at end of last node */
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

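/*
 * Sum the byte counts of the nodes not yet executed, walking backwards
 * until the node matching the current hardware descriptor pointer, which
 * contributes its remaining count from the FDMA_CNTN register.
 */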
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
				   struct virt_dma_desc *vdesc,
				   bool in_progress)
{
	struct st_fdma_desc *fdesc = fchan->fdesc;
	size_t residue = 0;
	dma_addr_t cur_addr = 0;
	int i;

	if (in_progress) {
		cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
		cur_addr &= FDMA_CH_CMD_DATA_MASK;
	}

	for (i = fchan->fdesc->n_nodes - 1; i >= 0; i--) {
		if (cur_addr == fdesc->node[i].pdesc) {
			residue += fnode_read(fchan, FDMA_CNTN_OFST);
			break;
		}
		residue += fdesc->node[i].desc->nbytes;
	}

	return residue;
}

static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	vd = vchan_find_desc(&fchan->vchan, cookie);
	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
	else if (vd)
		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return ret;
}

static void st_fdma_issue_pending(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fchan->vchan.lock, flags);

	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
		st_fdma_xfer_desc(fchan);

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}

static int st_fdma_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc)
		fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

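/* Resume by writing back the descriptor pointer with the status bits cleared */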
static int st_fdma_resume(struct dma_chan *chan)
{
	unsigned long flags;
	unsigned long val;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;

	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc) {
		val = fchan_read(fchan, FDMA_CH_CMD_OFST);
		val &= FDMA_CH_CMD_DATA_MASK;
		fchan_write(fchan, val, FDMA_CH_CMD_OFST);
	}
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

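/*
 * Pause the channel, drop the in-flight descriptor and free everything
 * still queued on the virtual channel.
 */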
static int st_fdma_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	LIST_HEAD(head);
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	fchan->fdesc = NULL;
	vchan_get_all_descriptors(&fchan->vchan, &head);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fchan->vchan, &head);

	return 0;
}

static int st_fdma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *slave_cfg)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
	return 0;
}

static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
	.name = "STiH407",
	.id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
	.name = "STiH407",
	.id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
	.name = "STiH407",
	.id = 2,
};

static const struct of_device_id st_fdma_match[] = {
	{ .compatible = "st,stih407-fdma-mpe31-11",
	  .data = &fdma_mpe31_stih407_11 },
	{ .compatible = "st,stih407-fdma-mpe31-12",
	  .data = &fdma_mpe31_stih407_12 },
	{ .compatible = "st,stih407-fdma-mpe31-13",
	  .data = &fdma_mpe31_stih407_13 },
	{},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);

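/* Build the SLIM firmware name and read the channel count from DT */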
static int st_fdma_parse_dt(struct platform_device *pdev,
			    const struct st_fdma_driverdata *drvdata,
			    struct st_fdma_dev *fdev)
{
	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
		 drvdata->name, drvdata->id);

	return of_property_read_u32(pdev->dev.of_node, "dma-channels",
				    &fdev->nr_channels);
}

#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void st_fdma_free(struct st_fdma_dev *fdev)
{
	struct st_fdma_chan *fchan;
	int i;

	for (i = 0; i < fdev->nr_channels; i++) {
		fchan = &fdev->chans[i];
		list_del(&fchan->vchan.chan.device_node);
		tasklet_kill(&fchan->vchan.task);
	}
}

static int st_fdma_probe(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev;
	const struct of_device_id *match;
	struct device_node *np = pdev->dev.of_node;
	const struct st_fdma_driverdata *drvdata;
	int ret, i;

	match = of_match_device(st_fdma_match, &pdev->dev);
	if (!match || !match->data) {
		dev_err(&pdev->dev, "No device match found\n");
		return -ENODEV;
	}

	drvdata = match->data;

	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	ret = st_fdma_parse_dt(pdev, drvdata, fdev);
	if (ret) {
		dev_err(&pdev->dev, "unable to find platform data\n");
		goto err;
	}

	fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
				   sizeof(struct st_fdma_chan), GFP_KERNEL);
	if (!fdev->chans)
		return -ENOMEM;

	fdev->dev = &pdev->dev;
	fdev->drvdata = drvdata;
	platform_set_drvdata(pdev, fdev);

	fdev->irq = platform_get_irq(pdev, 0);
	if (fdev->irq < 0)
		return fdev->irq;

	ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
			       dev_name(&pdev->dev), fdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
		goto err;
	}

	fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
	if (IS_ERR(fdev->slim_rproc)) {
		ret = PTR_ERR(fdev->slim_rproc);
		dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
		goto err;
	}

	/* Initialise list of FDMA channels */
	INIT_LIST_HEAD(&fdev->dma_device.channels);
	for (i = 0; i < fdev->nr_channels; i++) {
		struct st_fdma_chan *fchan = &fdev->chans[i];

		fchan->fdev = fdev;
		fchan->vchan.desc_free = st_fdma_free_desc;
		vchan_init(&fchan->vchan, &fdev->dma_device);
	}

	/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
	fdev->dreq_mask = BIT(0) | BIT(31);

	dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);

	fdev->dma_device.dev = &pdev->dev;
	fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
	fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
	fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
	fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
	fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
	fdev->dma_device.device_tx_status = st_fdma_tx_status;
	fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
	fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
	fdev->dma_device.device_config = st_fdma_slave_config;
	fdev->dma_device.device_pause = st_fdma_pause;
	fdev->dma_device.device_resume = st_fdma_resume;

	fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dmaenginem_async_device_register(&fdev->dma_device);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register DMA device (%d)\n", ret);
		goto err_rproc;
	}

	ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register controller (%d)\n", ret);
		goto err_rproc;
	}

	dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);

	return 0;

err_rproc:
	st_fdma_free(fdev);
	st_slim_rproc_put(fdev->slim_rproc);
err:
	return ret;
}

static int st_fdma_remove(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

	devm_free_irq(&pdev->dev, fdev->irq, fdev);
	st_slim_rproc_put(fdev->slim_rproc);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct platform_driver st_fdma_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = st_fdma_match,
	},
	.probe = st_fdma_probe,
	.remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform:" DRIVER_NAME);