#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})
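
/* The CCP can register its DMA channels either publicly or privately.
 * The default visibility comes from the device version data (vdata); the
 * dma_chan_attr module parameter below overrides that default.
 */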
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

static unsigned int dmaengine = 1;
module_param(dmaengine, uint, 0444);
MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");

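/* Resolve the effective channel attribute from the module parameter,
 * falling back to the device defaults for CCP_DMA_DFLT or any invalid value.
 */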
static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

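/* dmaengine device_free_chan_resources callback: drop every descriptor
 * still owned by the channel, whichever list it sits on.
 */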
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

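/* Free completed descriptors, but only those the client has already
 * acknowledged via the async_tx ACK flag.
 */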
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

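/* Cleanup tasklet: reclaim acknowledged descriptors from the channel's
 * complete list under the channel lock.
 */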
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

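/* Move the descriptor's first pending command to its active list and queue
 * it on the CCP; -EINPROGRESS and -EBUSY count as successful submission.
 */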
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move the current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

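/* Retire the given descriptor (if any) and return the next active descriptor
 * that still has commands to issue; completion callbacks are invoked without
 * holding the channel lock.
 */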
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with pending commands */
	do {
		if (desc) {
			/* Remove the active DMA command and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
	       ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
	       : NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

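/* Command completion callback: records errors, retires finished descriptors,
 * issues the next queued command and schedules the cleanup tasklet.
 */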
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit a command if there is no descriptor
		 * or if DMA is paused
		 */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_move_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->entry);
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

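/* Build a DMA descriptor whose pending list holds one no-map passthrough
 * CCP command for each contiguous span shared by the source and destination
 * scatterlists.
 */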
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get the per-descriptor status from the complete list */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/* Commands already queued on the CCP are not waited on here;
	 * pausing only prevents further commands from being issued.
	 */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/* Commands already submitted to the CCP are not cancelled here */

	spin_lock_irqsave(&chan->lock, flags);

	/* Drop everything the channel still owns; completed descriptors are
	 * left for the cleanup tasklet.
	 */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

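/* Detach the channels from the dmaengine core and kill their cleanup
 * tasklets; used on unregister and on a failed registration.
 */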
static void ccp_dma_release(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_chan *dma_chan;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;
		tasklet_kill(&chan->cleanup_tasklet);
		list_del_rcu(&dma_chan->device_node);
	}
}

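/* Register the CCP as a DMA provider: one channel per command queue,
 * providing memcpy and interrupt (memory-to-memory) capabilities.
 */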
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	if (!dmaengine)
		return 0;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private.
	 * The default comes from the version data for this device and can be
	 * overridden by the dma_chan_attr module parameter.
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	ccp_dma_release(ccp);
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	if (!dmaengine)
		return;

	dma_async_device_unregister(dma_dev);
	ccp_dma_release(ccp);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}