// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
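
/*
 * Drivers normally never call vchan_tx_submit() directly: vchan_tx_prep()
 * installs it as the descriptor's ->tx_submit() hook. A minimal prep routine
 * might look like the sketch below; "foo_chan" and "foo_desc" are
 * hypothetical driver types embedding virt_dma_chan/virt_dma_desc, not part
 * of this API:
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_chan *fc = container_of(chan, struct foo_chan,
 *						   vc.chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 *
 * dmaengine_submit() on the returned descriptor then lands here, moving the
 * descriptor from desc_allocated to desc_submitted under the channel lock.
 */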

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer to free
 *
 * Frees a previously allocated reusable descriptor. The only other way to
 * release such a descriptor is to clear its DMA_CTRL_REUSE flag and submit
 * it one final time.
 *
 * Returns 0 on success.
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
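
/*
 * This hook is reached through dmaengine_desc_free(), which only calls
 * ->desc_free() when DMA_CTRL_REUSE is set. A hedged sketch of the
 * client-side flow, assuming the channel advertises descriptor_reuse:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *	if (!tx || dmaengine_desc_set_reuse(tx))
 *		goto err;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_desc_free(tx);	this lands in vchan_tx_desc_free()
 */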

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
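
/*
 * The usual consumer of vchan_find_desc() is a driver's ->device_tx_status()
 * hook, where it is used for residue reporting. A minimal sketch;
 * foo_desc_residue() is a hypothetical helper, not part of this API. Note
 * that vchan_find_desc() must be called with vc->lock held:
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *			dma_cookie_t cookie, struct dma_tx_state *state)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct virt_dma_desc *vd;
 *		enum dma_status ret;
 *		unsigned long flags;
 *
 *		ret = dma_cookie_status(chan, cookie, state);
 *		if (ret == DMA_COMPLETE)
 *			return ret;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		vd = vchan_find_desc(vc, cookie);
 *		if (vd)
 *			dma_set_residue(state, foo_desc_residue(vd));
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		return ret;
 *	}
 */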

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(struct tasklet_struct *t)
{
	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		/* No cyclic descriptor: a zeroed cb makes the invoke below a no-op */
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	/* Run the callback for, then free, each completed descriptor */
	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}
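
/*
 * vchan_complete() runs off vc->task, which a driver's interrupt handler
 * kicks indirectly via vchan_cookie_complete() from virt-dma.h. A hedged
 * sketch; "foo_chan" and its "cur" pointer to the in-flight descriptor are
 * hypothetical:
 *
 *	static irqreturn_t foo_dma_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;
 *
 *		spin_lock(&fc->vc.lock);
 *		if (fc->cur) {
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		spin_unlock(&fc->vc.lock);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * vchan_cookie_complete() moves the descriptor onto desc_completed and
 * schedules the tasklet, so the client callback runs in softirq context
 * rather than in the hard interrupt handler.
 */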

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		list_del(&vd->node);
		vchan_vdesc_fini(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
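
/*
 * This is typically paired with vchan_get_all_descriptors() in a driver's
 * ->device_terminate_all() hook: collect every descriptor while holding the
 * lock, then free them after dropping it. A minimal sketch (the step that
 * actually stops the hardware is driver-specific and omitted):
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *
 *		return 0;
 *	}
 */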

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	tasklet_setup(&vc->task, vchan_complete);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
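
/*
 * vchan_init() is called once per channel at probe time, before
 * dma_async_device_register(). The driver must also supply vc->desc_free,
 * which vchan_vdesc_fini() and vchan_tx_desc_free() use to release
 * descriptors. A hedged sketch; the foo_* names are hypothetical:
 *
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fd->ddev);
 *	...
 *	ret = dma_async_device_register(&fd->ddev);
 */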

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("Virtual DMA channel support for DMAengine");
MODULE_LICENSE("GPL");