/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;
	struct list_head desc_terminated;

	struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
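
/*
 * Usage sketch, not part of this API: a driver embeds these structures in
 * its own channel and descriptor types and recovers them with container_of().
 * The "foo" names below are hypothetical.
 *
 *	struct foo_dma_desc {
 *		struct virt_dma_desc vd;
 *		dma_addr_t src, dst;
 *		size_t len;
 *	};
 *
 *	struct foo_dma_chan {
 *		struct virt_dma_chan vc;
 *		struct foo_dma_desc *cur;
 *		void __iomem *base;
 *	};
 *
 *	static inline struct foo_dma_chan *to_foo_chan(struct dma_chan *c)
 *	{
 *		return container_of(to_virt_chan(c), struct foo_dma_chan, vc);
 *	}
 */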

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
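
/*
 * Example (hypothetical "foo" driver): a prep callback allocates its own
 * descriptor, fills in the hardware-specific fields, and hands it to
 * vchan_tx_prep(), which places it on the desc_allocated list.
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_dma_chan *fc = to_foo_chan(chan);
 *		struct foo_dma_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 */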

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
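
/*
 * Example (hypothetical): a driver's device_issue_pending callback takes the
 * channel lock, moves submitted work to the issued list and, if the hardware
 * is idle, starts the next transfer. foo_start_next() and the fc->cur idle
 * test are assumptions, not part of this header.
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct foo_dma_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (vchan_issue_pending(&fc->vc) && !fc->cur)
 *			foo_start_next(fc);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *	}
 */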

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
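
/*
 * Example (hypothetical): called from the driver's interrupt handler with
 * vc.lock held once the hardware reports a transfer done. This assumes the
 * descriptor was already removed from the issued list when it was started
 * (see the foo_start_next() sketch further down).
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dma_chan *fc = data;
 *
 *		spin_lock(&fc->vc.lock);
 *		if (fc->cur) {
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *			foo_start_next(fc);
 *		}
 *		spin_unlock(&fc->vc.lock);
 *		return IRQ_HANDLED;
 *	}
 */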

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx)) {
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		list_add(&vd->node, &vc->desc_allocated);
		spin_unlock_irqrestore(&vc->lock, flags);
	} else {
		vc->desc_free(vd);
	}
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
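
/*
 * Example (hypothetical): for a cyclic transfer the interrupt handler fires
 * once per period and only reports it here; the channel tasklet then invokes
 * the client's callback.
 *
 *	spin_lock(&fc->vc.lock);
 *	vchan_cyclic_callback(&fc->cur->vd);
 *	spin_unlock(&fc->vc.lock);
 */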

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	list_add_tail(&vd->node, &vc->desc_terminated);

	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
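
/*
 * Example (hypothetical): a start routine, called with vc.lock held, peeks
 * at the next issued descriptor and removes it from the list before
 * programming the hardware, so that vchan_cookie_complete() can later move
 * it to desc_completed. foo_hw_start() is an assumed driver helper.
 *
 *	static void foo_start_next(struct foo_dma_chan *fc)
 *	{
 *		struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);
 *
 *		if (!vd)
 *			return;
 *		list_del(&vd->node);
 *		fc->cur = container_of(vd, struct foo_dma_desc, vd);
 *		foo_hw_start(fc, fc->cur);
 *	}
 */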

/**
 * vchan_get_all_descriptors - obtain all descriptors queued on a channel
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all allocated, submitted, issued, completed and terminated
 * descriptors from the internal lists, and provides them on @head for the
 * caller to free.
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
	list_splice_tail_init(&vc->desc_terminated, head);
}

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
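
/*
 * Example (hypothetical): a device_terminate_all callback stops the
 * hardware, parks the in-flight descriptor with vchan_terminate_vdesc(),
 * collects everything else with vchan_get_all_descriptors(), and frees the
 * lot outside the lock. foo_hw_stop() is an assumed driver helper.
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_dma_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_hw_stop(fc);
 *		if (fc->cur) {
 *			vchan_terminate_vdesc(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *		return 0;
 *	}
 */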

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running
 * before returning, and frees any descriptors that were terminated in the
 * meantime. Must not be called with vc.lock held: tasklet_kill() may sleep
 * while waiting for a running tasklet to finish, and the channel tasklet
 * itself takes vc.lock.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	LIST_HEAD(head);
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);

	list_splice_tail_init(&vc->desc_terminated, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
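
/*
 * Example (hypothetical): drivers typically wire this straight into their
 * device_synchronize callback.
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(&to_foo_chan(chan)->vc);
 *	}
 */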

#endif