#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>
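/*
 * The DMAengine buffer glues the generic IIO DMA buffer core together with
 * the DMAengine framework: the IIO DMA buffer core manages the block memory
 * and implements the iio_buffer operations, while the DMAengine channel is
 * used to perform the actual transfers from the device into those blocks.
 */

/**
 * struct dmaengine_buffer - DMAengine-backed IIO DMA buffer
 * @queue: Generic IIO DMA buffer queue; its embedded iio_buffer is what gets
 *	attached to the IIO device.
 * @chan: DMA channel used for the transfers.
 * @active: List of blocks currently queued on the DMA channel.
 * @align: Required transfer length alignment in bytes, derived from the
 *	channel's address width capabilities.
 * @max_size: Maximum length of a single transfer, limited by the DMA
 *	device's maximum segment size.
 */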
struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}
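/*
 * DMA completion callback: remove the block from the list of in-flight
 * blocks, trim bytes_used by the number of bytes the DMA engine did not
 * transfer (the residue) and hand the block back to the IIO DMA buffer core.
 */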
static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}
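/*
 * Queue a block on the DMA channel: clamp the transfer to the maximum
 * segment size, round it down to the required alignment, prepare a
 * device-to-memory slave transfer and submit it. The block is added to the
 * active list so it can be cleaned up if the transfer is aborted.
 */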
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = round_down(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}
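/*
 * Abort all in-flight transfers: terminate the DMA channel synchronously and
 * return every block on the active list to the IIO DMA buffer core.
 */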
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};
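/*
 * Transfers are rounded down to a multiple of the alignment, so expose it as
 * the length_align_bytes sysfs attribute to let userspace size its buffers
 * accordingly.
 */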
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct attribute *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes.dev_attr.attr,
	NULL,
};
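/**
 * iio_dmaengine_buffer_alloc() - Allocate a new buffer backed by DMAengine
 * @dev: Device used to request the DMA channel
 * @channel: DMA channel name, typically "rx"
 *
 * Allocates a new IIO buffer which uses the DMAengine framework to perform
 * its transfers. Once the buffer is no longer needed it must be freed with
 * iio_dmaengine_buffer_free().
 */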
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
		const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/* Transfers must be aligned to the maximum of the minimum widths */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	/* Don't leak the requested channel if querying its caps failed */
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
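/**
 * iio_dmaengine_buffer_free() - Free a DMAengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */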
static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}

static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}
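/**
 * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
 * @dev: Device used to request the DMA channel and to manage the buffer
 * @channel: DMA channel name, typically "rx"
 *
 * The buffer is automatically freed once @dev is unbound.
 */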
static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
		const char *channel)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return buffer;

	ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
				       buffer);
	if (ret)
		return ERR_PTR(ret);

	return buffer;
}
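/**
 * devm_iio_dmaengine_buffer_setup() - Set up a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach the buffer
 * @channel: DMA channel name, typically "rx"
 *
 * Allocates a DMA buffer with devm_iio_dmaengine_buffer_alloc() (managed via
 * the parent of @indio_dev), adds INDIO_BUFFER_HARDWARE to the supported
 * modes of @indio_dev and attaches the buffer with iio_device_attach_buffer().
 *
 * A driver's probe() would typically call it like this (sketch only; "pdev",
 * "indio_dev" and the "rx" channel name are assumptions, not part of this
 * file):
 *
 *	ret = devm_iio_dmaengine_buffer_setup(&pdev->dev, indio_dev, "rx");
 *	if (ret)
 *		return ret;
 */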
int devm_iio_dmaengine_buffer_setup(struct device *dev,
				    struct iio_dev *indio_dev,
				    const char *channel)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
						 channel);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	return iio_device_attach_buffer(indio_dev, buffer);
}
EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_setup);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");