// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than can be sustained
 * with the one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still
 *    be owned by either the application or the DMA controller at the moment.
 *    But once they are done processing it, the block will be freed instead of
 *    going to either the incoming or outgoing queue.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending transfers and stop active ones.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */

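/*
 * Illustrative sketch only (guarded by "#if 0", not part of this file): a
 * rough outline of how a hardware driver might plug into this infrastructure.
 * All foo_* names are hypothetical, and the exact iio_buffer_access_funcs
 * member names (e.g. the read callback) can differ between kernel versions;
 * only the iio_dma_buffer_* helpers referenced here are provided by this file.
 */
#if 0
struct foo_dma_queue {
    struct iio_dma_buffer_queue queue;  /* generic queue state managed here */
    struct list_head active;            /* blocks handed to the hardware */
    /* hardware specific state, e.g. a DMA channel handle */
};

static int foo_dma_submit(struct iio_dma_buffer_queue *queue,
    struct iio_dma_buffer_block *block)
{
    struct foo_dma_queue *foo = container_of(queue, struct foo_dma_queue,
        queue);

    /*
     * Program the hardware to transfer up to block->size bytes from the
     * converter into block->phys_addr. A driver would typically also track
     * the block on a private list (here foo->active) so that abort() can
     * reclaim it. When the transfer finishes, the driver sets
     * block->bytes_used and calls iio_dma_buffer_block_done() (see the
     * completion sketch further down).
     */
    return foo_hw_start_transfer(foo, block);
}

static void foo_dma_abort(struct iio_dma_buffer_queue *queue);

static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
    .submit = foo_dma_submit,
    .abort = foo_dma_abort,
};

/* The generic callbacks below are the defaults implemented in this file. */
static const struct iio_buffer_access_funcs foo_dma_buffer_access_funcs = {
    .read = iio_dma_buffer_read,
    .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
    .set_length = iio_dma_buffer_set_length,
    .request_update = iio_dma_buffer_request_update,
    .enable = iio_dma_buffer_enable,
    .disable = iio_dma_buffer_disable,
    .data_available = iio_dma_buffer_data_available,
    .release = foo_dma_buffer_release,  /* driver supplied, see sketch below */
    .modes = INDIO_BUFFER_HARDWARE,
    .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};
#endif
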
static void iio_buffer_block_release(struct kref *kref)
{
    struct iio_dma_buffer_block *block = container_of(kref,
        struct iio_dma_buffer_block, kref);

    WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

    dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
                    block->vaddr, block->phys_addr);

    iio_buffer_put(&block->queue->buffer);
    kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
    kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
    kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
    struct iio_dma_buffer_block *block, *_block;
    LIST_HEAD(block_list);

    spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
    list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
    spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

    list_for_each_entry_safe(block, _block, &block_list, head)
        iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
    struct iio_dma_buffer_block *block;
    unsigned long flags;

    block = container_of(kref, struct iio_dma_buffer_block, kref);

    spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
    list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
    spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

    schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
    kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
    return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
    struct iio_dma_buffer_queue *queue, size_t size)
{
    struct iio_dma_buffer_block *block;

    block = kzalloc(sizeof(*block), GFP_KERNEL);
    if (!block)
        return NULL;

    block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
        &block->phys_addr, GFP_KERNEL);
    if (!block->vaddr) {
        kfree(block);
        return NULL;
    }

    block->size = size;
    block->state = IIO_BLOCK_STATE_DEQUEUED;
    block->queue = queue;
    INIT_LIST_HEAD(&block->head);
    kref_init(&block->kref);

    iio_buffer_get(&queue->buffer);

    return block;
}

0193 {
0194     struct iio_dma_buffer_queue *queue = block->queue;
0195 
0196     /*
0197      * The buffer has already been freed by the application, just drop the
0198      * reference.
0199      */
0200     if (block->state != IIO_BLOCK_STATE_DEAD) {
0201         block->state = IIO_BLOCK_STATE_DONE;
0202         list_add_tail(&block->head, &queue->outgoing);
0203     }
0204 }

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
    struct iio_dma_buffer_queue *queue = block->queue;
    unsigned long flags;

    spin_lock_irqsave(&queue->list_lock, flags);
    _iio_dma_buffer_block_done(block);
    spin_unlock_irqrestore(&queue->list_lock, flags);

    iio_buffer_block_put_atomic(block);
    wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
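
/*
 * Illustrative sketch only: a hypothetical completion handler (e.g. run from
 * the DMA controller's interrupt path) handing a finished block back to the
 * core. The foo_* names are made up for the example.
 */
#if 0
static void foo_dma_transfer_complete(void *data, unsigned int bytes_transferred)
{
    struct iio_dma_buffer_block *block = data;

    /*
     * bytes_used may be less than block->size if the hardware shortened the
     * transfer, but it must be a multiple of the bytes per datum.
     */
    block->bytes_used = bytes_transferred;
    iio_dma_buffer_block_done(block);
}
#endif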

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
    struct list_head *list)
{
    struct iio_dma_buffer_block *block, *_block;
    unsigned long flags;

    spin_lock_irqsave(&queue->list_lock, flags);
    list_for_each_entry_safe(block, _block, list, head) {
        list_del(&block->head);
        block->bytes_used = 0;
        _iio_dma_buffer_block_done(block);
        iio_buffer_block_put_atomic(block);
    }
    spin_unlock_irqrestore(&queue->list_lock, flags);

    wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
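
/*
 * Illustrative sketch only: a hypothetical abort() callback, using the
 * struct foo_dma_queue from the sketch near the top of the file. It assumes
 * the driver keeps the blocks it has accepted via submit() on its private
 * "active" list and that foo_hw_stop() synchronously stops the controller so
 * that no further completion callbacks can run.
 */
#if 0
static void foo_dma_abort(struct iio_dma_buffer_queue *queue)
{
    struct foo_dma_queue *foo = container_of(queue, struct foo_dma_queue,
        queue);

    foo_hw_stop(foo);

    /* Sets bytes_used to 0 and hands all listed blocks back to the queue. */
    iio_dma_buffer_block_list_abort(queue, &foo->active);
}
#endif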

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
    /*
     * If the core owns the block it can be re-used. This should be the
     * default case when enabling the buffer, unless the DMA controller does
     * not support abort and has not given back the block yet.
     */
    switch (block->state) {
    case IIO_BLOCK_STATE_DEQUEUED:
    case IIO_BLOCK_STATE_QUEUED:
    case IIO_BLOCK_STATE_DONE:
        return true;
    default:
        return false;
    }
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
    struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
    struct iio_dma_buffer_block *block;
    bool try_reuse = false;
    size_t size;
    int ret = 0;
    int i;

    /*
     * Split the buffer into two even parts. This is used as a double
     * buffering scheme with usually one block at a time being used by the
     * DMA and the other one by the application.
     */
    size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
        queue->buffer.length, 2);

    mutex_lock(&queue->lock);

    /* Allocations are page aligned */
    if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
        try_reuse = true;

    queue->fileio.block_size = size;
    queue->fileio.active_block = NULL;

    spin_lock_irq(&queue->list_lock);
    for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
        block = queue->fileio.blocks[i];

        /* If we can't reuse it, free it */
        if (block && (!iio_dma_block_reusable(block) || !try_reuse))
            block->state = IIO_BLOCK_STATE_DEAD;
    }

    /*
     * At this point all blocks are either owned by the core or marked as
     * dead. This means we can reset the lists without having to fear
     * corruption.
     */
    INIT_LIST_HEAD(&queue->outgoing);
    spin_unlock_irq(&queue->list_lock);

    INIT_LIST_HEAD(&queue->incoming);

    for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
        if (queue->fileio.blocks[i]) {
            block = queue->fileio.blocks[i];
            if (block->state == IIO_BLOCK_STATE_DEAD) {
                /* Could not reuse it */
                iio_buffer_block_put(block);
                block = NULL;
            } else {
                block->size = size;
            }
        } else {
            block = NULL;
        }

        if (!block) {
            block = iio_dma_buffer_alloc_block(queue, size);
            if (!block) {
                ret = -ENOMEM;
                goto out_unlock;
            }
            queue->fileio.blocks[i] = block;
        }

        block->state = IIO_BLOCK_STATE_QUEUED;
        list_add_tail(&block->head, &queue->incoming);
    }

out_unlock:
    mutex_unlock(&queue->lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
    struct iio_dma_buffer_block *block)
{
    int ret;

    /*
     * If the hardware has already been removed we put the block into
     * limbo. It will neither be on the incoming nor outgoing list, nor will
     * it ever complete. It will just wait to be freed eventually.
     */
    if (!queue->ops)
        return;

    block->state = IIO_BLOCK_STATE_ACTIVE;
    iio_buffer_block_get(block);
    ret = queue->ops->submit(queue, block);
    if (ret) {
        /*
         * This is a bit of a problem and there is not much we can do
         * other than wait for the buffer to be disabled and re-enabled
         * and try again. But it should not really happen unless we run
         * out of memory or something similar.
         *
         * TODO: Implement support in the IIO core to allow buffers to
         * notify consumers that something went wrong and the buffer
         * should be disabled.
         */
        iio_buffer_block_put(block);
    }
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be used as the enable callback for the
 * iio_buffer_access_funcs struct.
 *
 * This will start the DMA transfers for all blocks waiting on the incoming
 * queue.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
    struct iio_dev *indio_dev)
{
    struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
    struct iio_dma_buffer_block *block, *_block;

    mutex_lock(&queue->lock);
    queue->active = true;
    list_for_each_entry_safe(block, _block, &queue->incoming, head) {
        list_del(&block->head);
        iio_dma_buffer_submit_block(queue, block);
    }
    mutex_unlock(&queue->lock);

    return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be used as the disable callback for the
 * iio_buffer_access_funcs struct.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
    struct iio_dev *indio_dev)
{
    struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

    mutex_lock(&queue->lock);
    queue->active = false;

    if (queue->ops && queue->ops->abort)
        queue->ops->abort(queue);
    mutex_unlock(&queue->lock);

    return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
    struct iio_dma_buffer_block *block)
{
    if (block->state == IIO_BLOCK_STATE_DEAD) {
        iio_buffer_block_put(block);
    } else if (queue->active) {
        iio_dma_buffer_submit_block(queue, block);
    } else {
        block->state = IIO_BLOCK_STATE_QUEUED;
        list_add_tail(&block->head, &queue->incoming);
    }
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
    struct iio_dma_buffer_queue *queue)
{
    struct iio_dma_buffer_block *block;

    spin_lock_irq(&queue->list_lock);
    block = list_first_entry_or_null(&queue->outgoing,
        struct iio_dma_buffer_block, head);
    if (block != NULL) {
        list_del(&block->head);
        block->state = IIO_BLOCK_STATE_DEQUEUED;
    }
    spin_unlock_irq(&queue->list_lock);

    return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
    char __user *user_buffer)
{
    struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
    struct iio_dma_buffer_block *block;
    int ret;

    if (n < buffer->bytes_per_datum)
        return -EINVAL;

    mutex_lock(&queue->lock);

    if (!queue->fileio.active_block) {
        block = iio_dma_buffer_dequeue(queue);
        if (block == NULL) {
            ret = 0;
            goto out_unlock;
        }
        queue->fileio.pos = 0;
        queue->fileio.active_block = block;
    } else {
        block = queue->fileio.active_block;
    }

    n = rounddown(n, buffer->bytes_per_datum);
    if (n > block->bytes_used - queue->fileio.pos)
        n = block->bytes_used - queue->fileio.pos;

    if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
        ret = -EFAULT;
        goto out_unlock;
    }

    queue->fileio.pos += n;

    if (queue->fileio.pos == block->bytes_used) {
        queue->fileio.active_block = NULL;
        iio_dma_buffer_enqueue(queue, block);
    }

    ret = n;

out_unlock:
    mutex_unlock(&queue->lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
    struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
    struct iio_dma_buffer_block *block;
    size_t data_available = 0;

    /*
     * For counting the available bytes we'll use the size of the block, not
     * the number of actual bytes available in the block. Otherwise it is
     * possible that we end up with a value that is lower than the watermark
     * but won't increase since all blocks are in use.
     */

    mutex_lock(&queue->lock);
    if (queue->fileio.active_block)
        data_available += queue->fileio.active_block->size;

    spin_lock_irq(&queue->list_lock);
    list_for_each_entry(block, &queue->outgoing, head)
        data_available += block->size;
    spin_unlock_irq(&queue->list_lock);
    mutex_unlock(&queue->lock);

    return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
    buffer->bytes_per_datum = bpd;

    return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
    /* Avoid an invalid state */
    if (length < 2)
        length = 2;
    buffer->length = length;
    buffer->watermark = length / 2;

    return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations, so it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
    struct device *dev, const struct iio_dma_buffer_ops *ops)
{
    iio_buffer_init(&queue->buffer);
    queue->buffer.length = PAGE_SIZE;
    queue->buffer.watermark = queue->buffer.length / 2;
    queue->dev = dev;
    queue->ops = ops;

    INIT_LIST_HEAD(&queue->incoming);
    INIT_LIST_HEAD(&queue->outgoing);

    mutex_init(&queue->lock);
    spin_lock_init(&queue->list_lock);

    return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
    unsigned int i;

    mutex_lock(&queue->lock);

    spin_lock_irq(&queue->list_lock);
    for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
        if (!queue->fileio.blocks[i])
            continue;
        queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
    }
    INIT_LIST_HEAD(&queue->outgoing);
    spin_unlock_irq(&queue->list_lock);

    INIT_LIST_HEAD(&queue->incoming);

    for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
        if (!queue->fileio.blocks[i])
            continue;
        iio_buffer_block_put(queue->fileio.blocks[i]);
        queue->fileio.blocks[i] = NULL;
    }
    queue->fileio.active_block = NULL;
    queue->ops = NULL;

    mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
    mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

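/*
 * Illustrative sketch only: tying the pieces from the earlier sketches
 * together. A hypothetical driver allocates its queue wrapper, initializes it
 * with iio_dma_buffer_init() and the device that performs the DMA, and tears
 * everything down again with iio_dma_buffer_exit() and
 * iio_dma_buffer_release(). The foo_* names and the surrounding allocation
 * flow are assumptions, not part of this file.
 */
#if 0
static void foo_dma_buffer_release(struct iio_buffer *buffer)
{
    struct foo_dma_queue *foo = container_of(buffer, struct foo_dma_queue,
        queue.buffer);

    iio_dma_buffer_release(&foo->queue);
    kfree(foo);
}

static struct iio_buffer *foo_dma_buffer_alloc(struct device *dma_dev)
{
    struct foo_dma_queue *foo;

    foo = kzalloc(sizeof(*foo), GFP_KERNEL);
    if (!foo)
        return ERR_PTR(-ENOMEM);

    INIT_LIST_HEAD(&foo->active);
    iio_dma_buffer_init(&foo->queue, dma_dev, &foo_dma_buffer_ops);
    foo->queue.buffer.access = &foo_dma_buffer_access_funcs;

    return &foo->queue.buffer;
}

static void foo_dma_buffer_free(struct iio_buffer *buffer)
{
    struct foo_dma_queue *foo = container_of(buffer, struct foo_dma_queue,
        queue.buffer);

    iio_dma_buffer_exit(&foo->queue);
    iio_buffer_put(buffer);     /* final put ends up in foo_dma_buffer_release() */
}
#endif
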
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");