// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA buffer support for the IIO framework.
 *
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
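
/*
 * For DMA buffers the storage is sub-divided into so called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application.
 *
 * A block can be in one of the following states:
 *  * Dequeued (IIO_BLOCK_STATE_DEQUEUED): The block is owned by the
 *    application, e.g. its data is currently being copied to userspace by
 *    read().
 *  * Queued (IIO_BLOCK_STATE_QUEUED): The block is on the incoming list,
 *    waiting to be submitted to the hardware once the buffer is enabled.
 *  * Active (IIO_BLOCK_STATE_ACTIVE): The block has been submitted to the DMA
 *    controller driver and a transfer is in flight.
 *  * Done (IIO_BLOCK_STATE_DONE): The transfer has completed and the block is
 *    on the outgoing list, waiting to be picked up by the application.
 *  * Dead (IIO_BLOCK_STATE_DEAD): The block has been removed from the queue
 *    and will be freed once the last reference to it is dropped.
 *
 * Blocks are reference counted. The queue holds a reference for each block in
 * its fileio.blocks array and the DMA controller driver holds an additional
 * reference while a transfer is in flight. A block, and the DMA memory backing
 * it, is only freed once all references have been dropped.
 */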

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
		block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}
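
/*
 * dma_free_coherent() can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */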
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}
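
/*
 * Version of iio_buffer_block_put() that can be called from atomic context.
 */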
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * If the block has been marked as dead the buffer has already been
	 * torn down; don't hand it back, the caller's reference drop will
	 * free it.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}
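
/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called by the DMA controller driver when it has finished handling
 * the block. This hands ownership of the block back to the queue and wakes up
 * any readers waiting for data.
 */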
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
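
/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *	aborted
 * @queue: Queue the aborted blocks belong to
 * @list: List of aborted blocks, all blocks in this list must be from @queue
 *
 * Typically called from the DMA controller driver's abort() callback after the
 * DMA has been stopped. This sets bytes_used to 0 for each block in the list
 * and hands the blocks back to the queue.
 */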
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * common case when the buffer is re-enabled, unless the DMA controller
	 * does not support abort and has not given the block back yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}
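
/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Allocates (or re-uses) the blocks backing the buffer according to the
 * current configuration. Intended to be used as the iio_buffer_access_ops
 * request_update callback for DMA buffers.
 */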
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This allows double buffering:
	 * while the application processes one block the other can be filled by
	 * the hardware.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it, mark it dead so it gets freed below */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead, so the lists can be reset without fear of corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not re-use it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the queue has already been torn down (ops cleared in
	 * iio_dma_buffer_exit()) the block is put into limbo. It will neither
	 * be on the incoming nor the outgoing list, nor will it ever complete.
	 * It just waits to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * There is not much we can do here other than drop the DMA
		 * driver's reference and wait for the buffer to be disabled
		 * and re-enabled. This should not happen unless we run out of
		 * memory or something similar.
		 */
		iio_buffer_block_put(block);
	}
}
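
/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Submits all queued blocks to the DMA controller and marks the queue as
 * active. Intended to be used as the iio_buffer_access_ops enable callback
 * for DMA buffers.
 */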
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
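
/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Marks the queue as inactive and asks the DMA controller driver to abort any
 * transfers that are still in flight. Intended to be used as the
 * iio_buffer_access_ops disable callback for DMA buffers.
 */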
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}
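
/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Intended to be used as the iio_buffer_access_ops read callback for DMA
 * buffers.
 */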
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
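
/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer for which to report the amount of available data
 *
 * Intended to be used as the iio_buffer_access_ops data_available callback
 * for DMA buffers.
 */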
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we use the size of the block, not
	 * the number of bytes actually used in the block. Otherwise it is
	 * possible to end up with a value that is lower than the watermark but
	 * will never increase because all blocks are in use.
	 */
	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
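
/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Intended to be used as the iio_buffer_access_ops set_bytes_per_datum
 * callback for DMA buffers.
 */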
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
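
/**
 * iio_dma_buffer_set_length() - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Intended to be used as the iio_buffer_access_ops set_length callback for
 * DMA buffers.
 */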
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
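
/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer queue to initialize
 * @dev: Device that will perform the DMA
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device is used by the queue for DMA memory allocations, so it should
 * refer to the device that will perform the DMA to ensure that the allocated
 * memory is accessible to that device.
 */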
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
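
/**
 * iio_dma_buffer_exit() - Clean up DMA buffer queue
 * @queue: Buffer queue to clean up
 *
 * Marks all blocks as dead and drops the queue's references to them. After
 * this function has completed the queue no longer calls into @ops, so it is
 * safe to free any resources used by the callback operations.
 */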
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
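
/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer queue to release
 *
 * Frees the resources that could not yet be freed in iio_dma_buffer_exit().
 * Should be called right before the memory backing the queue is freed.
 */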
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");