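// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 */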

#include <linux/anon_inodes.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}
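
/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/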
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
		return -EPERM;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

static size_t iio_buffer_space_available(struct iio_buffer *buf)
{
	if (buf->access->space_available)
		return buf->access->space_available(buf);

	return SIZE_MAX;
}

static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	size_t written;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->write)
		return -EINVAL;

	if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
		return -EPERM;

	written = 0;
	add_wait_queue(&rb->pollq, &wait);
	do {
		/*
		 * Break (rather than return) on unregister so the wait queue
		 * entry added above is always removed before returning.
		 */
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_space_available(rb)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->write(rb, n - written, buf + written);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;

		if (ret > 0) {
			written += ret;
			if (written != n && !(filp->f_flags & O_NONBLOCK))
				continue;
		}
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	/* Report how much was actually written, not how much was requested. */
	return ret < 0 ? ret : written;
}
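
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */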
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;

	if (!indio_dev->info || !rb)
		return 0;

	poll_wait(filp, &rb->pollq, wait);

	switch (rb->direction) {
	case IIO_BUFFER_DIRECTION_IN:
		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
			return EPOLLIN | EPOLLRDNORM;
		break;
	case IIO_BUFFER_DIRECTION_OUT:
		if (iio_buffer_space_available(rb))
			return EPOLLOUT | EPOLLWRNORM;
		break;
	}

	return 0;
}

ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_read(filp, buf, n, f_ps);
}

ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
				 size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_write(filp, buf, n, f_ps);
}

__poll_t iio_buffer_poll_wrapper(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return 0;

	return iio_buffer_poll(filp, wait);
}
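
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */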
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		wake_up(&buffer->pollq);
	}
}
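
/**
 * iio_pop_from_buffer() - Pop a datum from the front of a buffer
 * @buffer: The buffer to pop from
 * @data: Destination for the popped datum; must be able to hold one
 *	  complete datum of the buffer
 */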
int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
{
	if (!buffer || !buffer->access || !buffer->access->remove_from)
		return -EINVAL;

	return buffer->access->remove_from(buffer, data);
}
EXPORT_SYMBOL_GPL(iio_pop_from_buffer);

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

void iio_device_detach_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		iio_buffer_put(buffer);
	}

	kfree(iio_dev_opaque->attached_buffers);
}

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
				  iio_endian_prefix[type],
				  this_attr->c->scan_type.sign,
				  this_attr->c->scan_type.realbits,
				  this_attr->c->scan_type.storagebits,
				  this_attr->c->scan_type.repeat,
				  this_attr->c->scan_type.shift);
	else
		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
				  iio_endian_prefix[type],
				  this_attr->c->scan_type.sign,
				  this_attr->c->scan_type.realbits,
				  this_attr->c->scan_type.storagebits,
				  this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sysfs_emit(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
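
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */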
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		return -EINVAL;
	}

	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
	if (!trialmask)
		return -ENOMEM;
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_buffer *buffer = this_attr->buffer;

	ret = kstrtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool state;

	ret = kstrtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t length_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->length);
}

static ssize_t length_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_storage_bytes_for_si(indio_dev,
					iio_dev_opaque->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/*
	 * Pick the operating mode: prefer triggered mode when a trigger is
	 * available, then hardware buffering, then software buffering.
	 */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (!compound_mask)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (!scan_mask)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}
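
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */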
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
				struct iio_demux_table **p, unsigned int in_loc,
				unsigned int out_loc, unsigned int length)
{
	/* Coalesce with the previous entry when both ranges are contiguous */
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (!*p)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}

	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (!buffer->demux_bounce) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *tmp = NULL;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	iio_dev_opaque->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
						      config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret) {
			tmp = buffer;
			goto err_disable_buffers;
		}
	}

	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list,
				    buffer_list);
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We accumulate the error codes and report the
	 * first error we ran into.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure successive attempts to re-enable the
	 * buffers will fail cleanly.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}
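
/**
 * iio_update_buffers - add or remove buffer from active list
 * @indio_dev:		device to add buffer to
 * @insert_buffer:	buffer to insert
 * @remove_buffer:	buffer to remove
 *
 * Note this will tear down all buffering and build it up again
 */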
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	if (insert_buffer &&
	    insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
		return -EINVAL;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (!indio_dev->info) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool inlist;

	ret = kstrtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%u\n", buffer->watermark);
}

static ssize_t watermark_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t data_available_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static ssize_t direction_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	switch (buffer->direction) {
	case IIO_BUFFER_DIRECTION_IN:
		return sysfs_emit(buf, "in\n");
	case IIO_BUFFER_DIRECTION_OUT:
		return sysfs_emit(buf, "out\n");
	default:
		return -EINVAL;
	}
}

static DEVICE_ATTR_RW(length);
static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
static DEVICE_ATTR_RW(enable);
static DEVICE_ATTR_RW(watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
static DEVICE_ATTR_RO(data_available);
static DEVICE_ATTR_RO(direction);
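
/*
 * When adding new attributes here, put them at the end, at least until
 * the code that handles the length/length_ro & watermark/watermark_ro
 * assignments gets cleaned up. Otherwise these can create some weird
 * duplicate attributes errors under some setups.
 */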
static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
	&dev_attr_direction.attr,
};

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
					      struct attribute *attr)
{
	struct device_attribute *dattr = to_dev_attr(attr);
	struct iio_dev_attr *iio_attr;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (!iio_attr)
		return NULL;

	iio_attr->buffer = buffer;
	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
	if (!iio_attr->dev_attr.attr.name) {
		kfree(iio_attr);
		return NULL;
	}

	sysfs_attr_init(&iio_attr->dev_attr.attr);

	list_add(&iio_attr->l, &buffer->buffer_attr_list);

	return &iio_attr->dev_attr.attr;
}

static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
						   struct attribute **buffer_attrs,
						   int buffer_attrcount,
						   int scan_el_attrcount)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct attribute_group *group;
	struct attribute **attrs;
	int ret;

	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_buffer_group;
	group->attrs = attrs;
	group->name = "buffer";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_buffer_attrs;

	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	memcpy(attrs, &buffer_attrs[buffer_attrcount],
	       scan_el_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_scan_el_group;
	group->attrs = attrs;
	group->name = "scan_elements";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_scan_el_attrs;

	return 0;

error_free_scan_el_attrs:
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
error_free_buffer_attrs:
	kfree(iio_dev_opaque->legacy_buffer_group.attrs);

	return ret;
}

static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}

static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev_buffer_pair *ib = filep->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_buffer *buffer = ib->buffer;

	wake_up(&buffer->pollq);

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_buffer_chrdev_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read,
	.write = iio_buffer_write,
	.poll = iio_buffer_poll,
	.release = iio_buffer_chrdev_release,
};

static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int __user *ival = (int __user *)arg;
	struct iio_dev_buffer_pair *ib;
	struct iio_buffer *buffer;
	int fd, idx, ret;

	if (copy_from_user(&idx, ival, sizeof(idx)))
		return -EFAULT;

	if (idx >= iio_dev_opaque->attached_buffers_cnt)
		return -ENODEV;

	iio_device_get(indio_dev);

	buffer = iio_dev_opaque->attached_buffers[idx];

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
		ret = -EBUSY;
		goto error_iio_dev_put;
	}

	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		ret = -ENOMEM;
		goto error_clear_busy_bit;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = buffer;

	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
			      ib, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto error_free_ib;
	}

	if (copy_to_user(ival, &fd, sizeof(fd))) {
		/*
		 * "Leak" the fd, as there's not much we can do about this
		 * anyway. 'fd' might have been closed already, as
		 * anon_inode_getfd() called fd_install() on it, which made
		 * it reachable by userland.
		 *
		 * Instead of allowing a malicious user to play tricks with
		 * us, rely on the process exit path to do any necessary
		 * cleanup, as in releasing the file, as done in both
		 * error paths and normal operation.
		 */
		return -EFAULT;
	}

	return 0;

error_free_ib:
	kfree(ib);
error_clear_busy_bit:
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
error_iio_dev_put:
	iio_device_put(indio_dev);
	return ret;
}

static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case IIO_BUFFER_GET_FD_IOCTL:
		return iio_device_buffer_getfd(indio_dev, arg);
	default:
		return IIO_IOCTL_UNHANDLED;
	}
}

static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
	const struct iio_chan_spec *channels;

	buffer_attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[buffer_attrcount] != NULL)
			buffer_attrcount++;
	}

	scan_el_attrcount = 0;
	INIT_LIST_HEAD(&buffer->buffer_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Verify that sample bits fit into storage */
			if (channels[i].scan_type.storagebits <
			    channels[i].scan_type.realbits +
			    channels[i].scan_type.shift) {
				dev_err(&indio_dev->dev,
					"Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
					i, channels[i].scan_type.storagebits,
					channels[i].scan_type.realbits,
					channels[i].scan_type.shift);
				ret = -EINVAL;
				goto error_cleanup_dynamic;
			}

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			scan_el_attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				iio_dev_opaque->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * buffer_attrcount);

	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
	buffer->buffer_group.attrs = attr;

	for (i = 0; i < buffer_attrcount; i++) {
		struct attribute *wrapped;

		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
		if (!wrapped) {
			ret = -ENOMEM;
			goto error_free_buffer_attrs;
		}
		attr[i] = wrapped;
	}

	attrn = 0;
	list_for_each_entry(p, &buffer->buffer_attr_list, l)
		attr[attrn++] = &p->dev_attr.attr;

	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
	if (!buffer->buffer_group.name) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
	if (ret)
		goto error_free_buffer_attr_group_name;

	/* we only need to register the legacy groups for the first buffer */
	if (index > 0)
		return 0;

	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
						      buffer_attrcount,
						      scan_el_attrcount);
	if (ret)
		goto error_free_buffer_attr_group_name;

	return 0;

error_free_buffer_attr_group_name:
	kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
	kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);

	return ret;
}

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	if (index == 0)
		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.name);
	kfree(buffer->buffer_group.attrs);
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
}

int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_chan_spec *channels;
	struct iio_buffer *buffer;
	int ret, i, idx;
	size_t sz;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!iio_dev_opaque->attached_buffers_cnt)
		return 0;

	for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
		buffer = iio_dev_opaque->attached_buffers[idx];
		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
		if (ret)
			goto error_unwind_sysfs_and_mask;
	}

	sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
	if (!iio_dev_opaque->buffer_ioctl_handler) {
		ret = -ENOMEM;
		goto error_unwind_sysfs_and_mask;
	}

	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
	iio_device_ioctl_handler_register(indio_dev,
					  iio_dev_opaque->buffer_ioctl_handler);

	return 0;

error_unwind_sysfs_and_mask:
	while (idx--) {
		buffer = iio_dev_opaque->attached_buffers[idx];
		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
	}
	return ret;
}

void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int i;

	if (!iio_dev_opaque->attached_buffers_cnt)
		return;

	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
	kfree(iio_dev_opaque->buffer_ioctl_handler);

	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
		buffer = iio_dev_opaque->attached_buffers[i];
		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
	}
}
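
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */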
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request less samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}
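
/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:		iio_dev structure for device.
 * @data:		Full scan.
 */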
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
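
/**
 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
 *    no alignment or space requirements.
 * @indio_dev:		iio_dev structure for device.
 * @data:		channel data excluding the timestamp.
 * @data_sz:		size of data.
 * @timestamp:		timestamp for the sample data.
 *
 * This special variant of iio_push_to_buffers_with_timestamp() does
 * not require space for the timestamp, or 8 byte alignment of data.
 * It does however require an allocation on first call and additional
 * copies on all calls, so should be avoided if possible.
 */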
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
					  const void *data,
					  size_t data_sz,
					  int64_t timestamp)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	/*
	 * Conservative estimate - we can always safely copy the minimum
	 * of either the data provided or the length of the destination buffer.
	 * This relaxed limit allows the calling drivers to be lax about
	 * tracking the size of the data they are pushing, at the cost of
	 * unnecessarily copying padding.
	 */
	data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
	if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
		void *bb;

		bb = devm_krealloc(&indio_dev->dev,
				   iio_dev_opaque->bounce_buffer,
				   indio_dev->scan_bytes, GFP_KERNEL);
		if (!bb)
			return -ENOMEM;
		iio_dev_opaque->bounce_buffer = bb;
		iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
	}
	memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
	return iio_push_to_buffers_with_timestamp(indio_dev,
						  iio_dev_opaque->bounce_buffer,
						  timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
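
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do
 * not call this function manually, always use iio_buffer_put() when done using
 * a buffer.
 */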
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}
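
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */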
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
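
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */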
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
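
/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * Return 0 if successful, negative if error.
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. For legacy reasons, the first
 * attached buffer will also be assigned to 'indio_dev->buffer'.
 * The array allocated here is freed by iio_device_detach_buffers(), which is
 * called when the device is freed.
 */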
int iio_device_attach_buffer(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;

	cnt++;

	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	iio_dev_opaque->attached_buffers = new;

	buffer = iio_buffer_get(buffer);

	/* first buffer is legacy; attach it to the IIO device directly */
	if (!indio_dev->buffer)
		indio_dev->buffer = buffer;

	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
	iio_dev_opaque->attached_buffers_cnt = cnt;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);