0001
0002
0003
0004
0005
0006 #include <linux/err.h>
0007 #include <linux/export.h>
0008 #include <linux/slab.h>
0009 #include <linux/mutex.h>
0010 #include <linux/of.h>
0011
0012 #include <linux/iio/iio.h>
0013 #include <linux/iio/iio-opaque.h>
0014 #include "iio_core.h"
0015 #include <linux/iio/machine.h>
0016 #include <linux/iio/driver.h>
0017 #include <linux/iio/consumer.h>
0018
/* Internal wrapper linking one consumer map entry to its provider device. */
struct iio_map_internal {
	struct iio_dev *indio_dev;	/* provider device that registered the map */
	struct iio_map *map;		/* consumer mapping entry (not owned here) */
	struct list_head l;		/* link in iio_map_list */
};

/* Global registry of consumer maps; guarded by iio_map_list_lock. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
0027
/*
 * Remove every map entry registered for @indio_dev from the global list.
 * Caller must hold iio_map_list_lock.
 * Returns 0 if at least one entry was removed, -ENODEV if none existed.
 */
static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	/* _safe variant: entries are deleted while iterating. */
	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}
0042
0043 int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
0044 {
0045 int i = 0, ret = 0;
0046 struct iio_map_internal *mapi;
0047
0048 if (maps == NULL)
0049 return 0;
0050
0051 mutex_lock(&iio_map_list_lock);
0052 while (maps[i].consumer_dev_name != NULL) {
0053 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
0054 if (mapi == NULL) {
0055 ret = -ENOMEM;
0056 goto error_ret;
0057 }
0058 mapi->map = &maps[i];
0059 mapi->indio_dev = indio_dev;
0060 list_add_tail(&mapi->l, &iio_map_list);
0061 i++;
0062 }
0063 error_ret:
0064 if (ret)
0065 iio_map_array_unregister_locked(indio_dev);
0066 mutex_unlock(&iio_map_list_lock);
0067
0068 return ret;
0069 }
0070 EXPORT_SYMBOL_GPL(iio_map_array_register);
0071
0072
0073
0074
0075
/**
 * iio_map_array_unregister() - remove all consumer mappings of a provider
 * @indio_dev:	provider IIO device whose mappings should go away
 *
 * Returns 0 if any mapping was removed, -ENODEV if none was registered.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
0087
/* devm_add_action() callback: unregister the maps on driver detach. */
static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}
0092
/**
 * devm_iio_map_array_register() - device-managed iio_map_array_register()
 * @dev:	device the unwind action is bound to
 * @indio_dev:	provider IIO device
 * @maps:	NULL-terminated mapping array (see iio_map_array_register())
 *
 * Mappings are automatically unregistered when @dev is unbound.
 * Returns 0 on success or a negative errno.
 */
int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
0104
0105 static const struct iio_chan_spec
0106 *iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
0107 {
0108 int i;
0109 const struct iio_chan_spec *chan = NULL;
0110
0111 for (i = 0; i < indio_dev->num_channels; i++)
0112 if (indio_dev->channels[i].datasheet_name &&
0113 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
0114 chan = &indio_dev->channels[i];
0115 break;
0116 }
0117 return chan;
0118 }
0119
0120 #ifdef CONFIG_OF
0121
/* bus_find_device() match: an IIO device whose of_node equals @data. */
static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
/*
 * Default DT translation: a single cell (when present) is the channel
 * index.  Returns 0 when the specifier has no cells, the validated
 * index otherwise, or -EINVAL if the index is out of range.
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				 const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
0151
/*
 * Resolve entry @index of the "io-channels" phandle list on @np and
 * fill in @channel.  On success the provider iio_dev reference taken
 * by bus_find_device() is kept and transferred to @channel; the error
 * path drops it.  Returns -EPROBE_DEFER while the provider device has
 * not registered yet.
 */
static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	/* bus_find_device() takes a reference on the matched device. */
	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	/* Let the driver translate the DT cells, or use the default. */
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}
0188
0189 static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
0190 {
0191 struct iio_channel *channel;
0192 int err;
0193
0194 if (index < 0)
0195 return ERR_PTR(-EINVAL);
0196
0197 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
0198 if (channel == NULL)
0199 return ERR_PTR(-ENOMEM);
0200
0201 err = __of_iio_channel_get(channel, np, index);
0202 if (err)
0203 goto err_free_channel;
0204
0205 return channel;
0206
0207 err_free_channel:
0208 kfree(channel);
0209 return ERR_PTR(err);
0210 }
0211
/**
 * of_iio_channel_get_by_name() - get an IIO channel from a DT node by name
 * @np:		device node to start the search at
 * @name:	consumer channel name, or NULL for entry 0
 *
 * Returns the channel, NULL if no matching channel was found, or an
 * ERR_PTR (notably -EPROBE_DEFER while the provider is not ready).
 */
struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
					       const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of nodes looking for a matching io-channel. */
	while (np) {
		int index = 0;

		/*
		 * For named channels, look the name up in the
		 * "io-channel-names" property first.  If it cannot be
		 * found, index is a negative error code and
		 * of_iio_channel_get() below will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			/* Name resolved but the channel itself did not. */
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
				np, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching channel on this node.  Only keep searching
		 * in the parent if it opted in via "io-channel-ranges".
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
0252
/*
 * Build a NULL-terminated iio_channel array from every "io-channels"
 * entry of @dev's DT node.  Returns NULL when the node has no entries,
 * an ERR_PTR on failure, or the array otherwise.
 */
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	/* Count the entries by parsing specifiers until it fails. */
	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return NULL;

	/* NULL-terminated array, so consumers need no explicit count. */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Resolve each entry in turn. */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	/* Entries before mapind each hold a provider reference. */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}
0291
0292 #else
0293
/* Without CONFIG_OF there are no DT mappings to resolve. */
static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}
0298
0299 #endif
0300
/*
 * Look a consumer channel up in the map-based (non-DT) registry.
 * @name is the consumer device name and @channel_name the consumer-side
 * channel label; either may be NULL to match any value, but not both.
 * On success the returned channel holds a reference on the provider.
 */
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* First find the matching entry in the channel map. */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		/* Pin the provider while we still hold the list lock. */
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	/* Resolve the provider-side channel, if the map names one. */
	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
0353
/**
 * iio_channel_get() - get an IIO channel for a consumer device
 * @dev:		consumer device (may be NULL for map-only lookup)
 * @channel_name:	consumer-side channel name (may be NULL)
 *
 * Tries the device-tree mapping first; falls back to the iio_map-based
 * registry when that yields nothing.  Returns an ERR_PTR on failure.
 */
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		/* Non-NULL includes ERR_PTR: -EPROBE_DEFER must propagate. */
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
0369 EXPORT_SYMBOL_GPL(iio_channel_get);
0370
0371 void iio_channel_release(struct iio_channel *channel)
0372 {
0373 if (!channel)
0374 return;
0375 iio_device_put(channel->indio_dev);
0376 kfree(channel);
0377 }
0378 EXPORT_SYMBOL_GPL(iio_channel_release);
0379
/* devm release callback for channels from iio_channel_get(). */
static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}
0384
0385 struct iio_channel *devm_iio_channel_get(struct device *dev,
0386 const char *channel_name)
0387 {
0388 struct iio_channel *channel;
0389 int ret;
0390
0391 channel = iio_channel_get(dev, channel_name);
0392 if (IS_ERR(channel))
0393 return channel;
0394
0395 ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
0396 if (ret)
0397 return ERR_PTR(ret);
0398
0399 return channel;
0400 }
0401 EXPORT_SYMBOL_GPL(devm_iio_channel_get);
0402
0403 struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
0404 struct device_node *np,
0405 const char *channel_name)
0406 {
0407 struct iio_channel *channel;
0408 int ret;
0409
0410 channel = of_iio_channel_get_by_name(np, channel_name);
0411 if (IS_ERR(channel))
0412 return channel;
0413
0414 ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
0415 if (ret)
0416 return ERR_PTR(ret);
0417
0418 return channel;
0419 }
0420 EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);
0421
0422 struct iio_channel *iio_channel_get_all(struct device *dev)
0423 {
0424 const char *name;
0425 struct iio_channel *chans;
0426 struct iio_map_internal *c = NULL;
0427 int nummaps = 0;
0428 int mapind = 0;
0429 int i, ret;
0430
0431 if (dev == NULL)
0432 return ERR_PTR(-EINVAL);
0433
0434 chans = of_iio_channel_get_all(dev);
0435 if (chans)
0436 return chans;
0437
0438 name = dev_name(dev);
0439
0440 mutex_lock(&iio_map_list_lock);
0441
0442 list_for_each_entry(c, &iio_map_list, l)
0443 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
0444 continue;
0445 else
0446 nummaps++;
0447
0448 if (nummaps == 0) {
0449 ret = -ENODEV;
0450 goto error_ret;
0451 }
0452
0453
0454 chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
0455 if (chans == NULL) {
0456 ret = -ENOMEM;
0457 goto error_ret;
0458 }
0459
0460
0461 list_for_each_entry(c, &iio_map_list, l) {
0462 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
0463 continue;
0464 chans[mapind].indio_dev = c->indio_dev;
0465 chans[mapind].data = c->map->consumer_data;
0466 chans[mapind].channel =
0467 iio_chan_spec_from_name(chans[mapind].indio_dev,
0468 c->map->adc_channel_label);
0469 if (chans[mapind].channel == NULL) {
0470 ret = -EINVAL;
0471 goto error_free_chans;
0472 }
0473 iio_device_get(chans[mapind].indio_dev);
0474 mapind++;
0475 }
0476 if (mapind == 0) {
0477 ret = -ENODEV;
0478 goto error_free_chans;
0479 }
0480 mutex_unlock(&iio_map_list_lock);
0481
0482 return chans;
0483
0484 error_free_chans:
0485 for (i = 0; i < nummaps; i++)
0486 iio_device_put(chans[i].indio_dev);
0487 kfree(chans);
0488 error_ret:
0489 mutex_unlock(&iio_map_list_lock);
0490
0491 return ERR_PTR(ret);
0492 }
0493 EXPORT_SYMBOL_GPL(iio_channel_get_all);
0494
0495 void iio_channel_release_all(struct iio_channel *channels)
0496 {
0497 struct iio_channel *chan = &channels[0];
0498
0499 while (chan->indio_dev) {
0500 iio_device_put(chan->indio_dev);
0501 chan++;
0502 }
0503 kfree(channels);
0504 }
0505 EXPORT_SYMBOL_GPL(iio_channel_release_all);
0506
/* devm release callback for arrays from iio_channel_get_all(). */
static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}
0511
/**
 * devm_iio_channel_get_all() - device-managed iio_channel_get_all()
 * @dev:	consumer device
 *
 * The channel array is released automatically when @dev is unbound.
 * Returns the array or an ERR_PTR on failure.
 */
struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
0529
0530 static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
0531 enum iio_chan_info_enum info)
0532 {
0533 int unused;
0534 int vals[INDIO_MAX_RAW_ELEMENTS];
0535 int ret;
0536 int val_len = 2;
0537
0538 if (val2 == NULL)
0539 val2 = &unused;
0540
0541 if (!iio_channel_has_info(chan->channel, info))
0542 return -EINVAL;
0543
0544 if (chan->indio_dev->info->read_raw_multi) {
0545 ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
0546 chan->channel, INDIO_MAX_RAW_ELEMENTS,
0547 vals, &val_len, info);
0548 *val = vals[0];
0549 *val2 = vals[1];
0550 } else
0551 ret = chan->indio_dev->info->read_raw(chan->indio_dev,
0552 chan->channel, val, val2, info);
0553
0554 return ret;
0555 }
0556
/**
 * iio_read_channel_raw() - read the raw value of a channel
 * @chan:	channel to read
 * @val:	result
 *
 * Returns the driver's result on success, -ENODEV if the provider
 * driver has been unregistered in the meantime.
 */
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	/* info_exist_lock guards against concurrent provider removal. */
	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
0575
/**
 * iio_read_channel_average_raw() - read the averaged raw value
 * @chan:	channel to read
 * @val:	result
 *
 * Same contract as iio_read_channel_raw(), but reads
 * IIO_CHAN_INFO_AVERAGE_RAW.
 */
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
0594
/*
 * Convert @raw to a processed value by applying the channel's offset
 * and scale, multiplied by the consumer-supplied @scale factor.
 * Caller must hold info_exist_lock.
 */
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	/* Offset is optional; a read error just means "no offset". */
	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * The fractional (micro/nano) part of the offset
			 * is deliberately ignored; only the integer part
			 * is applied.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available, just apply the
		 * consumer scale to the (offset-corrected) raw value.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		/* A negative val2 encodes a negative composite scale. */
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
0672
/**
 * iio_convert_raw_to_processed() - convert a raw value to a processed one
 * @chan:	channel the raw value came from
 * @raw:	raw value to convert
 * @processed:	result
 * @scale:	additional consumer scale factor
 *
 * Locked wrapper around iio_convert_raw_to_processed_unlocked().
 */
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
0693
/**
 * iio_read_channel_attribute() - read an arbitrary channel attribute
 * @chan:	channel to read
 * @val:	first result component
 * @val2:	second result component (may be NULL)
 * @attribute:	which IIO_CHAN_INFO_* attribute to read
 *
 * Returns the driver's result, or -ENODEV if the provider went away.
 */
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
0713
/* Convenience wrapper: read the channel's IIO_CHAN_INFO_OFFSET. */
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
0719
/**
 * iio_read_channel_processed_scale() - read a processed value, scaled
 * @chan:	channel to read
 * @val:	result
 * @scale:	consumer scale factor applied to the processed value
 *
 * Uses IIO_CHAN_INFO_PROCESSED directly when the channel provides it;
 * otherwise reads the raw value and converts it with the channel's
 * offset and scale.
 */
int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
0752
/* Processed read with no extra scaling (scale factor 1). */
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
0759
/* Convenience wrapper: read the channel's IIO_CHAN_INFO_SCALE. */
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
0765
/*
 * Query the driver's available values for a channel attribute.
 * Caller must hold info_exist_lock.  Returns IIO_AVAIL_* on success
 * (with @vals/@type/@length filled in) or a negative errno.
 */
static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}
0776
/**
 * iio_read_avail_channel_attribute() - read available values of an attribute
 * @chan:	channel to query
 * @vals:	returned list of available values
 * @type:	returned IIO_VAL_* type of the values
 * @length:	returned number of entries in @vals
 * @attribute:	which IIO_CHAN_INFO_* attribute to query
 *
 * Returns IIO_AVAIL_* on success or a negative errno.
 */
int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
0797
0798 int iio_read_avail_channel_raw(struct iio_channel *chan,
0799 const int **vals, int *length)
0800 {
0801 int ret;
0802 int type;
0803
0804 ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
0805 IIO_CHAN_INFO_RAW);
0806
0807 if (ret >= 0 && type != IIO_VAL_INT)
0808
0809 ret = -EINVAL;
0810
0811 return ret;
0812 }
0813 EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
0814
/*
 * Determine the maximum value a channel attribute can take, using the
 * driver's available-values data.  Caller must hold info_exist_lock.
 * For ranges the maximum is the last entry; for lists the entries are
 * scanned (integer lists only).
 */
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		/* Range layout: {min, step, max} — value pairs if not INT. */
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			/* Scan the list back to front for the maximum. */
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}
0861
/**
 * iio_read_max_channel_raw() - read the maximum available raw value
 * @chan:	channel to query
 * @val:	result
 *
 * Returns 0 on success, -ENODEV if the provider went away, or another
 * negative errno.
 */
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
0881
/**
 * iio_get_channel_type() - report the channel's type (voltage, temp, ...)
 * @chan:	channel to query
 * @type:	returned channel type
 *
 * Returns 0 on success, -ENODEV if the provider went away.
 */
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;

	/* The lock ensures the channel spec is still backed by a driver. */
	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
0901
/*
 * Write one attribute of a channel via the driver's write_raw()
 * callback.  Caller must hold info_exist_lock and guarantee that
 * chan->indio_dev->info is non-NULL.
 */
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}
0908
/**
 * iio_write_channel_attribute() - write an arbitrary channel attribute
 * @chan:	channel to write to
 * @val:	first value component
 * @val2:	second value component
 * @attribute:	which IIO_CHAN_INFO_* attribute to write
 *
 * Returns the driver's result, or -ENODEV if the provider went away.
 */
int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
0928
/* Convenience wrapper: write the channel's IIO_CHAN_INFO_RAW value. */
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
0934
0935 unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
0936 {
0937 const struct iio_chan_spec_ext_info *ext_info;
0938 unsigned int i = 0;
0939
0940 if (!chan->channel->ext_info)
0941 return i;
0942
0943 for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
0944 ++i;
0945
0946 return i;
0947 }
0948 EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
0949
0950 static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
0951 const struct iio_channel *chan,
0952 const char *attr)
0953 {
0954 const struct iio_chan_spec_ext_info *ext_info;
0955
0956 if (!chan->channel->ext_info)
0957 return NULL;
0958
0959 for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
0960 if (!strcmp(attr, ext_info->name))
0961 return ext_info;
0962 }
0963
0964 return NULL;
0965 }
0966
/**
 * iio_read_channel_ext_info() - read an extended channel attribute
 * @chan:	channel to read from
 * @attr:	name of the extended attribute
 * @buf:	output buffer for the attribute's read() callback
 *
 * Returns the number of bytes written to @buf or a negative errno;
 * -EINVAL if the channel has no such attribute.
 */
ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
0980
/**
 * iio_write_channel_ext_info() - write an extended channel attribute
 * @chan:	channel to write to
 * @attr:	name of the extended attribute
 * @buf:	data to hand to the attribute's write() callback
 * @len:	length of @buf
 *
 * Returns the number of bytes consumed or a negative errno;
 * -EINVAL if the channel has no such attribute.
 */
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);