// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* IDA handing out unique ids for registered triggers. */
static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers. */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/* sysfs 'name' attribute: report the trigger's identifying name. */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sysfs_emit(buf, "%s\n", trig->name);
}

static DEVICE_ATTR_RO(name);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);
static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc. */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to the list of available triggers held by the IIO core. */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_free(&iio_trigger_ida, trig_info->id);

	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
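
/*
 * Usage sketch, not part of this file's API: a minimal allocate/register
 * sequence as a trigger provider might do in probe. The "example-trig%d"
 * name and the helper itself are illustrative assumptions only; tear-down
 * mirrors it with iio_trigger_unregister() then iio_trigger_free().
 */
static __maybe_unused struct iio_trigger *
iio_trigger_example_register(struct device *parent)
{
	struct iio_trigger *trig;
	int ret;

	trig = iio_trigger_alloc(parent, "example-trig%d", 0);
	if (!trig)
		return ERR_PTR(-ENOMEM);

	ret = iio_trigger_register(trig);
	if (ret) {
		iio_trigger_free(trig);
		return ERR_PTR(ret);
	}

	return trig;
}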

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (!indio_dev || !trig)
		return -EINVAL;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	mutex_lock(&indio_dev->mlock);
	WARN_ON(iio_dev_opaque->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	iio_dev_opaque->trig_readonly = true;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);

/* Search for trigger by name; iio_trigger_list_lock must be held. */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			iio_trigger_get(trig);
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This 'might' occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}

/*
 * Variant of iio_trigger_notify_done() that is safe to call from hard
 * interrupt context: reenable callbacks may sleep, so defer them to the
 * reenable_work worker instead of calling them directly.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}

/**
 * iio_trigger_poll() - Call the IRQ trigger handler of the consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a hard IRQ context.
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
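
/*
 * Usage sketch (hypothetical, illustrative names): wiring a device's
 * data-ready interrupt line to iio_trigger_generic_data_rdy_poll() so each
 * hardware edge polls the trigger's consumers. iio_trigger_poll() is safe
 * from this hard-IRQ context.
 */
static int __maybe_unused
iio_trigger_example_request_drdy(struct device *dev, int irq,
				 struct iio_trigger *trig)
{
	return devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
				IRQF_TRIGGER_RISING, dev_name(dev), trig);
}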

/**
 * iio_trigger_poll_chained() - Call the threaded trigger handler of the
 * consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a kernel thread context.
 */
void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
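
/*
 * Sketch of a provider's ops table (hypothetical callbacks, illustrative
 * names): a trigger whose hardware must be rearmed after all consumers
 * finish supplies .reenable, which the notify_done paths above call once
 * use_count drops to zero.
 */
static int __maybe_unused example_set_trigger_state(struct iio_trigger *trig,
						    bool state)
{
	/* Enable or disable the underlying interrupt source here. */
	return 0;
}

static void __maybe_unused example_reenable(struct iio_trigger *trig)
{
	/* Rearm one-shot hardware so the next event can fire. */
}

static const struct iio_trigger_ops example_trigger_ops __maybe_unused = {
	.set_trigger_state = example_set_trigger_state,
	.reenable = example_reenable,
};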

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/*
 * Attach/detach complexity lives here: the trig->pool bitmap tracks how many
 * consumers currently use the trigger, which tells us when the underlying
 * trigger state must actually be switched on or off.
 */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	int ret = 0;

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(iio_dev_opaque->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(iio_dev_opaque->driver_module);
	return ret;
}

int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(iio_dev_opaque->driver_module);

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
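
/*
 * Usage sketch (hypothetical consumer): the threaded half paired with
 * iio_pollfunc_store_time() above. The hard-IRQ half records pf->timestamp;
 * this half would read the device and push data (e.g. with
 * iio_push_to_buffers_with_timestamp(), which needs <linux/iio/buffer.h>),
 * then must signal completion with iio_trigger_notify_done().
 */
static irqreturn_t __maybe_unused example_trigger_thread(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* Device read and buffer push would go here, using pf->timestamp. */

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}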

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
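
/*
 * Usage sketch (hypothetical): the common allocation pattern, combining the
 * timestamp hard-IRQ handler above with a driver thread such as the
 * example_trigger_thread() sketch earlier in this file. The name format and
 * the use of iio_device_id() (present in kernels that have iio_dev_opaque)
 * are assumptions, not requirements.
 */
static int __maybe_unused example_setup_pollfunc(struct iio_dev *indio_dev)
{
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &example_trigger_thread,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 iio_device_id(indio_dev));
	if (!indio_dev->pollfunc)
		return -ENOMEM;

	return 0;
}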

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * current_trigger_show() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t current_trigger_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	if (iio_dev_opaque->trig_readonly) {
		mutex_unlock(&indio_dev->mlock);
		return -EPERM;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR_RW(current_trigger);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:	Device to allocate iio_trigger for
 * @this_mod:	module allocating the trigger
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:	Device to allocate iio_trigger for
 * @this_mod:	module allocating the trigger
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc.  iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(parent, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);
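
/*
 * Usage sketch (hypothetical probe fragment, illustrative names):
 * device-managed allocation plus registration, optionally pinning the
 * trigger afterwards with iio_trigger_set_immutable() for devices that can
 * only ever use their own trigger.
 */
static int __maybe_unused example_devm_trigger(struct device *dev,
					       struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;
	int ret;

	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", dev_name(dev),
				      iio_device_id(indio_dev));
	if (!trig)
		return -ENOMEM;

	ret = devm_iio_trigger_register(dev, trig);
	if (ret)
		return ret;

	return iio_trigger_set_immutable(indio_dev, trig);
}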

static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register().  The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info)
{
	int ret;

	ret = iio_trigger_register(trig_info);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);

/* Report whether the device consumes a trigger provided by itself. */
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong
 *	to the same device
 * @trig: The IIO trigger to check
 * @indio_dev: the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
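
/*
 * Usage sketch: a provider whose trigger must never be attached to a
 * foreign device plugs the helper above straight into its ops table
 * (example_own_device_ops is an illustrative name).
 */
static const struct iio_trigger_ops example_own_device_ops __maybe_unused = {
	.validate_device = iio_trigger_validate_own_device,
};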

int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}