Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /* The industrial I/O core, trigger handling functions
0003  *
0004  * Copyright (c) 2008 Jonathan Cameron
0005  */
0006 
0007 #include <linux/kernel.h>
0008 #include <linux/idr.h>
0009 #include <linux/err.h>
0010 #include <linux/device.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/list.h>
0013 #include <linux/slab.h>
0014 
0015 #include <linux/iio/iio.h>
0016 #include <linux/iio/iio-opaque.h>
0017 #include <linux/iio/trigger.h>
0018 #include "iio_core.h"
0019 #include "iio_core_trigger.h"
0020 #include <linux/iio/trigger_consumer.h>
0021 
0022 /* RFC - Question of approach
0023  * Make the common case (single sensor single trigger)
0024  * simple by starting trigger capture from when first sensors
0025  * is added.
0026  *
0027  * Complex simultaneous start requires use of 'hold' functionality
0028  * of the trigger. (not implemented)
0029  *
0030  * Any other suggestions?
0031  */
0032 
0033 static DEFINE_IDA(iio_trigger_ida);
0034 
0035 /* Single list of all available triggers */
0036 static LIST_HEAD(iio_trigger_list);
0037 static DEFINE_MUTEX(iio_trigger_list_lock);
0038 
0039 /**
0040  * name_show() - retrieve useful identifying name
0041  * @dev:    device associated with the iio_trigger
0042  * @attr:   pointer to the device_attribute structure that is
0043  *      being processed
0044  * @buf:    buffer to print the name into
0045  *
0046  * Return: a negative number on failure or the number of written
0047  *     characters on success.
0048  */
0049 static ssize_t name_show(struct device *dev, struct device_attribute *attr,
0050              char *buf)
0051 {
0052     struct iio_trigger *trig = to_iio_trigger(dev);
0053     return sysfs_emit(buf, "%s\n", trig->name);
0054 }
0055 
static DEVICE_ATTR_RO(name);

/* sysfs attributes exposed by every trigger device (currently just "name") */
static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

/* Forward declaration; requires iio_trigger_list_lock to be held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
0065 
/**
 * iio_trigger_register() - register a trigger with the IIO core
 * @trig_info:	trigger to register
 *
 * Allocates a unique id, adds the trigger device to the driver model and
 * publishes it on the global iio_trigger_list so consumers can find it by
 * name.  Registration fails with -EEXIST if another registered trigger
 * already uses the same name.
 *
 * Return: 0 on success or a negative error code.
 */
int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	/* Duplicate check and list insertion are atomic under the list lock */
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
0101 
/**
 * iio_trigger_unregister() - reverse the effects of iio_trigger_register()
 * @trig_info:	trigger being removed
 */
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	/* Hide the trigger from new consumers first */
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_free(&iio_trigger_ida, trig_info->id);
	/*
	 * Possible issue in here: the id is returned to the ida before
	 * device_del(), so a concurrently registering trigger could reuse
	 * it while this device still exists — NOTE(review): confirm.
	 */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
0113 
0114 int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
0115 {
0116     struct iio_dev_opaque *iio_dev_opaque;
0117 
0118     if (!indio_dev || !trig)
0119         return -EINVAL;
0120 
0121     iio_dev_opaque = to_iio_dev_opaque(indio_dev);
0122     mutex_lock(&indio_dev->mlock);
0123     WARN_ON(iio_dev_opaque->trig_readonly);
0124 
0125     indio_dev->trig = iio_trigger_get(trig);
0126     iio_dev_opaque->trig_readonly = true;
0127     mutex_unlock(&indio_dev->mlock);
0128 
0129     return 0;
0130 }
0131 EXPORT_SYMBOL(iio_trigger_set_immutable);
0132 
0133 /* Search for trigger by name, assuming iio_trigger_list_lock held */
0134 static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
0135 {
0136     struct iio_trigger *iter;
0137 
0138     list_for_each_entry(iter, &iio_trigger_list, list)
0139         if (!strcmp(iter->name, name))
0140             return iter;
0141 
0142     return NULL;
0143 }
0144 
0145 static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
0146 {
0147     struct iio_trigger *trig = NULL, *iter;
0148 
0149     mutex_lock(&iio_trigger_list_lock);
0150     list_for_each_entry(iter, &iio_trigger_list, list)
0151         if (sysfs_streq(iter->name, name)) {
0152             trig = iter;
0153             iio_trigger_get(trig);
0154             break;
0155         }
0156     mutex_unlock(&iio_trigger_list_lock);
0157 
0158     return trig;
0159 }
0160 
/* Work item used to run the driver's reenable callback in process context */
static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This 'might' occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}
0172 
/*
 * In general, reenable callbacks may need to sleep and this path is
 * not performance sensitive, so just queue up a work item
 * to reenable the trigger for us.
 *
 * Races that can cause this:
 * 1) A handler occurs entirely in interrupt context, so the final
 *    decrement of the use counter happens in this interrupt.
 * 2) The trigger has been removed, but one last interrupt gets through.
 *
 * For (1) we must call reenable, but not in atomic context.
 * For (2) it should be safe to call reenable, if drivers never blindly
 * reenable after state is off.
 */
/* Atomic-context variant of iio_trigger_notify_done(): when the last
 * consumer finishes, defer the driver reenable to the work item above.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}
0193 
/**
 * iio_trigger_poll() - fire the interrupt handlers of all consumers
 * @trig: trigger that has just occurred
 *
 * Uses generic_handle_irq(), i.e. intended for hard-irq context.  The
 * event is dropped if a previous pass is still in flight (use_count != 0).
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		/* Preload the counter; each consumer slot decrements it once */
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				/* Account immediately for slots with no consumer */
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);
0210 
0211 irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
0212 {
0213     iio_trigger_poll(private);
0214     return IRQ_HANDLED;
0215 }
0216 EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
0217 
/* Variant of iio_trigger_poll() using handle_nested_irq(), for use from
 * threaded/nested interrupt context rather than hard-irq context.
 */
void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		/* Preload the counter; each consumer slot decrements it once */
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
0234 
/**
 * iio_trigger_notify_done() - a consumer is finished with the current sample
 * @trig: trigger whose use_count is decremented
 *
 * When the last consumer is done, reenable the trigger synchronously
 * (may sleep; see iio_trigger_notify_done_atomic() for irq context).
 */
void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
0242 
0243 /* Trigger Consumer related functions */
0244 static int iio_trigger_get_irq(struct iio_trigger *trig)
0245 {
0246     int ret;
0247 
0248     mutex_lock(&trig->pool_lock);
0249     ret = bitmap_find_free_region(trig->pool,
0250                       CONFIG_IIO_CONSUMERS_PER_TRIGGER,
0251                       ilog2(1));
0252     mutex_unlock(&trig->pool_lock);
0253     if (ret >= 0)
0254         ret += trig->subirq_base;
0255 
0256     return ret;
0257 }
0258 
0259 static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
0260 {
0261     mutex_lock(&trig->pool_lock);
0262     clear_bit(irq - trig->subirq_base, trig->pool);
0263     mutex_unlock(&trig->pool_lock);
0264 }
0265 
0266 /* Complexity in here.  With certain triggers (datardy) an acknowledgement
0267  * may be needed if the pollfuncs do not include the data read for the
0268  * triggering device.
0269  * This is not currently handled.  Alternative of not enabling trigger unless
0270  * the relevant function is in there may be the best option.
0271  */
0272 /* Worth protecting against double additions? */
0273 int iio_trigger_attach_poll_func(struct iio_trigger *trig,
0274                  struct iio_poll_func *pf)
0275 {
0276     struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
0277     bool notinuse =
0278         bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
0279     int ret = 0;
0280 
0281     /* Prevent the module from being removed whilst attached to a trigger */
0282     __module_get(iio_dev_opaque->driver_module);
0283 
0284     /* Get irq number */
0285     pf->irq = iio_trigger_get_irq(trig);
0286     if (pf->irq < 0) {
0287         pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
0288             trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
0289         goto out_put_module;
0290     }
0291 
0292     /* Request irq */
0293     ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
0294                    pf->type, pf->name,
0295                    pf);
0296     if (ret < 0)
0297         goto out_put_irq;
0298 
0299     /* Enable trigger in driver */
0300     if (trig->ops && trig->ops->set_trigger_state && notinuse) {
0301         ret = trig->ops->set_trigger_state(trig, true);
0302         if (ret < 0)
0303             goto out_free_irq;
0304     }
0305 
0306     /*
0307      * Check if we just registered to our own trigger: we determine that
0308      * this is the case if the IIO device and the trigger device share the
0309      * same parent device.
0310      */
0311     if (pf->indio_dev->dev.parent == trig->dev.parent)
0312         trig->attached_own_device = true;
0313 
0314     return ret;
0315 
0316 out_free_irq:
0317     free_irq(pf->irq, pf);
0318 out_put_irq:
0319     iio_trigger_put_irq(trig, pf->irq);
0320 out_put_module:
0321     module_put(iio_dev_opaque->driver_module);
0322     return ret;
0323 }
0324 
/*
 * Detach poll function @pf from @trig, undoing
 * iio_trigger_attach_poll_func(): disable the trigger in the driver when
 * this was the last consumer, release the sub-irq and drop the module
 * reference.  Returns 0 or the error from set_trigger_state().
 */
int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	/* pf owns the only bit still set in the consumer pool */
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	/* Balance the __module_get() taken at attach time */
	module_put(iio_dev_opaque->driver_module);

	return ret;
}
0346 
/* Top-half handler: latch a timestamp, then wake the threaded handler */
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
0355 
0356 struct iio_poll_func
0357 *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
0358             irqreturn_t (*thread)(int irq, void *p),
0359             int type,
0360             struct iio_dev *indio_dev,
0361             const char *fmt,
0362             ...)
0363 {
0364     va_list vargs;
0365     struct iio_poll_func *pf;
0366 
0367     pf = kmalloc(sizeof(*pf), GFP_KERNEL);
0368     if (!pf)
0369         return NULL;
0370     va_start(vargs, fmt);
0371     pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
0372     va_end(vargs);
0373     if (pf->name == NULL) {
0374         kfree(pf);
0375         return NULL;
0376     }
0377     pf->h = h;
0378     pf->thread = thread;
0379     pf->type = type;
0380     pf->indio_dev = indio_dev;
0381 
0382     return pf;
0383 }
0384 EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
0385 
/* Free a poll function allocated with iio_alloc_pollfunc() */
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
0392 
0393 /**
0394  * current_trigger_show() - trigger consumer sysfs query current trigger
0395  * @dev:    device associated with an industrial I/O device
0396  * @attr:   pointer to the device_attribute structure that
0397  *      is being processed
0398  * @buf:    buffer where the current trigger name will be printed into
0399  *
0400  * For trigger consumers the current_trigger interface allows the trigger
0401  * used by the device to be queried.
0402  *
0403  * Return: a negative number on failure, the number of characters written
0404  *     on success or 0 if no trigger is available
0405  */
0406 static ssize_t current_trigger_show(struct device *dev,
0407                     struct device_attribute *attr, char *buf)
0408 {
0409     struct iio_dev *indio_dev = dev_to_iio_dev(dev);
0410 
0411     if (indio_dev->trig)
0412         return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
0413     return 0;
0414 }
0415 
/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *     on success
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	/* Changing triggers is refused while buffered capture is running */
	mutex_lock(&indio_dev->mlock);
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	/* ...or when the trigger was pinned via iio_trigger_set_immutable() */
	if (iio_dev_opaque->trig_readonly) {
		mutex_unlock(&indio_dev->mlock);
		return -EPERM;
	}
	/*
	 * NOTE(review): mlock is released before the swap below, so the
	 * checks above could in principle race with a concurrent buffer
	 * enable — confirm against the IIO core's locking rules.
	 */
	mutex_unlock(&indio_dev->mlock);

	/* NULL when no trigger matches @buf; that clears the current trigger */
	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	/* Give the device a chance to veto the new trigger... */
	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	/* ...and the trigger a chance to veto the device */
	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		/* Drop the reference held since the old trigger was set */
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}
0490 
static DEVICE_ATTR_RW(current_trigger);

/* Attributes placed in the "trigger" sysfs group of each consumer device */
static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};
0502 
/* Device-model release callback, run on the final put_device(): tears down
 * the sub-irq descriptors set up in viio_trigger_alloc() and frees the
 * trigger itself.
 */
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			/* Re-mark the irq as unrequestable before freeing it */
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}
0525 
/* Device type shared by all triggers; supplies release and sysfs groups */
static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};
0530 
0531 static void iio_trig_subirqmask(struct irq_data *d)
0532 {
0533     struct irq_chip *chip = irq_data_get_irq_chip(d);
0534     struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);
0535 
0536     trig->subirqs[d->irq - trig->subirq_base].enabled = false;
0537 }
0538 
0539 static void iio_trig_subirqunmask(struct irq_data *d)
0540 {
0541     struct irq_chip *chip = irq_data_get_irq_chip(d);
0542     struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);
0543 
0544     trig->subirqs[d->irq - trig->subirq_base].enabled = true;
0545 }
0546 
/* Allocate and initialise a trigger from a va_list name format: sets up the
 * device, the reenable work item, and one sub-irq per possible consumer.
 * NOTE(review): the error paths kfree() after device_initialize(); confirm
 * whether put_device() should be used instead.
 */
static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	/* One irq descriptor per potential consumer of this trigger */
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		/* clear NOREQUEST/NOAUTOEN, set NOPROBE on each sub-irq */
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}
0599 
/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	/* Thin varargs wrapper around viio_trigger_alloc() */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);
0625 
/**
 * iio_trigger_free() - drop a reference to a trigger (NULL-safe)
 * @trig: trigger to free, may be NULL
 *
 * The final put releases the trigger via iio_trig_release().
 */
void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
0632 
/* devres destructor: drop the reference taken by __devm_iio_trigger_alloc() */
static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}
0637 
0638 /**
0639  * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
0640  * Managed iio_trigger_alloc.  iio_trigger allocated with this function is
0641  * automatically freed on driver detach.
0642  * @parent:     Device to allocate iio_trigger for
0643  * @this_mod:       module allocating the trigger
0644  * @fmt:        trigger name format. If it includes format
0645  *          specifiers, the additional arguments following
0646  *          format are formatted and inserted in the resulting
0647  *          string replacing their respective specifiers.
0648  *
0649  *
0650  * RETURNS:
0651  * Pointer to allocated iio_trigger on success, NULL on failure.
0652  */
0653 struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
0654                          struct module *this_mod,
0655                          const char *fmt, ...)
0656 {
0657     struct iio_trigger **ptr, *trig;
0658     va_list vargs;
0659 
0660     ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
0661                GFP_KERNEL);
0662     if (!ptr)
0663         return NULL;
0664 
0665     /* use raw alloc_dr for kmalloc caller tracing */
0666     va_start(vargs, fmt);
0667     trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
0668     va_end(vargs);
0669     if (trig) {
0670         *ptr = trig;
0671         devres_add(parent, ptr);
0672     } else {
0673         devres_free(ptr);
0674     }
0675 
0676     return trig;
0677 }
0678 EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);
0679 
/* devm action callback wrapping iio_trigger_unregister() */
static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}
0684 
0685 /**
0686  * devm_iio_trigger_register - Resource-managed iio_trigger_register()
0687  * @dev:    device this trigger was allocated for
0688  * @trig_info:  trigger to register
0689  *
0690  * Managed iio_trigger_register().  The IIO trigger registered with this
0691  * function is automatically unregistered on driver detach. This function
0692  * calls iio_trigger_register() internally. Refer to that function for more
0693  * information.
0694  *
0695  * RETURNS:
0696  * 0 on success, negative error number on failure.
0697  */
0698 int devm_iio_trigger_register(struct device *dev,
0699                   struct iio_trigger *trig_info)
0700 {
0701     int ret;
0702 
0703     ret = iio_trigger_register(trig_info);
0704     if (ret)
0705         return ret;
0706 
0707     return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
0708 }
0709 EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
0710 
/* Returns true when the device is attached to its own trigger (see
 * iio_trigger_attach_poll_func()).
 * NOTE(review): dereferences indio_dev->trig unconditionally — callers
 * must guarantee a trigger is attached; confirm at call sites.
 */
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);
0716 
0717 /**
0718  * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
0719  *  the same device
0720  * @trig: The IIO trigger to check
0721  * @indio_dev: the IIO device to check
0722  *
0723  * This function can be used as the validate_device callback for triggers that
0724  * can only be attached to their own device.
0725  *
0726  * Return: 0 if both the trigger and the IIO device belong to the same
0727  * device, -EINVAL otherwise.
0728  */
0729 int iio_trigger_validate_own_device(struct iio_trigger *trig,
0730                     struct iio_dev *indio_dev)
0731 {
0732     if (indio_dev->dev.parent != trig->dev.parent)
0733         return -EINVAL;
0734     return 0;
0735 }
0736 EXPORT_SYMBOL(iio_trigger_validate_own_device);
0737 
/* Expose the "trigger" sysfs group (current_trigger) on a consumer device */
int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}
0743 
/* Drop the reference held on the device's current trigger, if any */
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}