Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /* Industrial I/O event handling
0003  *
0004  * Copyright (c) 2008 Jonathan Cameron
0005  *
0006  * Based on elements of hwmon and input subsystems.
0007  */
0008 
0009 #include <linux/anon_inodes.h>
0010 #include <linux/device.h>
0011 #include <linux/fs.h>
0012 #include <linux/kernel.h>
0013 #include <linux/kfifo.h>
0014 #include <linux/module.h>
0015 #include <linux/poll.h>
0016 #include <linux/sched.h>
0017 #include <linux/slab.h>
0018 #include <linux/uaccess.h>
0019 #include <linux/wait.h>
0020 #include <linux/iio/iio.h>
0021 #include <linux/iio/iio-opaque.h>
0022 #include "iio_core.h"
0023 #include <linux/iio/sysfs.h>
0024 #include <linux/iio/events.h>
0025 
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:	list of detected events
 * @dev_attr_list:	list of event interface sysfs attribute
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 * @read_lock:		lock to protect kfifo read operations
 * @ioctl_handler:	handler for event ioctl() calls
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	/* Fixed-capacity FIFO of 16 events; kfifo_put() drops events when full. */
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	/* Only IIO_BUSY_BIT_POS is used: set while userspace holds the event fd. */
	unsigned long		flags;
	struct attribute_group	group;
	struct mutex		read_lock;
	struct iio_ioctl_handler	ioctl_handler;
};
0046 
0047 bool iio_event_enabled(const struct iio_event_interface *ev_int)
0048 {
0049     return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
0050 }
0051 
0052 /**
0053  * iio_push_event() - try to add event to the list for userspace reading
0054  * @indio_dev:      IIO device structure
0055  * @ev_code:        What event
0056  * @timestamp:      When the event occurred
0057  *
0058  * Note: The caller must make sure that this function is not running
0059  * concurrently for the same indio_dev more than once.
0060  *
0061  * This function may be safely used as soon as a valid reference to iio_dev has
0062  * been obtained via iio_device_alloc(), but any events that are submitted
0063  * before iio_device_register() has successfully completed will be silently
0064  * discarded.
0065  **/
0066 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
0067 {
0068     struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
0069     struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
0070     struct iio_event_data ev;
0071     int copied;
0072 
0073     if (!ev_int)
0074         return 0;
0075 
0076     /* Does anyone care? */
0077     if (iio_event_enabled(ev_int)) {
0078 
0079         ev.id = ev_code;
0080         ev.timestamp = timestamp;
0081 
0082         copied = kfifo_put(&ev_int->det_events, ev);
0083         if (copied != 0)
0084             wake_up_poll(&ev_int->wait, EPOLLIN);
0085     }
0086 
0087     return 0;
0088 }
0089 EXPORT_SYMBOL(iio_push_event);
0090 
0091 /**
0092  * iio_event_poll() - poll the event queue to find out if it has data
0093  * @filep:  File structure pointer to identify the device
0094  * @wait:   Poll table pointer to add the wait queue on
0095  *
0096  * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
0097  *     or a negative error code on failure
0098  */
0099 static __poll_t iio_event_poll(struct file *filep,
0100                  struct poll_table_struct *wait)
0101 {
0102     struct iio_dev *indio_dev = filep->private_data;
0103     struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
0104     struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
0105     __poll_t events = 0;
0106 
0107     if (!indio_dev->info)
0108         return events;
0109 
0110     poll_wait(filep, &ev_int->wait, wait);
0111 
0112     if (!kfifo_is_empty(&ev_int->det_events))
0113         events = EPOLLIN | EPOLLRDNORM;
0114 
0115     return events;
0116 }
0117 
/*
 * Blocking/non-blocking read of queued events. Copies as many whole
 * struct iio_event_data records as fit into @buf and returns the number
 * of bytes copied, or a negative error code.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	unsigned int copied;
	int ret;

	/* info == NULL means the device has been unregistered. */
	if (!indio_dev->info)
		return -ENODEV;

	/* Refuse reads too small to hold even one event record. */
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			/*
			 * Sleep until an event arrives or the device goes
			 * away (iio_device_wakeup_eventset() wakes us so the
			 * info == NULL case is noticed).
			 */
			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		/* read_lock serializes concurrent readers of the kfifo. */
		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster) we either return -EAGAIN if
		 * the file descriptor is non-blocking, otherwise we go back to
		 * sleep and wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}
0170 
0171 static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
0172 {
0173     struct iio_dev *indio_dev = filep->private_data;
0174     struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
0175     struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
0176 
0177     clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
0178 
0179     iio_device_put(indio_dev);
0180 
0181     return 0;
0182 }
0183 
0184 static const struct file_operations iio_event_chrdev_fileops = {
0185     .read =  iio_event_chrdev_read,
0186     .poll =  iio_event_poll,
0187     .release = iio_event_chrdev_release,
0188     .owner = THIS_MODULE,
0189     .llseek = noop_llseek,
0190 };
0191 
/*
 * Hand out an anonymous read-only fd for the event queue. Only one fd may
 * be open at a time (guarded by IIO_BUSY_BIT_POS); returns -EBUSY otherwise.
 */
static int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	fd = mutex_lock_interruptible(&indio_dev->mlock);
	if (fd)
		return fd;

	/* Claim exclusive ownership of the event interface. */
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		fd = -EBUSY;
		goto unlock;
	}

	/* Reference is dropped in iio_event_chrdev_release(). */
	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
			      indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		/* Undo claim and reference on failure. */
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		iio_device_put(indio_dev);
	} else {
		/* Discard any stale events from a previous opener. */
		kfifo_reset_out(&ev_int->det_events);
	}

unlock:
	mutex_unlock(&indio_dev->mlock);
	return fd;
}
0225 
/* sysfs name fragment per event type; indexed by enum iio_event_type. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
	[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
};
0235 
/* sysfs name fragment per event direction; indexed by enum iio_event_direction. */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
0241 
/*
 * sysfs attribute suffix per event info element; indexed by
 * enum iio_event_info. IIO_EV_INFO_ENABLE ("en") gets the enable
 * show/store handlers, everything else the value handlers.
 */
static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
	[IIO_EV_INFO_TIMEOUT] = "timeout",
};
0251 
0252 static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
0253 {
0254     return attr->c->event_spec[attr->address & 0xffff].dir;
0255 }
0256 
0257 static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
0258 {
0259     return attr->c->event_spec[attr->address & 0xffff].type;
0260 }
0261 
0262 static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
0263 {
0264     return (attr->address >> 16) & 0xffff;
0265 }
0266 
0267 static ssize_t iio_ev_state_store(struct device *dev,
0268                   struct device_attribute *attr,
0269                   const char *buf,
0270                   size_t len)
0271 {
0272     struct iio_dev *indio_dev = dev_to_iio_dev(dev);
0273     struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
0274     int ret;
0275     bool val;
0276 
0277     ret = kstrtobool(buf, &val);
0278     if (ret < 0)
0279         return ret;
0280 
0281     ret = indio_dev->info->write_event_config(indio_dev,
0282         this_attr->c, iio_ev_attr_type(this_attr),
0283         iio_ev_attr_dir(this_attr), val);
0284 
0285     return (ret < 0) ? ret : len;
0286 }
0287 
0288 static ssize_t iio_ev_state_show(struct device *dev,
0289                  struct device_attribute *attr,
0290                  char *buf)
0291 {
0292     struct iio_dev *indio_dev = dev_to_iio_dev(dev);
0293     struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
0294     int val;
0295 
0296     val = indio_dev->info->read_event_config(indio_dev,
0297         this_attr->c, iio_ev_attr_type(this_attr),
0298         iio_ev_attr_dir(this_attr));
0299     if (val < 0)
0300         return val;
0301     else
0302         return sysfs_emit(buf, "%d\n", val);
0303 }
0304 
0305 static ssize_t iio_ev_value_show(struct device *dev,
0306                  struct device_attribute *attr,
0307                  char *buf)
0308 {
0309     struct iio_dev *indio_dev = dev_to_iio_dev(dev);
0310     struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
0311     int val, val2, val_arr[2];
0312     int ret;
0313 
0314     ret = indio_dev->info->read_event_value(indio_dev,
0315         this_attr->c, iio_ev_attr_type(this_attr),
0316         iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
0317         &val, &val2);
0318     if (ret < 0)
0319         return ret;
0320     val_arr[0] = val;
0321     val_arr[1] = val2;
0322     return iio_format_value(buf, ret, 2, val_arr);
0323 }
0324 
0325 static ssize_t iio_ev_value_store(struct device *dev,
0326                   struct device_attribute *attr,
0327                   const char *buf,
0328                   size_t len)
0329 {
0330     struct iio_dev *indio_dev = dev_to_iio_dev(dev);
0331     struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
0332     int val, val2;
0333     int ret;
0334 
0335     if (!indio_dev->info->write_event_value)
0336         return -EINVAL;
0337 
0338     ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
0339     if (ret)
0340         return ret;
0341     ret = indio_dev->info->write_event_value(indio_dev,
0342         this_attr->c, iio_ev_attr_type(this_attr),
0343         iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
0344         val, val2);
0345     if (ret < 0)
0346         return ret;
0347 
0348     return len;
0349 }
0350 
/*
 * Create one sysfs attribute per bit set in @mask for the given event
 * spec. Returns the number of attributes added, or a negative error code.
 */
static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	/* Each set bit is an enum iio_event_info index. */
	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		/* e.g. "thresh_rising_value", or "change_value" for DIR_NONE */
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		/* Pack info in bits 16.. and spec index in the low bits. */
		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			 NULL,
			&iio_dev_opaque->event_interface->dev_attr_list);
		kfree(postfix);

		/* Shared attributes may already exist; that is not an error. */
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}
0405 
0406 static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
0407     struct iio_chan_spec const *chan)
0408 {
0409     int ret = 0, i, attrcount = 0;
0410     enum iio_event_direction dir;
0411     enum iio_event_type type;
0412 
0413     for (i = 0; i < chan->num_event_specs; i++) {
0414         type = chan->event_spec[i].type;
0415         dir = chan->event_spec[i].dir;
0416 
0417         ret = iio_device_add_event(indio_dev, chan, i, type, dir,
0418             IIO_SEPARATE, &chan->event_spec[i].mask_separate);
0419         if (ret < 0)
0420             return ret;
0421         attrcount += ret;
0422 
0423         ret = iio_device_add_event(indio_dev, chan, i, type, dir,
0424             IIO_SHARED_BY_TYPE,
0425             &chan->event_spec[i].mask_shared_by_type);
0426         if (ret < 0)
0427             return ret;
0428         attrcount += ret;
0429 
0430         ret = iio_device_add_event(indio_dev, chan, i, type, dir,
0431             IIO_SHARED_BY_DIR,
0432             &chan->event_spec[i].mask_shared_by_dir);
0433         if (ret < 0)
0434             return ret;
0435         attrcount += ret;
0436 
0437         ret = iio_device_add_event(indio_dev, chan, i, type, dir,
0438             IIO_SHARED_BY_ALL,
0439             &chan->event_spec[i].mask_shared_by_all);
0440         if (ret < 0)
0441             return ret;
0442         attrcount += ret;
0443     }
0444     ret = attrcount;
0445     return ret;
0446 }
0447 
0448 static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
0449 {
0450     int j, ret, attrcount = 0;
0451 
0452     /* Dynamically created from the channels array */
0453     for (j = 0; j < indio_dev->num_channels; j++) {
0454         ret = iio_device_add_event_sysfs(indio_dev,
0455                          &indio_dev->channels[j]);
0456         if (ret < 0)
0457             return ret;
0458         attrcount += ret;
0459     }
0460     return attrcount;
0461 }
0462 
0463 static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
0464 {
0465     int j;
0466 
0467     for (j = 0; j < indio_dev->num_channels; j++) {
0468         if (indio_dev->channels[j].num_event_specs != 0)
0469             return true;
0470     }
0471     return false;
0472 }
0473 
0474 static void iio_setup_ev_int(struct iio_event_interface *ev_int)
0475 {
0476     INIT_KFIFO(ev_int->det_events);
0477     init_waitqueue_head(&ev_int->wait);
0478     mutex_init(&ev_int->read_lock);
0479 }
0480 
0481 static long iio_event_ioctl(struct iio_dev *indio_dev, struct file *filp,
0482                 unsigned int cmd, unsigned long arg)
0483 {
0484     int __user *ip = (int __user *)arg;
0485     int fd;
0486 
0487     if (cmd == IIO_GET_EVENT_FD_IOCTL) {
0488         fd = iio_event_getfd(indio_dev);
0489         if (fd < 0)
0490             return fd;
0491         if (copy_to_user(ip, &fd, sizeof(fd)))
0492             return -EFAULT;
0493         return 0;
0494     }
0495 
0496     return IIO_IOCTL_UNHANDLED;
0497 }
0498 
0499 static const char *iio_event_group_name = "events";
0500 int iio_device_register_eventset(struct iio_dev *indio_dev)
0501 {
0502     struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
0503     struct iio_event_interface *ev_int;
0504     struct iio_dev_attr *p;
0505     int ret = 0, attrcount_orig = 0, attrcount, attrn;
0506     struct attribute **attr;
0507 
0508     if (!(indio_dev->info->event_attrs ||
0509           iio_check_for_dynamic_events(indio_dev)))
0510         return 0;
0511 
0512     ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
0513     if (ev_int == NULL)
0514         return -ENOMEM;
0515 
0516     iio_dev_opaque->event_interface = ev_int;
0517 
0518     INIT_LIST_HEAD(&ev_int->dev_attr_list);
0519 
0520     iio_setup_ev_int(ev_int);
0521     if (indio_dev->info->event_attrs != NULL) {
0522         attr = indio_dev->info->event_attrs->attrs;
0523         while (*attr++ != NULL)
0524             attrcount_orig++;
0525     }
0526     attrcount = attrcount_orig;
0527     if (indio_dev->channels) {
0528         ret = __iio_add_event_config_attrs(indio_dev);
0529         if (ret < 0)
0530             goto error_free_setup_event_lines;
0531         attrcount += ret;
0532     }
0533 
0534     ev_int->group.name = iio_event_group_name;
0535     ev_int->group.attrs = kcalloc(attrcount + 1,
0536                       sizeof(ev_int->group.attrs[0]),
0537                       GFP_KERNEL);
0538     if (ev_int->group.attrs == NULL) {
0539         ret = -ENOMEM;
0540         goto error_free_setup_event_lines;
0541     }
0542     if (indio_dev->info->event_attrs)
0543         memcpy(ev_int->group.attrs,
0544                indio_dev->info->event_attrs->attrs,
0545                sizeof(ev_int->group.attrs[0]) * attrcount_orig);
0546     attrn = attrcount_orig;
0547     /* Add all elements from the list. */
0548     list_for_each_entry(p, &ev_int->dev_attr_list, l)
0549         ev_int->group.attrs[attrn++] = &p->dev_attr.attr;
0550 
0551     ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group);
0552     if (ret)
0553         goto error_free_setup_event_lines;
0554 
0555     ev_int->ioctl_handler.ioctl = iio_event_ioctl;
0556     iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
0557                       &ev_int->ioctl_handler);
0558 
0559     return 0;
0560 
0561 error_free_setup_event_lines:
0562     iio_free_chan_devattr_list(&ev_int->dev_attr_list);
0563     kfree(ev_int);
0564     iio_dev_opaque->event_interface = NULL;
0565     return ret;
0566 }
0567 
0568 /**
0569  * iio_device_wakeup_eventset - Wakes up the event waitqueue
0570  * @indio_dev: The IIO device
0571  *
0572  * Wakes up the event waitqueue used for poll() and blocking read().
0573  * Should usually be called when the device is unregistered.
0574  */
0575 void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
0576 {
0577     struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
0578 
0579     if (iio_dev_opaque->event_interface == NULL)
0580         return;
0581     wake_up(&iio_dev_opaque->event_interface->wait);
0582 }
0583 
0584 void iio_device_unregister_eventset(struct iio_dev *indio_dev)
0585 {
0586     struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
0587     struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
0588 
0589     if (ev_int == NULL)
0590         return;
0591 
0592     iio_device_ioctl_handler_unregister(&ev_int->ioctl_handler);
0593     iio_free_chan_devattr_list(&ev_int->dev_attr_list);
0594     kfree(ev_int->group.attrs);
0595     kfree(ev_int);
0596     iio_dev_opaque->event_interface = NULL;
0597 }