// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

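/*
 * Map a logical offset in a subscription's event ring buffer to the
 * physical index in sev->events[]: offsets are taken from sev->first
 * and wrap around at sev->elems.
 */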
static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

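/*
 * Remove the oldest pending event from the file handle's available list
 * and copy it into *event, converting the monotonic nanosecond timestamp
 * into the seconds/nanoseconds pair exposed to userspace. Returns
 * -ENOENT if no event is pending.
 */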
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	struct timespec64 ts;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	ts = ns_to_timespec64(kev->ts);
	event->timestamp.tv_sec = ts.tv_sec;
	event->timestamp.tv_nsec = ts.tv_nsec;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

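/*
 * Dequeue a single event (the VIDIOC_DQEVENT path). In blocking mode the
 * serialization lock of the video device, if present, is dropped while
 * sleeping, and the wait is restarted if another thread races us to the
 * event and __v4l2_event_dequeue() returns -ENOENT.
 */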
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

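/*
 * Queue one event on a single file handle (fh->vdev->fh_lock must be
 * held). If the subscription's ring buffer is full, the oldest event is
 * dropped to make room; a subscription can supply ->replace()/->merge()
 * ops so that the information carried by the dropped event is folded
 * into an event that survives instead of being lost.
 */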
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

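/*
 * Queue an event on every file handle currently open on the video
 * device. A minimal sketch of the usual driver-side call; the "dev" and
 * "vdev" names below are illustrative and not part of this file:
 *
 *	static const struct v4l2_event eos_ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *
 *	v4l2_event_queue(&dev->vdev, &eos_ev);
 */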
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

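/*
 * Queue an event on a single file handle only. Stateful codec drivers,
 * for instance, typically signal a source change on the file handle that
 * owns the decoding context rather than on every open handle. A hedged
 * sketch; the "ctx" name is illustrative:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_event_queue_fh(&ctx->fh, &ev);
 */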
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

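/*
 * Return the number of events waiting on this file handle. Drivers that
 * implement their own poll() commonly use it to report exceptional
 * readiness; a sketch assuming "fh" points to the per-open v4l2_fh:
 *
 *	if (v4l2_event_pending(fh))
 *		mask |= EPOLLPRI;
 */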
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

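/*
 * Wake up every file handle waiting on events for this device. No event
 * is queued; sleepers simply get a chance to re-evaluate their wait
 * condition.
 */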
void v4l2_event_wake_all(struct video_device *vdev)
{
	struct v4l2_fh *fh;
	unsigned long flags;

	if (!vdev)
		return;

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		wake_up_all(&fh->wait);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_wake_all);

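/*
 * Unlink a subscription and any of its pending events from the file
 * handle. Both fh->subscribe_lock and fh->vdev->fh_lock must be held;
 * freeing the subscription is left to the caller.
 */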
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

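/*
 * Subscribe a file handle to an event type. "elems" sizes the
 * per-subscription ring buffer (at least one slot is always allocated)
 * and "ops" optionally provides add/del/replace/merge callbacks. A
 * hedged sketch of a driver's vidioc_subscribe_event handler; the
 * "my_subscribe_event" name is illustrative, and
 * v4l2_ctrl_subscribe_event() comes from the control framework:
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 0, NULL);
 *		case V4L2_EVENT_SOURCE_CHANGE:
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		default:
 *			return v4l2_ctrl_subscribe_event(fh, sub);
 *		}
 *	}
 */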
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned int elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

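/*
 * Drop every subscription held by the file handle, one at a time, so
 * that each one goes through the regular v4l2_event_unsubscribe() path
 * (including the optional ->del() callback).
 */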
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

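/*
 * Undo a single subscription (or all of them if sub->type is
 * V4L2_EVENT_ALL), discarding any events still queued for it, then call
 * the optional ->del() callback and free the subscription.
 */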
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

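/*
 * Sub-device wrapper around v4l2_event_unsubscribe(); the subdev
 * argument itself is not needed.
 */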
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

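/*
 * replace/merge ops for V4L2_EVENT_SOURCE_CHANGE: when the queue
 * overflows, the "changes" bitmasks of the colliding events are OR'ed
 * together so that no change notification is lost even though only one
 * event survives.
 */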
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

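/*
 * Helper for drivers that support V4L2_EVENT_SOURCE_CHANGE: subscribes
 * with the replace/merge ops above so queued source-change events are
 * collapsed rather than dropped.
 */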
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

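/*
 * Sub-device counterpart of v4l2_src_change_event_subscribe(); the
 * subdev argument is unused.
 */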
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);