// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
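
/*
 * Usage sketch: userspace typically waits for a queued request to complete
 * by polling the request fd for POLLPRI, roughly:
 *
 *	struct pollfd pfd = { .fd = request_fd, .events = POLLPRI };
 *
 *	poll(&pfd, 1, -1);
 *
 * POLLERR here means the request is not queued (it is still idle, being
 * updated or being cleaned up), so there is nothing to wait for.
 */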

static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}
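
/*
 * Taken together with media_request_ioctl_reinit() below, the request state
 * machine is roughly:
 *
 *	IDLE -> VALIDATING -> QUEUED -> COMPLETE
 *
 * A failed req_validate drops the request back to IDLE. A reinit takes an
 * IDLE or COMPLETE request through CLEANING back to IDLE so the same fd can
 * be reused. UPDATING is the transient state used while objects are bound
 * to or updated in the request.
 */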

static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}
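
/*
 * Usage sketch: once controls and buffers have been associated with the
 * request fd, userspace submits and recycles it roughly as follows:
 *
 *	ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE);
 *	... wait for POLLPRI on request_fd ...
 *	ioctl(request_fd, MEDIA_REQUEST_IOC_REINIT);
 *
 * so the same fd can carry the next request without being reallocated.
 */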

static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct fd f;
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	f = fdget(request_fd);
	if (!f.file)
		goto err_no_req_fd;

	if (f.file->f_op != &request_fops)
		goto err_fput;
	req = f.file->private_data;
	if (req->mdev != mdev)
		goto err_fput;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The fdget() above ensures that
	 * even if userspace closes the request filehandle, the release()
	 * fop won't be called, so the media_request_get() always succeeds
	 * and there is no race condition where the request was released
	 * before media_request_get() is called.
	 */
	media_request_get(req);
	fdput(f);

	return req;

err_fput:
	fdput(f);

err_no_req_fd:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
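
/*
 * Usage sketch: this lookup is what turns a request_fd passed in from
 * userspace (for instance in struct v4l2_ext_controls or struct v4l2_buffer)
 * back into a struct media_request. A caller would do roughly:
 *
 *	req = media_request_get_by_fd(mdev, request_fd);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	media_request_put(req);
 *
 * and must always drop the reference it was handed.
 */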

int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto err_free_req;
	}

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	fd_install(fd, filp);

	return 0;

err_put_fd:
	put_unused_fd(fd);

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}
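
/*
 * Usage sketch: userspace reaches this function through the
 * MEDIA_IOC_REQUEST_ALLOC ioctl on the media device node, roughly:
 *
 *	int request_fd;
 *
 *	ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd);
 *
 * The returned fd is then used for MEDIA_REQUEST_IOC_QUEUE/REINIT, for
 * poll() and as the request_fd value in V4L2 ioctls.
 */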

static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
		    req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
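
/*
 * Note on the list ordering above: buffer objects are appended to the tail
 * of the object list while other objects (such as control handler state)
 * are added to the head, so that at queue time the non-buffer objects the
 * buffers depend on are processed first. Every bound object also counts as
 * incomplete until media_request_object_complete() is called for it.
 */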

void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
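
/*
 * Completion flow: when the last incomplete object of a queued request is
 * completed (or unbound), the request moves to COMPLETE, any poll() waiters
 * are woken with POLLPRI, and the extra reference taken when the request
 * was queued is dropped via media_request_put().
 */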