Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2017 Red Hat
0003  * Parts ported from amdgpu (fence wait code).
0004  * Copyright 2016 Advanced Micro Devices, Inc.
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the "Software"),
0008  * to deal in the Software without restriction, including without limitation
0009  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0010  * and/or sell copies of the Software, and to permit persons to whom the
0011  * Software is furnished to do so, subject to the following conditions:
0012  *
0013  * The above copyright notice and this permission notice (including the next
0014  * paragraph) shall be included in all copies or substantial portions of the
0015  * Software.
0016  *
0017  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0018  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0019  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0020  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0021  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
0022  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
0023  * IN THE SOFTWARE.
0024  *
0025  * Authors:
0026  *
0027  */
0028 
0029 /**
0030  * DOC: Overview
0031  *
0032  * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
0033  * container for a synchronization primitive which can be used by userspace
0034  * to explicitly synchronize GPU commands, can be shared between userspace
0035  * processes, and can be shared between different DRM drivers.
0036  * Their primary use-case is to implement Vulkan fences and semaphores.
0037  * The syncobj userspace API provides ioctls for several operations:
0038  *
0039  *  - Creation and destruction of syncobjs
0040  *  - Import and export of syncobjs to/from a syncobj file descriptor
0041  *  - Import and export a syncobj's underlying fence to/from a sync file
0042  *  - Reset a syncobj (set its fence to NULL)
0043  *  - Signal a syncobj (set a trivially signaled fence)
0044  *  - Wait for a syncobj's fence to appear and be signaled
0045  *
0046  * The syncobj userspace API also provides operations to manipulate a syncobj
0047  * in terms of a timeline of struct &dma_fence_chain rather than a single
0048  * struct &dma_fence, through the following operations:
0049  *
0050  *   - Signal a given point on the timeline
0051  *   - Wait for a given point to appear and/or be signaled
0052  *   - Import and export from/to a given point of a timeline
0053  *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
 * &dma_fence which may be NULL.
0056  * When a syncobj is first created, its pointer is either NULL or a pointer
0057  * to an already signaled fence depending on whether the
0058  * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
0059  * &DRM_IOCTL_SYNCOBJ_CREATE.
0060  *
0061  * If the syncobj is considered as a binary (its state is either signaled or
0062  * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
0063  * the syncobj, the syncobj's fence is replaced with a fence which will be
0064  * signaled by the completion of that work.
 * If the syncobj is considered as a timeline primitive, when GPU work is
 * enqueued in a DRM driver to signal a given point of the syncobj, a new
 * struct &dma_fence_chain is created, pointing to the DRM driver's fence
 * and also pointing to the previous fence that was in the syncobj. The new
 * struct &dma_fence_chain fence replaces the syncobj's fence and will be
 * signaled by completion of the DRM driver's work and also any work
 * associated with the fence previously in the syncobj.
0072  *
0073  * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
0074  * time the work is enqueued, it waits on the syncobj's fence before
0075  * submitting the work to hardware. That fence is either :
0076  *
0077  *    - The syncobj's current fence if the syncobj is considered as a binary
0078  *      primitive.
0079  *    - The struct &dma_fence associated with a given point if the syncobj is
0080  *      considered as a timeline primitive.
0081  *
0082  * If the syncobj's fence is NULL or not present in the syncobj's timeline,
0083  * the enqueue operation is expected to fail.
0084  *
0085  * With binary syncobj, all manipulation of the syncobjs's fence happens in
0086  * terms of the current fence at the time the ioctl is called by userspace
 * regardless of whether that operation is an immediate host-side operation
 * (signal or reset) or an operation which is enqueued in some driver
0089  * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
0090  * to manipulate a syncobj from the host by resetting its pointer to NULL or
0091  * setting its pointer to a fence which is already signaled.
0092  *
 * With a timeline syncobj, all manipulation of the syncobj's fence happens in
0094  * terms of a u64 value referring to point in the timeline. See
0095  * dma_fence_chain_find_seqno() to see how a given point is found in the
0096  * timeline.
0097  *
 * Note that applications should be careful to always use the timeline set of
 * ioctl() when dealing with a syncobj considered as a timeline. Using a binary
 * set of ioctl() with a syncobj considered as a timeline could result in
 * incorrect synchronization. The use of binary syncobjs is supported through
 * the timeline set of ioctl() by using a point value of 0; this will reproduce
 * the behavior of the binary set of ioctl() (for example, replacing the
 * syncobj's fence when signaling).
0105  *
0106  *
0107  * Host-side wait on syncobjs
0108  * --------------------------
0109  *
0110  * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
0111  * host-side wait on all of the syncobj fences simultaneously.
0112  * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
0113  * all of the syncobj fences to be signaled before it returns.
0114  * Otherwise, it returns once at least one syncobj fence has been signaled
0115  * and the index of a signaled fence is written back to the client.
0116  *
0117  * Unlike the enqueued GPU work dependencies which fail if they see a NULL
0118  * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
0119  * the host-side wait will first wait for the syncobj to receive a non-NULL
0120  * fence and then wait on that fence.
0121  * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
0122  * syncobjs in the array has a NULL fence, -EINVAL will be returned.
0123  * Assuming the syncobj starts off with a NULL fence, this allows a client
0124  * to do a host wait in one thread (or process) which waits on GPU work
0125  * submitted in another thread (or process) without having to manually
0126  * synchronize between the two.
0127  * This requirement is inherited from the Vulkan fence API.
0128  *
0129  * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
0130  * handles as well as an array of u64 points and does a host-side wait on all
0131  * of syncobj fences at the given points simultaneously.
0132  *
0133  * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
0134  * fence to materialize on the timeline without waiting for the fence to be
0135  * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
0136  * requirement is inherited from the wait-before-signal behavior required by
0137  * the Vulkan timeline semaphore API.
0138  *
0139  *
0140  * Import/export of syncobjs
0141  * -------------------------
0142  *
0143  * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
0144  * provide two mechanisms for import/export of syncobjs.
0145  *
0146  * The first lets the client import or export an entire syncobj to a file
0147  * descriptor.
0148  * These fd's are opaque and have no other use case, except passing the
0149  * syncobj between processes.
0150  * All exported file descriptors and any syncobj handles created as a
0151  * result of importing those file descriptors own a reference to the
0152  * same underlying struct &drm_syncobj and the syncobj can be used
0153  * persistently across all the processes with which it is shared.
0154  * The syncobj is freed only once the last reference is dropped.
0155  * Unlike dma-buf, importing a syncobj creates a new handle (with its own
0156  * reference) for every import instead of de-duplicating.
0157  * The primary use-case of this persistent import/export is for shared
0158  * Vulkan fences and semaphores.
0159  *
0160  * The second import/export mechanism, which is indicated by
0161  * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
0162  * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
0163  * import/export the syncobj's current fence from/to a &sync_file.
 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
 * operations on the syncobj will not affect the exported sync file.
0167  * When a sync file is imported into a syncobj, the syncobj's fence is set
0168  * to the fence wrapped by that sync file.
0169  * Because sync files are immutable, resetting or signaling the syncobj
0170  * will not affect any sync files whose fences have been imported into the
0171  * syncobj.
0172  *
0173  *
0174  * Import/export of timeline points in timeline syncobjs
0175  * -----------------------------------------------------
0176  *
0177  * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
0178  * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
0179  * into another syncobj.
0180  *
0181  * Note that if you want to transfer a struct &dma_fence_chain from a given
0182  * point on a timeline syncobj from/into a binary syncobj, you can use the
0183  * point 0 to mean take/replace the fence in the syncobj.
0184  */
0185 
0186 #include <linux/anon_inodes.h>
0187 #include <linux/dma-fence-unwrap.h>
0188 #include <linux/file.h>
0189 #include <linux/fs.h>
0190 #include <linux/sched/signal.h>
0191 #include <linux/sync_file.h>
0192 #include <linux/uaccess.h>
0193 
0194 #include <drm/drm.h>
0195 #include <drm/drm_drv.h>
0196 #include <drm/drm_file.h>
0197 #include <drm/drm_gem.h>
0198 #include <drm/drm_print.h>
0199 #include <drm/drm_syncobj.h>
0200 #include <drm/drm_utils.h>
0201 
0202 #include "drm_internal.h"
0203 
/* One waiter parked on a syncobj, linked into &drm_syncobj.cb_list.
 * Queued by drm_syncobj_fence_add_wait() when no suitable fence exists
 * yet, and resolved later by syncobj_wait_syncobj_func() once a fence
 * is installed on the syncobj.
 */
struct syncobj_wait_entry {
	struct list_head node;		/* link in syncobj->cb_list, protected by syncobj->lock */
	struct task_struct *task;	/* sleeping task to wake once a fence is found */
	struct dma_fence *fence;	/* resolved fence (referenced), NULL until found */
	struct dma_fence_cb fence_cb;	/* presumably used to wait on @fence — resolver not in this chunk */
	u64    point;			/* timeline point waited for (0 for binary syncobjs) */
};
0211 
0212 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
0213                       struct syncobj_wait_entry *wait);
0214 
0215 /**
0216  * drm_syncobj_find - lookup and reference a sync object.
0217  * @file_private: drm file private pointer
0218  * @handle: sync object handle to lookup.
0219  *
0220  * Returns a reference to the syncobj pointed to by handle or NULL. The
0221  * reference must be released by calling drm_syncobj_put().
0222  */
0223 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
0224                      u32 handle)
0225 {
0226     struct drm_syncobj *syncobj;
0227 
0228     spin_lock(&file_private->syncobj_table_lock);
0229 
0230     /* Check if we currently have a reference on the object */
0231     syncobj = idr_find(&file_private->syncobj_idr, handle);
0232     if (syncobj)
0233         drm_syncobj_get(syncobj);
0234 
0235     spin_unlock(&file_private->syncobj_table_lock);
0236 
0237     return syncobj;
0238 }
0239 EXPORT_SYMBOL(drm_syncobj_find);
0240 
/* Try to resolve a fence for @wait, or park it on the syncobj's waiter
 * list until one shows up.
 *
 * On return, either wait->fence holds a fence reference (possibly a
 * signaled stub when the requested point is already past), or wait->node
 * has been queued on syncobj->cb_list for later resolution by
 * syncobj_wait_syncobj_func().
 */
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* Already resolved by an earlier attempt — nothing to do. */
	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* No fence installed, or the requested point has not
		 * materialized on the timeline yet: queue the waiter.
		 */
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/* dma_fence_chain_find_seqno() returned success but cleared
		 * the fence: the point is already signaled, so hand back a
		 * signaled stub to keep the contract of a non-NULL fence.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}
0265 
0266 static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
0267                     struct syncobj_wait_entry *wait)
0268 {
0269     if (!wait->node.next)
0270         return;
0271 
0272     spin_lock(&syncobj->lock);
0273     list_del_init(&wait->node);
0274     spin_unlock(&syncobj->lock);
0275 }
0276 
/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj. Ownership of
 * @chain passes to the syncobj; a separate reference on @fence is taken
 * for the chain node.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	/* Reference held by the chain node; the caller keeps its own. */
	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Out-of-order points confuse the timeline; warn, e.g. query_ioctl
	 * may then report a payload of 0.
	 */
	if (prev && prev->seqno >= point)
		DRM_DEBUG("You are adding an unorder point to timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	/* Publish the new chain head; readers use RCU to fetch it. */
	rcu_assign_pointer(syncobj->fence, &chain->base);

	/* Wake any waiters parked until a fence appears on the timeline. */
	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
0314 
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object, may be NULL to reset it
 *
 * This replaces the fence on a sync object. The syncobj takes its own
 * reference on @fence and drops the reference on the previous fence.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	/* The syncobj keeps its own reference on the installed fence. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	/* Only wake parked waiters if the fence actually changed. */
	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	/* Drop the reference previously held by the syncobj (NULL-safe). */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
0347 
/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 *
 * Returns 0 on success or a negative error code if allocating the stub
 * fence failed.
 */
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *stub;

	stub = dma_fence_allocate_private_stub();
	if (IS_ERR(stub))
		return PTR_ERR(stub);

	/* The syncobj takes its own reference, so drop ours afterwards. */
	drm_syncobj_replace_fence(syncobj, stub);
	dma_fence_put(stub);
	return 0;
}
0365 
/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get(). With DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT it
 * additionally sleeps (up to 5s) for a fence to materialize.
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	/* Waiting for userspace with locks held is illegal because it can
	 * deadlock trivially, with page faults for example. Make lockdep
	 * complain about it early on.
	 */
	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		might_sleep();
		lockdep_assert_none_held_once();
	}

	*fence = drm_syncobj_fence_get(syncobj);

	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret) {
			/* If the requested seqno is already signaled
			 * drm_syncobj_find_fence may return a NULL
			 * fence. To make sure the recipient gets
			 * signalled, use a new fence instead.
			 */
			if (!*fence)
				*fence = dma_fence_get_stub();

			goto out;
		}
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	/* Without WAIT_FOR_SUBMIT, report the lookup failure immediately. */
	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		goto out;

	/* Park on the syncobj's waiter list and sleep until either a fence
	 * shows up or the submit timeout runs out.
	 */
	memset(&wait, 0, sizeof(wait));
	wait.task = current;
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	/* Still queued on cb_list — unlink before &wait goes out of scope. */
	if (wait.node.next)
		drm_syncobj_remove_wait(syncobj, &wait);

out:
	drm_syncobj_put(syncobj);

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
0463 
0464 /**
0465  * drm_syncobj_free - free a sync object.
0466  * @kref: kref to free.
0467  *
0468  * Only to be called from kref_put in drm_syncobj_put.
0469  */
0470 void drm_syncobj_free(struct kref *kref)
0471 {
0472     struct drm_syncobj *syncobj = container_of(kref,
0473                            struct drm_syncobj,
0474                            refcount);
0475     drm_syncobj_replace_fence(syncobj, NULL);
0476     kfree(syncobj);
0477 }
0478 EXPORT_SYMBOL(drm_syncobj_free);
0479 
0480 /**
0481  * drm_syncobj_create - create a new syncobj
0482  * @out_syncobj: returned syncobj
0483  * @flags: DRM_SYNCOBJ_* flags
0484  * @fence: if non-NULL, the syncobj will represent this fence
0485  *
0486  * This is the first function to create a sync object. After creating, drivers
0487  * probably want to make it available to userspace, either through
0488  * drm_syncobj_get_handle() or drm_syncobj_get_fd().
0489  *
0490  * Returns 0 on success or a negative error value on failure.
0491  */
0492 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
0493                struct dma_fence *fence)
0494 {
0495     int ret;
0496     struct drm_syncobj *syncobj;
0497 
0498     syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
0499     if (!syncobj)
0500         return -ENOMEM;
0501 
0502     kref_init(&syncobj->refcount);
0503     INIT_LIST_HEAD(&syncobj->cb_list);
0504     spin_lock_init(&syncobj->lock);
0505 
0506     if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
0507         ret = drm_syncobj_assign_null_handle(syncobj);
0508         if (ret < 0) {
0509             drm_syncobj_put(syncobj);
0510             return ret;
0511         }
0512     }
0513 
0514     if (fence)
0515         drm_syncobj_replace_fence(syncobj, fence);
0516 
0517     *out_syncobj = syncobj;
0518     return 0;
0519 }
0520 EXPORT_SYMBOL(drm_syncobj_create);
0521 
0522 /**
0523  * drm_syncobj_get_handle - get a handle from a syncobj
0524  * @file_private: drm file private pointer
0525  * @syncobj: Sync object to export
0526  * @handle: out parameter with the new handle
0527  *
0528  * Exports a sync object created with drm_syncobj_create() as a handle on
0529  * @file_private to userspace.
0530  *
0531  * Returns 0 on success or a negative error value on failure.
0532  */
0533 int drm_syncobj_get_handle(struct drm_file *file_private,
0534                struct drm_syncobj *syncobj, u32 *handle)
0535 {
0536     int ret;
0537 
0538     /* take a reference to put in the idr */
0539     drm_syncobj_get(syncobj);
0540 
0541     idr_preload(GFP_KERNEL);
0542     spin_lock(&file_private->syncobj_table_lock);
0543     ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
0544     spin_unlock(&file_private->syncobj_table_lock);
0545 
0546     idr_preload_end();
0547 
0548     if (ret < 0) {
0549         drm_syncobj_put(syncobj);
0550         return ret;
0551     }
0552 
0553     *handle = ret;
0554     return 0;
0555 }
0556 EXPORT_SYMBOL(drm_syncobj_get_handle);
0557 
0558 static int drm_syncobj_create_as_handle(struct drm_file *file_private,
0559                     u32 *handle, uint32_t flags)
0560 {
0561     int ret;
0562     struct drm_syncobj *syncobj;
0563 
0564     ret = drm_syncobj_create(&syncobj, flags, NULL);
0565     if (ret)
0566         return ret;
0567 
0568     ret = drm_syncobj_get_handle(file_private, syncobj, handle);
0569     drm_syncobj_put(syncobj);
0570     return ret;
0571 }
0572 
0573 static int drm_syncobj_destroy(struct drm_file *file_private,
0574                    u32 handle)
0575 {
0576     struct drm_syncobj *syncobj;
0577 
0578     spin_lock(&file_private->syncobj_table_lock);
0579     syncobj = idr_remove(&file_private->syncobj_idr, handle);
0580     spin_unlock(&file_private->syncobj_table_lock);
0581 
0582     if (!syncobj)
0583         return -EINVAL;
0584 
0585     drm_syncobj_put(syncobj);
0586     return 0;
0587 }
0588 
0589 static int drm_syncobj_file_release(struct inode *inode, struct file *file)
0590 {
0591     struct drm_syncobj *syncobj = file->private_data;
0592 
0593     drm_syncobj_put(syncobj);
0594     return 0;
0595 }
0596 
0597 static const struct file_operations drm_syncobj_file_fops = {
0598     .release = drm_syncobj_file_release,
0599 };
0600 
0601 /**
0602  * drm_syncobj_get_fd - get a file descriptor from a syncobj
0603  * @syncobj: Sync object to export
0604  * @p_fd: out parameter with the new file descriptor
0605  *
0606  * Exports a sync object created with drm_syncobj_create() as a file descriptor.
0607  *
0608  * Returns 0 on success or a negative error value on failure.
0609  */
0610 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
0611 {
0612     struct file *file;
0613     int fd;
0614 
0615     fd = get_unused_fd_flags(O_CLOEXEC);
0616     if (fd < 0)
0617         return fd;
0618 
0619     file = anon_inode_getfile("syncobj_file",
0620                   &drm_syncobj_file_fops,
0621                   syncobj, 0);
0622     if (IS_ERR(file)) {
0623         put_unused_fd(fd);
0624         return PTR_ERR(file);
0625     }
0626 
0627     drm_syncobj_get(syncobj);
0628     fd_install(fd, file);
0629 
0630     *p_fd = fd;
0631     return 0;
0632 }
0633 EXPORT_SYMBOL(drm_syncobj_get_fd);
0634 
0635 static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
0636                     u32 handle, int *p_fd)
0637 {
0638     struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
0639     int ret;
0640 
0641     if (!syncobj)
0642         return -EINVAL;
0643 
0644     ret = drm_syncobj_get_fd(syncobj, p_fd);
0645     drm_syncobj_put(syncobj);
0646     return ret;
0647 }
0648 
0649 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
0650                     int fd, u32 *handle)
0651 {
0652     struct drm_syncobj *syncobj;
0653     struct fd f = fdget(fd);
0654     int ret;
0655 
0656     if (!f.file)
0657         return -EINVAL;
0658 
0659     if (f.file->f_op != &drm_syncobj_file_fops) {
0660         fdput(f);
0661         return -EINVAL;
0662     }
0663 
0664     /* take a reference to put in the idr */
0665     syncobj = f.file->private_data;
0666     drm_syncobj_get(syncobj);
0667 
0668     idr_preload(GFP_KERNEL);
0669     spin_lock(&file_private->syncobj_table_lock);
0670     ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
0671     spin_unlock(&file_private->syncobj_table_lock);
0672     idr_preload_end();
0673 
0674     if (ret > 0) {
0675         *handle = ret;
0676         ret = 0;
0677     } else
0678         drm_syncobj_put(syncobj);
0679 
0680     fdput(f);
0681     return ret;
0682 }
0683 
0684 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
0685                           int fd, int handle)
0686 {
0687     struct dma_fence *fence = sync_file_get_fence(fd);
0688     struct drm_syncobj *syncobj;
0689 
0690     if (!fence)
0691         return -EINVAL;
0692 
0693     syncobj = drm_syncobj_find(file_private, handle);
0694     if (!syncobj) {
0695         dma_fence_put(fence);
0696         return -ENOENT;
0697     }
0698 
0699     drm_syncobj_replace_fence(syncobj, fence);
0700     dma_fence_put(fence);
0701     drm_syncobj_put(syncobj);
0702     return 0;
0703 }
0704 
0705 static int drm_syncobj_export_sync_file(struct drm_file *file_private,
0706                     int handle, int *p_fd)
0707 {
0708     int ret;
0709     struct dma_fence *fence;
0710     struct sync_file *sync_file;
0711     int fd = get_unused_fd_flags(O_CLOEXEC);
0712 
0713     if (fd < 0)
0714         return fd;
0715 
0716     ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
0717     if (ret)
0718         goto err_put_fd;
0719 
0720     sync_file = sync_file_create(fence);
0721 
0722     dma_fence_put(fence);
0723 
0724     if (!sync_file) {
0725         ret = -EINVAL;
0726         goto err_put_fd;
0727     }
0728 
0729     fd_install(fd, sync_file->file);
0730 
0731     *p_fd = fd;
0732     return 0;
0733 err_put_fd:
0734     put_unused_fd(fd);
0735     return ret;
0736 }
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* Handles start at 1 so that 0 stays an invalid handle. */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
0750 
/* idr_for_each() callback: drop the reference held by the handle table. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	drm_syncobj_put(ptr);
	return 0;
}
0759 
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	/* Drop every table-held reference, then tear down the idr itself. */
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
0775 
0776 int
0777 drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
0778              struct drm_file *file_private)
0779 {
0780     struct drm_syncobj_create *args = data;
0781 
0782     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
0783         return -EOPNOTSUPP;
0784 
0785     /* no valid flags yet */
0786     if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
0787         return -EINVAL;
0788 
0789     return drm_syncobj_create_as_handle(file_private,
0790                         &args->handle, args->flags);
0791 }
0792 
0793 int
0794 drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
0795               struct drm_file *file_private)
0796 {
0797     struct drm_syncobj_destroy *args = data;
0798 
0799     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
0800         return -EOPNOTSUPP;
0801 
0802     /* make sure padding is empty */
0803     if (args->pad)
0804         return -EINVAL;
0805     return drm_syncobj_destroy(file_private, args->handle);
0806 }
0807 
0808 int
0809 drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
0810                    struct drm_file *file_private)
0811 {
0812     struct drm_syncobj_handle *args = data;
0813 
0814     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
0815         return -EOPNOTSUPP;
0816 
0817     if (args->pad)
0818         return -EINVAL;
0819 
0820     if (args->flags != 0 &&
0821         args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
0822         return -EINVAL;
0823 
0824     if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
0825         return drm_syncobj_export_sync_file(file_private, args->handle,
0826                             &args->fd);
0827 
0828     return drm_syncobj_handle_to_fd(file_private, args->handle,
0829                     &args->fd);
0830 }
0831 
0832 int
0833 drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
0834                    struct drm_file *file_private)
0835 {
0836     struct drm_syncobj_handle *args = data;
0837 
0838     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
0839         return -EOPNOTSUPP;
0840 
0841     if (args->pad)
0842         return -EINVAL;
0843 
0844     if (args->flags != 0 &&
0845         args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
0846         return -EINVAL;
0847 
0848     if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
0849         return drm_syncobj_import_sync_file_fence(file_private,
0850                               args->fd,
0851                               args->handle);
0852 
0853     return drm_syncobj_fd_to_handle(file_private, args->fd,
0854                     &args->handle);
0855 }
0856 
/* Transfer the fence found at @args->src_point of the source syncobj to
 * point @args->dst_point on the destination timeline syncobj.
 */
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence, *tmp;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &tmp);
	if (ret)
		goto err_put_timeline;

	/* Flatten fence containers so the chain node wraps a single fence. */
	fence = dma_fence_unwrap_merge(tmp);
	dma_fence_put(tmp);
	if (!fence) {
		ret = -ENOMEM;
		goto err_put_timeline;
	}

	chain = dma_fence_chain_alloc();
	if (!chain) {
		ret = -ENOMEM;
		goto err_free_fence;
	}

	/* add_point consumes @chain and takes its own reference on @fence;
	 * ret is still 0 here, so falling through returns success after
	 * dropping our local fence reference.
	 */
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err_free_fence:
	dma_fence_put(fence);
err_put_timeline:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}
0896 
0897 static int
0898 drm_syncobj_transfer_to_binary(struct drm_file *file_private,
0899                    struct drm_syncobj_transfer *args)
0900 {
0901     struct drm_syncobj *binary_syncobj = NULL;
0902     struct dma_fence *fence;
0903     int ret;
0904 
0905     binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
0906     if (!binary_syncobj)
0907         return -ENOENT;
0908     ret = drm_syncobj_find_fence(file_private, args->src_handle,
0909                      args->src_point, args->flags, &fence);
0910     if (ret)
0911         goto err;
0912     drm_syncobj_replace_fence(binary_syncobj, fence);
0913     dma_fence_put(fence);
0914 err:
0915     drm_syncobj_put(binary_syncobj);
0916 
0917     return ret;
0918 }
0919 int
0920 drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
0921                struct drm_file *file_private)
0922 {
0923     struct drm_syncobj_transfer *args = data;
0924     int ret;
0925 
0926     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
0927         return -EOPNOTSUPP;
0928 
0929     if (args->pad)
0930         return -EINVAL;
0931 
0932     if (args->dst_point)
0933         ret = drm_syncobj_transfer_to_timeline(file_private, args);
0934     else
0935         ret = drm_syncobj_transfer_to_binary(file_private, args);
0936 
0937     return ret;
0938 }
0939 
0940 static void syncobj_wait_fence_func(struct dma_fence *fence,
0941                     struct dma_fence_cb *cb)
0942 {
0943     struct syncobj_wait_entry *wait =
0944         container_of(cb, struct syncobj_wait_entry, fence_cb);
0945 
0946     wake_up_process(wait->task);
0947 }
0948 
/*
 * Invoked when a new fence is attached to a syncobj that has pending
 * WAIT_FOR_SUBMIT waiters.  If the new fence satisfies wait->point,
 * publish it in wait->fence, wake the waiter, and unlink the entry.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                      struct syncobj_wait_entry *wait)
{
    struct dma_fence *fence;

    /* This happens inside the syncobj lock */
    fence = rcu_dereference_protected(syncobj->fence,
                      lockdep_is_held(&syncobj->lock));
    /* dma_fence_get() tolerates NULL; the NULL case is handled below. */
    dma_fence_get(fence);
    /*
     * dma_fence_chain_find_seqno() may rewrite fence — including to
     * NULL when the requested point is already signalled — hence the
     * second !fence check below.  A non-zero return means the point is
     * not yet available: keep waiting.
     */
    if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
        dma_fence_put(fence);
        return;
    } else if (!fence) {
        /* Point already signalled: hand back the always-signalled stub. */
        wait->fence = dma_fence_get_stub();
    } else {
        wait->fence = fence;
    }

    wake_up_process(wait->task);
    list_del_init(&wait->node);
}
0970 
/*
 * Core wait loop shared by the binary and timeline wait ioctls.
 *
 * @syncobjs:    array of @count referenced syncobjs to wait on
 * @user_points: optional user array of timeline points (NULL = binary wait,
 *               all points treated as 0)
 * @count:       number of syncobjs/points
 * @flags:       DRM_SYNCOBJ_WAIT_FLAGS_* controlling ALL/ANY, submit-wait
 *               and availability semantics
 * @timeout:     remaining timeout in jiffies
 * @idx:         optional out-param receiving the first signaled index
 *
 * Returns the remaining timeout (>= 0) on success, or a negative error:
 * -EINVAL (missing fence without WAIT_FOR_SUBMIT), -ETIME (timed out),
 * -ERESTARTSYS (signal), -EFAULT or -ENOMEM.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
                          void __user *user_points,
                          uint32_t count,
                          uint32_t flags,
                          signed long timeout,
                          uint32_t *idx)
{
    struct syncobj_wait_entry *entries;
    struct dma_fence *fence;
    uint64_t *points;
    uint32_t signaled_count, i;

    /* We may sleep waiting for a submission; holding locks would deadlock. */
    if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
        lockdep_assert_none_held_once();

    points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
    if (points == NULL)
        return -ENOMEM;

    if (!user_points) {
        /* Binary wait: every point is implicitly 0. */
        memset(points, 0, count * sizeof(uint64_t));

    } else if (copy_from_user(points, user_points,
                  sizeof(uint64_t) * count)) {
        timeout = -EFAULT;
        goto err_free_points;
    }

    entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
    if (!entries) {
        timeout = -ENOMEM;
        goto err_free_points;
    }
    /* Walk the list of sync objects and initialize entries.  We do
     * this up-front so that we can properly return -EINVAL if there is
     * a syncobj with a missing fence and then never have the chance of
     * returning -EINVAL again.
     */
    signaled_count = 0;
    for (i = 0; i < count; ++i) {
        struct dma_fence *fence;

        entries[i].task = current;
        entries[i].point = points[i];
        fence = drm_syncobj_fence_get(syncobjs[i]);
        if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
            dma_fence_put(fence);
            if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                /* Fence not submitted yet; wait for it below. */
                continue;
            } else {
                timeout = -EINVAL;
                goto cleanup_entries;
            }
        }

        /* find_seqno may have replaced fence with NULL (point signalled). */
        if (fence)
            entries[i].fence = fence;
        else
            entries[i].fence = dma_fence_get_stub();

        if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
            dma_fence_is_signaled(entries[i].fence)) {
            if (signaled_count == 0 && idx)
                *idx = i;
            signaled_count++;
        }
    }

    /* Already satisfied: all signaled (ALL) or at least one (ANY). */
    if (signaled_count == count ||
        (signaled_count > 0 &&
         !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
        goto cleanup_entries;

    /* There's a very annoying laxness in the dma_fence API here, in
     * that backends are not required to automatically report when a
     * fence is signaled prior to fence->ops->enable_signaling() being
     * called.  So here if we fail to match signaled_count, we need to
     * fallthough and try a 0 timeout wait!
     */

    if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
        /* Get notified when a fence is attached to an empty syncobj. */
        for (i = 0; i < count; ++i)
            drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
    }

    do {
        set_current_state(TASK_INTERRUPTIBLE);

        signaled_count = 0;
        for (i = 0; i < count; ++i) {
            fence = entries[i].fence;
            if (!fence)
                continue;

            /*
             * A failing dma_fence_add_callback() means the fence is
             * already signaled; fence_cb.func doubles as the
             * "callback installed" marker checked during cleanup.
             */
            if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
                dma_fence_is_signaled(fence) ||
                (!entries[i].fence_cb.func &&
                 dma_fence_add_callback(fence,
                            &entries[i].fence_cb,
                            syncobj_wait_fence_func))) {
                /* The fence has been signaled */
                if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
                    signaled_count++;
                } else {
                    if (idx)
                        *idx = i;
                    goto done_waiting;
                }
            }
        }

        if (signaled_count == count)
            goto done_waiting;

        if (timeout == 0) {
            timeout = -ETIME;
            goto done_waiting;
        }

        if (signal_pending(current)) {
            timeout = -ERESTARTSYS;
            goto done_waiting;
        }

        timeout = schedule_timeout(timeout);
    } while (1);

done_waiting:
    __set_current_state(TASK_RUNNING);

cleanup_entries:
    for (i = 0; i < count; ++i) {
        drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
        if (entries[i].fence_cb.func)
            dma_fence_remove_callback(entries[i].fence,
                          &entries[i].fence_cb);
        dma_fence_put(entries[i].fence);
    }
    kfree(entries);

err_free_points:
    kfree(points);

    return timeout;
}
1116 
1117 /**
1118  * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
1119  *
1120  * @timeout_nsec: timeout nsec component in ns, 0 for poll
1121  *
1122  * Calculate the timeout in jiffies from an absolute time in sec/nsec.
1123  */
1124 signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
1125 {
1126     ktime_t abs_timeout, now;
1127     u64 timeout_ns, timeout_jiffies64;
1128 
1129     /* make 0 timeout means poll - absolute 0 doesn't seem valid */
1130     if (timeout_nsec == 0)
1131         return 0;
1132 
1133     abs_timeout = ns_to_ktime(timeout_nsec);
1134     now = ktime_get();
1135 
1136     if (!ktime_after(abs_timeout, now))
1137         return 0;
1138 
1139     timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
1140 
1141     timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
1142     /*  clamp timeout to avoid infinite timeout */
1143     if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
1144         return MAX_SCHEDULE_TIMEOUT - 1;
1145 
1146     return timeout_jiffies64 + 1;
1147 }
1148 EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
1149 
1150 static int drm_syncobj_array_wait(struct drm_device *dev,
1151                   struct drm_file *file_private,
1152                   struct drm_syncobj_wait *wait,
1153                   struct drm_syncobj_timeline_wait *timeline_wait,
1154                   struct drm_syncobj **syncobjs, bool timeline)
1155 {
1156     signed long timeout = 0;
1157     uint32_t first = ~0;
1158 
1159     if (!timeline) {
1160         timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
1161         timeout = drm_syncobj_array_wait_timeout(syncobjs,
1162                              NULL,
1163                              wait->count_handles,
1164                              wait->flags,
1165                              timeout, &first);
1166         if (timeout < 0)
1167             return timeout;
1168         wait->first_signaled = first;
1169     } else {
1170         timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
1171         timeout = drm_syncobj_array_wait_timeout(syncobjs,
1172                              u64_to_user_ptr(timeline_wait->points),
1173                              timeline_wait->count_handles,
1174                              timeline_wait->flags,
1175                              timeout, &first);
1176         if (timeout < 0)
1177             return timeout;
1178         timeline_wait->first_signaled = first;
1179     }
1180     return 0;
1181 }
1182 
/*
 * Copy @count_handles handles from @user_handles and resolve each one to
 * a referenced struct drm_syncobj.  On success *@syncobjs_out points to a
 * kmalloc'd array the caller must release with drm_syncobj_array_free().
 *
 * Returns 0 on success, -ENOMEM, -EFAULT, or -ENOENT for a bad handle.
 */
static int drm_syncobj_array_find(struct drm_file *file_private,
                  void __user *user_handles,
                  uint32_t count_handles,
                  struct drm_syncobj ***syncobjs_out)
{
    uint32_t i, *handles;
    struct drm_syncobj **syncobjs;
    int ret;

    handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
    if (handles == NULL)
        return -ENOMEM;

    if (copy_from_user(handles, user_handles,
               sizeof(uint32_t) * count_handles)) {
        ret = -EFAULT;
        goto err_free_handles;
    }

    syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
    if (syncobjs == NULL) {
        ret = -ENOMEM;
        goto err_free_handles;
    }

    /* Each successful lookup takes a reference on the syncobj. */
    for (i = 0; i < count_handles; i++) {
        syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
        if (!syncobjs[i]) {
            ret = -ENOENT;
            goto err_put_syncobjs;
        }
    }

    /* The handle array is only needed during lookup. */
    kfree(handles);
    *syncobjs_out = syncobjs;
    return 0;

err_put_syncobjs:
    /* Unwind only the entries successfully looked up (indices < i). */
    while (i-- > 0)
        drm_syncobj_put(syncobjs[i]);
    kfree(syncobjs);
err_free_handles:
    kfree(handles);

    return ret;
}
1229 
1230 static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
1231                    uint32_t count)
1232 {
1233     uint32_t i;
1234 
1235     for (i = 0; i < count; i++)
1236         drm_syncobj_put(syncobjs[i]);
1237     kfree(syncobjs);
1238 }
1239 
1240 int
1241 drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
1242                struct drm_file *file_private)
1243 {
1244     struct drm_syncobj_wait *args = data;
1245     struct drm_syncobj **syncobjs;
1246     int ret = 0;
1247 
1248     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1249         return -EOPNOTSUPP;
1250 
1251     if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1252                 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
1253         return -EINVAL;
1254 
1255     if (args->count_handles == 0)
1256         return -EINVAL;
1257 
1258     ret = drm_syncobj_array_find(file_private,
1259                      u64_to_user_ptr(args->handles),
1260                      args->count_handles,
1261                      &syncobjs);
1262     if (ret < 0)
1263         return ret;
1264 
1265     ret = drm_syncobj_array_wait(dev, file_private,
1266                      args, NULL, syncobjs, false);
1267 
1268     drm_syncobj_array_free(syncobjs, args->count_handles);
1269 
1270     return ret;
1271 }
1272 
1273 int
1274 drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
1275                 struct drm_file *file_private)
1276 {
1277     struct drm_syncobj_timeline_wait *args = data;
1278     struct drm_syncobj **syncobjs;
1279     int ret = 0;
1280 
1281     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1282         return -EOPNOTSUPP;
1283 
1284     if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1285                 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1286                 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
1287         return -EINVAL;
1288 
1289     if (args->count_handles == 0)
1290         return -EINVAL;
1291 
1292     ret = drm_syncobj_array_find(file_private,
1293                      u64_to_user_ptr(args->handles),
1294                      args->count_handles,
1295                      &syncobjs);
1296     if (ret < 0)
1297         return ret;
1298 
1299     ret = drm_syncobj_array_wait(dev, file_private,
1300                      NULL, args, syncobjs, true);
1301 
1302     drm_syncobj_array_free(syncobjs, args->count_handles);
1303 
1304     return ret;
1305 }
1306 
1307 
1308 int
1309 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1310             struct drm_file *file_private)
1311 {
1312     struct drm_syncobj_array *args = data;
1313     struct drm_syncobj **syncobjs;
1314     uint32_t i;
1315     int ret;
1316 
1317     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1318         return -EOPNOTSUPP;
1319 
1320     if (args->pad != 0)
1321         return -EINVAL;
1322 
1323     if (args->count_handles == 0)
1324         return -EINVAL;
1325 
1326     ret = drm_syncobj_array_find(file_private,
1327                      u64_to_user_ptr(args->handles),
1328                      args->count_handles,
1329                      &syncobjs);
1330     if (ret < 0)
1331         return ret;
1332 
1333     for (i = 0; i < args->count_handles; i++)
1334         drm_syncobj_replace_fence(syncobjs[i], NULL);
1335 
1336     drm_syncobj_array_free(syncobjs, args->count_handles);
1337 
1338     return 0;
1339 }
1340 
1341 int
1342 drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1343              struct drm_file *file_private)
1344 {
1345     struct drm_syncobj_array *args = data;
1346     struct drm_syncobj **syncobjs;
1347     uint32_t i;
1348     int ret;
1349 
1350     if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1351         return -EOPNOTSUPP;
1352 
1353     if (args->pad != 0)
1354         return -EINVAL;
1355 
1356     if (args->count_handles == 0)
1357         return -EINVAL;
1358 
1359     ret = drm_syncobj_array_find(file_private,
1360                      u64_to_user_ptr(args->handles),
1361                      args->count_handles,
1362                      &syncobjs);
1363     if (ret < 0)
1364         return ret;
1365 
1366     for (i = 0; i < args->count_handles; i++) {
1367         ret = drm_syncobj_assign_null_handle(syncobjs[i]);
1368         if (ret < 0)
1369             break;
1370     }
1371 
1372     drm_syncobj_array_free(syncobjs, args->count_handles);
1373 
1374     return ret;
1375 }
1376 
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_private)
{
    struct drm_syncobj_timeline_array *args = data;
    struct drm_syncobj **syncobjs;
    struct dma_fence_chain **chains;
    uint64_t *points;
    uint32_t i, j;
    int ret;

    if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
        return -EOPNOTSUPP;

    /* No flags are defined for this ioctl. */
    if (args->flags != 0)
        return -EINVAL;

    if (args->count_handles == 0)
        return -EINVAL;

    /* Resolve all handles; each array entry holds a reference. */
    ret = drm_syncobj_array_find(file_private,
                     u64_to_user_ptr(args->handles),
                     args->count_handles,
                     &syncobjs);
    if (ret < 0)
        return ret;

    points = kmalloc_array(args->count_handles, sizeof(*points),
                   GFP_KERNEL);
    if (!points) {
        ret = -ENOMEM;
        goto out;
    }
    /* A NULL points array signals every syncobj at point 0. */
    if (!u64_to_user_ptr(args->points)) {
        memset(points, 0, args->count_handles * sizeof(uint64_t));
    } else if (copy_from_user(points, u64_to_user_ptr(args->points),
                  sizeof(uint64_t) * args->count_handles)) {
        ret = -EFAULT;
        goto err_points;
    }

    /*
     * Pre-allocate every chain node so that, once we start adding
     * points, nothing can fail and leave the timelines half-updated.
     */
    chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
    if (!chains) {
        ret = -ENOMEM;
        goto err_points;
    }
    for (i = 0; i < args->count_handles; i++) {
        chains[i] = dma_fence_chain_alloc();
        if (!chains[i]) {
            /* Free only the nodes allocated so far. */
            for (j = 0; j < i; j++)
                dma_fence_chain_free(chains[j]);
            ret = -ENOMEM;
            goto err_chains;
        }
    }

    for (i = 0; i < args->count_handles; i++) {
        /* The stub fence is already signalled; adding it signals the point. */
        struct dma_fence *fence = dma_fence_get_stub();

        drm_syncobj_add_point(syncobjs[i], chains[i],
                      fence, points[i]);
        dma_fence_put(fence);
    }
err_chains:
    /* On success the chain nodes now belong to the syncobjs; only the
     * pointer array itself is freed here.
     */
    kfree(chains);
err_points:
    kfree(points);
out:
    drm_syncobj_array_free(syncobjs, args->count_handles);

    return ret;
}
1449 
/*
 * Report, for each handle, either the last signalled timeline point or
 * (with LAST_SUBMITTED) the most recently submitted point.  Results are
 * written back to the user array at args->points.
 */
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
                struct drm_file *file_private)
{
    struct drm_syncobj_timeline_array *args = data;
    struct drm_syncobj **syncobjs;
    uint64_t __user *points = u64_to_user_ptr(args->points);
    uint32_t i;
    int ret;

    if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
        return -EOPNOTSUPP;

    if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
        return -EINVAL;

    if (args->count_handles == 0)
        return -EINVAL;

    ret = drm_syncobj_array_find(file_private,
                     u64_to_user_ptr(args->handles),
                     args->count_handles,
                     &syncobjs);
    if (ret < 0)
        return ret;

    for (i = 0; i < args->count_handles; i++) {
        struct dma_fence_chain *chain;
        struct dma_fence *fence;
        uint64_t point;

        fence = drm_syncobj_fence_get(syncobjs[i]);
        /* Non-chain fences mean this is not a timeline: report 0. */
        chain = to_dma_fence_chain(fence);
        if (chain) {
            struct dma_fence *iter, *last_signaled =
                dma_fence_get(fence);

            if (args->flags &
                DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
                /* Head of the chain carries the newest point. */
                point = fence->seqno;
            } else {
                /* Walk towards older points, tracking the last
                 * node still on this timeline's context.
                 */
                dma_fence_chain_for_each(iter, fence) {
                    if (iter->context != fence->context) {
                        dma_fence_put(iter);
                        /* It is most likely that timeline has
                        * unordered points. */
                        break;
                    }
                    dma_fence_put(last_signaled);
                    last_signaled = dma_fence_get(iter);
                }
                point = dma_fence_is_signaled(last_signaled) ?
                    last_signaled->seqno :
                    to_dma_fence_chain(last_signaled)->prev_seqno;
            }
            dma_fence_put(last_signaled);
        } else {
            point = 0;
        }
        dma_fence_put(fence);
        /* copy_to_user returns bytes not copied; fold into -EFAULT. */
        ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
        ret = ret ? -EFAULT : 0;
        if (ret)
            break;
    }
    drm_syncobj_array_free(syncobjs, args->count_handles);

    return ret;
}