0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Fence mechanism for dma-buf and to allow for asynchronous dma access
0004  *
0005  * Copyright (C) 2012 Canonical Ltd
0006  * Copyright (C) 2012 Texas Instruments
0007  *
0008  * Authors:
0009  * Rob Clark <robdclark@gmail.com>
0010  * Maarten Lankhorst <maarten.lankhorst@canonical.com>
0011  */
0012 
0013 #include <linux/slab.h>
0014 #include <linux/export.h>
0015 #include <linux/atomic.h>
0016 #include <linux/dma-fence.h>
0017 #include <linux/sched/signal.h>
0018 #include <linux/seq_file.h>
0019 
0020 #define CREATE_TRACE_POINTS
0021 #include <trace/events/dma_fence.h>
0022 
0023 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
0024 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
0025 EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
0026 
0027 static DEFINE_SPINLOCK(dma_fence_stub_lock);
0028 static struct dma_fence dma_fence_stub;
0029 
0030 /*
0031  * fence context counter: each execution context should have its own
0032  * fence context, this allows checking if fences belong to the same
0033  * context or not. One device can have multiple separate contexts,
0034  * and they're used if some engine can run independently of another.
0035  */
0036 static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
0037 
0038 /**
0039  * DOC: DMA fences overview
0040  *
0041  * DMA fences, represented by &struct dma_fence, are the kernel internal
0042  * synchronization primitive for DMA operations like GPU rendering, video
0043  * encoding/decoding, or displaying buffers on a screen.
0044  *
0045  * A fence is initialized using dma_fence_init() and completed using
0046  * dma_fence_signal(). Fences are associated with a context, allocated through
0047  * dma_fence_context_alloc(), and all fences on the same context are
0048  * fully ordered.
0049  *
0050  * Since the purpose of fences is to facilitate cross-device and
0051  * cross-application synchronization, there are multiple ways to use one:
0052  *
0053  * - Individual fences can be exposed as a &sync_file, accessed as a file
0054  *   descriptor from userspace, created by calling sync_file_create(). This is
0055  *   called explicit fencing, since userspace passes around explicit
0056  *   synchronization points.
0057  *
0058  * - Some subsystems also have their own explicit fencing primitives, like
0059  *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
0060  *   fence to be updated.
0061  *
0062  * - Then there's also implicit fencing, where the synchronization points are
0063  *   implicitly passed around as part of shared &dma_buf instances. Such
0064  *   implicit fences are stored in &struct dma_resv through the
0065  *   &dma_buf.resv pointer.
0066  */
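As a rough illustration of the explicit-fencing path mentioned above, a driver ioctl might hand an in-kernel fence to userspace as a sync_file file descriptor. A minimal sketch follows; my_export_fence_fd() is a hypothetical helper, while sync_file_create() is the call named in the overview and the fd handling uses the usual get_unused_fd_flags()/fd_install() pattern.

#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <linux/file.h>
#include <linux/fcntl.h>

/* Hypothetical helper: expose @fence to userspace as a sync_file fd. */
static int my_export_fence_fd(struct dma_fence *fence)
{
        struct sync_file *sync_file;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        sync_file = sync_file_create(fence);   /* takes its own fence reference */
        if (!sync_file) {
                put_unused_fd(fd);
                return -ENOMEM;
        }

        fd_install(fd, sync_file->file);
        return fd;
}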
0067 
0068 /**
0069  * DOC: fence cross-driver contract
0070  *
0071  * Since &dma_fence provides a cross-driver contract, all drivers must follow the
0072  * same rules:
0073  *
0074  * * Fences must complete in a reasonable time. Fences which represent kernels
0075  *   and shaders submitted by userspace, which could run forever, must be backed
0076  *   up by timeout and gpu hang recovery code. Minimally that code must prevent
0077  *   further command submission and force complete all in-flight fences, e.g.
0078  *   when the driver or hardware do not support gpu reset, or if the gpu reset
0079  *   failed for some reason. Ideally the driver supports gpu recovery which only
0080  *   affects the offending userspace context, and no other userspace
0081  *   submissions.
0082  *
0083  * * Drivers may have different ideas of what completion within a reasonable
0084  *   time means. Some hang recovery code uses a fixed timeout, others a mix
0085  *   between observing forward progress and increasingly strict timeouts.
0086  *   Drivers should not try to second guess timeout handling of fences from
0087  *   other drivers.
0088  *
0089  * * To ensure there's no deadlocks of dma_fence_wait() against other locks
0090  *   drivers should annotate all code required to reach dma_fence_signal(),
0091  *   which completes the fences, with dma_fence_begin_signalling() and
0092  *   dma_fence_end_signalling().
0093  *
0094  * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
0095  *   This means any code required for fence completion cannot acquire a
0096  *   &dma_resv lock. Note that this also pulls in the entire established
0097  *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
0098  *
0099  * * Drivers are allowed to call dma_fence_wait() from their &shrinker
0100  *   callbacks. This means any code required for fence completion cannot
0101  *   allocate memory with GFP_KERNEL.
0102  *
0103  * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
0104  *   and &mmu_interval_notifier callbacks respectively. This means any code
0105  *   required for fence completion cannot allocate memory with GFP_NOFS or
0106  *   GFP_NOIO. Only GFP_ATOMIC is permissible, which might fail.
0107  *
0108  * Note that only GPU drivers have a reasonable excuse for both requiring
0109  * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
0110  * track asynchronous compute work using &dma_fence. No driver outside of
0111  * drivers/gpu should ever call dma_fence_wait() in such contexts.
0112  */
0113 
0114 static const char *dma_fence_stub_get_name(struct dma_fence *fence)
0115 {
0116         return "stub";
0117 }
0118 
0119 static const struct dma_fence_ops dma_fence_stub_ops = {
0120     .get_driver_name = dma_fence_stub_get_name,
0121     .get_timeline_name = dma_fence_stub_get_name,
0122 };
0123 
0124 /**
0125  * dma_fence_get_stub - return a signaled fence
0126  *
0127  * Return a stub fence which is already signaled. The fence's
0128  * timestamp corresponds to the first time after boot this
0129  * function is called.
0130  */
0131 struct dma_fence *dma_fence_get_stub(void)
0132 {
0133     spin_lock(&dma_fence_stub_lock);
0134     if (!dma_fence_stub.ops) {
0135         dma_fence_init(&dma_fence_stub,
0136                    &dma_fence_stub_ops,
0137                    &dma_fence_stub_lock,
0138                    0, 0);
0139         dma_fence_signal_locked(&dma_fence_stub);
0140     }
0141     spin_unlock(&dma_fence_stub_lock);
0142 
0143     return dma_fence_get(&dma_fence_stub);
0144 }
0145 EXPORT_SYMBOL(dma_fence_get_stub);
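One place an already-signaled stub fence is handy is when a caller asks for "the last fence" on an engine that has never run anything. A minimal sketch, with a hypothetical struct my_engine and my_engine_last_fence() helper:

/* Hypothetical: hand back the engine's last fence, or a signaled stub. */
struct my_engine {
        struct dma_fence *last_fence;   /* protected by the caller's locking */
};

static struct dma_fence *my_engine_last_fence(struct my_engine *engine)
{
        if (engine->last_fence)
                return dma_fence_get(engine->last_fence);
        return dma_fence_get_stub();
}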
0146 
0147 /**
0148  * dma_fence_allocate_private_stub - return a private, signaled fence
0149  *
0150  * Return a newly allocated and signaled stub fence.
0151  */
0152 struct dma_fence *dma_fence_allocate_private_stub(void)
0153 {
0154     struct dma_fence *fence;
0155 
0156     fence = kzalloc(sizeof(*fence), GFP_KERNEL);
0157     if (fence == NULL)
0158         return ERR_PTR(-ENOMEM);
0159 
0160     dma_fence_init(fence,
0161                &dma_fence_stub_ops,
0162                &dma_fence_stub_lock,
0163                0, 0);
0164     dma_fence_signal(fence);
0165 
0166     return fence;
0167 }
0168 EXPORT_SYMBOL(dma_fence_allocate_private_stub);
0169 
0170 /**
0171  * dma_fence_context_alloc - allocate an array of fence contexts
0172  * @num: amount of contexts to allocate
0173  *
0174  * This function returns the first index of the newly allocated range of fence
0175  * contexts.  The fence context is used for setting &dma_fence.context to a
0176  * unique number by passing the context to dma_fence_init().
0177  */
0178 u64 dma_fence_context_alloc(unsigned num)
0179 {
0180     WARN_ON(!num);
0181     return atomic64_fetch_add(num, &dma_fence_context_counter);
0182 }
0183 EXPORT_SYMBOL(dma_fence_context_alloc);
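A typical pattern is to allocate one context per independently scheduled engine at probe time and then use a per-engine, monotonically increasing seqno. A minimal sketch, assuming a hypothetical struct my_device:

#include <linux/dma-fence.h>

/* Hypothetical device with several independently scheduled engines. */
struct my_device {
        u64 fence_context;              /* first of num_engines contexts */
        unsigned int num_engines;
        u64 next_seqno[4];              /* one monotonic seqno per engine */
};

static void my_device_init_fencing(struct my_device *mdev)
{
        mdev->num_engines = 4;
        mdev->fence_context = dma_fence_context_alloc(mdev->num_engines);
        /*
         * Engine i later passes mdev->fence_context + i together with
         * ++mdev->next_seqno[i] to dma_fence_init(), so all fences of one
         * engine share a context and are totally ordered by seqno.
         */
}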
0184 
0185 /**
0186  * DOC: fence signalling annotation
0187  *
0188  * Proving correctness of all the kernel code around &dma_fence through code
0189  * review and testing is tricky for a few reasons:
0190  *
0191  * * It is a cross-driver contract, and therefore all drivers must follow the
0192  *   same rules for lock nesting order, calling contexts for various functions
0193  *   and anything else significant for in-kernel interfaces. But it is also
0194  *   impossible to test all drivers in a single machine, hence brute-force N vs.
0195  *   N testing of all combinations is impossible. Even just limiting to the
0196  *   possible combinations is infeasible.
0197  *
0198  * * There is an enormous amount of driver code involved. For render drivers
0199  *   there's the tail of command submission, after fences are published,
0200  *   scheduler code, interrupt and workers to process job completion,
0201  *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
0202  *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
0203  *   and &shrinker. For modesetting drivers there's the commit tail functions
0204  *   between when fences for an atomic modeset are published, and when the
0205  *   corresponding vblank completes, including any interrupt processing and
0206  *   related workers. Auditing all that code, across all drivers, is not
0207  *   feasible.
0208  *
0209  * * Due to how many other subsystems are involved and the locking hierarchies
0210  *   this pulls in there is extremely thin wiggle-room for driver-specific
0211  *   differences. &dma_fence interacts with almost all of the core memory
0212  *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
0213  *   dma_resv_unlock(). On the other side it also interacts with all
0214  *   allocation sites through &mmu_notifier and &shrinker.
0215  *
0216  * Furthermore lockdep does not handle cross-release dependencies, which means
0217  * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
0218  * at runtime with some quick testing. The simplest example is one thread
0219  * waiting on a &dma_fence while holding a lock::
0220  *
0221  *     lock(A);
0222  *     dma_fence_wait(B);
0223  *     unlock(A);
0224  *
0225  * while the other thread is stuck trying to acquire the same lock, which
0226  * prevents it from signalling the fence the previous thread is stuck waiting
0227  * on::
0228  *
0229  *     lock(A);
0230  *     unlock(A);
0231  *     dma_fence_signal(B);
0232  *
0233  * By manually annotating all code relevant to signalling a &dma_fence we can
0234  * teach lockdep about these dependencies, which also helps with the validation
0235  * headache since now lockdep can check all the rules for us::
0236  *
0237  *    cookie = dma_fence_begin_signalling();
0238  *    lock(A);
0239  *    unlock(A);
0240  *    dma_fence_signal(B);
0241  *    dma_fence_end_signalling(cookie);
0242  *
0243  * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
0244  * annotate critical sections the following rules need to be observed:
0245  *
0246  * * All code necessary to complete a &dma_fence must be annotated, from the
0247  *   point where a fence is accessible to other threads, to the point where
0248  *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
0249  *   and due to the very strict rules and many corner cases it is infeasible to
0250  *   catch these just with review or normal stress testing.
0251  *
0252  * * &struct dma_resv deserves a special note, since the readers are only
0253  *   protected by rcu. This means the signalling critical section starts as soon
0254  *   as the new fences are installed, even before dma_resv_unlock() is called.
0255  *
0256  * * The only exceptions are fast paths and opportunistic signalling code, which
0257  *   call dma_fence_signal() purely as an optimization, but are not required to
0258  *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
0259  *   which calls dma_fence_signal(), while the mandatory completion path goes
0260  *   through a hardware interrupt and possible job completion worker.
0261  *
0262  * * To aid composability of code, the annotations can be freely nested, as long
0263  *   as the overall locking hierarchy is consistent. The annotations also work
0264  *   both in interrupt and process context. Due to implementation details this
0265  *   requires that callers pass an opaque cookie from
0266  *   dma_fence_begin_signalling() to dma_fence_end_signalling().
0267  *
0268  * * Validation against the cross driver contract is implemented by priming
0269  *   lockdep with the relevant hierarchy at boot-up. This means even just
0270  *   testing with a single device is enough to validate a driver, at least as
0271  *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
0272  *   concerned.
0273  */
0274 #ifdef CONFIG_LOCKDEP
0275 static struct lockdep_map dma_fence_lockdep_map = {
0276     .name = "dma_fence_map"
0277 };
0278 
0279 /**
0280  * dma_fence_begin_signalling - begin a critical DMA fence signalling section
0281  *
0282  * Drivers should use this to annotate the beginning of any code section
0283  * required to eventually complete &dma_fence by calling dma_fence_signal().
0284  *
0285  * The end of these critical sections is annotated with
0286  * dma_fence_end_signalling().
0287  *
0288  * Returns:
0289  *
0290  * Opaque cookie needed by the implementation, which needs to be passed to
0291  * dma_fence_end_signalling().
0292  */
0293 bool dma_fence_begin_signalling(void)
0294 {
0295     /* explicitly nesting ... */
0296     if (lock_is_held_type(&dma_fence_lockdep_map, 1))
0297         return true;
0298 
0299     /* rely on might_sleep check for soft/hardirq locks */
0300     if (in_atomic())
0301         return true;
0302 
0303     /* ... and non-recursive readlock */
0304     lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
0305 
0306     return false;
0307 }
0308 EXPORT_SYMBOL(dma_fence_begin_signalling);
0309 
0310 /**
0311  * dma_fence_end_signalling - end a critical DMA fence signalling section
0312  * @cookie: opaque cookie from dma_fence_begin_signalling()
0313  *
0314  * Closes a critical section annotation opened by dma_fence_begin_signalling().
0315  */
0316 void dma_fence_end_signalling(bool cookie)
0317 {
0318     if (cookie)
0319         return;
0320 
0321     lock_release(&dma_fence_lockdep_map, _RET_IP_);
0322 }
0323 EXPORT_SYMBOL(dma_fence_end_signalling);
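In practice the annotations wrap the code that completes a published fence, for example a job-done worker. A minimal sketch, assuming a hypothetical struct my_job whose fence was made visible to other threads at submission time:

#include <linux/dma-fence.h>
#include <linux/workqueue.h>

/* Hypothetical job structure; job->fence was published at submit time. */
struct my_job {
        struct work_struct done_work;
        struct dma_fence *fence;
};

static void my_job_done_worker(struct work_struct *work)
{
        struct my_job *job = container_of(work, struct my_job, done_work);
        bool cookie;

        cookie = dma_fence_begin_signalling();
        /*
         * Everything between the annotations is part of the fence-signalling
         * critical section: no GFP_KERNEL allocations, no dma_resv_lock().
         */
        dma_fence_signal(job->fence);
        dma_fence_end_signalling(cookie);
}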
0324 
0325 void __dma_fence_might_wait(void)
0326 {
0327     bool tmp;
0328 
0329     tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
0330     if (tmp)
0331         lock_release(&dma_fence_lockdep_map, _THIS_IP_);
0332     lock_map_acquire(&dma_fence_lockdep_map);
0333     lock_map_release(&dma_fence_lockdep_map);
0334     if (tmp)
0335         lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
0336 }
0337 #endif
0338 
0339 
0340 /**
0341  * dma_fence_signal_timestamp_locked - signal completion of a fence
0342  * @fence: the fence to signal
0343  * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
0344  *
0345  * Signal completion for software callbacks on a fence. This will unblock
0346  * dma_fence_wait() calls and run all the callbacks added with
0347  * dma_fence_add_callback(). Can be called multiple times, but since a fence
0348  * can only go from the unsignaled to the signaled state and not back, it will
0349  * only be effective the first time. Set the timestamp provided as the fence
0350  * signal timestamp.
0351  *
0352  * Unlike dma_fence_signal_timestamp(), this function must be called with
0353  * &dma_fence.lock held.
0354  *
0355  * Returns 0 on success and a negative error value when @fence has been
0356  * signalled already.
0357  */
0358 int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
0359                       ktime_t timestamp)
0360 {
0361     struct dma_fence_cb *cur, *tmp;
0362     struct list_head cb_list;
0363 
0364     lockdep_assert_held(fence->lock);
0365 
0366     if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
0367                       &fence->flags)))
0368         return -EINVAL;
0369 
0370     /* Stash the cb_list before replacing it with the timestamp */
0371     list_replace(&fence->cb_list, &cb_list);
0372 
0373     fence->timestamp = timestamp;
0374     set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
0375     trace_dma_fence_signaled(fence);
0376 
0377     list_for_each_entry_safe(cur, tmp, &cb_list, node) {
0378         INIT_LIST_HEAD(&cur->node);
0379         cur->func(fence, cur);
0380     }
0381 
0382     return 0;
0383 }
0384 EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
0385 
0386 /**
0387  * dma_fence_signal_timestamp - signal completion of a fence
0388  * @fence: the fence to signal
0389  * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
0390  *
0391  * Signal completion for software callbacks on a fence. This will unblock
0392  * dma_fence_wait() calls and run all the callbacks added with
0393  * dma_fence_add_callback(). Can be called multiple times, but since a fence
0394  * can only go from the unsignaled to the signaled state and not back, it will
0395  * only be effective the first time. Set the timestamp provided as the fence
0396  * signal timestamp.
0397  *
0398  * Returns 0 on success and a negative error value when @fence has been
0399  * signalled already.
0400  */
0401 int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
0402 {
0403     unsigned long flags;
0404     int ret;
0405 
0406     if (!fence)
0407         return -EINVAL;
0408 
0409     spin_lock_irqsave(fence->lock, flags);
0410     ret = dma_fence_signal_timestamp_locked(fence, timestamp);
0411     spin_unlock_irqrestore(fence->lock, flags);
0412 
0413     return ret;
0414 }
0415 EXPORT_SYMBOL(dma_fence_signal_timestamp);
0416 
0417 /**
0418  * dma_fence_signal_locked - signal completion of a fence
0419  * @fence: the fence to signal
0420  *
0421  * Signal completion for software callbacks on a fence. This will unblock
0422  * dma_fence_wait() calls and run all the callbacks added with
0423  * dma_fence_add_callback(). Can be called multiple times, but since a fence
0424  * can only go from the unsignaled to the signaled state and not back, it will
0425  * only be effective the first time.
0426  *
0427  * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
0428  * held.
0429  *
0430  * Returns 0 on success and a negative error value when @fence has been
0431  * signalled already.
0432  */
0433 int dma_fence_signal_locked(struct dma_fence *fence)
0434 {
0435     return dma_fence_signal_timestamp_locked(fence, ktime_get());
0436 }
0437 EXPORT_SYMBOL(dma_fence_signal_locked);
0438 
0439 /**
0440  * dma_fence_signal - signal completion of a fence
0441  * @fence: the fence to signal
0442  *
0443  * Signal completion for software callbacks on a fence. This will unblock
0444  * dma_fence_wait() calls and run all the callbacks added with
0445  * dma_fence_add_callback(). Can be called multiple times, but since a fence
0446  * can only go from the unsignaled to the signaled state and not back, it will
0447  * only be effective the first time.
0448  *
0449  * Returns 0 on success and a negative error value when @fence has been
0450  * signalled already.
0451  */
0452 int dma_fence_signal(struct dma_fence *fence)
0453 {
0454     unsigned long flags;
0455     int ret;
0456     bool tmp;
0457 
0458     if (!fence)
0459         return -EINVAL;
0460 
0461     tmp = dma_fence_begin_signalling();
0462 
0463     spin_lock_irqsave(fence->lock, flags);
0464     ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
0465     spin_unlock_irqrestore(fence->lock, flags);
0466 
0467     dma_fence_end_signalling(tmp);
0468 
0469     return ret;
0470 }
0471 EXPORT_SYMBOL(dma_fence_signal);
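Because the fence lock is taken with spin_lock_irqsave() internally, fences are routinely signaled straight from the hardware interrupt that reports job completion. A minimal sketch, with a hypothetical struct my_engine_irq and handler:

#include <linux/dma-fence.h>
#include <linux/interrupt.h>

/* Hypothetical engine state; active_fence belongs to the running job. */
struct my_engine_irq {
        struct dma_fence *active_fence;
};

static irqreturn_t my_job_done_irq(int irq, void *data)
{
        struct my_engine_irq *engine = data;

        /*
         * Safe from hard-irq context; signalling an already-signaled
         * fence simply returns -EINVAL and does nothing.
         */
        dma_fence_signal(engine->active_fence);
        return IRQ_HANDLED;
}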
0472 
0473 /**
0474  * dma_fence_wait_timeout - sleep until the fence gets signaled
0475  * or until timeout elapses
0476  * @fence: the fence to wait on
0477  * @intr: if true, do an interruptible wait
0478  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
0479  *
0480  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
0481  * remaining timeout in jiffies on success. Other error values may be
0482  * returned on custom implementations.
0483  *
0484  * Performs a synchronous wait on this fence. It is assumed the caller
0485  * directly or indirectly (buf-mgr between reservation and committing)
0486  * holds a reference to the fence, otherwise the fence might be
0487  * freed before return, resulting in undefined behavior.
0488  *
0489  * See also dma_fence_wait() and dma_fence_wait_any_timeout().
0490  */
0491 signed long
0492 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
0493 {
0494     signed long ret;
0495 
0496     if (WARN_ON(timeout < 0))
0497         return -EINVAL;
0498 
0499     might_sleep();
0500 
0501     __dma_fence_might_wait();
0502 
0503     trace_dma_fence_wait_start(fence);
0504     if (fence->ops->wait)
0505         ret = fence->ops->wait(fence, intr, timeout);
0506     else
0507         ret = dma_fence_default_wait(fence, intr, timeout);
0508     trace_dma_fence_wait_end(fence);
0509     return ret;
0510 }
0511 EXPORT_SYMBOL(dma_fence_wait_timeout);
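The three-way return value (negative error, zero on timeout, remaining jiffies on success) is easy to mishandle. A minimal sketch of a bounded, interruptible wait, assuming a hypothetical my_wait_for_fence() helper and a 100 ms budget:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int my_wait_for_fence(struct dma_fence *fence)
{
        signed long ret;

        ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
        if (ret == 0)
                return -ETIMEDOUT;      /* wait timed out */
        if (ret < 0)
                return ret;             /* e.g. -ERESTARTSYS when interrupted */
        return 0;                       /* signaled; ret held the remaining jiffies */
}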
0512 
0513 /**
0514  * dma_fence_release - default release function for fences
0515  * @kref: &dma_fence.refcount
0516  *
0517  * This is the default release function for &dma_fence. Drivers shouldn't call
0518  * this directly, but instead call dma_fence_put().
0519  */
0520 void dma_fence_release(struct kref *kref)
0521 {
0522     struct dma_fence *fence =
0523         container_of(kref, struct dma_fence, refcount);
0524 
0525     trace_dma_fence_destroy(fence);
0526 
0527     if (WARN(!list_empty(&fence->cb_list) &&
0528          !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
0529          "Fence %s:%s:%llx:%llx released with pending signals!\n",
0530          fence->ops->get_driver_name(fence),
0531          fence->ops->get_timeline_name(fence),
0532          fence->context, fence->seqno)) {
0533         unsigned long flags;
0534 
0535         /*
0536          * Failed to signal before release, likely a refcounting issue.
0537          *
0538          * This should never happen, but if it does make sure that we
0539          * don't leave chains dangling. We set the error flag first
0540          * so that the callbacks know this signal is due to an error.
0541          */
0542         spin_lock_irqsave(fence->lock, flags);
0543         fence->error = -EDEADLK;
0544         dma_fence_signal_locked(fence);
0545         spin_unlock_irqrestore(fence->lock, flags);
0546     }
0547 
0548     if (fence->ops->release)
0549         fence->ops->release(fence);
0550     else
0551         dma_fence_free(fence);
0552 }
0553 EXPORT_SYMBOL(dma_fence_release);
0554 
0555 /**
0556  * dma_fence_free - default release function for &dma_fence.
0557  * @fence: fence to release
0558  *
0559  * This is the default implementation for &dma_fence_ops.release. It calls
0560  * kfree_rcu() on @fence.
0561  */
0562 void dma_fence_free(struct dma_fence *fence)
0563 {
0564     kfree_rcu(fence, rcu);
0565 }
0566 EXPORT_SYMBOL(dma_fence_free);
0567 
0568 static bool __dma_fence_enable_signaling(struct dma_fence *fence)
0569 {
0570     bool was_set;
0571 
0572     lockdep_assert_held(fence->lock);
0573 
0574     was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
0575                    &fence->flags);
0576 
0577     if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
0578         return false;
0579 
0580     if (!was_set && fence->ops->enable_signaling) {
0581         trace_dma_fence_enable_signal(fence);
0582 
0583         if (!fence->ops->enable_signaling(fence)) {
0584             dma_fence_signal_locked(fence);
0585             return false;
0586         }
0587     }
0588 
0589     return true;
0590 }
0591 
0592 /**
0593  * dma_fence_enable_sw_signaling - enable signaling on fence
0594  * @fence: the fence to enable
0595  *
0596  * This requests that sw signaling be enabled, to make the fence
0597  * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
0598  * internally.
0599  */
0600 void dma_fence_enable_sw_signaling(struct dma_fence *fence)
0601 {
0602     unsigned long flags;
0603 
0604     if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
0605         return;
0606 
0607     spin_lock_irqsave(fence->lock, flags);
0608     __dma_fence_enable_signaling(fence);
0609     spin_unlock_irqrestore(fence->lock, flags);
0610 }
0611 EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
0612 
0613 /**
0614  * dma_fence_add_callback - add a callback to be called when the fence
0615  * is signaled
0616  * @fence: the fence to wait on
0617  * @cb: the callback to register
0618  * @func: the function to call
0619  *
0620  * Add a software callback to the fence. The caller should keep a reference to
0621  * the fence.
0622  *
0623  * @cb will be initialized by dma_fence_add_callback(), no initialization
0624  * by the caller is required. Any number of callbacks can be registered
0625  * to a fence, but a callback can only be registered to one fence at a time.
0626  *
0627  * If fence is already signaled, this function will return -ENOENT (and
0628  * *not* call the callback).
0629  *
0630  * Note that the callback can be called from an atomic context or irq context.
0631  *
0632  * Returns 0 in case of success, -ENOENT if the fence is already signaled
0633  * and -EINVAL in case of error.
0634  */
0635 int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
0636                dma_fence_func_t func)
0637 {
0638     unsigned long flags;
0639     int ret = 0;
0640 
0641     if (WARN_ON(!fence || !func))
0642         return -EINVAL;
0643 
0644     if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
0645         INIT_LIST_HEAD(&cb->node);
0646         return -ENOENT;
0647     }
0648 
0649     spin_lock_irqsave(fence->lock, flags);
0650 
0651     if (__dma_fence_enable_signaling(fence)) {
0652         cb->func = func;
0653         list_add_tail(&cb->node, &fence->cb_list);
0654     } else {
0655         INIT_LIST_HEAD(&cb->node);
0656         ret = -ENOENT;
0657     }
0658 
0659     spin_unlock_irqrestore(fence->lock, flags);
0660 
0661     return ret;
0662 }
0663 EXPORT_SYMBOL(dma_fence_add_callback);
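Callers typically embed the &struct dma_fence_cb in their own structure and recover it with container_of() in the callback, handling -ENOENT as "already signaled". A minimal sketch using a completion, with hypothetical my_* names:

#include <linux/dma-fence.h>
#include <linux/completion.h>

/* Hypothetical waiter embedding the dma_fence_cb. */
struct my_waiter {
        struct dma_fence_cb cb;
        struct completion done;
};

static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct my_waiter *waiter = container_of(cb, struct my_waiter, cb);

        complete(&waiter->done);        /* may be called from irq context */
}

static void my_wait_via_callback(struct dma_fence *fence)
{
        struct my_waiter waiter;

        init_completion(&waiter.done);
        if (dma_fence_add_callback(fence, &waiter.cb, my_fence_cb))
                return;                 /* -ENOENT: fence already signaled */
        wait_for_completion(&waiter.done);
}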
0664 
0665 /**
0666  * dma_fence_get_status - returns the status upon completion
0667  * @fence: the dma_fence to query
0668  *
0669  * This wraps dma_fence_get_status_locked() to return the error status
0670  * condition on a signaled fence. See dma_fence_get_status_locked() for more
0671  * details.
0672  *
0673  * Returns 0 if the fence has not yet been signaled, 1 if the fence has
0674  * been signaled without an error condition, or a negative error code
0675  * if the fence has been completed in err.
0676  */
0677 int dma_fence_get_status(struct dma_fence *fence)
0678 {
0679     unsigned long flags;
0680     int status;
0681 
0682     spin_lock_irqsave(fence->lock, flags);
0683     status = dma_fence_get_status_locked(fence);
0684     spin_unlock_irqrestore(fence->lock, flags);
0685 
0686     return status;
0687 }
0688 EXPORT_SYMBOL(dma_fence_get_status);
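After a wait succeeds, a fence may still carry an error set by reset or timeout handling via dma_fence_set_error(). A minimal sketch of mapping the status, with a hypothetical my_job_result() helper (the -EBUSY mapping for "not yet signaled" is an arbitrary choice here):

#include <linux/dma-fence.h>
#include <linux/errno.h>

static int my_job_result(struct dma_fence *fence)
{
        int status = dma_fence_get_status(fence);

        if (status < 0)
                return status;  /* completed, but with an error code */
        if (status == 1)
                return 0;       /* completed successfully */
        return -EBUSY;          /* not signaled yet */
}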
0689 
0690 /**
0691  * dma_fence_remove_callback - remove a callback from the signaling list
0692  * @fence: the fence to wait on
0693  * @cb: the callback to remove
0694  *
0695  * Remove a previously queued callback from the fence. This function returns
0696  * true if the callback is successfully removed, or false if the fence has
0697  * already been signaled.
0698  *
0699  * *WARNING*:
0700  * Cancelling a callback should only be done if you really know what you're
0701  * doing, since deadlocks and race conditions could occur all too easily. For
0702  * this reason, it should only ever be done on hardware lockup recovery,
0703  * with a reference held to the fence.
0704  *
0705  * Behaviour is undefined if @cb has not been added to @fence using
0706  * dma_fence_add_callback() beforehand.
0707  */
0708 bool
0709 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
0710 {
0711     unsigned long flags;
0712     bool ret;
0713 
0714     spin_lock_irqsave(fence->lock, flags);
0715 
0716     ret = !list_empty(&cb->node);
0717     if (ret)
0718         list_del_init(&cb->node);
0719 
0720     spin_unlock_irqrestore(fence->lock, flags);
0721 
0722     return ret;
0723 }
0724 EXPORT_SYMBOL(dma_fence_remove_callback);
0725 
0726 struct default_wait_cb {
0727     struct dma_fence_cb base;
0728     struct task_struct *task;
0729 };
0730 
0731 static void
0732 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
0733 {
0734     struct default_wait_cb *wait =
0735         container_of(cb, struct default_wait_cb, base);
0736 
0737     wake_up_state(wait->task, TASK_NORMAL);
0738 }
0739 
0740 /**
0741  * dma_fence_default_wait - default sleep until the fence gets signaled
0742  * or until timeout elapses
0743  * @fence: the fence to wait on
0744  * @intr: if true, do an interruptible wait
0745  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
0746  *
0747  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
0748  * remaining timeout in jiffies on success. If @timeout is zero, one is
0749  * returned when the fence is already signaled, for consistency with other
0750  * functions taking a jiffies timeout.
0751  */
0752 signed long
0753 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
0754 {
0755     struct default_wait_cb cb;
0756     unsigned long flags;
0757     signed long ret = timeout ? timeout : 1;
0758 
0759     if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
0760         return ret;
0761 
0762     spin_lock_irqsave(fence->lock, flags);
0763 
0764     if (intr && signal_pending(current)) {
0765         ret = -ERESTARTSYS;
0766         goto out;
0767     }
0768 
0769     if (!__dma_fence_enable_signaling(fence))
0770         goto out;
0771 
0772     if (!timeout) {
0773         ret = 0;
0774         goto out;
0775     }
0776 
0777     cb.base.func = dma_fence_default_wait_cb;
0778     cb.task = current;
0779     list_add(&cb.base.node, &fence->cb_list);
0780 
0781     while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
0782         if (intr)
0783             __set_current_state(TASK_INTERRUPTIBLE);
0784         else
0785             __set_current_state(TASK_UNINTERRUPTIBLE);
0786         spin_unlock_irqrestore(fence->lock, flags);
0787 
0788         ret = schedule_timeout(ret);
0789 
0790         spin_lock_irqsave(fence->lock, flags);
0791         if (ret > 0 && intr && signal_pending(current))
0792             ret = -ERESTARTSYS;
0793     }
0794 
0795     if (!list_empty(&cb.base.node))
0796         list_del(&cb.base.node);
0797     __set_current_state(TASK_RUNNING);
0798 
0799 out:
0800     spin_unlock_irqrestore(fence->lock, flags);
0801     return ret;
0802 }
0803 EXPORT_SYMBOL(dma_fence_default_wait);
0804 
0805 static bool
0806 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
0807                 uint32_t *idx)
0808 {
0809     int i;
0810 
0811     for (i = 0; i < count; ++i) {
0812         struct dma_fence *fence = fences[i];
0813         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
0814             if (idx)
0815                 *idx = i;
0816             return true;
0817         }
0818     }
0819     return false;
0820 }
0821 
0822 /**
0823  * dma_fence_wait_any_timeout - sleep until any fence gets signaled
0824  * or until timeout elapses
0825  * @fences: array of fences to wait on
0826  * @count: number of fences to wait on
0827  * @intr: if true, do an interruptible wait
0828  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
0829  * @idx: used to store the first signaled fence index, meaningful only on
0830  *  positive return
0831  *
0832  * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
0833  * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
0834  * on success.
0835  *
0836  * Synchronously waits for the first fence in the array to be signaled. The
0837  * caller needs to hold a reference to all fences in the array, otherwise a
0838  * fence might be freed before return, resulting in undefined behavior.
0839  *
0840  * See also dma_fence_wait() and dma_fence_wait_timeout().
0841  */
0842 signed long
0843 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
0844                bool intr, signed long timeout, uint32_t *idx)
0845 {
0846     struct default_wait_cb *cb;
0847     signed long ret = timeout;
0848     unsigned i;
0849 
0850     if (WARN_ON(!fences || !count || timeout < 0))
0851         return -EINVAL;
0852 
0853     if (timeout == 0) {
0854         for (i = 0; i < count; ++i)
0855             if (dma_fence_is_signaled(fences[i])) {
0856                 if (idx)
0857                     *idx = i;
0858                 return 1;
0859             }
0860 
0861         return 0;
0862     }
0863 
0864     cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
0865     if (cb == NULL) {
0866         ret = -ENOMEM;
0867         goto err_free_cb;
0868     }
0869 
0870     for (i = 0; i < count; ++i) {
0871         struct dma_fence *fence = fences[i];
0872 
0873         cb[i].task = current;
0874         if (dma_fence_add_callback(fence, &cb[i].base,
0875                        dma_fence_default_wait_cb)) {
0876             /* This fence is already signaled */
0877             if (idx)
0878                 *idx = i;
0879             goto fence_rm_cb;
0880         }
0881     }
0882 
0883     while (ret > 0) {
0884         if (intr)
0885             set_current_state(TASK_INTERRUPTIBLE);
0886         else
0887             set_current_state(TASK_UNINTERRUPTIBLE);
0888 
0889         if (dma_fence_test_signaled_any(fences, count, idx))
0890             break;
0891 
0892         ret = schedule_timeout(ret);
0893 
0894         if (ret > 0 && intr && signal_pending(current))
0895             ret = -ERESTARTSYS;
0896     }
0897 
0898     __set_current_state(TASK_RUNNING);
0899 
0900 fence_rm_cb:
0901     while (i-- > 0)
0902         dma_fence_remove_callback(fences[i], &cb[i].base);
0903 
0904 err_free_cb:
0905     kfree(cb);
0906 
0907     return ret;
0908 }
0909 EXPORT_SYMBOL(dma_fence_wait_any_timeout);
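A minimal sketch of waiting for whichever of several fences signals first, assuming a hypothetical my_wait_for_any() wrapper; @first is only meaningful on a positive return from dma_fence_wait_any_timeout():

#include <linux/dma-fence.h>

static int my_wait_for_any(struct dma_fence **fences, uint32_t count,
                           uint32_t *first)
{
        signed long ret;

        ret = dma_fence_wait_any_timeout(fences, count, false,
                                         MAX_SCHEDULE_TIMEOUT, first);
        if (ret < 0)
                return ret;     /* e.g. -EINVAL or -ENOMEM */
        return 0;               /* *first holds the index that signaled */
}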
0910 
0911 /**
0912  * dma_fence_describe - Dump fence description into seq_file
0913  * @fence: the fence to describe
0914  * @seq: the seq_file to put the textual description into
0915  *
0916  * Dump a textual description of the fence and its state into the seq_file.
0917  */
0918 void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
0919 {
0920     seq_printf(seq, "%s %s seq %llu %ssignalled\n",
0921            fence->ops->get_driver_name(fence),
0922            fence->ops->get_timeline_name(fence), fence->seqno,
0923            dma_fence_is_signaled(fence) ? "" : "un");
0924 }
0925 EXPORT_SYMBOL(dma_fence_describe);
0926 
0927 /**
0928  * dma_fence_init - Initialize a custom fence.
0929  * @fence: the fence to initialize
0930  * @ops: the dma_fence_ops for operations on this fence
0931  * @lock: the irqsafe spinlock to use for locking this fence
0932  * @context: the execution context this fence is run on
0933  * @seqno: a linearly increasing sequence number for this context
0934  *
0935  * Initializes an allocated fence. The caller doesn't have to keep its
0936  * refcount after committing with this fence, but it will need to hold a
0937  * refcount again if &dma_fence_ops.enable_signaling gets called.
0938  *
0939  * context and seqno are used for easy comparison between fences, allowing
0940  * a check of which fence is later by simply using dma_fence_later().
0941  */
0942 void
0943 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
0944            spinlock_t *lock, u64 context, u64 seqno)
0945 {
0946     BUG_ON(!lock);
0947     BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
0948 
0949     kref_init(&fence->refcount);
0950     fence->ops = ops;
0951     INIT_LIST_HEAD(&fence->cb_list);
0952     fence->lock = lock;
0953     fence->context = context;
0954     fence->seqno = seqno;
0955     fence->flags = 0UL;
0956     fence->error = 0;
0957 
0958     trace_dma_fence_init(fence);
0959 }
0960 EXPORT_SYMBOL(dma_fence_init);
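To tie the pieces together, a driver fence usually embeds &struct dma_fence in a larger, driver-private structure and supplies the two mandatory ops checked by the BUG_ON above. A minimal sketch with hypothetical my_* names; the optional .release frees the containing structure:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical driver fence wrapping struct dma_fence. */
struct my_fence {
        struct dma_fence base;
        /* driver-private per-job state would live here */
};

static const char *my_fence_driver_name(struct dma_fence *fence)
{
        return "my_driver";
}

static const char *my_fence_timeline_name(struct dma_fence *fence)
{
        return "my_ring0";
}

static void my_fence_release(struct dma_fence *fence)
{
        struct my_fence *f = container_of(fence, struct my_fence, base);

        /* free driver-private state here, then the fence itself via RCU */
        kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops my_fence_ops = {
        .get_driver_name   = my_fence_driver_name,
        .get_timeline_name = my_fence_timeline_name,
        .release           = my_fence_release,
};

static struct dma_fence *my_fence_create(spinlock_t *lock, u64 context, u64 seqno)
{
        struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;

        dma_fence_init(&f->base, &my_fence_ops, lock, context, seqno);
        return &f->base;
}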