/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _I915_ACTIVE_H_
#define _I915_ACTIVE_H_

#include <linux/lockdep.h>

#include "i915_active_types.h"
#include "i915_request.h"

struct i915_request;
struct intel_engine_cs;
struct intel_timeline;

/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers", but refers to pipeline synchronisation objects a la
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU, for example, we should not rewrite an object's
 * PTEs whilst the GPU is reading them. We also track fences at a higher
 * level to provide implicit synchronisation around GEM objects, e.g.
 * set-domain will wait for outstanding GPU rendering before marking the
 * object ready for CPU access, or a pageflip will wait until the GPU
 * rendering is complete before showing the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_active_fence to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_active_fence is updated with i915_active_fence_set() to
 * track the most recent fence request, typically as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_active_fence completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_active_fence.fence == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);

/**
 * __i915_active_fence_init - prepares the activity tracker for use
 * @active: the active tracker
 * @fence: initial fence to track, can be NULL
 * @fn: a callback invoked when the tracker is retired (becomes idle),
 *      can be NULL
 *
 * __i915_active_fence_init() prepares the embedded @active struct for use
 * as an activity tracker, that is, for tracking the last known active fence
 * associated with it. When the last fence becomes idle (it is retired after
 * completion), the optional callback @fn is invoked.
 */
static inline void
__i915_active_fence_init(struct i915_active_fence *active,
			 void *fence,
			 dma_fence_func_t fn)
{
	RCU_INIT_POINTER(active->fence, fence);
	active->cb.func = fn ?: i915_active_noop;
}

#define INIT_ACTIVE_FENCE(A) \
	__i915_active_fence_init((A), NULL, NULL)

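/*
 * Illustrative sketch (not part of the API): a hypothetical owner embeds an
 * i915_active_fence and supplies a retirement callback. The callback receives
 * the embedded dma_fence_cb, from which the owner is recovered with
 * container_of(). All struct and function names below are examples only.
 *
 *	struct hypothetical_obj {
 *		struct i915_active_fence last_write;
 *	};
 *
 *	static void hypothetical_retire(struct dma_fence *fence,
 *					struct dma_fence_cb *cb)
 *	{
 *		struct hypothetical_obj *obj =
 *			container_of(cb, typeof(*obj), last_write.cb);
 *
 *		// the tracker is now idle; obj may release resources here
 *	}
 *
 *	__i915_active_fence_init(&obj->last_write, NULL, hypothetical_retire);
 */
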
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence);

/**
 * i915_active_fence_set - updates the tracker to watch the current fence
 * @active: the active tracker
 * @rq: the request to watch
 *
 * i915_active_fence_set() watches the given @rq for completion. While
 * that @rq is busy, the @active reports busy. When that @rq is signaled
 * (or else retired), the @active tracker is updated to report idle.
 */
int __must_check
i915_active_fence_set(struct i915_active_fence *active,
		      struct i915_request *rq);
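
/*
 * Illustrative sketch (hypothetical obj/rq names): update the tracker to
 * follow the latest request touching the object. The return value must be
 * checked; any caller-side serialisation requirements are not shown here.
 *
 *	int err;
 *
 *	err = i915_active_fence_set(&obj->last_write, rq);
 *	if (err)
 *		return err;
 */
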
/**
 * i915_active_fence_get - return a reference to the active fence
 * @active: the active tracker
 *
 * i915_active_fence_get() returns a reference to the active fence,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with dma_fence_put().
 */
static inline struct dma_fence *
i915_active_fence_get(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&active->fence);
	rcu_read_unlock();

	return fence;
}

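/*
 * Illustrative sketch (hypothetical obj name): take a reference to the
 * tracked fence, wait on it, then drop the reference. dma_fence_wait() with
 * intr=true returns 0 on success or a negative error if interrupted.
 *
 *	struct dma_fence *fence;
 *	long err = 0;
 *
 *	fence = i915_active_fence_get(&obj->last_write);
 *	if (fence) {
 *		err = dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *	}
 *	if (err < 0)
 *		return err;
 */
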
/**
 * i915_active_fence_isset - report whether the active tracker is assigned
 * @active: the active tracker
 *
 * i915_active_fence_isset() returns true if the active tracker is currently
 * assigned to a fence. Due to the lazy retiring, that fence may be idle
 * and this may report stale information.
 */
static inline bool
i915_active_fence_isset(const struct i915_active_fence *active)
{
	return rcu_access_pointer(active->fence);
}

/*
 * GPU activity tracking
 *
 * Each set of commands submitted to the GPU comprises a single request that
 * signals a fence upon completion. struct i915_request combines the
 * command submission, scheduling and fence signaling roles. If we want to see
 * if a particular task is complete, we need to grab the fence (struct
 * i915_request) for that task and check or wait for it to be signaled. More
 * often though we want to track the status of a bunch of tasks, for example
 * to wait for the GPU to finish accessing some memory across a variety of
 * different command pipelines from different clients. We could choose to
 * track every single request associated with the task, but knowing that
 * each request belongs to an ordered timeline (later requests within a
 * timeline must wait for earlier requests), we need only track the
 * latest request in each timeline to determine the overall status of the
 * task.
 *
 * struct i915_active provides this tracking across timelines. It builds a
 * composite shared-fence, and is updated as new work is submitted to the task,
 * forming a snapshot of the current status. It should be embedded into the
 * different resources that need to track their associated GPU activity to
 * provide a callback when that GPU activity has ceased, or otherwise to
 * provide a serialisation point either for request submission or for CPU
 * synchronisation.
 */

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey);

/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
#define i915_active_init(ref, active, retire, flags) do {			\
	static struct lock_class_key __mkey;					\
	static struct lock_class_key __wkey;					\
										\
	__i915_active_init(ref, active, retire, flags, &__mkey, &__wkey);	\
} while (0)

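/*
 * Illustrative sketch (hypothetical struct foo and callbacks): a resource
 * embeds a struct i915_active and specialises it with i915_active_init().
 * The optional @active callback runs when the tracker first becomes busy,
 * and @retire when the last tracked fence is retired; both recover the owner
 * with container_of(). Names below are examples only.
 *
 *	struct foo {
 *		struct i915_active active;
 *	};
 *
 *	static int foo_active(struct i915_active *ref)
 *	{
 *		struct foo *f = container_of(ref, typeof(*f), active);
 *
 *		// e.g. pin backing storage while busy
 *		return 0;
 *	}
 *
 *	static void foo_retire(struct i915_active *ref)
 *	{
 *		struct foo *f = container_of(ref, typeof(*f), active);
 *
 *		// e.g. unpin backing storage now that activity has ceased
 *	}
 *
 *	i915_active_init(&f->active, foo_active, foo_retire, 0);
 */
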
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);

int __i915_active_wait(struct i915_active *ref, int state);
static inline int i915_active_wait(struct i915_active *ref)
{
	return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags);
int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags);
#define I915_ACTIVE_AWAIT_EXCL BIT(0)
#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)

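/*
 * Illustrative sketch (hypothetical rq/f names): make a new request wait for
 * activity tracked by an i915_active before it executes. The flags select
 * which tracked fences to await; here both the exclusive fence and the
 * shared (active) fences are assumed to be of interest.
 *
 *	int err;
 *
 *	err = i915_request_await_active(rq, &f->active,
 *					I915_ACTIVE_AWAIT_EXCL |
 *					I915_ACTIVE_AWAIT_ACTIVE);
 *	if (err)
 *		return err;
 */
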
int i915_active_acquire(struct i915_active *ref);
int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);

void i915_active_release(struct i915_active *ref);

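/*
 * Illustrative sketch (hypothetical f/rq names): track a new request against
 * a resource. The acquire/release pair brackets the update so the tracker
 * cannot retire to idle while the request is being added.
 *
 *	int err;
 *
 *	err = i915_active_acquire(&f->active);
 *	if (err)
 *		return err;
 *
 *	err = i915_active_add_request(&f->active, rq);
 *
 *	i915_active_release(&f->active);
 *	if (err)
 *		return err;
 */
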
static inline void __i915_active_acquire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	atomic_inc(&ref->count);
}

static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
	return !atomic_read(&ref->count);
}

void i915_active_fini(struct i915_active *ref);

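/*
 * Illustrative sketch (hypothetical f name): flush outstanding activity and
 * tear the tracker down before freeing the resource that embeds it.
 * i915_active_wait() waits interruptibly (TASK_INTERRUPTIBLE), so it can
 * fail if the caller is interrupted.
 *
 *	int err;
 *
 *	err = i915_active_wait(&f->active);
 *	if (err)
 *		return err;
 *
 *	i915_active_fini(&f->active);
 *	kfree(f);
 */
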
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine);
void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);

void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);

struct i915_active *i915_active_create(void);
struct i915_active *i915_active_get(struct i915_active *ref);
void i915_active_put(struct i915_active *ref);

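/*
 * Illustrative sketch: for trackers not embedded in another struct, allocate
 * a standalone, reference-counted i915_active and drop it when done. Whether
 * i915_active_create() reports failure via NULL or an ERR_PTR is not
 * specified here; the NULL check below is an assumption.
 *
 *	struct i915_active *ref;
 *
 *	ref = i915_active_create();
 *	if (!ref)
 *		return -ENOMEM;
 *
 *	// ... i915_active_add_request(ref, rq), waits, etc. ...
 *
 *	i915_active_put(ref);
 */
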
static inline int __i915_request_await_exclusive(struct i915_request *rq,
						 struct i915_active *active)
{
	struct dma_fence *fence;
	int err = 0;

	fence = i915_active_fence_get(&active->excl);
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_module_exit(void);
int i915_active_module_init(void);

#endif /* _I915_ACTIVE_H_ */