0001 /*
0002  * Copyright © 2008-2018 Intel Corporation
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice (including the next
0012  * paragraph) shall be included in all copies or substantial portions of the
0013  * Software.
0014  *
0015  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0016  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0017  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0018  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0019  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
0020  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
0021  * IN THE SOFTWARE.
0022  *
0023  */
0024 
0025 #ifndef I915_REQUEST_H
0026 #define I915_REQUEST_H
0027 
0028 #include <linux/dma-fence.h>
0029 #include <linux/hrtimer.h>
0030 #include <linux/irq_work.h>
0031 #include <linux/llist.h>
0032 #include <linux/lockdep.h>
0033 
0034 #include "gem/i915_gem_context_types.h"
0035 #include "gt/intel_context_types.h"
0036 #include "gt/intel_engine_types.h"
0037 #include "gt/intel_timeline_types.h"
0038 
0039 #include "i915_gem.h"
0040 #include "i915_scheduler.h"
0041 #include "i915_selftest.h"
0042 #include "i915_sw_fence.h"
0043 #include "i915_vma_resource.h"
0044 
0045 #include <uapi/drm/i915_drm.h>
0046 
0047 struct drm_file;
0048 struct drm_i915_gem_object;
0049 struct drm_printer;
0050 struct i915_deps;
0051 struct i915_request;
0052 
0053 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
0054 struct i915_capture_list {
0055     struct i915_vma_resource *vma_res;
0056     struct i915_capture_list *next;
0057 };
0058 
0059 void i915_request_free_capture_list(struct i915_capture_list *capture);
0060 #else
0061 #define i915_request_free_capture_list(_a) do {} while (0)
0062 #endif
0063 
0064 #define RQ_TRACE(rq, fmt, ...) do {                 \
0065     const struct i915_request *rq__ = (rq);             \
0066     ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,  \
0067              rq__->fence.context, rq__->fence.seqno,        \
0068              hwsp_seqno(rq__), ##__VA_ARGS__);          \
0069 } while (0)
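/*
 * Illustrative use only (not part of the original header): RQ_TRACE() is
 * meant to be called from request-handling paths with a printf-style
 * message, e.g.
 *
 *	RQ_TRACE(rq, "submitted\n");
 *
 * which prefixes the message with the request's fence context:seqno and the
 * breadcrumb currently visible in the HW status page via hwsp_seqno().
 */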
0070 
0071 enum {
0072     /*
0073      * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
0074      *
0075      * Set by __i915_request_submit() on handing over to HW, and cleared
0076      * by __i915_request_unsubmit() if we preempt this request.
0077      *
0078      * Finally cleared for consistency on retiring the request, when
0079      * we know the HW is no longer running this request.
0080      *
0081      * See i915_request_is_active()
0082      */
0083     I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
0084 
0085     /*
0086      * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
0087      *
0088      * Using the scheduler, when a request is ready for execution it is put
0089      * into the priority queue, and removed from that queue when transferred
0090      * to the HW runlists. We want to track its membership within the
0091      * priority queue so that we can easily check before rescheduling.
0092      *
0093      * See i915_request_in_priority_queue()
0094      */
0095     I915_FENCE_FLAG_PQUEUE,
0096 
0097     /*
0098      * I915_FENCE_FLAG_HOLD - this request is currently on hold
0099      *
0100      * This request has been suspended, pending an ongoing investigation.
0101      */
0102     I915_FENCE_FLAG_HOLD,
0103 
0104     /*
0105      * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
0106      * breadcrumb that marks the end of semaphore waits and start of the
0107      * user payload.
0108      */
0109     I915_FENCE_FLAG_INITIAL_BREADCRUMB,
0110 
0111     /*
0112      * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
0113      *
0114      * Internal bookkeeping used by the breadcrumb code to track when
0115      * a request is on the various signal_list.
0116      */
0117     I915_FENCE_FLAG_SIGNAL,
0118 
0119     /*
0120      * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
0121      *
0122      * The execution of some requests should not be interrupted. This is
0123      * a sensitive operation as it makes the request super important,
0124      * blocking other higher priority work. Abuse of this flag will
0125      * lead to quality of service issues.
0126      */
0127     I915_FENCE_FLAG_NOPREEMPT,
0128 
0129     /*
0130      * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
0131      *
0132      * A high priority sentinel request may be submitted to clear the
0133      * submission queue. As it will be the only request in-flight, upon
0134      * execution all other active requests will have been preempted and
0135      * unsubmitted. This preemptive pulse is used to re-evaluate the
0136      * in-flight requests, particularly in cases where an active context
0137      * is banned and those active requests need to be cancelled.
0138      */
0139     I915_FENCE_FLAG_SENTINEL,
0140 
0141     /*
0142      * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
0143      *
0144      * Some requests are more important than others! In particular, a
0145      * request that the user is waiting on is typically required for
0146      * interactive latency, which we want to minimise by upclocking
0147      * the GPU. Here we track such boost requests on a per-request basis.
0148      */
0149     I915_FENCE_FLAG_BOOST,
0150 
0151     /*
0152      * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
0153      * parent-child relationship (parallel submission, multi-lrc) should
0154      * trigger a submission to the GuC rather than just moving the context
0155      * tail.
0156      */
0157     I915_FENCE_FLAG_SUBMIT_PARALLEL,
0158 
0159     /*
0160      * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
0161      * parent-child relationship (parallel submission, multi-lrc) that
0162      * hit an error while generating requests in the execbuf IOCTL.
0163      * Indicates this request should be skipped as another request in
0164      * submission / relationship encountered an error.
0165      */
0166     I915_FENCE_FLAG_SKIP_PARALLEL,
0167 
0168     /*
0169      * I915_FENCE_FLAG_COMPOSITE - Indicates fence is part of a composite
0170      * fence (dma_fence_array) and i915 generated for parallel submission.
0171      */
0172     I915_FENCE_FLAG_COMPOSITE,
0173 };
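/*
 * Illustrative sketch (not part of the original header): these flags occupy
 * the driver-private bits of rq->fence.flags and are queried with the usual
 * bit helpers, e.g.
 *
 *	if (test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags))
 *		return;	// request is suspended pending investigation
 *
 * In practice, prefer the i915_request_*() wrappers declared later in this
 * header (i915_request_is_active(), i915_request_on_hold(), ...).
 */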
0174 
0175 /**
0176  * Request queue structure.
0177  *
0178  * The request queue allows us to note sequence numbers that have been emitted
0179  * and may be associated with active buffers to be retired.
0180  *
0181  * By keeping this list, we can avoid having to do questionable sequence
0182  * number comparisons on buffer last_read|write_seqno. It also allows an
0183  * emission time to be associated with the request for tracking how far ahead
0184  * of the GPU the submission is.
0185  *
0186  * When modifying this structure be very aware that we perform a lockless
0187  * RCU lookup of it that may race against reallocation of the struct
0188  * from the slab freelist. We intentionally do not zero the structure on
0189  * allocation so that the lookup can use the dangling pointers (and is
0190  * cognisant that those pointers may be wrong). Instead, everything that
0191  * needs to be initialised must be done so explicitly.
0192  *
0193  * The requests are reference counted.
0194  */
0195 struct i915_request {
0196     struct dma_fence fence;
0197     spinlock_t lock;
0198 
0199     struct drm_i915_private *i915;
0200 
0201     /**
0202      * Context and ring buffer related to this request
0203      * Contexts are refcounted, so when this request is associated with a
0204      * context, we must increment the context's refcount, to guarantee that
0205      * it persists while any request is linked to it. Requests themselves
0206      * are also refcounted, so the request will only be freed when the last
0207      * reference to it is dismissed, and the code in
0208      * i915_request_free() will then decrement the refcount on the
0209      * context.
0210      */
0211     struct intel_engine_cs *engine;
0212     struct intel_context *context;
0213     struct intel_ring *ring;
0214     struct intel_timeline __rcu *timeline;
0215 
0216     struct list_head signal_link;
0217     struct llist_node signal_node;
0218 
0219     /*
0220      * The rcu epoch of when this request was allocated. Used to judiciously
0221      * apply backpressure on future allocations to ensure that under
0222      * mempressure there are sufficient RCU ticks for us to reclaim our
0223      * RCU protected slabs.
0224      */
0225     unsigned long rcustate;
0226 
0227     /*
0228      * We pin the timeline->mutex while constructing the request to
0229      * ensure that no caller accidentally drops it during construction.
0230      * The timeline->mutex must be held to ensure that only this caller
0231      * can use the ring and manipulate the associated timeline during
0232      * construction.
0233      */
0234     struct pin_cookie cookie;
0235 
0236     /*
0237      * Fences for the various phases in the request's lifetime.
0238      *
0239      * The submit fence is used to await upon all of the request's
0240      * dependencies. When it is signaled, the request is ready to run.
0241      * It is used by the driver to then queue the request for execution.
0242      */
0243     struct i915_sw_fence submit;
0244     union {
0245         wait_queue_entry_t submitq;
0246         struct i915_sw_dma_fence_cb dmaq;
0247         struct i915_request_duration_cb {
0248             struct dma_fence_cb cb;
0249             ktime_t emitted;
0250         } duration;
0251     };
0252     struct llist_head execute_cb;
0253     struct i915_sw_fence semaphore;
0254     /**
0255      * @submit_work: complete submit fence from an IRQ if needed for
0256      * locking hierarchy reasons.
0257      */
0258     struct irq_work submit_work;
0259 
0260     /*
0261      * A list of everyone we wait upon, and everyone who waits upon us.
0262      * Even though we will not be submitted to the hardware before the
0263      * submit fence is signaled (it waits for all external events as well
0264      * as our own requests), the scheduler still needs to know the
0265      * dependency tree for the lifetime of the request (from execbuf
0266      * to retirement), i.e. bidirectional dependency information for the
0267      * request not tied to individual fences.
0268      */
0269     struct i915_sched_node sched;
0270     struct i915_dependency dep;
0271     intel_engine_mask_t execution_mask;
0272 
0273     /*
0274      * A convenience pointer to the current breadcrumb value stored in
0275      * the HW status page (or our timeline's local equivalent). The full
0276      * path would be rq->hw_context->ring->timeline->hwsp_seqno.
0277      */
0278     const u32 *hwsp_seqno;
0279 
0280     /** Position in the ring of the start of the request */
0281     u32 head;
0282 
0283     /** Position in the ring of the start of the user packets */
0284     u32 infix;
0285 
0286     /**
0287      * Position in the ring of the start of the postfix.
0288      * This is required to calculate the maximum available ring space
0289      * without overwriting the postfix.
0290      */
0291     u32 postfix;
0292 
0293     /** Position in the ring of the end of the whole request */
0294     u32 tail;
0295 
0296     /** Position in the ring of the end of any workarounds after the tail */
0297     u32 wa_tail;
0298 
0299     /** Preallocate space in the ring for emitting the request */
0300     u32 reserved_space;
0301 
0302     /** Batch buffer pointer for selftest internal use. */
0303     I915_SELFTEST_DECLARE(struct i915_vma *batch);
0304 
0305     struct i915_vma_resource *batch_res;
0306 
0307 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
0308     /**
0309      * Additional buffers requested by userspace to be captured upon
0310      * a GPU hang. The vma/obj on this list are protected by their
0311      * active reference - all objects on this list must also be
0312      * on the active_list (of their final request).
0313      */
0314     struct i915_capture_list *capture_list;
0315 #endif
0316 
0317     /** Time at which this request was emitted, in jiffies. */
0318     unsigned long emitted_jiffies;
0319 
0320     /** timeline->request entry for this request */
0321     struct list_head link;
0322 
0323     /** Watchdog support fields. */
0324     struct i915_request_watchdog {
0325         struct llist_node link;
0326         struct hrtimer timer;
0327     } watchdog;
0328 
0329     /**
0330      * @guc_fence_link: Requests may need to be stalled when using GuC
0331      * submission waiting for certain GuC operations to complete. If that is
0332      * the case, stalled requests are added to a per context list of stalled
0333      * requests. The below list_head is the link in that list. Protected by
0334      * ce->guc_state.lock.
0335      */
0336     struct list_head guc_fence_link;
0337 
0338     /**
0339      * @guc_prio: Priority level while the request is in flight. Differs
0340      * from i915 scheduler priority. See comment above
0341      * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
0342      * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
0343      * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
0344      * if the priority has not been initialized yet or if no more updates
0345      * are possible because the request has completed.
0346      */
0347 #define GUC_PRIO_INIT   0xff
0348 #define GUC_PRIO_FINI   0xfe
0349     u8 guc_prio;
0350 
0351     I915_SELFTEST_DECLARE(struct {
0352         struct list_head link;
0353         unsigned long delay;
0354     } mock;)
0355 };
0356 
0357 #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
0358 
0359 extern const struct dma_fence_ops i915_fence_ops;
0360 
0361 static inline bool dma_fence_is_i915(const struct dma_fence *fence)
0362 {
0363     return fence->ops == &i915_fence_ops;
0364 }
0365 
0366 struct kmem_cache *i915_request_slab_cache(void);
0367 
0368 struct i915_request * __must_check
0369 __i915_request_create(struct intel_context *ce, gfp_t gfp);
0370 struct i915_request * __must_check
0371 i915_request_create(struct intel_context *ce);
0372 
0373 void __i915_request_skip(struct i915_request *rq);
0374 bool i915_request_set_error_once(struct i915_request *rq, int error);
0375 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
0376 
0377 struct i915_request *__i915_request_commit(struct i915_request *request);
0378 void __i915_request_queue(struct i915_request *rq,
0379               const struct i915_sched_attr *attr);
0380 void __i915_request_queue_bh(struct i915_request *rq);
0381 
0382 bool i915_request_retire(struct i915_request *rq);
0383 void i915_request_retire_upto(struct i915_request *rq);
0384 
0385 static inline struct i915_request *
0386 to_request(struct dma_fence *fence)
0387 {
0388     /* We assume that NULL fence/request are interoperable */
0389     BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
0390     GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
0391     return container_of(fence, struct i915_request, fence);
0392 }
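/*
 * Downcast sketch (illustrative; peek_i915_request() is a hypothetical
 * helper, not part of this header): a foreign dma_fence must never be passed
 * to to_request(), so check the ops first.
 *
 *	static struct i915_request *peek_i915_request(struct dma_fence *fence)
 *	{
 *		if (!fence || !dma_fence_is_i915(fence))
 *			return NULL;
 *
 *		return to_request(fence);
 *	}
 */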
0393 
0394 static inline struct i915_request *
0395 i915_request_get(struct i915_request *rq)
0396 {
0397     return to_request(dma_fence_get(&rq->fence));
0398 }
0399 
0400 static inline struct i915_request *
0401 i915_request_get_rcu(struct i915_request *rq)
0402 {
0403     return to_request(dma_fence_get_rcu(&rq->fence));
0404 }
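/*
 * Lockless lookup sketch (illustrative; 'slot' is a hypothetical RCU-visible
 * pointer): because the struct may be reused from the slab while we look at
 * it (see the comment above struct i915_request), the reference must be
 * acquired with i915_request_get_rcu(), which returns NULL if the request
 * has already been released. The caller must then re-check that the request
 * is still the one it expected, since the memory may have been recycled.
 *
 *	rcu_read_lock();
 *	rq = rcu_dereference(slot);
 *	if (rq)
 *		rq = i915_request_get_rcu(rq);
 *	rcu_read_unlock();
 */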
0405 
0406 static inline void
0407 i915_request_put(struct i915_request *rq)
0408 {
0409     dma_fence_put(&rq->fence);
0410 }
0411 
0412 int i915_request_await_object(struct i915_request *to,
0413                   struct drm_i915_gem_object *obj,
0414                   bool write);
0415 int i915_request_await_dma_fence(struct i915_request *rq,
0416                  struct dma_fence *fence);
0417 int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
0418 int i915_request_await_execution(struct i915_request *rq,
0419                  struct dma_fence *fence);
0420 
0421 void i915_request_add(struct i915_request *rq);
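/*
 * Construction sketch (illustrative only; error handling and command
 * emission are elided, and 'fence' stands for any external dependency):
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_dma_fence(rq, fence);
 *	...	emit commands into the ring	...
 *	i915_request_add(rq);	// commit the request and queue it
 */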
0422 
0423 bool __i915_request_submit(struct i915_request *request);
0424 void i915_request_submit(struct i915_request *request);
0425 
0426 void __i915_request_unsubmit(struct i915_request *request);
0427 void i915_request_unsubmit(struct i915_request *request);
0428 
0429 void i915_request_cancel(struct i915_request *rq, int error);
0430 
0431 long i915_request_wait_timeout(struct i915_request *rq,
0432                    unsigned int flags,
0433                    long timeout)
0434     __attribute__((nonnull(1)));
0435 
0436 long i915_request_wait(struct i915_request *rq,
0437                unsigned int flags,
0438                long timeout)
0439     __attribute__((nonnull(1)));
0440 #define I915_WAIT_INTERRUPTIBLE BIT(0)
0441 #define I915_WAIT_PRIORITY  BIT(1) /* small priority bump for the request */
0442 #define I915_WAIT_ALL       BIT(2) /* used by i915_gem_object_wait() */
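/*
 * Wait sketch (illustrative): block, interruptibly, until the request has
 * completed or the (jiffies) timeout expires; a negative return indicates an
 * error or interruption.
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 */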
0443 
0444 void i915_request_show(struct drm_printer *m,
0445                const struct i915_request *rq,
0446                const char *prefix,
0447                int indent);
0448 
0449 static inline bool i915_request_signaled(const struct i915_request *rq)
0450 {
0451     /* The request may live longer than its HWSP, so check flags first! */
0452     return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
0453 }
0454 
0455 static inline bool i915_request_is_active(const struct i915_request *rq)
0456 {
0457     return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
0458 }
0459 
0460 static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
0461 {
0462     return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
0463 }
0464 
0465 static inline bool
0466 i915_request_has_initial_breadcrumb(const struct i915_request *rq)
0467 {
0468     return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
0469 }
0470 
0471 /**
0472  * Returns true if seq1 is later than or equal to seq2.
0473  */
0474 static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
0475 {
0476     return (s32)(seq1 - seq2) >= 0;
0477 }
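/*
 * The signed subtraction makes this robust against u32 wraparound: for
 * example, with seq1 == 0x00000002 and seq2 == 0xfffffffe,
 * (s32)(seq1 - seq2) == 4 >= 0, so seq1 is correctly treated as having
 * passed seq2 even though it is numerically smaller.
 */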
0478 
0479 static inline u32 __hwsp_seqno(const struct i915_request *rq)
0480 {
0481     const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
0482 
0483     return READ_ONCE(*hwsp);
0484 }
0485 
0486 /**
0487  * hwsp_seqno - the current breadcrumb value in the HW status page
0488  * @rq: the request, to chase the relevant HW status page
0489  *
0490  * The emphasis in naming here is that hwsp_seqno() is not a property of the
0491  * request, but an indication of the current HW state (associated with this
0492  * request). Its value will change as the GPU executes more requests.
0493  *
0494  * Returns the current breadcrumb value in the associated HW status page (or
0495  * the local timeline's equivalent) for this request. The request itself
0496  * has the associated breadcrumb value of rq->fence.seqno, when the HW
0497  * status page has that breadcrumb or later, this request is complete.
0498  */
0499 static inline u32 hwsp_seqno(const struct i915_request *rq)
0500 {
0501     u32 seqno;
0502 
0503     rcu_read_lock(); /* the HWSP may be freed at runtime */
0504     seqno = __hwsp_seqno(rq);
0505     rcu_read_unlock();
0506 
0507     return seqno;
0508 }
0509 
0510 static inline bool __i915_request_has_started(const struct i915_request *rq)
0511 {
0512     return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
0513 }
0514 
0515 /**
0516  * i915_request_started - check if the request has begun being executed
0517  * @rq: the request
0518  *
0519  * If the timeline is not using initial breadcrumbs, a request is
0520  * considered started if the previous request on its timeline (i.e.
0521  * context) has been signaled.
0522  *
0523  * If the timeline is using semaphores, it will also be emitting an
0524  * "initial breadcrumb" after the semaphores are complete and just before
0525  * it began executing the user payload. A request can therefore be active
0526  * on the HW and not yet started as it is still busywaiting on its
0527  * dependencies (via HW semaphores).
0528  *
0529  * If the request has started, its dependencies will have been signaled
0530  * (either by fences or by semaphores) and it will have begun processing
0531  * the user payload.
0532  *
0533  * However, even if a request has started, it may have been preempted and
0534  * so no longer active, or it may have already completed.
0535  *
0536  * See also i915_request_is_active().
0537  *
0538  * Returns true if the request has begun executing the user payload, or
0539  * has completed.
0540  */
0541 static inline bool i915_request_started(const struct i915_request *rq)
0542 {
0543     bool result;
0544 
0545     if (i915_request_signaled(rq))
0546         return true;
0547 
0548     result = true;
0549     rcu_read_lock(); /* the HWSP may be freed at runtime */
0550     if (likely(!i915_request_signaled(rq)))
0551         /* Remember: started but may have since been preempted! */
0552         result = __i915_request_has_started(rq);
0553     rcu_read_unlock();
0554 
0555     return result;
0556 }
0557 
0558 /**
0559  * i915_request_is_running - check if the request may actually be executing
0560  * @rq: the request
0561  *
0562  * Returns true if the request is currently submitted to hardware and has passed
0563  * its start point (i.e. the context is set up and not busywaiting). Note that
0564  * it may no longer be running by the time the function returns!
0565  */
0566 static inline bool i915_request_is_running(const struct i915_request *rq)
0567 {
0568     bool result;
0569 
0570     if (!i915_request_is_active(rq))
0571         return false;
0572 
0573     rcu_read_lock();
0574     result = __i915_request_has_started(rq) && i915_request_is_active(rq);
0575     rcu_read_unlock();
0576 
0577     return result;
0578 }
0579 
0580 /**
0581  * i915_request_is_ready - check if the request is ready for execution
0582  * @rq: the request
0583  *
0584  * Upon construction, the request is instructed to wait upon various
0585  * signals before it is ready to be executed by the HW. That is, we do
0586  * not want to start execution and read data before it is written. In practice,
0587  * this is controlled with a mixture of interrupts and semaphores. Once
0588  * the submit fence is completed, the backend scheduler will place the
0589  * request into its queue and from there submit it for execution. So we
0590  * can detect when a request is eligible for execution (and is under control
0591  * of the scheduler) by querying where it is in any of the scheduler's lists.
0592  *
0593  * Returns true if the request is ready for execution (it may be inflight),
0594  * false otherwise.
0595  */
0596 static inline bool i915_request_is_ready(const struct i915_request *rq)
0597 {
0598     return !list_empty(&rq->sched.link);
0599 }
0600 
0601 static inline bool __i915_request_is_complete(const struct i915_request *rq)
0602 {
0603     return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
0604 }
0605 
0606 static inline bool i915_request_completed(const struct i915_request *rq)
0607 {
0608     bool result;
0609 
0610     if (i915_request_signaled(rq))
0611         return true;
0612 
0613     result = true;
0614     rcu_read_lock(); /* the HWSP may be freed at runtime */
0615     if (likely(!i915_request_signaled(rq)))
0616         result = __i915_request_is_complete(rq);
0617     rcu_read_unlock();
0618 
0619     return result;
0620 }
0621 
0622 static inline void i915_request_mark_complete(struct i915_request *rq)
0623 {
0624     WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
0625            (u32 *)&rq->fence.seqno);
0626 }
0627 
0628 static inline bool i915_request_has_waitboost(const struct i915_request *rq)
0629 {
0630     return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
0631 }
0632 
0633 static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
0634 {
0635     /* Preemption should only be disabled very rarely */
0636     return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
0637 }
0638 
0639 static inline bool i915_request_has_sentinel(const struct i915_request *rq)
0640 {
0641     return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
0642 }
0643 
0644 static inline bool i915_request_on_hold(const struct i915_request *rq)
0645 {
0646     return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
0647 }
0648 
0649 static inline void i915_request_set_hold(struct i915_request *rq)
0650 {
0651     set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
0652 }
0653 
0654 static inline void i915_request_clear_hold(struct i915_request *rq)
0655 {
0656     clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
0657 }
0658 
0659 static inline struct intel_timeline *
0660 i915_request_timeline(const struct i915_request *rq)
0661 {
0662     /* Valid only while the request is being constructed (or retired). */
0663     return rcu_dereference_protected(rq->timeline,
0664                      lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
0665                      test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
0666 }
0667 
0668 static inline struct i915_gem_context *
0669 i915_request_gem_context(const struct i915_request *rq)
0670 {
0671     /* Valid only while the request is being constructed (or retired). */
0672     return rcu_dereference_protected(rq->context->gem_context, true);
0673 }
0674 
0675 static inline struct intel_timeline *
0676 i915_request_active_timeline(const struct i915_request *rq)
0677 {
0678     /*
0679      * When in use during submission, we are protected by a guarantee that
0680      * the context/timeline is pinned and must remain pinned until after
0681      * this submission.
0682      */
0683     return rcu_dereference_protected(rq->timeline,
0684                      lockdep_is_held(&rq->engine->sched_engine->lock));
0685 }
0686 
0687 static inline u32
0688 i915_request_active_seqno(const struct i915_request *rq)
0689 {
0690     u32 hwsp_phys_base =
0691         page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
0692     u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);
0693 
0694     /*
0695      * Because of wraparound, we cannot simply take tl->hwsp_offset,
0696      * but instead rely on the offset of the vaddr within its page being
0697      * the same as within hwsp_offset. Take the top bits from tl->hwsp_offset
0698      * and combine them with the relative offset in rq->hwsp_seqno.
0699      *
0700      * As rq->hwsp_seqno is rewritten when signaled, this only works
0701      * when the request isn't signaled yet, but at that point you
0702      * no longer need the offset.
0703      */
0704 
0705     return hwsp_phys_base + hwsp_relative_offset;
0706 }
0707 
0708 bool
0709 i915_request_active_engine(struct i915_request *rq,
0710                struct intel_engine_cs **active);
0711 
0712 void i915_request_notify_execute_cb_imm(struct i915_request *rq);
0713 
0714 enum i915_request_state {
0715     I915_REQUEST_UNKNOWN = 0,
0716     I915_REQUEST_COMPLETE,
0717     I915_REQUEST_PENDING,
0718     I915_REQUEST_QUEUED,
0719     I915_REQUEST_ACTIVE,
0720 };
0721 
0722 enum i915_request_state i915_test_request_state(struct i915_request *rq);
0723 
0724 void i915_request_module_exit(void);
0725 int i915_request_module_init(void);
0726 
0727 #endif /* I915_REQUEST_H */