/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 *
 * NOTE(review): the original license header was garbled into bare line
 * numbers during extraction; restored as the standard i915 MIT header —
 * confirm against upstream drivers/gpu/drm/i915/i915_request.h.
 */
0025 #ifndef I915_REQUEST_H
0026 #define I915_REQUEST_H
0027
0028 #include <linux/dma-fence.h>
0029 #include <linux/hrtimer.h>
0030 #include <linux/irq_work.h>
0031 #include <linux/llist.h>
0032 #include <linux/lockdep.h>
0033
0034 #include "gem/i915_gem_context_types.h"
0035 #include "gt/intel_context_types.h"
0036 #include "gt/intel_engine_types.h"
0037 #include "gt/intel_timeline_types.h"
0038
0039 #include "i915_gem.h"
0040 #include "i915_scheduler.h"
0041 #include "i915_selftest.h"
0042 #include "i915_sw_fence.h"
0043 #include "i915_vma_resource.h"
0044
0045 #include <uapi/drm/i915_drm.h>
0046
0047 struct drm_file;
0048 struct drm_i915_gem_object;
0049 struct drm_printer;
0050 struct i915_deps;
0051 struct i915_request;
0052
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * Singly-linked list of vma resources to be recorded alongside a request
 * for post-mortem (GPU hang) error capture; only built when error capture
 * support is configured in.
 */
struct i915_capture_list {
	struct i915_vma_resource *vma_res;
	struct i915_capture_list *next;
};

/* Free an entire capture list. */
void i915_request_free_capture_list(struct i915_capture_list *capture);
#else
/* No-op stub so callers need not carry their own #ifdefs. */
#define i915_request_free_capture_list(_a) do {} while (0)
#endif
0063
/*
 * RQ_TRACE - ENGINE_TRACE message prefixed with the request's identity:
 * its fence (context:seqno) and the breadcrumb currently in the HWSP.
 * @rq is evaluated exactly once.
 */
#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
0070
/*
 * Request-private bits stored in rq->fence.flags, starting at the first
 * bit reserved for fence users (DMA_FENCE_FLAG_USER_BITS).  Where a flag
 * has an accessor in this header it is referenced below; flags without a
 * visible accessor are hedged — confirm semantics in the submission code.
 */
enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - the request is currently submitted to HW;
	 * tested via i915_request_is_active() and combined with the
	 * breadcrumb check in i915_request_is_running().
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - the request sits in the scheduler's
	 * priority queue; tested via i915_request_in_priority_queue().
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - the request is suspended from execution;
	 * manipulated with i915_request_{set,clear}_hold() and tested with
	 * i915_request_on_hold().
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - a breadcrumb was emitted
	 * before the request's payload; tested via
	 * i915_request_has_initial_breadcrumb().
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - presumably set while the request is on
	 * the breadcrumb signalling lists (signal_link/signal_node in
	 * struct i915_request).  NOTE(review): confirm in i915_request.c.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - the request must not be preempted;
	 * tested via i915_request_has_nopreempt().
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - tested via i915_request_has_sentinel();
	 * NOTE(review): barrier semantics are defined by the scheduler —
	 * confirm there.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - a waiter requested a frequency boost for
	 * this request; tested via i915_request_has_waitboost().
	 */
	I915_FENCE_FLAG_BOOST,

	/*
	 * I915_FENCE_FLAG_SUBMIT_PARALLEL - used by parallel (multi-LRC)
	 * submission; exact semantics live in the submission backend.
	 */
	I915_FENCE_FLAG_SUBMIT_PARALLEL,

	/*
	 * I915_FENCE_FLAG_SKIP_PARALLEL - used by parallel submission;
	 * NOTE(review): confirm semantics in the GuC backend.
	 */
	I915_FENCE_FLAG_SKIP_PARALLEL,

	/*
	 * I915_FENCE_FLAG_COMPOSITE - the fence is part of a composite
	 * fence covering a parallel submission.
	 */
	I915_FENCE_FLAG_COMPOSITE,
};
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
struct i915_request {
	/* Must be first: to_request() relies on the fence being at offset 0. */
	struct dma_fence fence;
	spinlock_t lock;

	/* Device on which this request was generated. */
	struct drm_i915_private *i915;

	/*
	 * Execution backend: the engine the request is (to be) submitted on,
	 * the context it runs within, the ring its commands were emitted
	 * into, and the timeline that assigned fence.seqno.  The timeline is
	 * RCU protected; see i915_request_timeline() and
	 * i915_request_active_timeline() for the required locking.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	/* Links for the breadcrumb signalling lists (I915_FENCE_FLAG_SIGNAL). */
	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * RCU grace-period state sampled around request construction.
	 * NOTE(review): exact usage lives in i915_request.c — confirm there.
	 */
	unsigned long rcustate;

	/* Lockdep pin cookie for the timeline mutex held while emitting. */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases of the request's lifetime.
	 *
	 * The submit fence is used to await all of the request's
	 * dependencies; once it is signaled the request is ready to run.
	 * The union members share the callback storage used while waiting
	 * for submission (wait-queue entry, dma-fence callback, or the
	 * duration-timestamp callback).
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;	/* timestamp when emitted */
		} duration;
	};
	/* Callbacks to run when the request starts executing. */
	struct llist_head execute_cb;
	/* Fence tracking semaphore waits issued for this request. */
	struct i915_sw_fence semaphore;

	/* Deferred submission work, e.g. from hardirq context. */
	struct irq_work submit_work;

	/*
	 * Scheduler bookkeeping: this request's node in the dependency
	 * graph, a preallocated dependency edge, and the mask of engines
	 * the request may execute on.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * Pointer to the slot in the HW status page holding this request's
	 * breadcrumb; compared against fence.seqno to determine start and
	 * completion (see __hwsp_seqno()).  Repointed at fence.seqno itself
	 * by i915_request_mark_complete().
	 */
	const u32 *hwsp_seqno;

	/* Position in the ring of the start of the request. */
	u32 head;

	/* Position in the ring of the start of the user payload.
	 * NOTE(review): confirm against the emission code. */
	u32 infix;

	/*
	 * Position in the ring of the start of the postfix (commands
	 * emitted after the payload, such as the breadcrumb).
	 */
	u32 postfix;

	/* Position in the ring of the end of the whole request. */
	u32 tail;

	/* Position in the ring of the end of any workarounds after the tail. */
	u32 wa_tail;

	/* Ring space reserved for emitting the final breadcrumb. */
	u32 reserved_space;

	/* Batch vma, retained for selftests only. */
	I915_SELFTEST_DECLARE(struct i915_vma *batch);

	/* Resource backing the batch buffer executed by this request. */
	struct i915_vma_resource *batch_res;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	/*
	 * Additional buffers to be recorded upon a GPU hang (error-capture
	 * builds only); freed with i915_request_free_capture_list().
	 */
	struct i915_capture_list *capture_list;
#endif

	/* Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/*
	 * Link in a request list.  NOTE(review): the list owner (timeline
	 * requests / retirement) is established in i915_request.c — confirm
	 * before relying on it.
	 */
	struct list_head link;

	/* Per-request watchdog timer (hrtimer plus lockless-list link). */
	struct i915_request_watchdog {
		struct llist_node link;
		struct hrtimer timer;
	} watchdog;

	/*
	 * Link used by the GuC submission backend for fence bookkeeping.
	 * NOTE(review): confirm exact usage in the GuC submission code.
	 */
	struct list_head guc_fence_link;

	/*
	 * Priority level this request carries for GuC submission.
	 * GUC_PRIO_INIT means not yet assigned; GUC_PRIO_FINI marks the
	 * priority bookkeeping as torn down.
	 */
#define GUC_PRIO_INIT	0xff
#define GUC_PRIO_FINI	0xfe
	u8 guc_prio;

	/* Selftest-only state for the mock engine. */
	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
0356
0357 #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
0358
0359 extern const struct dma_fence_ops i915_fence_ops;
0360
0361 static inline bool dma_fence_is_i915(const struct dma_fence *fence)
0362 {
0363 return fence->ops == &i915_fence_ops;
0364 }
0365
0366 struct kmem_cache *i915_request_slab_cache(void);
0367
0368 struct i915_request * __must_check
0369 __i915_request_create(struct intel_context *ce, gfp_t gfp);
0370 struct i915_request * __must_check
0371 i915_request_create(struct intel_context *ce);
0372
0373 void __i915_request_skip(struct i915_request *rq);
0374 bool i915_request_set_error_once(struct i915_request *rq, int error);
0375 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
0376
0377 struct i915_request *__i915_request_commit(struct i915_request *request);
0378 void __i915_request_queue(struct i915_request *rq,
0379 const struct i915_sched_attr *attr);
0380 void __i915_request_queue_bh(struct i915_request *rq);
0381
0382 bool i915_request_retire(struct i915_request *rq);
0383 void i915_request_retire_upto(struct i915_request *rq);
0384
/*
 * to_request - downcast a dma_fence to its containing i915_request.
 * Only valid for fences created by i915, as checked by the GEM_BUG_ON;
 * NULL maps to NULL since the fence lives at offset 0.
 */
static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interchangeable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
0393
0394 static inline struct i915_request *
0395 i915_request_get(struct i915_request *rq)
0396 {
0397 return to_request(dma_fence_get(&rq->fence));
0398 }
0399
0400 static inline struct i915_request *
0401 i915_request_get_rcu(struct i915_request *rq)
0402 {
0403 return to_request(dma_fence_get_rcu(&rq->fence));
0404 }
0405
0406 static inline void
0407 i915_request_put(struct i915_request *rq)
0408 {
0409 dma_fence_put(&rq->fence);
0410 }
0411
0412 int i915_request_await_object(struct i915_request *to,
0413 struct drm_i915_gem_object *obj,
0414 bool write);
0415 int i915_request_await_dma_fence(struct i915_request *rq,
0416 struct dma_fence *fence);
0417 int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
0418 int i915_request_await_execution(struct i915_request *rq,
0419 struct dma_fence *fence);
0420
0421 void i915_request_add(struct i915_request *rq);
0422
0423 bool __i915_request_submit(struct i915_request *request);
0424 void i915_request_submit(struct i915_request *request);
0425
0426 void __i915_request_unsubmit(struct i915_request *request);
0427 void i915_request_unsubmit(struct i915_request *request);
0428
0429 void i915_request_cancel(struct i915_request *rq, int error);
0430
0431 long i915_request_wait_timeout(struct i915_request *rq,
0432 unsigned int flags,
0433 long timeout)
0434 __attribute__((nonnull(1)));
0435
0436 long i915_request_wait(struct i915_request *rq,
0437 unsigned int flags,
0438 long timeout)
0439 __attribute__((nonnull(1)));
0440 #define I915_WAIT_INTERRUPTIBLE BIT(0)
0441 #define I915_WAIT_PRIORITY BIT(1)
0442 #define I915_WAIT_ALL BIT(2)
0443
0444 void i915_request_show(struct drm_printer *m,
0445 const struct i915_request *rq,
0446 const char *prefix,
0447 int indent);
0448
0449 static inline bool i915_request_signaled(const struct i915_request *rq)
0450 {
0451
0452 return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
0453 }
0454
0455 static inline bool i915_request_is_active(const struct i915_request *rq)
0456 {
0457 return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
0458 }
0459
0460 static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
0461 {
0462 return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
0463 }
0464
0465 static inline bool
0466 i915_request_has_initial_breadcrumb(const struct i915_request *rq)
0467 {
0468 return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
0469 }
0470
0471
0472
0473
0474 static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
0475 {
0476 return (s32)(seq1 - seq2) >= 0;
0477 }
0478
/*
 * __hwsp_seqno - raw read of the request's breadcrumb from the HW status
 * page.  The caller must keep the HWSP valid across the call (e.g. hold
 * rcu_read_lock(), as hwsp_seqno() does).  Both loads use READ_ONCE()
 * because the pointer itself may be rewritten concurrently (see
 * i915_request_mark_complete()).
 */
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
/*
 * hwsp_seqno - sample the current breadcrumb value for this request.
 *
 * Wraps __hwsp_seqno() in rcu_read_lock() so the page backing
 * rq->hwsp_seqno cannot be released while it is dereferenced.
 * NOTE(review): the RCU-deferred freeing of the HWSP is established
 * elsewhere (intel_timeline) — confirm when modifying.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}
0509
0510 static inline bool __i915_request_has_started(const struct i915_request *rq)
0511 {
0512 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
0513 }
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
/*
 * i915_request_started - has the request begun executing?
 *
 * Racy by design: the answer may be stale as soon as it is returned.
 * A signaled request is trivially "started"; otherwise peek at the
 * breadcrumb written before the payload, under rcu_read_lock() so the
 * HWSP stays valid while we read it.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock();
	if (likely(!i915_request_signaled(rq)))
		/* Re-check under RCU; a signaled request stays "started". */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}
0557
0558
0559
0560
0561
0562
0563
0564
0565
/*
 * i915_request_is_running - is the request currently executing on HW?
 * True when the request has both started (breadcrumb before the payload
 * passed) and is still flagged active.  Inherently racy; treat the
 * result as a hint.
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	/* Cheap flag test first, avoiding the RCU section entirely. */
	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}
0579
0580
0581
0582
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596 static inline bool i915_request_is_ready(const struct i915_request *rq)
0597 {
0598 return !list_empty(&rq->sched.link);
0599 }
0600
0601 static inline bool __i915_request_is_complete(const struct i915_request *rq)
0602 {
0603 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
0604 }
0605
/*
 * i915_request_completed - has the request finished executing?
 *
 * A signaled fence is definitively complete; otherwise compare the HWSP
 * breadcrumb against fence.seqno under rcu_read_lock().  If the request
 * is signaled in between, keep reporting true.
 */
static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock();
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}
0621
/*
 * i915_request_mark_complete - force the request to read as completed.
 * Repoints hwsp_seqno at the request's own fence.seqno, decoupling it
 * from the HW status page, so __i915_request_is_complete() always sees
 * seqno == fence.seqno from now on.
 */
static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from the HWSP */
		   (u32 *)&rq->fence.seqno);
}
0627
0628 static inline bool i915_request_has_waitboost(const struct i915_request *rq)
0629 {
0630 return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
0631 }
0632
0633 static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
0634 {
0635
0636 return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
0637 }
0638
0639 static inline bool i915_request_has_sentinel(const struct i915_request *rq)
0640 {
0641 return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
0642 }
0643
0644 static inline bool i915_request_on_hold(const struct i915_request *rq)
0645 {
0646 return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
0647 }
0648
0649 static inline void i915_request_set_hold(struct i915_request *rq)
0650 {
0651 set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
0652 }
0653
0654 static inline void i915_request_clear_hold(struct i915_request *rq)
0655 {
0656 clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
0657 }
0658
/*
 * i915_request_timeline - the timeline this request was built on.
 * Valid only while the caller holds the timeline mutex (or the context
 * is being parked), as asserted by the lockdep condition below.
 */
static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
		test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}
0667
/*
 * i915_request_gem_context - the GEM context associated with the request.
 * The `true` condition disables the lockdep check entirely;
 * NOTE(review): callers are presumed to guarantee the gem_context's
 * lifetime — confirm against the callers before relying on this.
 */
static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Intentionally unchecked dereference: see note above. */
	return rcu_dereference_protected(rq->context->gem_context, true);
}
0674
/*
 * i915_request_active_timeline - the timeline of an in-flight request.
 * While the request is executing (or queued for execution), the engine's
 * sched_engine lock protects the timeline pointer, as asserted by the
 * lockdep condition below.
 */
static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rq->engine->sched_engine->lock));
}
0686
/*
 * i915_request_active_seqno - address of this request's breadcrumb slot.
 * Combines the page-aligned base of the active timeline's HWSP with the
 * in-page offset of rq->hwsp_seqno to recover the (32bit) location the
 * seqno is written to.
 * NOTE(review): assumes hwsp_offset and hwsp_seqno refer to the same
 * page — confirm against intel_timeline.
 */
static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
	u32 hwsp_phys_base =
		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

	return hwsp_phys_base + hwsp_relative_offset;
}
0707
0708 bool
0709 i915_request_active_engine(struct i915_request *rq,
0710 struct intel_engine_cs **active);
0711
0712 void i915_request_notify_execute_cb_imm(struct i915_request *rq);
0713
/*
 * Coarse lifecycle classification of a request, as sampled by
 * i915_test_request_state().
 * NOTE(review): the precise meaning of each state is defined by that
 * function's implementation — confirm there before relying on it.
 */
enum i915_request_state {
	I915_REQUEST_UNKNOWN = 0,	/* state could not be determined */
	I915_REQUEST_COMPLETE,
	I915_REQUEST_PENDING,
	I915_REQUEST_QUEUED,
	I915_REQUEST_ACTIVE,
};
0721
0722 enum i915_request_state i915_test_request_state(struct i915_request *rq);
0723
0724 void i915_request_module_exit(void);
0725 int i915_request_module_init(void);
0726
0727 #endif