// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

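/*
 * Program the physical address of the hardware status page. On gen4+ the
 * register also carries bits 32-35 of the address in its bits 4-7.
 */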
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
		drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
			 engine->name);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 2000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

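/*
 * When using the aliasing ppgtt, the GGTT is shadowed by a single page
 * directory: substitute the alias so that its PD registers are loaded.
 */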
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE_FW(engine, RING_CTL,
			RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL))
		goto err;

	if (GRAPHICS_VER(engine->i915) > 2)
		ENGINE_WRITE_FW(engine,
				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	drm_err(&engine->i915->drm,
		"%s initialization failed; "
		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		engine->name,
		ENGINE_READ(engine, RING_CTL),
		ENGINE_READ(engine, RING_CTL) & RING_VALID,
		ENGINE_READ(engine, RING_HEAD), ring->head,
		ENGINE_READ(engine, RING_TAIL), ring->tail,
		ENGINE_READ(engine, RING_START),
		i915_ggtt_offset(ring->vma));
	return -EIO;
}

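/*
 * The HWSP contents may have been lost across suspend/resume, so rewrite
 * each timeline's seqno slot before the engine is restarted.
 */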
static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) that the contents of our pinned buffers has been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);

	intel_engine_reset_pinned_contexts(engine);
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also as modern gpu as kbl can suffer
	 * from system hang if batchbuffer is progressing during
	 * reset.
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
			     "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
			     ENGINE_READ_FW(engine, RING_CTL),
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ENGINE_READ_FW(engine, RING_START));
		if (!stop_ring(engine)) {
			drm_err(&engine->i915->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

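/*
 * Submission on the legacy rings is a simple tail write: once the request
 * is ready, expose it to the CS by moving the ring's TAIL register.
 */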
static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	shmem_read(ce->engine->default_state, 0,
		   vaddr, ce->engine->context_size);

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}

static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->engine->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}

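/*
 * All the work is done in pre_pin (and the default state is written on
 * first use), so there is nothing left to do at pin time.
 */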
static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static void ring_context_revoke(struct intel_context *ce,
				struct i915_request *rq,
				unsigned int preempt_timeout_ms)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      current->comm);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.cancel_request = ring_context_cancel_request,

	.revoke = ring_context_revoke,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

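/*
 * Emit LRIs to point the ring at the vm's page directory, read the
 * register back (via an SRM to scratch) to serialise the update, and
 * then force a TLB invalidation.
 */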
static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;
	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}

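/*
 * Replay the L3 parity remapping for every slice this context dirtied,
 * clearing the pending mask once all slices have been re-emitted.
 */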
static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

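/*
 * Emit everything needed ahead of a request from a new context: clear
 * residuals via the workaround batch if required, load the right mm,
 * issue MI_SET_CONTEXT and replay any pending L3 remapping.
 */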
static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/*
	 * Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 RING_PSMI_CTL(GEN6_BSD_RING_BASE),
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/*
	 * Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->sanitize = xcs_sanitize;

	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->add_active_request = add_to_engine;
	engine->remove_active_request = remove_from_engine;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (GRAPHICS_VER(i915) == 5)
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (GRAPHICS_VER(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (GRAPHICS_VER(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 6) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

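/*
 * The same routine serves two purposes: called with a NULL vma it probes
 * and returns the size in bytes required for the clear-residuals batch,
 * and called with a pinned vma it emits that batch into it.
 */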
static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = ALIGN(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

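/*
 * One-time engine construction for the legacy ring submission backend:
 * install the per-class vfuncs, then create and pin the global timeline,
 * the ring itself and, on gen7 render, the clear-residuals batch.
 */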
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	gen7_wa_vma = gen7_ctx_vma(engine);
	if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);

retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
	if (!err && gen7_wa_vma)
		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
	if (!err)
		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif