// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

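/*
 * Poison the whole context image with CONTEXT_REDZONE so that any stale
 * state that survives parking is easy to spot. Compiled out unless
 * CONFIG_DRM_I915_DEBUG_GEM is enabled.
 */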
static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = i915_coherent_map_type(ce->engine->i915, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj, NULL))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}

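/*
 * First reference on the engine wakeref: wake the GT, scrub the stale
 * kernel context image and restart the breadcrumbs and heartbeat.
 */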
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}

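/*
 * Fence callback for the parking request: record the time from emitting
 * the barrier request to its completion as a sample of the engine's
 * latency.
 */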
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready.
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	bool result = true;

	/*
	 * This is execlist specific behaviour intended to ensure the GPU is
	 * idle by switching to a known 'safe' context. With GuC submission,
	 * the same idle guarantee is achieved by other means (disabling
	 * scheduling). Further, switching to a 'safe' context has no effect
	 * with GuC submission as the scheduler can just switch back again.
	 *
	 * FIXME: Move this backend scheduler specific behaviour into the
	 * scheduler backend.
	 */
	if (intel_engine_uses_guc(engine))
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot,
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of __engine_park that prevents anyone else
	 * from creating a request on this engine. This also requires that
	 * the ring is empty and we avoid any waits while constructing the
	 * context, as they assume protection by the timeline->mutex. This
	 * should hold true as we can only park the engine after retiring
	 * the last request, thus all rings should be empty and all
	 * timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking
	 * the list of active timelines looking for completions. Meanwhile
	 * as soon as we call __i915_request_queue_bh(), the GPU may complete
	 * our request. Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and
	 * retire it, causing an underflow of the engine->wakeref.
	 */
	set_bit(CONTEXT_IS_PARKING, &ce->flags);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	clear_bit(CONTEXT_IS_PARKING, &ce->flags);
	return result;
}

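/*
 * Flush the accumulated idle barriers, signalling their callbacks with
 * -EAGAIN now that the engine is parked.
 */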
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

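/*
 * Last reference on the engine wakeref: switch to the kernel context
 * (or abort parking with -EBUSY), quiesce the heartbeat and breadcrumbs,
 * then drop our reference on the GT.
 */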
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 *
 * Typically the pinned context LMEM images lose or get their content
 * corrupted on suspend. This function resets their images.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	list_for_each_entry(ce, &engine->pinned_contexts_list,
			    pinned_contexts_link) {
		/* kernel context gets reset at __engine_unpark() */
		if (ce == engine->kernel_context)
			continue;

		dbg_poison_ce(ce);
		ce->ops->reset(ce);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif