// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_trace.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct kmem_cache *slab_ce;

static struct intel_context *intel_context_alloc(void)
{
    return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
}

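/*
 * The final free is deferred via call_rcu(): parts of the context (e.g.
 * ce->signal_link/lock, see intel_context_init()) are accessed under RCU,
 * so the backing memory must not be returned to the slab until a grace
 * period has elapsed.
 */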
static void rcu_context_free(struct rcu_head *rcu)
{
    struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

    trace_intel_context_free(ce);
    kmem_cache_free(slab_ce, ce);
}

void intel_context_free(struct intel_context *ce)
{
    call_rcu(&ce->rcu, rcu_context_free);
}

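/*
 * Allocate a new intel_context for @engine and initialise it, returning it
 * with a single reference held by the caller. The HW backing state is not
 * allocated here; that happens lazily on first pin via
 * intel_context_alloc_state() and the backend's ce->ops->alloc().
 */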
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
    struct intel_context *ce;

    ce = intel_context_alloc();
    if (!ce)
        return ERR_PTR(-ENOMEM);

    intel_context_init(ce, engine);
    trace_intel_context_create(ce);
    return ce;
}

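/*
 * Lazily allocate the backend state for the context via ce->ops->alloc().
 * This happens at most once per context, guarded by CONTEXT_ALLOC_BIT under
 * ce->pin_mutex, and is refused with -EIO if the context has already been
 * banned.
 */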
int intel_context_alloc_state(struct intel_context *ce)
{
    int err = 0;

    if (mutex_lock_interruptible(&ce->pin_mutex))
        return -EINTR;

    if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
        if (intel_context_is_banned(ce)) {
            err = -EIO;
            goto unlock;
        }

        err = ce->ops->alloc(ce);
        if (unlikely(err))
            goto unlock;

        set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
    }

unlock:
    mutex_unlock(&ce->pin_mutex);
    return err;
}

static int intel_context_active_acquire(struct intel_context *ce)
{
    int err;

    __i915_active_acquire(&ce->active);

    if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
        intel_context_is_parallel(ce))
        return 0;

    /* Preallocate tracking nodes */
    err = i915_active_acquire_preallocate_barrier(&ce->active,
                              ce->engine);
    if (err)
        i915_active_release(&ce->active);

    return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
    /* Nodes preallocated in intel_context_active() */
    i915_active_acquire_barrier(&ce->active);
    i915_active_release(&ce->active);
}

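/*
 * Pin the context state object high in the GGTT, above the backend's pin
 * bias, keep vma->active acquired for the duration of the pin, and mark the
 * object unshrinkable (and dirty) so the shrinker leaves it alone while it
 * may still be in use by the HW.
 */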
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
    unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
    int err;

    err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
    if (err)
        return err;

    err = i915_active_acquire(&vma->active);
    if (err)
        goto err_unpin;

    /*
     * And mark it as a globally pinned object to let the shrinker know
     * it cannot reclaim the object until we release it.
     */
    i915_vma_make_unshrinkable(vma);
    vma->obj->mm.dirty = true;

    return 0;

err_unpin:
    i915_vma_unpin(vma);
    return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
    i915_vma_make_shrinkable(vma);
    i915_active_release(&vma->active);
    __i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring,
             struct i915_gem_ww_ctx *ww)
{
    int err;

    err = intel_ring_pin(ring, ww);
    if (err)
        return err;

    err = i915_active_acquire(&ring->vma->active);
    if (err)
        goto err_pin;

    return 0;

err_pin:
    intel_ring_unpin(ring);
    return err;
}

static void __ring_retire(struct intel_ring *ring)
{
    i915_active_release(&ring->vma->active);
    intel_ring_unpin(ring);
}

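/*
 * Take the "outer" pins required before the backend pin: the ring, the
 * timeline and (if present) the context state object. On error everything
 * acquired so far is released in reverse order; intel_context_post_unpin()
 * is the matching teardown for the success path.
 */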
static int intel_context_pre_pin(struct intel_context *ce,
                 struct i915_gem_ww_ctx *ww)
{
    int err;

    CE_TRACE(ce, "active\n");

    err = __ring_active(ce->ring, ww);
    if (err)
        return err;

    err = intel_timeline_pin(ce->timeline, ww);
    if (err)
        goto err_ring;

    if (!ce->state)
        return 0;

    err = __context_pin_state(ce->state, ww);
    if (err)
        goto err_timeline;

    return 0;

err_timeline:
    intel_timeline_unpin(ce->timeline);
err_ring:
    __ring_retire(ce->ring);
    return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
    if (ce->state)
        __context_unpin_state(ce->state);

    intel_timeline_unpin(ce->timeline);
    __ring_retire(ce->ring);
}

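/*
 * Pin the context for use on the HW under the caller's ww acquire context.
 * All backing objects (HWSP, ring and, if present, context state) are locked
 * up front so that the dma_resv locks are always taken outside ce->pin_mutex
 * (see the comment below); the first pin acquires ce->active and calls the
 * backend's ce->ops->pin(), while later pins merely bump ce->pin_count.
 */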
int __intel_context_do_pin_ww(struct intel_context *ce,
                  struct i915_gem_ww_ctx *ww)
{
    bool handoff = false;
    void *vaddr;
    int err = 0;

    if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
        err = intel_context_alloc_state(ce);
        if (err)
            return err;
    }

    /*
     * We always pin the context/ring/timeline here, to ensure a pin
     * refcount for __intel_context_active(), which prevents a lock
     * inversion of ce->pin_mutex vs dma_resv_lock().
     */

    err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
    if (!err)
        err = i915_gem_object_lock(ce->ring->vma->obj, ww);
    if (!err && ce->state)
        err = i915_gem_object_lock(ce->state->obj, ww);
    if (!err)
        err = intel_context_pre_pin(ce, ww);
    if (err)
        return err;

    err = ce->ops->pre_pin(ce, ww, &vaddr);
    if (err)
        goto err_ctx_unpin;

    err = i915_active_acquire(&ce->active);
    if (err)
        goto err_post_unpin;

    err = mutex_lock_interruptible(&ce->pin_mutex);
    if (err)
        goto err_release;

    intel_engine_pm_might_get(ce->engine);

    if (unlikely(intel_context_is_closed(ce))) {
        err = -ENOENT;
        goto err_unlock;
    }

    if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
        err = intel_context_active_acquire(ce);
        if (unlikely(err))
            goto err_unlock;

        err = ce->ops->pin(ce, vaddr);
        if (err) {
            intel_context_active_release(ce);
            goto err_unlock;
        }

        CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
             i915_ggtt_offset(ce->ring->vma),
             ce->ring->head, ce->ring->tail);

        handoff = true;
        smp_mb__before_atomic(); /* flush pin before it is visible */
        atomic_inc(&ce->pin_count);
    }

    GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

    trace_intel_context_do_pin(ce);

err_unlock:
    mutex_unlock(&ce->pin_mutex);
err_release:
    i915_active_release(&ce->active);
err_post_unpin:
    if (!handoff)
        ce->ops->post_unpin(ce);
err_ctx_unpin:
    intel_context_post_unpin(ce);

    /*
     * Unlock the hwsp_ggtt object since it's shared.
     * In principle we can unlock all the global state locked above
     * since it's pinned and doesn't need fencing, and will
     * thus remain resident until it is explicitly unpinned.
     */
    i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

    return err;
}

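/*
 * Non-ww entry point: wrap __intel_context_do_pin_ww() in the usual
 * i915_gem_ww_ctx retry loop, backing off and retrying on -EDEADLK until
 * the pin either succeeds or fails with a real error.
 */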
int __intel_context_do_pin(struct intel_context *ce)
{
    struct i915_gem_ww_ctx ww;
    int err;

    i915_gem_ww_ctx_init(&ww, true);
retry:
    err = __intel_context_do_pin_ww(ce, &ww);
    if (err == -EDEADLK) {
        err = i915_gem_ww_ctx_backoff(&ww);
        if (!err)
            goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);
    return err;
}

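/*
 * Drop @sub pin references from the context. Only when the count hits zero
 * is the backend unpinned and ce->active released; a temporary context
 * reference is taken around that release because dropping the active
 * reference may otherwise free the context while it is still being
 * dereferenced here (see the comment in the body).
 */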
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
    if (!atomic_sub_and_test(sub, &ce->pin_count))
        return;

    CE_TRACE(ce, "unpin\n");
    ce->ops->unpin(ce);
    ce->ops->post_unpin(ce);

    /*
     * Once released, we may asynchronously drop the active reference.
     * As that may be the only reference keeping the context alive,
     * take an extra now so that it is not freed before we finish
     * dereferencing it.
     */
    intel_context_get(ce);
    intel_context_active_release(ce);
    trace_intel_context_do_unpin(ce);
    intel_context_put(ce);
}

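/*
 * ce->active callbacks: __intel_context_active() runs when the context
 * first becomes busy and takes its own pins on the ring, timeline and state
 * (all already activated by intel_context_pre_pin()), while
 * __intel_context_retire() runs once the last request is retired and
 * releases those pins again.
 */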
static void __intel_context_retire(struct i915_active *active)
{
    struct intel_context *ce = container_of(active, typeof(*ce), active);

    CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
         intel_context_get_total_runtime_ns(ce),
         intel_context_get_avg_runtime_ns(ce));

    set_bit(CONTEXT_VALID_BIT, &ce->flags);
    intel_context_post_unpin(ce);
    intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
    struct intel_context *ce = container_of(active, typeof(*ce), active);

    intel_context_get(ce);

    /* everything should already be activated by intel_context_pre_pin() */
    GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
    __intel_ring_pin(ce->ring);

    __intel_timeline_pin(ce->timeline);

    if (ce->state) {
        GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
        __i915_vma_pin(ce->state);
        i915_vma_make_unshrinkable(ce->state);
    }

    return 0;
}

static int
sw_fence_dummy_notify(struct i915_sw_fence *sf,
              enum i915_sw_fence_notify state)
{
    return NOTIFY_DONE;
}

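/*
 * One-time setup of a freshly allocated context: take a reference on the
 * engine's VM, initialise the locks, lists and GuC-related state, and
 * register __intel_context_active()/__intel_context_retire() with the
 * ce->active tracker. The ring is not created here; only its default size
 * (SZ_4K) is recorded.
 */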
void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
    GEM_BUG_ON(!engine->cops);
    GEM_BUG_ON(!engine->gt->vm);

    kref_init(&ce->ref);

    ce->engine = engine;
    ce->ops = engine->cops;
    ce->sseu = engine->sseu;
    ce->ring = NULL;
    ce->ring_size = SZ_4K;

    ewma_runtime_init(&ce->stats.runtime.avg);

    ce->vm = i915_vm_get(engine->gt->vm);

    /* NB ce->signal_link/lock is used under RCU */
    spin_lock_init(&ce->signal_lock);
    INIT_LIST_HEAD(&ce->signals);

    mutex_init(&ce->pin_mutex);

    spin_lock_init(&ce->guc_state.lock);
    INIT_LIST_HEAD(&ce->guc_state.fences);
    INIT_LIST_HEAD(&ce->guc_state.requests);

    ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
    INIT_LIST_HEAD(&ce->guc_id.link);

    INIT_LIST_HEAD(&ce->destroyed_link);

    INIT_LIST_HEAD(&ce->parallel.child_list);

    /*
     * Initialize the fence to be complete, as it is expected to be
     * complete unless there is a pending schedule disable outstanding.
     */
    i915_sw_fence_init(&ce->guc_state.blocked,
               sw_fence_dummy_notify);
    i915_sw_fence_commit(&ce->guc_state.blocked);

    i915_active_init(&ce->active,
             __intel_context_active, __intel_context_retire, 0);
}

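/*
 * Undo intel_context_init(): release the timeline and VM references, drop
 * the creation references held on any children of a parallel context, and
 * tear down the locks, the active tracker and the blocked fence.
 */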
void intel_context_fini(struct intel_context *ce)
{
    struct intel_context *child, *next;

    if (ce->timeline)
        intel_timeline_put(ce->timeline);
    i915_vm_put(ce->vm);

    /* Need to put the creation ref for the children */
    if (intel_context_is_parent(ce))
        for_each_child_safe(ce, child, next)
            intel_context_put(child);

    mutex_destroy(&ce->pin_mutex);
    i915_active_fini(&ce->active);
    i915_sw_fence_fini(&ce->guc_state.blocked);
}

void i915_context_module_exit(void)
{
    kmem_cache_destroy(slab_ce);
}

int __init i915_context_module_init(void)
{
    slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
    if (!slab_ce)
        return -ENOMEM;

    return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
    intel_engine_pm_get(ce->engine);
    intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
    intel_timeline_exit(ce->timeline);
    intel_engine_pm_put(ce->engine);
}

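/*
 * Prepare @rq, which belongs to a different context, to modify this context
 * (e.g. to rewrite its register state). The request is ordered behind any
 * activity already queued on our timeline and is then added to ce->active,
 * so the target context stays pinned until the modification has been
 * applied.
 */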
int intel_context_prepare_remote_request(struct intel_context *ce,
                     struct i915_request *rq)
{
    struct intel_timeline *tl = ce->timeline;
    int err;

    /* Only suitable for use in remotely modifying this context */
    GEM_BUG_ON(rq->context == ce);

    if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
        /* Queue this switch after current activity by this context. */
        err = i915_active_fence_set(&tl->last_request, rq);
        if (err)
            return err;
    }

    /*
     * Guarantee that the context image and the timeline remain pinned
     * until the modifying request is retired, by setting the ce activity
     * tracker.
     *
     * We only need to take one pin on their account; in other words,
     * transfer the pinned ce object to the tracked active request.
     */
    GEM_BUG_ON(i915_active_is_idle(&ce->active));
    return i915_active_add_request(&ce->active, rq);
}

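/*
 * Convenience helper that pins the context, creates a request on it and
 * unpins again, handling the ww -EDEADLK backoff/retry internally. The
 * lockdep annotations on ce->timeline->mutex are re-jigged at the end to
 * work around the inner/outer lock ordering in the selftests (see the
 * comment in the body).
 */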
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
    struct i915_gem_ww_ctx ww;
    struct i915_request *rq;
    int err;

    i915_gem_ww_ctx_init(&ww, true);
retry:
    err = intel_context_pin_ww(ce, &ww);
    if (!err) {
        rq = i915_request_create(ce);
        intel_context_unpin(ce);
    } else if (err == -EDEADLK) {
        err = i915_gem_ww_ctx_backoff(&ww);
        if (!err)
            goto retry;
        rq = ERR_PTR(err);
    } else {
        rq = ERR_PTR(err);
    }

    i915_gem_ww_ctx_fini(&ww);

    if (IS_ERR(rq))
        return rq;

    /*
     * timeline->mutex should be the inner lock, but is used as the outer
     * lock. Hack around this to shut up lockdep in the selftests.
     */
    lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
    mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
    mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
    rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

    return rq;
}

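/*
 * Walk the GuC-managed request list backwards to find the oldest request
 * submitted on @ce that has not yet completed. For parallel contexts the
 * list lives on the parent, so each request's context has to be compared
 * against @ce (see the comment in the body). Returns NULL if the context
 * is idle.
 */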
struct i915_request *intel_context_find_active_request(struct intel_context *ce)
{
    struct intel_context *parent = intel_context_to_parent(ce);
    struct i915_request *rq, *active = NULL;
    unsigned long flags;

    GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

    /*
     * We search the parent list to find an active request on the submitted
     * context. The parent list contains the requests for all the contexts
     * in the relationship so we have to do a compare of each request's
     * context.
     */
    spin_lock_irqsave(&parent->guc_state.lock, flags);
    list_for_each_entry_reverse(rq, &parent->guc_state.requests,
                    sched.link) {
        if (rq->context != ce)
            continue;
        if (i915_request_completed(rq))
            break;

        active = rq;
    }
    spin_unlock_irqrestore(&parent->guc_state.lock, flags);

    return active;
}

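/*
 * Link @child into @parent's parallel-submission relationship: assign it
 * the next child index and append it to the parent's child list. The
 * GEM_BUG_ONs document the preconditions: neither context may already be
 * pinned, and the child must not already be part of another relationship.
 */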
void intel_context_bind_parent_child(struct intel_context *parent,
                     struct intel_context *child)
{
    /*
     * It is the caller's responsibility to validate that this function is
     * used correctly, but we use GEM_BUG_ON here to ensure that they do.
     */
    GEM_BUG_ON(intel_context_is_pinned(parent));
    GEM_BUG_ON(intel_context_is_child(parent));
    GEM_BUG_ON(intel_context_is_pinned(child));
    GEM_BUG_ON(intel_context_is_child(child));
    GEM_BUG_ON(intel_context_is_parent(child));

    child->parallel.child_index = parent->parallel.number_children++;
    list_add_tail(&child->parallel.child_link,
              &parent->parallel.child_list);
    child->parallel.parent = parent;
}

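/*
 * Runtime statistics: the backend accumulates runtime either directly in
 * nanoseconds or in GT clock cycles (COPS_RUNTIME_CYCLES), in which case
 * the values are converted using the GT clock period. Any currently
 * running period (ce->stats.active) is added on top of the accumulated
 * total.
 */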
u64 intel_context_get_total_runtime_ns(const struct intel_context *ce)
{
    u64 total, active;

    total = ce->stats.runtime.total;
    if (ce->ops->flags & COPS_RUNTIME_CYCLES)
        total *= ce->engine->gt->clock_period_ns;

    active = READ_ONCE(ce->stats.active);
    if (active)
        active = intel_context_clock() - active;

    return total + active;
}

u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
    u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);

    if (ce->ops->flags & COPS_RUNTIME_CYCLES)
        avg *= ce->engine->gt->clock_period_ns;

    return avg;
}

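/*
 * Both helpers below mark the context as no longer allowed to run (banned,
 * or merely exiting for a non-persistent context) and, if the backend
 * implements ce->ops->revoke(), ask it to kick the context off the HW: a
 * ban uses INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS, while a non-persistent
 * exit uses the engine's configured preempt_timeout_ms.
 */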
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
{
    bool ret = intel_context_set_banned(ce);

    trace_intel_context_ban(ce);

    if (ce->ops->revoke)
        ce->ops->revoke(ce, rq,
                INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS);

    return ret;
}

bool intel_context_exit_nonpersistent(struct intel_context *ce,
                      struct i915_request *rq)
{
    bool ret = intel_context_set_exiting(ce);

    if (ce->ops->revoke)
        ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);

    return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif