// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

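/*
 * Retire completed requests along this timeline, oldest first. Returns true
 * only if every request was retired and nothing new has been submitted in
 * the meantime, i.e. the timeline is idle.
 */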
static bool retire_requests(struct intel_timeline *tl)
{
    struct i915_request *rq, *rn;

    list_for_each_entry_safe(rq, rn, &tl->requests, link)
        if (!i915_request_retire(rq))
            return false;

    /* And check nothing new was submitted */
    return !i915_active_fence_isset(&tl->last_request);
}

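/* Any requests left on the kernel context timeline mean the engine is busy. */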
static bool engine_active(const struct intel_engine_cs *engine)
{
    return !list_empty(&engine->kernel_context->timeline->requests);
}

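/*
 * Flush the submission backends and background retirement for each engine,
 * returning true if any engine still has outstanding work (e.g. an idle
 * barrier) afterwards. Skipped when no timeout was given or the GT is
 * already asleep.
 */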
static bool flush_submission(struct intel_gt *gt, long timeout)
{
    struct intel_engine_cs *engine;
    enum intel_engine_id id;
    bool active = false;

    if (!timeout)
        return false;

    if (!intel_gt_pm_is_awake(gt))
        return false;

    for_each_engine(engine, gt, id) {
        intel_engine_flush_submission(engine);

        /* Flush the background retirement and idle barriers */
        flush_work(&engine->retire_work);
        flush_delayed_work(&engine->wakeref.work);

        /* Is the idle barrier still outstanding? */
        active |= engine_active(engine);
    }

    return active;
}

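/*
 * Worker that drains the engine's retirement list built by add_retire(),
 * retiring each timeline whose lock can be taken without blocking and
 * dropping the reference taken when it was queued.
 */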
static void engine_retire(struct work_struct *work)
{
    struct intel_engine_cs *engine =
        container_of(work, typeof(*engine), retire_work);
    struct intel_timeline *tl = xchg(&engine->retire, NULL);

    do {
        struct intel_timeline *next = xchg(&tl->retire, NULL);

        /*
         * Our goal here is to retire _idle_ timelines as soon as
         * possible (as they are idle, we do not expect userspace
         * to be cleaning up anytime soon).
         *
         * If the timeline is currently locked, either it is being
         * retired elsewhere or about to be!
         */
        if (mutex_trylock(&tl->mutex)) {
            retire_requests(tl);
            mutex_unlock(&tl->mutex);
        }
        intel_timeline_put(tl);

        GEM_BUG_ON(!next);
        tl = ptr_mask_bits(next, 1);
    } while (tl);
}

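/*
 * Push a timeline onto the engine's lock-free retirement list. Returns true
 * if the list was previously empty, i.e. the caller should kick the retire
 * worker.
 */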
static bool add_retire(struct intel_engine_cs *engine,
                       struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
    struct intel_timeline *first;

    /*
     * We open-code a llist here to include the additional tag [BIT(0)]
     * so that we know when the timeline is already on a
     * retirement queue: either this engine or another.
     */

    if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
        return false;

    intel_timeline_get(tl);
    first = READ_ONCE(engine->retire);
    do
        tl->retire = ptr_pack_bits(first, 1, 1);
    while (!try_cmpxchg(&engine->retire, &first, tl));

    return !first;
}

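/*
 * Hand a timeline over to the engine's background retirement worker,
 * scheduling the worker when this is the first entry on its list.
 */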
void intel_engine_add_retire(struct intel_engine_cs *engine,
                             struct intel_timeline *tl)
{
    /* We don't deal well with the engine disappearing beneath us */
    GEM_BUG_ON(intel_engine_is_virtual(engine));

    if (add_retire(engine, tl))
        schedule_work(&engine->retire_work);
}

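/* Set up the engine's background retirement worker. */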
void intel_engine_init_retire(struct intel_engine_cs *engine)
{
    INIT_WORK(&engine->retire_work, engine_retire);
}

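/* Flush the retirement worker and check nothing was left queued. */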
void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
    flush_work(&engine->retire_work);
    GEM_BUG_ON(engine->retire);
}

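/*
 * Retire requests across all active timelines on the GT, waiting up to
 * @timeout jiffies for the last request on each timeline to complete when a
 * positive timeout is given. Timelines that remain busy (or whose lock is
 * contended) are counted; returns the remaining timeout if anything is
 * still active, or 0 once the GT is idle.
 */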
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
                                      long *remaining_timeout)
{
    struct intel_gt_timelines *timelines = &gt->timelines;
    struct intel_timeline *tl, *tn;
    unsigned long active_count = 0;
    LIST_HEAD(free);

    flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
    spin_lock(&timelines->lock);
    list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
        if (!mutex_trylock(&tl->mutex)) {
            active_count++; /* report busy to caller, try again? */
            continue;
        }

        intel_timeline_get(tl);
        GEM_BUG_ON(!atomic_read(&tl->active_count));
        atomic_inc(&tl->active_count); /* pin the list element */
        spin_unlock(&timelines->lock);

        if (timeout > 0) {
            struct dma_fence *fence;

            fence = i915_active_fence_get(&tl->last_request);
            if (fence) {
                mutex_unlock(&tl->mutex);

                timeout = dma_fence_wait_timeout(fence,
                                                 true,
                                                 timeout);
                dma_fence_put(fence);

                /* Retirement is best effort */
                if (!mutex_trylock(&tl->mutex)) {
                    active_count++;
                    goto out_active;
                }
            }
        }

        if (!retire_requests(tl))
            active_count++;
        mutex_unlock(&tl->mutex);

out_active: spin_lock(&timelines->lock);

        /* Resume list iteration after reacquiring spinlock */
        list_safe_reset_next(tl, tn, link);
        if (atomic_dec_and_test(&tl->active_count))
            list_del(&tl->link);

        /* Defer the final release to after the spinlock */
        if (refcount_dec_and_test(&tl->kref.refcount)) {
            GEM_BUG_ON(atomic_read(&tl->active_count));
            list_add(&tl->link, &free);
        }
    }
    spin_unlock(&timelines->lock);

    list_for_each_entry_safe(tl, tn, &free, link)
        __intel_timeline_free(&tl->kref);

    if (flush_submission(gt, timeout)) /* Wait, there's more! */
        active_count++;

    if (remaining_timeout)
        *remaining_timeout = timeout;

    return active_count ? timeout : 0;
}

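/*
 * Periodic housekeeping: re-arm the delayed work to run again in roughly a
 * second, then retire whatever has completed since the last pass.
 */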
static void retire_work_handler(struct work_struct *work)
{
    struct intel_gt *gt =
        container_of(work, typeof(*gt), requests.retire_work.work);

    schedule_delayed_work(&gt->requests.retire_work,
                          round_jiffies_up_relative(HZ));
    intel_gt_retire_requests(gt);
}

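/* Prepare the GT's periodic retirement worker. */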
void intel_gt_init_requests(struct intel_gt *gt)
{
    INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

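/* Stop periodic retirement while the GT is parked. */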
void intel_gt_park_requests(struct intel_gt *gt)
{
    cancel_delayed_work(&gt->requests.retire_work);
}

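/* Resume periodic retirement once the GT is awake again. */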
void intel_gt_unpark_requests(struct intel_gt *gt)
{
    schedule_delayed_work(&gt->requests.retire_work,
                          round_jiffies_up_relative(HZ));
}

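/* Quiesce the background workers before the GT is torn down. */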
void intel_gt_fini_requests(struct intel_gt *gt)
{
    /* Wait until the work is marked as finished before unloading! */
    cancel_delayed_work_sync(&gt->requests.retire_work);

    flush_work(&gt->watchdog.work);
}

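/*
 * Watchdog: cancel every request on the GT's watchdog list that has blown
 * past its fence expiration without completing, then drop our reference on
 * each request.
 */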
void intel_gt_watchdog_work(struct work_struct *work)
{
    struct intel_gt *gt =
        container_of(work, typeof(*gt), watchdog.work);
    struct i915_request *rq, *rn;
    struct llist_node *first;

    first = llist_del_all(&gt->watchdog.list);
    if (!first)
        return;

    llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
        if (!i915_request_completed(rq)) {
            struct dma_fence *f = &rq->fence;

            pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
                      f->ops->get_driver_name(f),
                      f->ops->get_timeline_name(f),
                      f->seqno);
            i915_request_cancel(rq, -EINTR);
        }
        i915_request_put(rq);
    }
}