/* Source extracted from an LXR-style code browser (OSCL-LXR); viewer chrome and
 * per-line number prefixes are artifacts of the extraction, not the code. */

0001 // SPDX-License-Identifier: MIT
0002 /*
0003  * Copyright © 2019 Intel Corporation
0004  */
0005 
0006 #include <linux/string_helpers.h>
0007 #include <linux/suspend.h>
0008 
0009 #include "i915_drv.h"
0010 #include "i915_params.h"
0011 #include "intel_context.h"
0012 #include "intel_engine_pm.h"
0013 #include "intel_gt.h"
0014 #include "intel_gt_clock_utils.h"
0015 #include "intel_gt_pm.h"
0016 #include "intel_gt_requests.h"
0017 #include "intel_llc.h"
0018 #include "intel_pm.h"
0019 #include "intel_rc6.h"
0020 #include "intel_rps.h"
0021 #include "intel_wakeref.h"
0022 #include "pxp/intel_pxp_pm.h"
0023 
0024 #define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
0025 
0026 static void user_forcewake(struct intel_gt *gt, bool suspend)
0027 {
0028     int count = atomic_read(&gt->user_wakeref);
0029 
0030     /* Inside suspend/resume so single threaded, no races to worry about. */
0031     if (likely(!count))
0032         return;
0033 
0034     intel_gt_pm_get(gt);
0035     if (suspend) {
0036         GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
0037         atomic_sub(count, &gt->wakeref.count);
0038     } else {
0039         atomic_add(count, &gt->wakeref.count);
0040     }
0041     intel_gt_pm_put(gt);
0042 }
0043 
/*
 * Open a GT busyness accounting interval: record the wall-clock start and
 * mark the GT active, inside a seqcount write section so that readers
 * (intel_gt_get_awake_time()) never observe a torn start/active pair.
 * IRQs are disabled across the write section; presumably so a reader in
 * interrupt context on this CPU cannot spin on the odd seqcount — TODO
 * confirm against the seqcount_mutex_t requirements.
 */
static void runtime_begin(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.start = ktime_get();
	gt->stats.active = true;
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}
0053 
/*
 * Close the GT busyness interval opened by runtime_begin(): mark the GT
 * idle and fold the elapsed time since stats.start into the running
 * stats.total. Same seqcount/IRQ discipline as runtime_begin() so readers
 * see a consistent {active, start, total} triple.
 */
static void runtime_end(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.active = false;
	gt->stats.total =
		ktime_add(gt->stats.total,
			  ktime_sub(ktime_get(), gt->stats.start));
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}
0065 
/*
 * Wakeref "get" callback (see wf_ops): bring the GT out of its parked
 * state on the first wakeref acquisition. Called with the wakeref mutex
 * held; the ordering below (power domain first, busyness timer last) is
 * the mirror image of __gt_park(). Always returns 0.
 */
static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	/* Wake up power-management subsystems and PMU/GuC busyness tracking. */
	intel_rc6_unpark(&gt->rc6);
	intel_rps_unpark(&gt->rps);
	i915_pmu_gt_unparked(i915);
	intel_guc_busyness_unpark(gt);

	/* Restart request retirement and open the awake-time interval. */
	intel_gt_unpark_requests(gt);
	runtime_begin(gt);

	return 0;
}
0097 
/*
 * Wakeref "put" callback (see wf_ops): park the GT when the last wakeref
 * is released. Tears down in exact reverse order of __gt_unpark(), ending
 * with an asynchronous (deferred) release of the display power well.
 * Always returns 0.
 */
static int __gt_park(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	/* Claim the display power reference taken in __gt_unpark(). */
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	/* Close the awake-time interval and stop request retirement. */
	runtime_end(gt);
	intel_gt_park_requests(gt);

	/* Quiesce busyness tracking, vma bookkeeping and PM subsystems. */
	intel_guc_busyness_park(gt);
	i915_vma_parked(gt);
	i915_pmu_gt_parked(i915);
	intel_rps_park(&gt->rps);
	intel_rc6_park(&gt->rc6);

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	/* Defer dropping the display power well for 100ms, it's slow! */
	GEM_BUG_ON(!wakeref);
	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}
0124 
/* Park/unpark callbacks invoked by the intel_wakeref machinery on the
 * 0 -> 1 (get) and 1 -> 0 (put) wakeref-count transitions. */
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
};
0129 
/*
 * Early (pre-hardware) GT PM setup: initialise the GT wakeref with its
 * park/unpark ops and the seqcount protecting the busyness statistics.
 * Must run before any intel_gt_pm_get()/put() caller.
 */
void intel_gt_pm_init_early(struct intel_gt *gt)
{
	/*
	 * We access the runtime_pm structure via gt->i915 here rather than
	 * gt->uncore as we do elsewhere in the file because gt->uncore is not
	 * yet initialized for all tiles at this point in the driver startup.
	 * runtime_pm is per-device rather than per-tile, so this is still the
	 * correct structure.
	 */
	intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
	/* stats.lock is a seqcount serialized by the wakeref mutex. */
	seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
0142 
/* Initialise the RC6 and RPS power-management features for this GT. */
void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
	intel_rps_init(&gt->rps);
}
0153 
0154 static bool reset_engines(struct intel_gt *gt)
0155 {
0156     if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
0157         return false;
0158 
0159     return __intel_gt_reset(gt, ALL_ENGINES) == 0;
0160 }
0161 
/*
 * Scrub the GT back to a known state after (re)gaining control of the
 * hardware, e.g. across resume from D3. Optionally (@force, or when a
 * plain engine reset is available) resets every engine. Takes a raw
 * runtime-pm wakeref plus full forcewake for the duration.
 */
static void gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	GT_TRACE(gt, "force:%s", str_yes_no(force));

	/* Use a raw wakeref to avoid calling intel_display_power_get early */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	/* Re-measure clock frequencies that may have changed across suspend. */
	intel_gt_check_clock_frequency(gt);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	/* For GuC mode, ensure submission is disabled before stopping ring */
	intel_uc_reset_prepare(&gt->uc);

	/* Quiesce and scrub per-engine state before any resets are issued. */
	for_each_engine(engine, gt, id) {
		if (engine->reset.prepare)
			engine->reset.prepare(engine);

		if (engine->sanitize)
			engine->sanitize(engine);
	}

	/* Reset engine state; when forced, do so even if the HW reset failed. */
	if (reset_engines(gt) || force) {
		for_each_engine(engine, gt, id)
			__intel_engine_reset(engine, false);
	}

	intel_uc_reset(&gt->uc, false);

	/* Undo reset.prepare() now that the reset sequence is complete. */
	for_each_engine(engine, gt, id)
		if (engine->reset.finish)
			engine->reset.finish(engine);

	intel_rps_sanitize(&gt->rps);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
0212 
/* Release GT PM resources; counterpart of intel_gt_pm_init(). Only RC6
 * holds state needing explicit teardown (RPS has no fini here). */
void intel_gt_pm_fini(struct intel_gt *gt)
{
	intel_rc6_fini(&gt->rc6);
}
0217 
/*
 * Bring the GT back to life after suspend/reset: sanitize the hardware,
 * re-initialise it, resume every engine and re-enable power management.
 * Returns 0 on success, a negative error code if the GT was already
 * unrecoverable, or -EIO after wedging the GT on failure.
 */
int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/* Bail out immediately if a previous error left the GT unusable. */
	err = intel_gt_has_unrecoverable_error(gt);
	if (err)
		return err;

	GT_TRACE(gt, "\n");

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	gt_sanitize(gt, true);

	/* Hold the GT awake (and forcewake) for the whole resume sequence. */
	intel_gt_pm_get(gt);

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);
	if (intel_gt_is_wedged(gt)) {
		err = -EIO;
		goto out_fw;
	}

	/* Only when the HW is re-initialised, can we replay the requests */
	err = intel_gt_init_hw(gt);
	if (err) {
		i915_probe_error(gt->i915,
				 "Failed to initialize GPU, declaring it wedged!\n");
		goto err_wedged;
	}

	intel_uc_reset_finish(&gt->uc);

	intel_rps_enable(&gt->rps);
	intel_llc_enable(&gt->llc);

	/* Restart each engine; any failure wedges the whole GT. */
	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);

		engine->serial++; /* kernel context lost */
		err = intel_engine_resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			drm_err(&gt->i915->drm,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			goto err_wedged;
		}
	}

	intel_rc6_enable(&gt->rc6);

	intel_uc_resume(&gt->uc);

	intel_pxp_resume(&gt->pxp);

	/* Re-apply any user forcewake removed by intel_gt_suspend_prepare(). */
	user_forcewake(gt, false);

out_fw:
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);
	return err;

err_wedged:
	/* Mark the GT wedged, then fall through to the common unwind path. */
	intel_gt_set_wedged(gt);
	goto out_fw;
}
0292 
/*
 * Wait for the GT to idle before suspend. If it fails to idle within
 * I915_GT_SUSPEND_IDLE_TIMEOUT, forcibly wedge the GT and cancel all
 * outstanding work rather than suspending with the GPU busy. Returns
 * only once the GT wakeref has fully dropped (the GT is parked).
 */
static void wait_for_suspend(struct intel_gt *gt)
{
	if (!intel_gt_pm_is_awake(gt))
		return;

	if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) {
		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		intel_gt_retire_requests(gt);
	}

	intel_gt_pm_wait_for_idle(gt);
}
0309 
/*
 * First phase of GT suspend: drop any user-held forcewake so it cannot
 * keep the GT awake, wait for (or force) idleness, then let PXP prepare
 * for suspend. Paired with intel_gt_resume()/user_forcewake(gt, false).
 */
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
	user_forcewake(gt, true);
	wait_for_suspend(gt);

	intel_pxp_suspend_prepare(&gt->pxp);
}
0317 
/*
 * Report the system suspend state being entered. When the kernel lacks
 * CONFIG_SUSPEND/CONFIG_PM_SLEEP there is no pm_suspend_target_state, so
 * conservatively report s2idle — the caller then keeps the device powered.
 */
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
	return pm_suspend_target_state;
#else
	return PM_SUSPEND_TO_IDLE;
#endif
}
0326 
/*
 * Final phase of GT suspend: with the GT already idle, suspend the
 * microcontrollers and PXP, and — unless we are merely entering s2idle —
 * disable power management and sanitize the hardware before the device
 * loses power. Mock GTs (selftests) have no hardware to touch.
 */
void intel_gt_suspend_late(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* We expect to be idle already; but also want to be independent */
	wait_for_suspend(gt);

	if (is_mock_gt(gt))
		return;

	/* The GT must be parked by now; gt->awake is only set while unparked. */
	GEM_BUG_ON(gt->awake);

	intel_uc_suspend(&gt->uc);
	intel_pxp_suspend(&gt->pxp);

	/*
	 * On disabling the device, we want to turn off HW access to memory
	 * that we no longer own.
	 *
	 * However, not all suspend-states disable the device. S0 (s2idle)
	 * is effectively runtime-suspend, the device is left powered on
	 * but needs to be put into a low power state. We need to keep
	 * powermanagement enabled, but we also retain system state and so
	 * it remains safe to keep on using our allocated memory.
	 */
	if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
		return;

	/* Need the device awake to poke the PM registers below. */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		intel_rps_disable(&gt->rps);
		intel_rc6_disable(&gt->rc6);
		intel_llc_disable(&gt->llc);
	}

	gt_sanitize(gt, false);

	GT_TRACE(gt, "\n");
}
0365 
/* Runtime-suspend hook for the GT: quiesce PXP, then the uC (GuC/HuC).
 * Much lighter than full system suspend — no HW sanitize is needed. */
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_pxp_runtime_suspend(&gt->pxp);
	intel_uc_runtime_suspend(&gt->uc);

	GT_TRACE(gt, "\n");
}
0373 
/*
 * Runtime-resume hook for the GT: restore swizzling and GGTT fence
 * registers, then resume the uC and PXP. Returns 0 on success or the
 * error from intel_uc_runtime_resume() (PXP resume is skipped on error).
 */
int intel_gt_runtime_resume(struct intel_gt *gt)
{
	int ret;

	GT_TRACE(gt, "\n");
	intel_gt_init_swizzling(gt);
	intel_ggtt_restore_fences(gt->ggtt);

	ret = intel_uc_runtime_resume(&gt->uc);
	if (ret)
		return ret;

	intel_pxp_runtime_resume(&gt->pxp);

	return 0;
}
0390 
0391 static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
0392 {
0393     ktime_t total = gt->stats.total;
0394 
0395     if (gt->stats.active)
0396         total = ktime_add(total,
0397                   ktime_sub(ktime_get(), gt->stats.start));
0398 
0399     return total;
0400 }
0401 
/*
 * Return the total time the GT has been awake (unparked). Uses the
 * standard seqcount read-retry loop against gt->stats.lock so the
 * {active, start, total} snapshot is consistent with concurrent
 * runtime_begin()/runtime_end() writers.
 */
ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&gt->stats.lock);
		total = __intel_gt_get_awake_time(gt);
	} while (read_seqcount_retry(&gt->stats.lock, seq));

	return total;
}
0414 
0415 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
0416 #include "selftest_gt_pm.c"
0417 #endif