/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
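
/*
 * Illustrative sketch of the canonical usage pattern (assumes a
 * struct drm_i915_private *i915 in scope):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *	...touch the hardware...
 *	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 *
 * Every get must be paired with a put of the same cookie; the debug
 * tracking below exists to catch violations of that rule.
 */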

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
    unsigned long entries[STACKDEPTH];
    unsigned int n;

    n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
    return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
    spin_lock_init(&rpm->debug.lock);
    stack_depot_init();
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
    depot_stack_handle_t stack, *stacks;
    unsigned long flags;

    if (rpm->no_wakeref_tracking)
        return -1;

    stack = __save_depot_stack();
    if (!stack)
        return -1;

    spin_lock_irqsave(&rpm->debug.lock, flags);

    if (!rpm->debug.count)
        rpm->debug.last_acquire = stack;

    stacks = krealloc(rpm->debug.owners,
              (rpm->debug.count + 1) * sizeof(*stacks),
              GFP_NOWAIT | __GFP_NOWARN);
    if (stacks) {
        stacks[rpm->debug.count++] = stack;
        rpm->debug.owners = stacks;
    } else {
        stack = -1;
    }

    spin_unlock_irqrestore(&rpm->debug.lock, flags);

    return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                         depot_stack_handle_t stack)
{
    struct drm_i915_private *i915 = container_of(rpm,
                             struct drm_i915_private,
                             runtime_pm);
    unsigned long flags, n;
    bool found = false;

    if (unlikely(stack == -1))
        return;

    spin_lock_irqsave(&rpm->debug.lock, flags);
    for (n = rpm->debug.count; n--; ) {
        if (rpm->debug.owners[n] == stack) {
            memmove(rpm->debug.owners + n,
                rpm->debug.owners + n + 1,
                (--rpm->debug.count - n) * sizeof(stack));
            found = true;
            break;
        }
    }
    spin_unlock_irqrestore(&rpm->debug.lock, flags);

    if (drm_WARN(&i915->drm, !found,
             "Unmatched wakeref (tracking %lu), count %u\n",
             rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
        char *buf;

        buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
        if (!buf)
            return;

        stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
        DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

        stack = READ_ONCE(rpm->debug.last_release);
        if (stack) {
            stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
            DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
        }

        kfree(buf);
    }
}

static int cmphandle(const void *_a, const void *_b)
{
    const depot_stack_handle_t * const a = _a, * const b = _b;

    if (*a < *b)
        return -1;
    else if (*a > *b)
        return 1;
    else
        return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
                 const struct intel_runtime_pm_debug *dbg)
{
    unsigned long i;
    char *buf;

    buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
    if (!buf)
        return;

    if (dbg->last_acquire) {
        stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
        drm_printf(p, "Wakeref last acquired:\n%s", buf);
    }

    if (dbg->last_release) {
        stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
        drm_printf(p, "Wakeref last released:\n%s", buf);
    }

    drm_printf(p, "Wakeref count: %lu\n", dbg->count);

    sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

    for (i = 0; i < dbg->count; i++) {
        depot_stack_handle_t stack = dbg->owners[i];
        unsigned long rep;

        rep = 1;
        while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
            rep++, i++;
        stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
        drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
    }

    kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
               struct intel_runtime_pm_debug *saved)
{
    *saved = *debug;

    debug->owners = NULL;
    debug->count = 0;
    debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
    if (debug->count) {
        struct drm_printer p = drm_debug_printer("i915");

        __print_intel_runtime_pm_wakeref(&p, debug);
    }

    kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
    struct intel_runtime_pm_debug dbg = {};
    unsigned long flags;

    if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
                     &rpm->debug.lock,
                     flags))
        return;

    __untrack_all_wakerefs(&rpm->debug, &dbg);
    spin_unlock_irqrestore(&rpm->debug.lock, flags);

    dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
    struct intel_runtime_pm_debug dbg = {};
    unsigned long flags;

    spin_lock_irqsave(&rpm->debug.lock, flags);
    __untrack_all_wakerefs(&rpm->debug, &dbg);
    spin_unlock_irqrestore(&rpm->debug.lock, flags);

    dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                    struct drm_printer *p)
{
    struct intel_runtime_pm_debug dbg = {};

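    /*
     * Snapshot the tracked wakerefs: copy under the lock when the local
     * buffer is already large enough, otherwise drop the lock, grow the
     * buffer and retry.
     */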
    do {
        unsigned long alloc = dbg.count;
        depot_stack_handle_t *s;

        spin_lock_irq(&rpm->debug.lock);
        dbg.count = rpm->debug.count;
        if (dbg.count <= alloc) {
            memcpy(dbg.owners,
                   rpm->debug.owners,
                   dbg.count * sizeof(*s));
        }
        dbg.last_acquire = rpm->debug.last_acquire;
        dbg.last_release = rpm->debug.last_release;
        spin_unlock_irq(&rpm->debug.lock);
        if (dbg.count <= alloc)
            break;

        s = krealloc(dbg.owners,
                 dbg.count * sizeof(*s),
                 GFP_NOWAIT | __GFP_NOWARN);
        if (!s)
            goto out;

        dbg.owners = s;
    } while (1);

    __print_intel_runtime_pm_wakeref(p, &dbg);

out:
    kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
    return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                         intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
    atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
    if (wakelock) {
        atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
        assert_rpm_wakelock_held(rpm);
    } else {
        atomic_inc(&rpm->wakeref_count);
        assert_rpm_raw_wakeref_held(rpm);
    }
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
    if (wakelock) {
        assert_rpm_wakelock_held(rpm);
        atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
    } else {
        assert_rpm_raw_wakeref_held(rpm);
    }

    __intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
                          bool wakelock)
{
    struct drm_i915_private *i915 = container_of(rpm,
                             struct drm_i915_private,
                             runtime_pm);
    int ret;

    ret = pm_runtime_get_sync(rpm->kdev);
    drm_WARN_ONCE(&i915->drm, ret < 0,
              "pm_runtime_get_sync() failed: %d\n", ret);

    intel_runtime_pm_acquire(rpm, wakelock);

    return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
    return __intel_runtime_pm_get(rpm, false);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
    return __intel_runtime_pm_get(rpm, true);
}
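
/*
 * Illustrative sketch: for a short critical section the scoped helper
 * with_intel_runtime_pm() from intel_runtime_pm.h pairs the get and the
 * put automatically (assumes i915 in scope; reg and val are placeholders):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 *		intel_uncore_write(&i915->uncore, reg, val);
 */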

/**
 * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 * @ignore_usecount: get a ref even if dev->power.usage_count is 0
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already active and ensures that it is powered up. It is illegal to try
 * to access the HW should intel_runtime_pm_get_if_active() report failure.
 *
 * If @ignore_usecount is true, a reference will be acquired even if there is no
 * user requiring the device to be powered up (dev->power.usage_count == 0).
 * If the function returns false in this case then it's guaranteed that the
 * device's runtime suspend hook has been called already or that it will be
 * called (and hence it's also guaranteed that the device's runtime resume
 * hook will be called eventually).
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
                            bool ignore_usecount)
{
    if (IS_ENABLED(CONFIG_PM)) {
        /*
         * If runtime PM is disabled by the RPM core and we get an
         * -EINVAL return value, we are not supposed to call this
         * function, since the power state is undefined. At the moment
         * this applies to the late/early system suspend/resume
         * handlers.
         */
        if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
            return 0;
    }

    intel_runtime_pm_acquire(rpm, true);

    return track_intel_runtime_pm_wakeref(rpm);
}

intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
    return __intel_runtime_pm_get_if_active(rpm, false);
}

intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
    return __intel_runtime_pm_get_if_active(rpm, true);
}
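
/*
 * Illustrative sketch: opportunistic access that must not wake a suspended
 * device checks the returned cookie before touching the hardware (assumes
 * i915 in scope):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
 *	if (wakeref) {
 *		...device is known to be awake, access it...
 *		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 *	}
 */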

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
    assert_rpm_wakelock_held(rpm);
    pm_runtime_get_noresume(rpm->kdev);

    intel_runtime_pm_acquire(rpm, true);

    return track_intel_runtime_pm_wakeref(rpm);
}
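
/*
 * Illustrative sketch: handing a wakeref to a work item from a context that
 * already holds one, without risking a resume (the work struct and queue
 * names are hypothetical):
 *
 *	work->wakeref = intel_runtime_pm_get_noresume(&i915->runtime_pm);
 *	queue_work(wq, &work->base);
 *	...the work item later releases it with intel_runtime_pm_put()...
 */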

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
                   intel_wakeref_t wref,
                   bool wakelock)
{
    struct device *kdev = rpm->kdev;

    untrack_intel_runtime_pm_wakeref(rpm, wref);

    intel_runtime_pm_release(rpm, wakelock);

    pm_runtime_mark_last_busy(kdev);
    pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
    __intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
    __intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
    __intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
    struct drm_i915_private *i915 = container_of(rpm,
                             struct drm_i915_private,
                             runtime_pm);
    struct device *kdev = rpm->kdev;

    /*
     * Disable the system suspend direct complete optimization, which can
     * leave the device suspended skipping the driver's suspend handlers
     * if the device was already runtime suspended. This is needed due to
     * the difference in our runtime and system suspend sequence and
     * because the HDA driver may require us to enable the audio power
     * domain during system suspend.
     */
    dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

    pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
    pm_runtime_mark_last_busy(kdev);

    /*
     * Take a permanent reference to disable the RPM functionality and drop
     * it only when unloading the driver. Use the low level get/put helpers,
     * so the driver's own RPM reference tracking asserts also work on
     * platforms without RPM support.
     */
    if (!rpm->available) {
        int ret;

        pm_runtime_dont_use_autosuspend(kdev);
        ret = pm_runtime_get_sync(kdev);
        drm_WARN(&i915->drm, ret < 0,
             "pm_runtime_get_sync() failed: %d\n", ret);
    } else {
        pm_runtime_use_autosuspend(kdev);
    }

    /* Enable by default */
    pm_runtime_allow(kdev);

    /*
     * The core calls the driver load handler with an RPM reference held.
     * We drop that here and will reacquire it during unloading in
     * intel_power_domains_fini().
     */
    pm_runtime_put_autosuspend(kdev);
}
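
/*
 * Illustrative ordering of this file's entry points over the driver's life
 * cycle (sketch; the surrounding probe/remove code is assumed):
 *
 *	intel_runtime_pm_init_early(rpm);     - early in probe, before HW access
 *	...full driver load...
 *	intel_runtime_pm_enable(rpm);         - end of the load sequence
 *	...device in use...
 *	intel_runtime_pm_disable(rpm);        - start of unload
 *	intel_runtime_pm_driver_release(rpm); - once the HW is quiesced
 */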

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
    struct drm_i915_private *i915 = container_of(rpm,
                             struct drm_i915_private,
                             runtime_pm);
    struct device *kdev = rpm->kdev;

    /* Transfer rpm ownership back to core */
    drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
         "Failed to pass rpm ownership back to core\n");

    pm_runtime_dont_use_autosuspend(kdev);

    if (!rpm->available)
        pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
    struct drm_i915_private *i915 = container_of(rpm,
                             struct drm_i915_private,
                             runtime_pm);
    int count = atomic_read(&rpm->wakeref_count);

    drm_WARN(&i915->drm, count,
         "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
         intel_rpm_raw_wakeref_count(count),
         intel_rpm_wakelock_count(count));

    untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
    struct drm_i915_private *i915 =
            container_of(rpm, struct drm_i915_private, runtime_pm);
    struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
    struct device *kdev = &pdev->dev;

    rpm->kdev = kdev;
    rpm->available = HAS_RUNTIME_PM(i915);

    init_intel_runtime_pm_wakeref(rpm);
}