Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: MIT */
0002 /*
0003  * Copyright © 2019 Intel Corporation
0004  */
0005 
0006 #ifndef __INTEL_RUNTIME_PM_H__
0007 #define __INTEL_RUNTIME_PM_H__
0008 
0009 #include <linux/types.h>
0010 
0011 #include "intel_wakeref.h"
0012 
0013 #include "i915_utils.h"
0014 
0015 struct device;
0016 struct drm_i915_private;
0017 struct drm_printer;
0018 
/*
 * Depth of a system suspend transition. The names suggest idle,
 * suspend-to-RAM and hibernate targets — NOTE(review): semantics are
 * defined by the out-of-line users; confirm against the .c side.
 */
enum i915_drm_suspend_mode {
    I915_DRM_SUSPEND_IDLE,
    I915_DRM_SUSPEND_MEM,
    I915_DRM_SUSPEND_HIBERNATE,
};
0024 
/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable), but
 * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled flag tracks whether our IRQs are enabled. We use it to
 * check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages
 * in case it happens.
 *
 * For more, read the Documentation/power/runtime_pm.rst.
 */
struct intel_runtime_pm {
    /*
     * Combined reference counter: the low half counts raw wakerefs and
     * the high half counts wakelock references — see the INTEL_RPM_*
     * macros below for the exact split.
     */
    atomic_t wakeref_count;
    struct device *kdev; /* points to i915->drm.dev */
    bool available; /* NOTE(review): presumably "runtime PM usable on this platform" — confirm against setup code */
    bool suspended; /* true while runtime suspended; checked by the assert helpers below */
    bool irqs_enabled; /* see the irqs_enabled discussion in the comment above */
    bool no_wakeref_tracking; /* NOTE(review): presumably opts out of the debug tracking below — confirm */

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
    /*
     * To aide detection of wakeref leaks and general misuse, we
     * track all wakeref holders. With manual markup (i.e. returning
     * a cookie to each rpm_get caller which they then supply to their
     * paired rpm_put) we can remove corresponding pairs of and keep
     * the array trimmed to active wakerefs.
     */
    struct intel_runtime_pm_debug {
        spinlock_t lock; /* protects the fields below */

        depot_stack_handle_t last_acquire; /* stack trace of the most recent get */
        depot_stack_handle_t last_release; /* stack trace of the most recent put */

        depot_stack_handle_t *owners; /* one stack handle per outstanding wakeref */
        unsigned long count; /* number of entries in @owners */
    } debug;
#endif
};
0075 
/*
 * wakeref_count is split into two equal halves: the low half (below
 * INTEL_RPM_WAKELOCK_SHIFT) counts raw wakerefs, the high half counts
 * wakelock references, so a single atomic tracks both kinds.
 */
#define BITS_PER_WAKEREF    \
    BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT    (BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS     (1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK  (INTEL_RPM_WAKELOCK_BIAS - 1)
0081 
0082 static inline int
0083 intel_rpm_raw_wakeref_count(int wakeref_count)
0084 {
0085     return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
0086 }
0087 
0088 static inline int
0089 intel_rpm_wakelock_count(int wakeref_count)
0090 {
0091     return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
0092 }
0093 
/*
 * Warn (once) if the HW is being accessed while the device is runtime
 * suspended — register access does not work in that state (see the
 * struct intel_runtime_pm comment above).
 */
static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
    WARN_ONCE(rpm->suspended,
          "Device suspended during HW access\n");
}
0100 
/*
 * Assert that @wakeref_count (a sampled wakeref_count value) holds at
 * least one raw wakeref (low half non-zero) and that the device is not
 * suspended.
 */
static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
    assert_rpm_device_not_suspended(rpm);
    WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
          "RPM raw-wakeref not held\n");
}
0108 
/*
 * Assert that @wakeref_count holds a wakelock reference (high half
 * non-zero) in addition to the raw-wakeref / not-suspended checks.
 */
static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
    __assert_rpm_raw_wakeref_held(rpm, wakeref_count);
    WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
          "RPM wakelock ref not held during HW access\n");
}
0116 
/* Assert a raw wakeref is currently held, sampling the live counter. */
static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
    __assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}
0122 
/* Assert a wakelock reference is currently held, sampling the live counter. */
static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
    __assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}
0128 
/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables the asserts that check if we hold an RPM wakelock
 * reference, while keeping the device-not-suspended checks still enabled.
 * It's meant to be used only in special circumstances where our rule about
 * the wakelock refcount wrt. the device power state doesn't hold. According
 * to this rule at any point where we access the HW or want to keep the HW in
 * an active state we must hold an RPM wakelock reference acquired via one of
 * the intel_runtime_pm_get() helpers. Currently there are a few special spots
 * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
 * forcewake release timer, and the GPU RPS and hangcheck works. All other
 * users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
    /*
     * BIAS + 1 bumps both halves of the split counter at once, so both
     * the raw-wakeref and the wakelock assertions see a non-zero count.
     */
    atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
           &rpm->wakeref_count);
}
0153 
/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
    /* Undo the BIAS + 1 added by disable_rpm_wakeref_asserts(). */
    atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
           &rpm->wakeref_count);
}
0171 
/* Driver lifecycle entry points, implemented out of line. */
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);

/*
 * Acquire helpers. Each returns a wakeref cookie that must be handed back
 * to the matching put. The _if_in_use/_if_active variants can fail, in
 * which case the returned cookie tests false (see the with_* helpers
 * below, which rely on exactly that).
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

/*
 * Scoped helpers: hold a runtime PM reference for the duration of the
 * loop body (which runs at most once, and not at all if the conditional
 * variants fail to acquire). The reference is released in the for-loop
 * increment expression, so leaving the body via "break" or "return"
 * skips the put and leaks the reference.
 */
#define with_intel_runtime_pm(rpm, wf) \
    for ((wf) = intel_runtime_pm_get(rpm); (wf); \
         intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_in_use(rpm, wf) \
    for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
         intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_active(rpm, wf) \
    for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
         intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

/* Release helpers, paired with the acquire helpers above. */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
/* Without debug wakeref tracking the cookie carries no state: ignore it. */
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
    intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                    struct drm_printer *p);
#else
/* Nothing to print when wakeref tracking is compiled out. */
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                          struct drm_printer *p)
{
}
#endif
0216 
0217 #endif /* __INTEL_RUNTIME_PM_H__ */