/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
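
/*
 * Example (illustrative sketch only; "frobber", frobber_power_on() and
 * frobber_power_off() are hypothetical, not part of i915): a component
 * embeds a struct intel_wakeref, wires up its power on/off hooks through
 * intel_wakeref_ops, and initialises it once at creation time.
 *
 *	static int frobber_wake(struct intel_wakeref *wf)
 *	{
 *		struct frobber *f = container_of(wf, typeof(*f), wakeref);
 *
 *		return frobber_power_on(f);
 *	}
 *
 *	static int frobber_sleep(struct intel_wakeref *wf)
 *	{
 *		struct frobber *f = container_of(wf, typeof(*f), wakeref);
 *
 *		return frobber_power_off(f);
 *	}
 *
 *	static const struct intel_wakeref_ops frobber_wakeref_ops = {
 *		.get = frobber_wake,
 *		.put = frobber_sleep,
 *	};
 *
 *	intel_wakeref_init(&f->wakeref, f->rpm, &frobber_wakeref_ops);
 */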

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released, the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	/* Fast path: bump the count if the wakeref is already held. */
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
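
/*
 * Example (illustrative sketch only; engine_submit() and the engine
 * structure are hypothetical): callers must check for failure, as the
 * first acquisition may need to wake the device and can be unwound on
 * error.
 *
 *	err = intel_wakeref_get(&engine->wakeref);
 *	if (err)
 *		return err;
 *
 *	err = engine_submit(engine);
 *
 *	intel_wakeref_put(&engine->wakeref);
 *	return err;
 */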

/**
 * __intel_wakeref_get: Acquire the wakeref again
 * @wf: the wakeref
 *
 * Increment the wakeref counter; only valid if the wakeref is already
 * held by the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
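
/*
 * Example (illustrative sketch only; read_hw_counters() and the engine
 * structure are hypothetical): sampling code can use the conditional
 * acquire to read state only while the device is already awake, without
 * ever powering it up itself.
 *
 *	if (intel_wakeref_get_if_active(&engine->wakeref)) {
 *		read_hw_counters(engine);
 *		intel_wakeref_put(&engine->wakeref);
 *	}
 */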

enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the ops->put() callback is
 * called underneath the wakeref mutex.
 *
 * Note that ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	/*
	 * Decrement the count unless we would drop the last reference
	 * (i.e. unless the count is 1); the final release is routed
	 * through the slow path so that ops->put() runs under the mutex.
	 */
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
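
/*
 * Example (illustrative sketch only; the engine structure is
 * hypothetical): intel_wakeref_put() may sleep to run ops->put(),
 * intel_wakeref_put_async() defers the final release to a worker, and
 * intel_wakeref_put_delay() additionally holds the release back by the
 * supplied delay in jiffies, packed into the flags word via
 * FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, ...).
 *
 *	intel_wakeref_put(&engine->wakeref);
 *	intel_wakeref_put_async(&engine->wakeref);
 *	intel_wakeref_put_delay(&engine->wakeref, msecs_to_jiffies(100));
 */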

static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until any active callback (running underneath @wf->mutex, possibly
 * on another CPU) has completed, including any asynchronous release queued
 * on the delayed worker.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	/* Taking then releasing the mutex serialises against a callback. */
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}
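
/*
 * Example (illustrative sketch only; engine_free_parked_state() and the
 * engine structure are hypothetical): before tearing down state touched
 * by the park callback, ensure no callback is still in flight.
 *
 *	intel_wakeref_unlock_wait(&engine->wakeref);
 *	engine_free_parked_state(engine); // safe: no callback in flight
 */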

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 *
 * Reinstates a single reference, underneath @wf->mutex, so that the
 * in-progress park is deferred and retried on the next final release.
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
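
/*
 * Example (illustrative sketch only; the "uncore" structure and
 * USERFAULT_TIMEOUT are hypothetical): after a user access, keep the
 * device awake a little longer than the generic autosuspend would.
 *
 *	intel_wakeref_auto(&uncore->userfault_wakeref,
 *			   msecs_to_jiffies(USERFAULT_TIMEOUT));
 */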

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */