/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;			/* number of active holders */
	struct mutex mutex;		/* serialises the first get / last put */

	intel_wakeref_t wakeref;	/* cookie from intel_runtime_pm_get() */

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;	/* deferred (async/delayed) final put */
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
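
/*
 * Example: a minimal sketch of hooking up a wakeref (illustrative only;
 * the "foo" unit, foo_unpark() and foo_park() are hypothetical, not part
 * of this header). ops->get() is allowed to fail, in which case the
 * acquisition is unwound and the error reported to the caller:
 *
 *	static int foo_get(struct intel_wakeref *wf)
 *	{
 *		return foo_unpark(container_of(wf, struct foo, wakeref));
 *	}
 *
 *	static int foo_put(struct intel_wakeref *wf)
 *	{
 *		return foo_park(container_of(wf, struct foo, wakeref));
 *	}
 *
 *	static const struct intel_wakeref_ops foo_wakeref_ops = {
 *		.get = foo_get,
 *		.put = foo_put,
 *	};
 *
 *	intel_wakeref_init(&foo->wakeref, &i915->runtime_pm, &foo_wakeref_ops);
 */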

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime-pm wakeref and then call the ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released and the acquisition unwound, and an error
 * reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if the wakeref is already
 * held (the count must be non-zero).
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
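
/*
 * Example: a minimal sketch of the conditional-acquire pattern (the
 * do_flush() helper is hypothetical). intel_wakeref_get_if_active()
 * never sleeps, so the work is only done if the unit is already awake:
 *
 *	if (intel_wakeref_get_if_active(wf)) {
 *		do_flush(wf);
 *		intel_wakeref_put_async(wf);
 *	}
 */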

enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When the refcount drops to zero, the
 * wakeref's mutex is taken and the ops->put() callback is called underneath
 * the mutex (and the runtime-pm wakeref released) before the final
 * reference is dropped. If INTEL_WAKEREF_PUT_ASYNC is set in @flags, the
 * final release is instead punted to a delayed worker, after the delay
 * (in jiffies) packed into @flags with INTEL_WAKEREF_PUT_DELAY.
 *
 * Note that ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and the release must be rescheduled by the callback.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
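
/*
 * Example: choosing a release flavour (a sketch; the call sites are
 * illustrative). intel_wakeref_put() may sleep to run ops->put() on the
 * final release, whereas the async and delay variants punt that final
 * release to the delayed worker and so may be used from atomic context:
 *
 *	intel_wakeref_put(wf);			(process context only)
 *	intel_wakeref_put_async(wf);		(atomic context is fine)
 *	intel_wakeref_put_delay(wf, HZ / 2);	(stay awake a little longer)
 */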

static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running underneath @wf->mutex, possibly
 * on the delayed worker) is complete.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 *
 * Must be called underneath @wf->mutex while the count is zero; restores
 * a single reference so that the park is retried on the next final put.
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}
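
/*
 * Example: deferring the park from inside ops->put() (a sketch; foo_idle()
 * and wf_to_foo() are hypothetical). Returning an error from ops->put()
 * retains the runtime-pm wakeref, while the deferred reference keeps the
 * wakeref marked active until the next final put:
 *
 *	static int foo_park(struct intel_wakeref *wf)
 *	{
 *		if (!foo_idle(wf_to_foo(wf))) {
 *			__intel_wakeref_defer_park(wf);
 *			return -EBUSY;
 *		}
 *
 *		return 0;
 *	}
 */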

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge: intel_wakeref_auto() keeps the device awake until at
 * least @timeout jiffies after the last call. Each call rearms the
 * timer, and a @timeout of 0 cancels the hold immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
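
/*
 * Example: a minimal usage sketch (the surrounding device code is
 * hypothetical; i915->runtime_pm names the driver's runtime-pm state).
 * On activity, extend the hold; on teardown, cancel it:
 *
 *	intel_wakeref_auto_init(&wf, &i915->runtime_pm);
 *
 *	intel_wakeref_auto(&wf, msecs_to_jiffies(250));
 *
 *	intel_wakeref_auto(&wf, 0);
 *	intel_wakeref_auto_fini(&wf);
 */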

#endif /* INTEL_WAKEREF_H */