#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

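/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. On the display side many power gates have to be toggled
 * by software, while on the GT side most power management is handled by the
 * hardware; some manual control at the device level is still required. This
 * file provides the device-level helpers for grabbing and releasing runtime
 * pm references (wakerefs) and, with CONFIG_DRM_I915_DEBUG_RUNTIME_PM
 * enabled, for tracking the callsite of every outstanding reference.
 */
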
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
	stack_depot_init();
}

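/*
 * Record the current call stack as the owner of a new wakeref and return its
 * stack depot handle, which doubles as the intel_wakeref_t cookie handed back
 * to the caller. Returns -1 when tracking is disabled or allocation fails.
 */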
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (rpm->no_wakeref_tracking)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}

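/*
 * Drop the wakeref identified by @stack from the owners list. A cookie that
 * does not match any tracked wakeref triggers a WARN, together with a dump of
 * the acquiring and the last releasing call stacks to aid debugging.
 */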
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (drm_WARN(&i915->drm, !found,
		     "Unmatched wakeref (tracking %lu), count %u\n",
		     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}

static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

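/*
 * Pretty-print a snapshot of the wakeref debug state: the last acquire and
 * release stacks plus every owner stack, with identical (sorted) stacks
 * coalesced into a single "taken at" entry carrying a repeat count.
 */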
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}

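/*
 * Move the tracking state aside under the debug lock so that the potentially
 * expensive dump (and the kfree of the owners array) can run after the lock
 * has been dropped.
 */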
static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}

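/*
 * Drop one reference from wakeref_count; when the count hits zero, steal the
 * tracking state under the debug lock and dump any call stacks that are still
 * outstanding, since those would now be leaked wakerefs.
 */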
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

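/**
 * print_intel_runtime_pm_wakeref - dump the outstanding wakerefs
 * @rpm: the intel_runtime_pm structure
 * @p: the drm_printer to emit the dump to
 *
 * Copies the current wakeref tracking state under the debug lock, growing a
 * local owners array with the lock dropped until it is large enough to hold
 * the snapshot, and then prints it.
 */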
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

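/*
 * Raw wakerefs bump wakeref_count by 1, wakelocks additionally add
 * INTEL_RPM_WAKELOCK_BIAS, so both kinds of reference can be counted (and
 * asserted on) within the same atomic counter.
 */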
static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

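/*
 * Common helper for the "get" variants below: resume the device
 * synchronously, account for the new reference and hand back a tracking
 * cookie for the caller to pass to the matching "put".
 */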
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	drm_WARN_ONCE(&i915->drm, ret < 0,
		      "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

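/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference and ensures that
 * the device is powered up. Raw references are not considered during
 * wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */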
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}

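/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */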
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}

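/*
 * Like __intel_runtime_pm_get(), but only succeeds if the device is already
 * awake: with @ignore_usecount the device merely has to be active, without it
 * there must also be at least one other user (see the wrappers below).
 */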
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
							bool ignore_usecount)
{
	if (IS_ENABLED(CONFIG_PM)) {
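		/*
		 * In cases runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */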
		if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

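/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */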
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, false);
}

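/**
 * intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already active, regardless of its usage count, and ensures that it is
 * powered up. It is illegal to try and access the HW should
 * intel_runtime_pm_get_if_active() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */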
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, true);
}

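/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference.
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in deadlocks. That pretty much means only the system suspend/resume
 * code, where this is used to grab runtime pm references for delayed setup
 * down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */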
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

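/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */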
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

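/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function is only for the power management code paths where the
 * tracking cookie is not available; use intel_runtime_pm_put() instead
 * wherever the wakeref cookie is at hand.
 */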
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

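/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */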
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif

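/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains; that is done separately by the display
 * power domain code.
 */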
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

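	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended, skipping the driver's suspend handlers,
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */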
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_set_autosuspend_delay(kdev, 10000);
	pm_runtime_mark_last_busy(kdev);

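	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */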
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		drm_WARN(&i915->drm, ret < 0,
			 "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	pm_runtime_allow(kdev);

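	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_runtime_pm_disable().
	 */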
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

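	/* Transfer RPM ownership back to core */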
	drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
		 "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int count = atomic_read(&rpm->wakeref_count);

	drm_WARN(&i915->drm, count,
		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
		 intel_rpm_raw_wakeref_count(count),
		 intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
		container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);

	init_intel_runtime_pm_wakeref(rpm);
}