// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_snps_phy.h"
#include "vlv_sideband.h"

#define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
	for_each_power_well(__dev_priv, __power_well)			\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
	for_each_power_well_reverse(__dev_priv, __power_well)		\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
		if (intel_power_well_is_always_on(power_well))
			continue;

		if (!intel_power_well_is_enabled_cached(power_well)) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
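
/*
 * Usage sketch (illustrative only, not part of the original file): state
 * readout code typically checks a domain before trusting the registers it
 * covers. The domain and register chosen below are arbitrary examples.
 *
 *	if (intel_display_power_is_enabled(i915, POWER_DOMAIN_PIPE_A))
 *		active = intel_de_read(i915, PIPECONF(PIPE_A)) & PIPECONF_ENABLE;
 */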

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

/**
 * intel_display_power_set_target_dc_state - set the target DC state
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state
 *
 * This function sets the "DC off" power well target_dc_state; based
 * upon this target_dc_state the "DC off" power well will enable the
 * desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If the DC off power well is disabled, enable it before updating the
	 * target state and disable it again afterwards, so that the new
	 * target DC state takes effect.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	return !drm_WARN_ON(&i915->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	err |= drm_WARN_ON(&i915->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif

static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, domain)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must be released by a
 * symmetric call to intel_display_power_put().
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
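
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical get/put pairing around an access to a power-gated hardware
 * block. The domain below is an arbitrary example.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_MMIO);
 *	... access the audio registers ...
 *	intel_display_power_put(i915, POWER_DOMAIN_AUDIO_MMIO, wakeref);
 */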

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain if the domain is
 * already enabled and ensures that the power domain stays enabled as long as
 * the reference is held. If the domain is not enabled no reference is taken
 * and 0 is returned.
 *
 * Any power domain reference obtained by this function must be released by a
 * symmetric call to intel_display_power_put().
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
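
/*
 * Usage sketch (illustrative only): the returned wakeref doubles as the
 * success indicator, so callers typically bail out when it is zero. The
 * domain below is an arbitrary example.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;
 *	... read out pipe A state ...
 *	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
 */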

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade it to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding power well if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
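
/*
 * Usage sketch (illustrative only): an async put lets a frequently toggled
 * domain stay powered for a short while, avoiding an enable/disable
 * ping-pong across back-to-back transactions. The domain below is an
 * arbitrary example; callers normally go through the
 * intel_display_power_put_async() wrapper from intel_display_power.h.
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	... perform a short AUX transaction ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 */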

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/*
 * Similar to intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled and
 * should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}
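
/*
 * Usage sketch (illustrative only): a power domain set tracks which
 * domains were actually acquired, which is convenient when the set is
 * built up conditionally. The domains and the needs_panel_fitter flag
 * below are hypothetical examples; intel_display_power_put_all_in_set()
 * is a helper in intel_display_power.h wrapping
 * intel_display_power_put_mask_in_set().
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
 *	if (needs_panel_fitter)
 *		intel_display_power_get_in_set(i915, &set,
 *					       POWER_DOMAIN_PIPE_PANEL_FITTER_A);
 *	...
 *	intel_display_power_put_all_in_set(i915, &set);
 */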

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG2(dev_priv))
		max_dc = 0;
	else if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
		DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	intel_display_power_map_cleanup(&dev_priv->power_domains);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}
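
/*
 * Usage sketch (illustrative only): callers pass a bitmask of the slices
 * they need, built with BIT() from the dbuf_slice enum. For example,
 * requesting slices S1 and S2 while powering down everything else:
 *
 *	gen9_dbuf_slices_update(i915, BIT(DBUF_S1) | BIT(DBUF_S2));
 */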

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}
1098
1099 static void icl_mbus_init(struct drm_i915_private *dev_priv)
1100 {
1101 unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
1102 u32 mask, val, i;
1103
1104 if (IS_ALDERLAKE_P(dev_priv))
1105 return;
1106
1107 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
1108 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
1109 MBUS_ABOX_B_CREDIT_MASK |
1110 MBUS_ABOX_BW_CREDIT_MASK;
1111 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
1112 MBUS_ABOX_BT_CREDIT_POOL2(16) |
1113 MBUS_ABOX_B_CREDIT(1) |
1114 MBUS_ABOX_BW_CREDIT(1);
1115
1116
1117
1118
1119
1120
1121 if (DISPLAY_VER(dev_priv) == 12)
1122 abox_regs |= BIT(0);
1123
1124 for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
1125 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
1126 }

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */
	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * as the source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we
 * lose the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens
 * even if we don't put the device in PCI D3 state (which is what currently
 * happens because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Currently none of the PCH functions have this.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}

static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninitialize all combo phys */
	intel_combo_phy_uninit(dev_priv);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits forced off, otherwise we leave
	 * the override enabled and force the power down bits to
	 * match the current lane status reported by the hardware.
	 */
	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (intel_power_well_is_enabled(dev_priv, cmn) &&
	    intel_power_well_is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	intel_power_well_disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
									      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_sanitize_state - sanitize power domains state
 * @i915: i915 device instance
 *
 * Sanitize the power domains state during driver loading and system resume.
 * The function will disable all display power wells that BIOS has enabled
 * without a user for it (any user for a power well has taken a reference
 * on it by the time this function is called, after the state of all the
 * power wells is verified).
 */
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);

	for_each_power_well_reverse(i915, power_well) {
		if (power_well->desc->always_on || power_well->count ||
		    !intel_power_well_is_enabled(i915, power_well))
			continue;

		drm_dbg_kms(&i915->drm,
			    "BIOS left unused %s power well enabled, disabling it\n",
			    intel_power_well_name(power_well));
		intel_power_well_disable(i915, power_well);
	}

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these functions is to keep the rest of the power wells enabled until the
 * display HW readout (which will acquire the power references reflecting the
 * current HW state) and disabling the power wells without a user is done.
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}
2096
2097 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2098
2099 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
2100 {
2101 struct i915_power_domains *power_domains = &i915->power_domains;
2102 struct i915_power_well *power_well;
2103
2104 for_each_power_well(i915, power_well) {
2105 enum intel_display_power_domain domain;
2106
2107 drm_dbg(&i915->drm, "%-25s %d\n",
2108 intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2109
2110 for_each_power_domain(domain, intel_power_well_domains(power_well))
2111 drm_dbg(&i915->drm, " %-23s %d\n",
2112 intel_display_power_domain_str(domain),
2113 power_domains->domain_use_count[domain]);
2114 }
2115 }
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
        struct i915_power_domains *power_domains = &i915->power_domains;
        struct i915_power_well *power_well;
        bool dump_domain_info;

        mutex_lock(&power_domains->lock);

        verify_async_put_domains_state(power_domains);

        dump_domain_info = false;
        for_each_power_well(i915, power_well) {
                enum intel_display_power_domain domain;
                int domains_count;
                bool enabled;

                enabled = intel_power_well_is_enabled(i915, power_well);
                if ((intel_power_well_refcount(power_well) ||
                     intel_power_well_is_always_on(power_well)) != enabled)
                        drm_err(&i915->drm,
                                "power well %s state mismatch (refcount %d/enabled %d)\n",
                                intel_power_well_name(power_well),
                                intel_power_well_refcount(power_well), enabled);

                domains_count = 0;
                for_each_power_domain(domain, intel_power_well_domains(power_well))
                        domains_count += power_domains->domain_use_count[domain];

                if (intel_power_well_refcount(power_well) != domains_count) {
                        drm_err(&i915->drm,
                                "power well %s refcount/domain refcount mismatch "
                                "(refcount %d/domains refcount %d)\n",
                                intel_power_well_name(power_well),
                                intel_power_well_refcount(power_well),
                                domains_count);
                        dump_domain_info = true;
                }
        }

        if (dump_domain_info) {
                static bool dumped;

                if (!dumped) {
                        intel_power_domains_dump_info(i915);
                        dumped = true;
                }
        }

        mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif
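
/*
 * Enter the platform's display low power state for system suspend: DC9 on
 * DISPLAY_VER >= 11 and GLK/BXT, PC8 on HSW/BDW.
 */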
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
        if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
            IS_BROXTON(i915)) {
                bxt_enable_dc9(i915);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_enable_pc8(i915);
        }
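
        /*
         * Force the south block clocks to run off the refclk while suspended
         * (PCH workaround, likely Wa_14010685332, for CNP-class PCHs up to
         * but not including DG1).
         */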
        if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
                intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
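
/*
 * Undo intel_display_power_suspend_late(): sanitize the DC state left by the
 * DMC firmware and exit DC9, or exit PC8 on HSW/BDW.
 */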
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
        if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
            IS_BROXTON(i915)) {
                gen9_sanitize_dc_state(i915);
                bxt_disable_dc9(i915);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_disable_pc8(i915);
        }
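
        /* Undo the refclk workaround applied in intel_display_power_suspend_late(). */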
        if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
                intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
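
/*
 * Uninitialize the display core and enter the platform's display low power
 * state (DC9 or PC8).
 */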
void intel_display_power_suspend(struct drm_i915_private *i915)
{
        if (DISPLAY_VER(i915) >= 11) {
                icl_display_core_uninit(i915);
                bxt_enable_dc9(i915);
        } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
                bxt_display_core_uninit(i915);
                bxt_enable_dc9(i915);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_enable_pc8(i915);
        }
}
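
/*
 * Undo intel_display_power_suspend(): exit DC9/PC8, reinitialize the display
 * core and re-enter the deepest allowed DC state if DMC firmware is loaded.
 */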
void intel_display_power_resume(struct drm_i915_private *i915)
{
        if (DISPLAY_VER(i915) >= 11) {
                bxt_disable_dc9(i915);
                icl_display_core_init(i915, true);
                if (intel_dmc_has_payload(i915)) {
                        if (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
                                skl_enable_dc6(i915);
                        else if (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
                                gen9_enable_dc5(i915);
                }
        } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
                bxt_disable_dc9(i915);
                bxt_display_core_init(i915, true);
                if (intel_dmc_has_payload(i915) &&
                    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
                        gen9_enable_dc5(i915);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_disable_pc8(i915);
        }
}
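
/*
 * Dump the power well and power domain use counts to a debugfs seq_file.
 */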
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
        struct i915_power_domains *power_domains = &i915->power_domains;
        int i;

        mutex_lock(&power_domains->lock);

        seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
        for (i = 0; i < power_domains->power_well_count; i++) {
                struct i915_power_well *power_well;
                enum intel_display_power_domain power_domain;

                power_well = &power_domains->power_wells[i];
                seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
                           intel_power_well_refcount(power_well));

                for_each_power_domain(power_domain, intel_power_well_domains(power_well))
                        seq_printf(m, "  %-23s %d\n",
                                   intel_display_power_domain_str(power_domain),
                                   power_domains->domain_use_count[power_domain]);
        }

        mutex_unlock(&power_domains->lock);
}
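
/*
 * Describes a contiguous range of DDI ports/AUX channels on a platform and
 * the first power domain of each type covering that range; the domain for a
 * given port/AUX channel is derived from its offset within the range.
 */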
struct intel_ddi_port_domains {
        enum port port_start;
        enum port port_end;
        enum aux_ch aux_ch_start;
        enum aux_ch aux_ch_end;

        enum intel_display_power_domain ddi_lanes;
        enum intel_display_power_domain ddi_io;
        enum intel_display_power_domain aux_legacy_usbc;
        enum intel_display_power_domain aux_tbt;
};

static const struct intel_ddi_port_domains
i9xx_port_domains[] = {
        {
                .port_start = PORT_A,
                .port_end = PORT_F,
                .aux_ch_start = AUX_CH_A,
                .aux_ch_end = AUX_CH_F,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
                .aux_tbt = POWER_DOMAIN_INVALID,
        },
};

static const struct intel_ddi_port_domains
d11_port_domains[] = {
        {
                .port_start = PORT_A,
                .port_end = PORT_B,
                .aux_ch_start = AUX_CH_A,
                .aux_ch_end = AUX_CH_B,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
                .aux_tbt = POWER_DOMAIN_INVALID,
        }, {
                .port_start = PORT_C,
                .port_end = PORT_F,
                .aux_ch_start = AUX_CH_C,
                .aux_ch_end = AUX_CH_F,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
                .aux_tbt = POWER_DOMAIN_AUX_TBT1,
        },
};

static const struct intel_ddi_port_domains
d12_port_domains[] = {
        {
                .port_start = PORT_A,
                .port_end = PORT_C,
                .aux_ch_start = AUX_CH_A,
                .aux_ch_end = AUX_CH_C,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
                .aux_tbt = POWER_DOMAIN_INVALID,
        }, {
                .port_start = PORT_TC1,
                .port_end = PORT_TC6,
                .aux_ch_start = AUX_CH_USBC1,
                .aux_ch_end = AUX_CH_USBC6,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
                .aux_tbt = POWER_DOMAIN_AUX_TBT1,
        },
};

static const struct intel_ddi_port_domains
d13_port_domains[] = {
        {
                .port_start = PORT_A,
                .port_end = PORT_C,
                .aux_ch_start = AUX_CH_A,
                .aux_ch_end = AUX_CH_C,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
                .aux_tbt = POWER_DOMAIN_INVALID,
        }, {
                .port_start = PORT_TC1,
                .port_end = PORT_TC4,
                .aux_ch_start = AUX_CH_USBC1,
                .aux_ch_end = AUX_CH_USBC4,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
                .aux_tbt = POWER_DOMAIN_AUX_TBT1,
        }, {
                .port_start = PORT_D_XELPD,
                .port_end = PORT_E_XELPD,
                .aux_ch_start = AUX_CH_D_XELPD,
                .aux_ch_end = AUX_CH_E_XELPD,

                .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
                .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
                .aux_legacy_usbc = POWER_DOMAIN_AUX_D,
                .aux_tbt = POWER_DOMAIN_INVALID,
        },
};
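
/* Return the DDI port domain table for the running platform and its size. */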
static void
intel_port_domains_for_platform(struct drm_i915_private *i915,
                                const struct intel_ddi_port_domains **domains,
                                int *domains_size)
{
        if (DISPLAY_VER(i915) >= 13) {
                *domains = d13_port_domains;
                *domains_size = ARRAY_SIZE(d13_port_domains);
        } else if (DISPLAY_VER(i915) >= 12) {
                *domains = d12_port_domains;
                *domains_size = ARRAY_SIZE(d12_port_domains);
        } else if (DISPLAY_VER(i915) >= 11) {
                *domains = d11_port_domains;
                *domains_size = ARRAY_SIZE(d11_port_domains);
        } else {
                *domains = i9xx_port_domains;
                *domains_size = ARRAY_SIZE(i9xx_port_domains);
        }
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
{
        const struct intel_ddi_port_domains *domains;
        int domains_size;
        int i;

        intel_port_domains_for_platform(i915, &domains, &domains_size);
        for (i = 0; i < domains_size; i++)
                if (port >= domains[i].port_start && port <= domains[i].port_end)
                        return &domains[i];

        return NULL;
}
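
/*
 * Map @port to its DDI IO power domain. Falls back to
 * POWER_DOMAIN_PORT_DDI_IO_A if the port is unknown (with a WARN) or has no
 * IO domain.
 */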
enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);

        if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
                return POWER_DOMAIN_PORT_DDI_IO_A;

        return domains->ddi_io + (int)(port - domains->port_start);
}
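
/*
 * Map @port to its DDI lanes power domain, with the same fallback behaviour
 * as intel_display_power_ddi_io_domain().
 */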
enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
{
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);

        if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
                return POWER_DOMAIN_PORT_DDI_LANES_A;

        return domains->ddi_lanes + (int)(port - domains->port_start);
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
        const struct intel_ddi_port_domains *domains;
        int domains_size;
        int i;

        intel_port_domains_for_platform(i915, &domains, &domains_size);
        for (i = 0; i < domains_size; i++)
                if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
                        return &domains[i];

        return NULL;
}
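
/*
 * Map @aux_ch to its legacy/USB-C AUX power domain. Falls back to
 * POWER_DOMAIN_AUX_A if the AUX channel is unknown (with a WARN).
 */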
enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

        if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
                return POWER_DOMAIN_AUX_A;

        return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
}
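
/*
 * Map @aux_ch to its Thunderbolt AUX power domain. Falls back to
 * POWER_DOMAIN_AUX_TBT1 if the AUX channel is unknown (with a WARN) or has
 * no TBT domain.
 */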
enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

        if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
                return POWER_DOMAIN_AUX_TBT1;

        return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
}