/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_snps_phy.h"
#include "vlv_sideband.h"

#define for_each_power_domain_well(__dev_priv, __power_well, __domain)  \
    for_each_power_well(__dev_priv, __power_well)               \
        for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
    for_each_power_well_reverse(__dev_priv, __power_well)               \
        for_each_if(test_bit((__domain), (__power_well)->domains.bits))

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
    switch (domain) {
    case POWER_DOMAIN_DISPLAY_CORE:
        return "DISPLAY_CORE";
    case POWER_DOMAIN_PIPE_A:
        return "PIPE_A";
    case POWER_DOMAIN_PIPE_B:
        return "PIPE_B";
    case POWER_DOMAIN_PIPE_C:
        return "PIPE_C";
    case POWER_DOMAIN_PIPE_D:
        return "PIPE_D";
    case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
        return "PIPE_PANEL_FITTER_A";
    case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
        return "PIPE_PANEL_FITTER_B";
    case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
        return "PIPE_PANEL_FITTER_C";
    case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
        return "PIPE_PANEL_FITTER_D";
    case POWER_DOMAIN_TRANSCODER_A:
        return "TRANSCODER_A";
    case POWER_DOMAIN_TRANSCODER_B:
        return "TRANSCODER_B";
    case POWER_DOMAIN_TRANSCODER_C:
        return "TRANSCODER_C";
    case POWER_DOMAIN_TRANSCODER_D:
        return "TRANSCODER_D";
    case POWER_DOMAIN_TRANSCODER_EDP:
        return "TRANSCODER_EDP";
    case POWER_DOMAIN_TRANSCODER_DSI_A:
        return "TRANSCODER_DSI_A";
    case POWER_DOMAIN_TRANSCODER_DSI_C:
        return "TRANSCODER_DSI_C";
    case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
        return "TRANSCODER_VDSC_PW2";
    case POWER_DOMAIN_PORT_DDI_LANES_A:
        return "PORT_DDI_LANES_A";
    case POWER_DOMAIN_PORT_DDI_LANES_B:
        return "PORT_DDI_LANES_B";
    case POWER_DOMAIN_PORT_DDI_LANES_C:
        return "PORT_DDI_LANES_C";
    case POWER_DOMAIN_PORT_DDI_LANES_D:
        return "PORT_DDI_LANES_D";
    case POWER_DOMAIN_PORT_DDI_LANES_E:
        return "PORT_DDI_LANES_E";
    case POWER_DOMAIN_PORT_DDI_LANES_F:
        return "PORT_DDI_LANES_F";
    case POWER_DOMAIN_PORT_DDI_LANES_TC1:
        return "PORT_DDI_LANES_TC1";
    case POWER_DOMAIN_PORT_DDI_LANES_TC2:
        return "PORT_DDI_LANES_TC2";
    case POWER_DOMAIN_PORT_DDI_LANES_TC3:
        return "PORT_DDI_LANES_TC3";
    case POWER_DOMAIN_PORT_DDI_LANES_TC4:
        return "PORT_DDI_LANES_TC4";
    case POWER_DOMAIN_PORT_DDI_LANES_TC5:
        return "PORT_DDI_LANES_TC5";
    case POWER_DOMAIN_PORT_DDI_LANES_TC6:
        return "PORT_DDI_LANES_TC6";
    case POWER_DOMAIN_PORT_DDI_IO_A:
        return "PORT_DDI_IO_A";
    case POWER_DOMAIN_PORT_DDI_IO_B:
        return "PORT_DDI_IO_B";
    case POWER_DOMAIN_PORT_DDI_IO_C:
        return "PORT_DDI_IO_C";
    case POWER_DOMAIN_PORT_DDI_IO_D:
        return "PORT_DDI_IO_D";
    case POWER_DOMAIN_PORT_DDI_IO_E:
        return "PORT_DDI_IO_E";
    case POWER_DOMAIN_PORT_DDI_IO_F:
        return "PORT_DDI_IO_F";
    case POWER_DOMAIN_PORT_DDI_IO_TC1:
        return "PORT_DDI_IO_TC1";
    case POWER_DOMAIN_PORT_DDI_IO_TC2:
        return "PORT_DDI_IO_TC2";
    case POWER_DOMAIN_PORT_DDI_IO_TC3:
        return "PORT_DDI_IO_TC3";
    case POWER_DOMAIN_PORT_DDI_IO_TC4:
        return "PORT_DDI_IO_TC4";
    case POWER_DOMAIN_PORT_DDI_IO_TC5:
        return "PORT_DDI_IO_TC5";
    case POWER_DOMAIN_PORT_DDI_IO_TC6:
        return "PORT_DDI_IO_TC6";
    case POWER_DOMAIN_PORT_DSI:
        return "PORT_DSI";
    case POWER_DOMAIN_PORT_CRT:
        return "PORT_CRT";
    case POWER_DOMAIN_PORT_OTHER:
        return "PORT_OTHER";
    case POWER_DOMAIN_VGA:
        return "VGA";
    case POWER_DOMAIN_AUDIO_MMIO:
        return "AUDIO_MMIO";
    case POWER_DOMAIN_AUDIO_PLAYBACK:
        return "AUDIO_PLAYBACK";
    case POWER_DOMAIN_AUX_A:
        return "AUX_A";
    case POWER_DOMAIN_AUX_B:
        return "AUX_B";
    case POWER_DOMAIN_AUX_C:
        return "AUX_C";
    case POWER_DOMAIN_AUX_D:
        return "AUX_D";
    case POWER_DOMAIN_AUX_E:
        return "AUX_E";
    case POWER_DOMAIN_AUX_F:
        return "AUX_F";
    case POWER_DOMAIN_AUX_USBC1:
        return "AUX_USBC1";
    case POWER_DOMAIN_AUX_USBC2:
        return "AUX_USBC2";
    case POWER_DOMAIN_AUX_USBC3:
        return "AUX_USBC3";
    case POWER_DOMAIN_AUX_USBC4:
        return "AUX_USBC4";
    case POWER_DOMAIN_AUX_USBC5:
        return "AUX_USBC5";
    case POWER_DOMAIN_AUX_USBC6:
        return "AUX_USBC6";
    case POWER_DOMAIN_AUX_IO_A:
        return "AUX_IO_A";
    case POWER_DOMAIN_AUX_TBT1:
        return "AUX_TBT1";
    case POWER_DOMAIN_AUX_TBT2:
        return "AUX_TBT2";
    case POWER_DOMAIN_AUX_TBT3:
        return "AUX_TBT3";
    case POWER_DOMAIN_AUX_TBT4:
        return "AUX_TBT4";
    case POWER_DOMAIN_AUX_TBT5:
        return "AUX_TBT5";
    case POWER_DOMAIN_AUX_TBT6:
        return "AUX_TBT6";
    case POWER_DOMAIN_GMBUS:
        return "GMBUS";
    case POWER_DOMAIN_INIT:
        return "INIT";
    case POWER_DOMAIN_MODESET:
        return "MODESET";
    case POWER_DOMAIN_GT_IRQ:
        return "GT_IRQ";
    case POWER_DOMAIN_DC_OFF:
        return "DC_OFF";
    case POWER_DOMAIN_TC_COLD_OFF:
        return "TC_COLD_OFF";
    default:
        MISSING_CASE(domain);
        return "?";
    }
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                      enum intel_display_power_domain domain)
{
    struct i915_power_well *power_well;
    bool is_enabled;

    if (dev_priv->runtime_pm.suspended)
        return false;

    is_enabled = true;

    for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
        if (intel_power_well_is_always_on(power_well))
            continue;

        if (!intel_power_well_is_enabled_cached(power_well)) {
            is_enabled = false;
            break;
        }
    }

    return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                    enum intel_display_power_domain domain)
{
    struct i915_power_domains *power_domains;
    bool ret;

    power_domains = &dev_priv->power_domains;

    mutex_lock(&power_domains->lock);
    ret = __intel_display_power_is_enabled(dev_priv, domain);
    mutex_unlock(&power_domains->lock);

    return ret;
}
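
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * hardware state readout guarded by the check above, with the relevant
 * modeset locks assumed held as the kernel-doc requires. The pipe
 * register access here is an arbitrary example.
 */
static bool __maybe_unused
example_pipe_readout(struct drm_i915_private *dev_priv, enum pipe pipe)
{
    if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
        return false;

    /* The domain is known to be powered, so the read is safe. */
    return intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_ENABLE;
}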

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
             u32 target_dc_state)
{
    static const u32 states[] = {
        DC_STATE_EN_UPTO_DC6,
        DC_STATE_EN_UPTO_DC5,
        DC_STATE_EN_DC3CO,
        DC_STATE_DISABLE,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
        if (target_dc_state != states[i])
            continue;

        if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
            break;

        target_dc_state = states[i + 1];
    }

    return target_dc_state;
}
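
/*
 * Worked example (illustrative): if allowed_dc_mask permits only
 * DC_STATE_EN_UPTO_DC5, a request for DC_STATE_EN_UPTO_DC6 fails the
 * mask check and falls back to DC_STATE_EN_UPTO_DC5, which is allowed
 * and gets returned. If nothing is allowed, the cascade ends at
 * DC_STATE_DISABLE.
 */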

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well's target_dc_state;
 * based on this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
                         u32 state)
{
    struct i915_power_well *power_well;
    bool dc_off_enabled;
    struct i915_power_domains *power_domains = &dev_priv->power_domains;

    mutex_lock(&power_domains->lock);
    power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

    if (drm_WARN_ON(&dev_priv->drm, !power_well))
        goto unlock;

    state = sanitize_target_dc_state(dev_priv, state);

    if (state == dev_priv->dmc.target_dc_state)
        goto unlock;

    dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
    /*
     * If the DC off power well is disabled, we need to enable and then
     * disable it so that the new target DC state takes effect.
     */
    if (!dc_off_enabled)
        intel_power_well_enable(dev_priv, power_well);

    dev_priv->dmc.target_dc_state = state;

    if (!dc_off_enabled)
        intel_power_well_disable(dev_priv, power_well);

unlock:
    mutex_unlock(&power_domains->lock);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

static void __async_put_domains_mask(struct i915_power_domains *power_domains,
                     struct intel_power_domain_mask *mask)
{
    bitmap_or(mask->bits,
          power_domains->async_put_domains[0].bits,
          power_domains->async_put_domains[1].bits,
          POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
    struct drm_i915_private *i915 = container_of(power_domains,
                             struct drm_i915_private,
                             power_domains);

    return !drm_WARN_ON(&i915->drm,
                bitmap_intersects(power_domains->async_put_domains[0].bits,
                          power_domains->async_put_domains[1].bits,
                          POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
    struct drm_i915_private *i915 = container_of(power_domains,
                             struct drm_i915_private,
                             power_domains);
    struct intel_power_domain_mask async_put_mask;
    enum intel_display_power_domain domain;
    bool err = false;

    err |= !assert_async_put_domain_masks_disjoint(power_domains);
    __async_put_domains_mask(power_domains, &async_put_mask);
    err |= drm_WARN_ON(&i915->drm,
               !!power_domains->async_put_wakeref !=
               !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

    for_each_power_domain(domain, &async_put_mask)
        err |= drm_WARN_ON(&i915->drm,
                   power_domains->domain_use_count[domain] != 1);

    return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
                const char *prefix, struct intel_power_domain_mask *mask)
{
    struct drm_i915_private *i915 = container_of(power_domains,
                             struct drm_i915_private,
                             power_domains);
    enum intel_display_power_domain domain;

    drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
    for_each_power_domain(domain, mask)
        drm_dbg(&i915->drm, "%s use_count %d\n",
            intel_display_power_domain_str(domain),
            power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
    struct drm_i915_private *i915 = container_of(power_domains,
                             struct drm_i915_private,
                             power_domains);

    drm_dbg(&i915->drm, "async_put_wakeref %u\n",
        power_domains->async_put_wakeref);

    print_power_domains(power_domains, "async_put_domains[0]",
                &power_domains->async_put_domains[0]);
    print_power_domains(power_domains, "async_put_domains[1]",
                &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
    if (!__async_put_domains_state_ok(power_domains))
        print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static void async_put_domains_mask(struct i915_power_domains *power_domains,
                   struct intel_power_domain_mask *mask)
{
    assert_async_put_domain_masks_disjoint(power_domains);

    __async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
                   enum intel_display_power_domain domain)
{
    assert_async_put_domain_masks_disjoint(power_domains);

    clear_bit(domain, power_domains->async_put_domains[0].bits);
    clear_bit(domain, power_domains->async_put_domains[1].bits);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
                       enum intel_display_power_domain domain)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct intel_power_domain_mask async_put_mask;
    bool ret = false;

    async_put_domains_mask(power_domains, &async_put_mask);
    if (!test_bit(domain, async_put_mask.bits))
        goto out_verify;

    async_put_domains_clear_domain(power_domains, domain);

    ret = true;

    async_put_domains_mask(power_domains, &async_put_mask);
    if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
        goto out_verify;

    cancel_delayed_work(&power_domains->async_put_work);
    intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
                 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
    verify_async_put_domains_state(power_domains);

    return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
                 enum intel_display_power_domain domain)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct i915_power_well *power_well;

    if (intel_display_power_grab_async_put_ref(dev_priv, domain))
        return;

    for_each_power_domain_well(dev_priv, power_well, domain)
        intel_power_well_get(dev_priv, power_well);

    power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
                    enum intel_display_power_domain domain)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

    mutex_lock(&power_domains->lock);
    __intel_display_power_get_domain(dev_priv, domain);
    mutex_unlock(&power_domains->lock);

    return wakeref;
}
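
/*
 * Illustrative usage sketch (not part of the original file): the
 * canonical get/access/put pairing. The domain chosen here is an
 * arbitrary example.
 */
static void __maybe_unused
example_power_get_put(struct drm_i915_private *dev_priv)
{
    intel_wakeref_t wakeref;

    /* Powers up PIPE_A's well and all the wells above it. */
    wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

    /* ... access PIPE_A hardware here ... */

    /* Symmetric release; pairs with the get above. */
    intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}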

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * is already enabled, and ensures that it stays powered up while the reference
 * is held. Therefore users should only grab a reference to the innermost power
 * domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * A wakeref when the power domain was enabled and a reference was grabbed,
 * 0 otherwise.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
                   enum intel_display_power_domain domain)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    intel_wakeref_t wakeref;
    bool is_enabled;

    wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
    if (!wakeref)
        return false;

    mutex_lock(&power_domains->lock);

    if (__intel_display_power_is_enabled(dev_priv, domain)) {
        __intel_display_power_get_domain(dev_priv, domain);
        is_enabled = true;
    } else {
        is_enabled = false;
    }

    mutex_unlock(&power_domains->lock);

    if (!is_enabled) {
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        wakeref = 0;
    }

    return wakeref;
}
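
/*
 * Illustrative usage sketch (not part of the original file): touch the
 * hardware only if its domain is already powered, without forcing a
 * power up. The AUX domain here is an arbitrary example.
 */
static void __maybe_unused
example_access_if_enabled(struct drm_i915_private *dev_priv)
{
    intel_wakeref_t wakeref;

    wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUX_A);
    if (!wakeref)
        return; /* domain is off; don't touch the hardware */

    /* ... access AUX A hardware here ... */

    intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
}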

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
                 enum intel_display_power_domain domain)
{
    struct i915_power_domains *power_domains;
    struct i915_power_well *power_well;
    const char *name = intel_display_power_domain_str(domain);
    struct intel_power_domain_mask async_put_mask;

    power_domains = &dev_priv->power_domains;

    drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
         "Use count on domain %s is already zero\n",
         name);
    async_put_domains_mask(power_domains, &async_put_mask);
    drm_WARN(&dev_priv->drm,
         test_bit(domain, async_put_mask.bits),
         "Async disabling of domain %s is pending\n",
         name);

    power_domains->domain_use_count[domain]--;

    for_each_power_domain_well_reverse(dev_priv, power_well, domain)
        intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
                      enum intel_display_power_domain domain)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;

    mutex_lock(&power_domains->lock);
    __intel_display_power_put_domain(dev_priv, domain);
    mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
                 intel_wakeref_t wakeref)
{
    struct drm_i915_private *i915 = container_of(power_domains,
                             struct drm_i915_private,
                             power_domains);
    drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
    power_domains->async_put_wakeref = wakeref;
    drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
                            &power_domains->async_put_work,
                            msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
              struct intel_power_domain_mask *mask)
{
    struct drm_i915_private *dev_priv =
        container_of(power_domains, struct drm_i915_private,
                 power_domains);
    struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
    enum intel_display_power_domain domain;
    intel_wakeref_t wakeref;

    /*
     * The caller must already hold a raw wakeref; upgrade that to a
     * proper wakeref to make the state checker happy about the HW
     * access during power well disabling.
     */
    assert_rpm_raw_wakeref_held(rpm);
    wakeref = intel_runtime_pm_get(rpm);

    for_each_power_domain(domain, mask) {
        /* Clear before put, so put's sanity check is happy. */
        async_put_domains_clear_domain(power_domains, domain);
        __intel_display_power_put_domain(dev_priv, domain);
    }

    intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
    struct drm_i915_private *dev_priv =
        container_of(work, struct drm_i915_private,
                 power_domains.async_put_work.work);
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
    intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
    intel_wakeref_t old_work_wakeref = 0;

    mutex_lock(&power_domains->lock);

    /*
     * Bail out if all the domain refs pending to be released were grabbed
     * by subsequent gets or a flush_work.
     */
    old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
    if (!old_work_wakeref)
        goto out_verify;

    release_async_put_domains(power_domains,
                  &power_domains->async_put_domains[0]);

    /* Requeue the work if more domains were async put meanwhile. */
    if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
        bitmap_copy(power_domains->async_put_domains[0].bits,
                power_domains->async_put_domains[1].bits,
                POWER_DOMAIN_NUM);
        bitmap_zero(power_domains->async_put_domains[1].bits,
                POWER_DOMAIN_NUM);
        queue_async_put_domains_work(power_domains,
                         fetch_and_zero(&new_work_wakeref));
    } else {
        /*
         * Cancel the work that got queued after this one got dequeued,
         * since here we released the corresponding async-put reference.
         */
        cancel_delayed_work(&power_domains->async_put_work);
    }

out_verify:
    verify_async_put_domains_state(power_domains);

    mutex_unlock(&power_domains->lock);

    if (old_work_wakeref)
        intel_runtime_pm_put_raw(rpm, old_work_wakeref);
    if (new_work_wakeref)
        intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
                     enum intel_display_power_domain domain,
                     intel_wakeref_t wakeref)
{
    struct i915_power_domains *power_domains = &i915->power_domains;
    struct intel_runtime_pm *rpm = &i915->runtime_pm;
    intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

    mutex_lock(&power_domains->lock);

    if (power_domains->domain_use_count[domain] > 1) {
        __intel_display_power_put_domain(i915, domain);

        goto out_verify;
    }

    drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

    /* Let a pending work requeue itself or queue a new one. */
    if (power_domains->async_put_wakeref) {
        set_bit(domain, power_domains->async_put_domains[1].bits);
    } else {
        set_bit(domain, power_domains->async_put_domains[0].bits);
        queue_async_put_domains_work(power_domains,
                         fetch_and_zero(&work_wakeref));
    }

out_verify:
    verify_async_put_domains_state(power_domains);

    mutex_unlock(&power_domains->lock);

    if (work_wakeref)
        intel_runtime_pm_put_raw(rpm, work_wakeref);

    intel_runtime_pm_put(rpm, wakeref);
}
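
/*
 * Illustrative usage sketch (not part of the original file), assuming
 * the intel_display_power_put_async() wrapper from
 * intel_display_power.h: drop a reference without blocking on the power
 * well disabling, which instead happens from the delayed work roughly
 * 100 msec later unless the domain is re-acquired in the meantime.
 */
static void __maybe_unused
example_put_async(struct drm_i915_private *i915, intel_wakeref_t wakeref)
{
    intel_display_power_put_async(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref);
}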

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
    struct i915_power_domains *power_domains = &i915->power_domains;
    struct intel_power_domain_mask async_put_mask;
    intel_wakeref_t work_wakeref;

    mutex_lock(&power_domains->lock);

    work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
    if (!work_wakeref)
        goto out_verify;

    async_put_domains_mask(power_domains, &async_put_mask);
    release_async_put_domains(power_domains, &async_put_mask);
    cancel_delayed_work(&power_domains->async_put_work);

out_verify:
    verify_async_put_domains_state(power_domains);

    mutex_unlock(&power_domains->lock);

    if (work_wakeref)
        intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
    struct i915_power_domains *power_domains = &i915->power_domains;

    intel_display_power_flush_work(i915);
    cancel_delayed_work_sync(&power_domains->async_put_work);

    verify_async_put_domains_state(power_domains);

    drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
                 enum intel_display_power_domain domain,
                 intel_wakeref_t wakeref)
{
    __intel_display_power_put(dev_priv, domain);
    intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled,
 * and should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
                       enum intel_display_power_domain domain)
{
    __intel_display_power_put(dev_priv, domain);
    intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
                   struct intel_display_power_domain_set *power_domain_set,
                   enum intel_display_power_domain domain)
{
    intel_wakeref_t __maybe_unused wf;

    drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

    wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
    power_domain_set->wakerefs[domain] = wf;
#endif
    set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
                      struct intel_display_power_domain_set *power_domain_set,
                      enum intel_display_power_domain domain)
{
    intel_wakeref_t wf;

    drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

    wf = intel_display_power_get_if_enabled(i915, domain);
    if (!wf)
        return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
    power_domain_set->wakerefs[domain] = wf;
#endif
    set_bit(domain, power_domain_set->mask.bits);

    return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
                    struct intel_display_power_domain_set *power_domain_set,
                    struct intel_power_domain_mask *mask)
{
    enum intel_display_power_domain domain;

    drm_WARN_ON(&i915->drm,
            !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

    for_each_power_domain(domain, mask) {
        intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
        wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
        intel_display_power_put(i915, domain, wf);
        clear_bit(domain, power_domain_set->mask.bits);
    }
}
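
/*
 * Illustrative usage sketch (not part of the original file), assuming
 * the intel_display_power_put_all_in_set() helper from
 * intel_display_power.h: a domain set tracks which references (and,
 * with the debug kconfig, which wakerefs) were grabbed, so they can all
 * be dropped in one go.
 */
static void __maybe_unused
example_domain_set(struct drm_i915_private *i915)
{
    struct intel_display_power_domain_set set = {};

    intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
    intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_TRANSCODER_A);

    /* ... access the hardware ... */

    intel_display_power_put_all_in_set(i915, &set);
}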

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
                   int disable_power_well)
{
    if (disable_power_well >= 0)
        return !!disable_power_well;

    return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
                   int enable_dc)
{
    u32 mask;
    int requested_dc;
    int max_dc;

    if (!HAS_DISPLAY(dev_priv))
        return 0;

    if (IS_DG2(dev_priv))
        max_dc = 0;
    else if (IS_DG1(dev_priv))
        max_dc = 3;
    else if (DISPLAY_VER(dev_priv) >= 12)
        max_dc = 4;
    else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
        max_dc = 1;
    else if (DISPLAY_VER(dev_priv) >= 9)
        max_dc = 2;
    else
        max_dc = 0;

    /*
     * DC9 has a separate HW flow from the rest of the DC states,
     * not depending on the DMC firmware. It's needed by system
     * suspend/resume, so allow it unconditionally.
     */
    mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
        DISPLAY_VER(dev_priv) >= 11 ?
           DC_STATE_EN_DC9 : 0;

    if (!dev_priv->params.disable_power_well)
        max_dc = 0;

    if (enable_dc >= 0 && enable_dc <= max_dc) {
        requested_dc = enable_dc;
    } else if (enable_dc == -1) {
        requested_dc = max_dc;
    } else if (enable_dc > max_dc && enable_dc <= 4) {
        drm_dbg_kms(&dev_priv->drm,
                "Adjusting requested max DC state (%d->%d)\n",
                enable_dc, max_dc);
        requested_dc = max_dc;
    } else {
        drm_err(&dev_priv->drm,
            "Unexpected value for enable_dc (%d)\n", enable_dc);
        requested_dc = max_dc;
    }

    switch (requested_dc) {
    case 4:
        mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
        break;
    case 3:
        mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
        break;
    case 2:
        mask |= DC_STATE_EN_UPTO_DC6;
        break;
    case 1:
        mask |= DC_STATE_EN_UPTO_DC5;
        break;
    }

    drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

    return mask;
}
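
/*
 * Worked example (illustrative): on a DISPLAY_VER() >= 12 platform
 * (other than DG1/DG2) with enable_dc=-1 and power wells enabled,
 * max_dc is 4, so the returned mask is
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6.
 */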

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;

    dev_priv->params.disable_power_well =
        sanitize_disable_power_well_option(dev_priv,
                           dev_priv->params.disable_power_well);
    dev_priv->dmc.allowed_dc_mask =
        get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

    dev_priv->dmc.target_dc_state =
        sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

    mutex_init(&power_domains->lock);

    INIT_DELAYED_WORK(&power_domains->async_put_work,
              intel_display_power_put_async_work);

    return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
    intel_display_power_map_cleanup(&dev_priv->power_domains);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct i915_power_well *power_well;

    mutex_lock(&power_domains->lock);
    for_each_power_well(dev_priv, power_well)
        intel_power_well_sync_hw(dev_priv, power_well);
    mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
                enum dbuf_slice slice, bool enable)
{
    i915_reg_t reg = DBUF_CTL_S(slice);
    bool state;

    intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
             enable ? DBUF_POWER_REQUEST : 0);
    intel_de_posting_read(dev_priv, reg);
    udelay(10);

    state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
    drm_WARN(&dev_priv->drm, enable != state,
         "DBuf slice %d power %s timeout!\n",
         slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
                 u8 req_slices)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
    enum dbuf_slice slice;

    drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
         "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
         req_slices, slice_mask);

    drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
            req_slices);

    /*
     * This might run in parallel with gen9_dc_off_power_well_enable()
     * being called from intel_dp_detect(), for instance. Without the
     * lock a race could trigger an assertion: gen9_assert_dbuf_enabled()
     * might preempt this after the registers were already updated but
     * before dev_priv was.
     */
    mutex_lock(&power_domains->lock);

    for_each_dbuf_slice(dev_priv, slice)
        gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

    dev_priv->dbuf.enabled_slices = req_slices;

    mutex_unlock(&power_domains->lock);
}
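
/*
 * Worked example (illustrative): on a two-slice platform, calling
 * gen9_dbuf_slices_update(i915, BIT(DBUF_S1) | BIT(DBUF_S2)) powers up
 * both slices, while a later call with just BIT(DBUF_S1) powers slice 2
 * back down.
 */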

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
    dev_priv->dbuf.enabled_slices =
        intel_enabled_dbuf_slices_mask(dev_priv);

    /*
     * Just power up at least 1 slice; we will figure out later which
     * slices we have and what we need.
     */
    gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
                dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
    gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
    enum dbuf_slice slice;

    if (IS_ALDERLAKE_P(dev_priv))
        return;

    for_each_dbuf_slice(dev_priv, slice)
        intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
                 DBUF_TRACKER_STATE_SERVICE_MASK,
                 DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
    unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
    u32 mask, val, i;

    if (IS_ALDERLAKE_P(dev_priv))
        return;

    mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
        MBUS_ABOX_BT_CREDIT_POOL2_MASK |
        MBUS_ABOX_B_CREDIT_MASK |
        MBUS_ABOX_BW_CREDIT_MASK;
    val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
        MBUS_ABOX_BT_CREDIT_POOL2(16) |
        MBUS_ABOX_B_CREDIT(1) |
        MBUS_ABOX_BW_CREDIT(1);

    /*
     * gen12 platforms that use abox1 and abox2 for pixel data reads still
     * expect us to program the abox_ctl0 register as well, even though
     * we don't have to program other instance-0 registers like BW_BUDDY.
     */
    if (DISPLAY_VER(dev_priv) == 12)
        abox_regs |= BIT(0);

    for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
        intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
    u32 val = intel_de_read(dev_priv, LCPLL_CTL);

    /*
     * The LCPLL register should be turned on by the BIOS. For now
     * let's just check its state and print errors in case
     * something is wrong.  Don't even try to turn it on.
     */

    if (val & LCPLL_CD_SOURCE_FCLK)
        drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

    if (val & LCPLL_PLL_DISABLE)
        drm_err(&dev_priv->drm, "LCPLL is disabled\n");

    if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
        drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
    struct drm_device *dev = &dev_priv->drm;
    struct intel_crtc *crtc;

    for_each_intel_crtc(dev, crtc)
        I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
                pipe_name(crtc->pipe));

    I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
            "Display power well on\n");
    I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
            "SPLL enabled\n");
    I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
            "WRPLL1 enabled\n");
    I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
            "WRPLL2 enabled\n");
    I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
            "Panel power on\n");
    I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
            "CPU PWM1 enabled\n");
    if (IS_HASWELL(dev_priv))
        I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
                "CPU PWM2 enabled\n");
    I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
            "PCH PWM1 enabled\n");
    I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
            "Utility pin enabled\n");
    I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
            "PCH GTC enabled\n");

    /*
     * In theory we can still leave IRQs enabled, as long as only the HPD
     * interrupts remain enabled. We used to check for that, but since it's
     * gen-specific and since we only disable LCPLL after we fully disable
     * the interrupts, the check below should be enough.
     */
    I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
    if (IS_HASWELL(dev_priv))
        return intel_de_read(dev_priv, D_COMP_HSW);
    else
        return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
    if (IS_HASWELL(dev_priv)) {
        if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
            drm_dbg_kms(&dev_priv->drm,
                    "Failed to write to D_COMP\n");
    } else {
        intel_de_write(dev_priv, D_COMP_BDW, val);
        intel_de_posting_read(dev_priv, D_COMP_BDW);
    }
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
                  bool switch_to_fclk, bool allow_power_down)
{
    u32 val;

    assert_can_disable_lcpll(dev_priv);

    val = intel_de_read(dev_priv, LCPLL_CTL);

    if (switch_to_fclk) {
        val |= LCPLL_CD_SOURCE_FCLK;
        intel_de_write(dev_priv, LCPLL_CTL, val);

        if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
                LCPLL_CD_SOURCE_FCLK_DONE, 1))
            drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

        val = intel_de_read(dev_priv, LCPLL_CTL);
    }

    val |= LCPLL_PLL_DISABLE;
    intel_de_write(dev_priv, LCPLL_CTL, val);
    intel_de_posting_read(dev_priv, LCPLL_CTL);

    if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
        drm_err(&dev_priv->drm, "LCPLL still locked\n");

    val = hsw_read_dcomp(dev_priv);
    val |= D_COMP_COMP_DISABLE;
    hsw_write_dcomp(dev_priv, val);
    ndelay(100);

    if (wait_for((hsw_read_dcomp(dev_priv) &
              D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
        drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

    if (allow_power_down) {
        val = intel_de_read(dev_priv, LCPLL_CTL);
        val |= LCPLL_POWER_DOWN_ALLOW;
        intel_de_write(dev_priv, LCPLL_CTL, val);
        intel_de_posting_read(dev_priv, LCPLL_CTL);
    }
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
    u32 val;

    val = intel_de_read(dev_priv, LCPLL_CTL);

    if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
            LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
        return;

    /*
     * Make sure we're not in PC8 state before disabling PC8, otherwise
     * we'll hang the machine. To prevent the PC8 state, just enable
     * force_wake.
     */
    intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

    if (val & LCPLL_POWER_DOWN_ALLOW) {
        val &= ~LCPLL_POWER_DOWN_ALLOW;
        intel_de_write(dev_priv, LCPLL_CTL, val);
        intel_de_posting_read(dev_priv, LCPLL_CTL);
    }

    val = hsw_read_dcomp(dev_priv);
    val |= D_COMP_COMP_FORCE;
    val &= ~D_COMP_COMP_DISABLE;
    hsw_write_dcomp(dev_priv, val);

    val = intel_de_read(dev_priv, LCPLL_CTL);
    val &= ~LCPLL_PLL_DISABLE;
    intel_de_write(dev_priv, LCPLL_CTL, val);

    if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
        drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

    if (val & LCPLL_CD_SOURCE_FCLK) {
        val = intel_de_read(dev_priv, LCPLL_CTL);
        val &= ~LCPLL_CD_SOURCE_FCLK;
        intel_de_write(dev_priv, LCPLL_CTL, val);

        if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
                 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
            drm_err(&dev_priv->drm,
                "Switching back to LCPLL failed\n");
    }

    intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

    intel_update_cdclk(dev_priv);
    intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
    u32 val;

    drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

    if (HAS_PCH_LPT_LP(dev_priv)) {
        val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
        val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
        intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
    }

    lpt_disable_clkout_dp(dev_priv);
    hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
    u32 val;

    drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

    hsw_restore_lcpll(dev_priv);
    intel_init_pch_refclk(dev_priv);

    if (HAS_PCH_LPT_LP(dev_priv)) {
        val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
        val |= PCH_LP_PARTITION_LEVEL_DISABLE;
        intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
    }
}

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
                      bool enable)
{
    i915_reg_t reg;
    u32 reset_bits, val;

    if (IS_IVYBRIDGE(dev_priv)) {
        reg = GEN7_MSG_CTL;
        reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
    } else {
        reg = HSW_NDE_RSTWRN_OPT;
        reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
    }

    val = intel_de_read(dev_priv, reg);

    if (enable)
        val |= reset_bits;
    else
        val &= ~reset_bits;

    intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
                  bool resume)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct i915_power_well *well;

    gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

    /* enable PCH reset handshake */
    intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

    if (!HAS_DISPLAY(dev_priv))
        return;

    /* enable PG1 and Misc I/O */
    mutex_lock(&power_domains->lock);

    well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
    intel_power_well_enable(dev_priv, well);

    well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
    intel_power_well_enable(dev_priv, well);

    mutex_unlock(&power_domains->lock);

    intel_cdclk_init_hw(dev_priv);

    gen9_dbuf_enable(dev_priv);

    if (resume)
        intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct i915_power_well *well;

    if (!HAS_DISPLAY(dev_priv))
        return;

    gen9_disable_dc_states(dev_priv);

    gen9_dbuf_disable(dev_priv);

    intel_cdclk_uninit_hw(dev_priv);

    /* The spec doesn't call for removing the reset handshake flag */
    /* disable PG1 and Misc I/O */

    mutex_lock(&power_domains->lock);

    /*
     * BSpec says to keep the MISC IO power well enabled here, only
     * remove our request for power well 1.
     * Note that even though the driver's request is removed power well 1
     * may stay enabled after this due to DMC's own request on it.
     */
    well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
    intel_power_well_disable(dev_priv, well);

    mutex_unlock(&power_domains->lock);

    usleep_range(10, 30);       /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct i915_power_well *well;

    gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

    /*
     * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
     * or else the reset will hang because there is no PCH to respond.
     * Move the handshake programming to initialization sequence.
     * Previously was left up to BIOS.
     */
    intel_pch_reset_handshake(dev_priv, false);

    if (!HAS_DISPLAY(dev_priv))
        return;

    /* Enable PG1 */
    mutex_lock(&power_domains->lock);

    well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
    intel_power_well_enable(dev_priv, well);

    mutex_unlock(&power_domains->lock);

    intel_cdclk_init_hw(dev_priv);

    gen9_dbuf_enable(dev_priv);

    if (resume)
        intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
    struct i915_power_domains *power_domains = &dev_priv->power_domains;
    struct i915_power_well *well;

    if (!HAS_DISPLAY(dev_priv))
        return;

    gen9_disable_dc_states(dev_priv);

    gen9_dbuf_disable(dev_priv);

    intel_cdclk_uninit_hw(dev_priv);

    /* The spec doesn't call for removing the reset handshake flag */

    /*
     * Disable PW1 (PG1).
     * Note that even though the driver's request is removed power well 1
     * may stay enabled after this due to DMC's own request on it.
     */
    mutex_lock(&power_domains->lock);

    well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
    intel_power_well_disable(dev_priv, well);

    mutex_unlock(&power_domains->lock);

    usleep_range(10, 30);       /* 10 us delay per Bspec */
}
1524 
1525 struct buddy_page_mask {
1526     u32 page_mask;
1527     u8 type;
1528     u8 num_channels;
1529 };
1530 
1531 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
1532     { .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
1533     { .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
1534     { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
1535     { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
1536     { .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
1537     { .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
1538     { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
1539     { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
1540     {}
1541 };
1542 
1543 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
1544     { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
1545     { .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
1546     { .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
1547     { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
1548     { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
1549     { .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
1550     { .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
1551     { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
1552     {}
1553 };
1554 
1555 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
1556 {
1557     enum intel_dram_type type = dev_priv->dram_info.type;
1558     u8 num_channels = dev_priv->dram_info.num_channels;
1559     const struct buddy_page_mask *table;
1560     unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
1561     int config, i;
1562 
1563     /* BW_BUDDY registers are not used on dGPUs beyond DG1 */
1564     if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
1565         return;
1566 
1567     if (IS_ALDERLAKE_S(dev_priv) ||
1568         IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1569         IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1570         IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
1571         /* Wa_1409767108:tgl,dg1,adl-s */
1572         table = wa_1409767108_buddy_page_masks;
1573     else
1574         table = tgl_buddy_page_masks;
1575 
1576     for (config = 0; table[config].page_mask != 0; config++)
1577         if (table[config].num_channels == num_channels &&
1578             table[config].type == type)
1579             break;
1580 
1581     if (table[config].page_mask == 0) {
1582         drm_dbg(&dev_priv->drm,
1583             "Unknown memory configuration; disabling address buddy logic.\n");
1584         for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
1585             intel_de_write(dev_priv, BW_BUDDY_CTL(i),
1586                        BW_BUDDY_DISABLE);
1587     } else {
1588         for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
1589             intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
1590                        table[config].page_mask);
1591 
1592             /* Wa_22010178259:tgl,dg1,rkl,adl-s */
1593             if (DISPLAY_VER(dev_priv) == 12)
1594                 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
1595                          BW_BUDDY_TLB_REQ_TIMER_MASK,
1596                          BW_BUDDY_TLB_REQ_TIMER(0x8));
1597         }
1598     }
1599 }
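
/*
 * Illustrative sketch of the sentinel-terminated table scan used above: the
 * terminating "{}" entry has .page_mask == 0, so the loop ends either on a
 * matching entry or on the sentinel, and the caller tells the two apart by
 * checking page_mask afterwards. find_buddy_page_mask() is a hypothetical
 * helper, not part of the driver.
 */
static u32 find_buddy_page_mask(const struct buddy_page_mask *table,
				u8 num_channels, u8 type)
{
	int i;

	for (i = 0; table[i].page_mask != 0; i++)
		if (table[i].num_channels == num_channels &&
		    table[i].type == type)
			break;

	return table[i].page_mask;	/* 0 means "no match" */
}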
1600 
1601 static void icl_display_core_init(struct drm_i915_private *dev_priv,
1602                   bool resume)
1603 {
1604     struct i915_power_domains *power_domains = &dev_priv->power_domains;
1605     struct i915_power_well *well;
1606     u32 val;
1607 
1608     gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1609 
1610     /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
1611     if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
1612         INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
1613         intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
1614                  PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
1615 
1616     /* 1. Enable PCH reset handshake. */
1617     intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
1618 
1619     if (!HAS_DISPLAY(dev_priv))
1620         return;
1621 
1622     /* 2. Initialize all combo phys */
1623     intel_combo_phy_init(dev_priv);
1624 
1625     /*
1626      * 3. Enable Power Well 1 (PG1).
1627      *    The AUX IO power wells will be enabled on demand.
1628      */
1629     mutex_lock(&power_domains->lock);
1630     well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1631     intel_power_well_enable(dev_priv, well);
1632     mutex_unlock(&power_domains->lock);
1633 
1634     /* 4. Enable CDCLK. */
1635     intel_cdclk_init_hw(dev_priv);
1636 
1637     if (DISPLAY_VER(dev_priv) >= 12)
1638         gen12_dbuf_slices_config(dev_priv);
1639 
1640     /* 5. Enable DBUF. */
1641     gen9_dbuf_enable(dev_priv);
1642 
1643     /* 6. Setup MBUS. */
1644     icl_mbus_init(dev_priv);
1645 
1646     /* 7. Program arbiter BW_BUDDY registers */
1647     if (DISPLAY_VER(dev_priv) >= 12)
1648         tgl_bw_buddy_init(dev_priv);
1649 
1650     /* 8. Ensure PHYs have completed calibration and adaptation */
1651     if (IS_DG2(dev_priv))
1652         intel_snps_phy_wait_for_calibration(dev_priv);
1653 
1654     if (resume)
1655         intel_dmc_load_program(dev_priv);
1656 
1657     /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
1658     if (DISPLAY_VER(dev_priv) >= 12) {
1659         val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
1660               DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
1661         intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
1662     }
1663 
1664     /* Wa_14011503030:xelpd */
1665     if (DISPLAY_VER(dev_priv) >= 13)
1666         intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
1667 }
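
/*
 * A note on the intel_de_rmw()/intel_uncore_rmw() calls in the sequence
 * above: both follow the usual (reg, clear, set) read-modify-write
 * convention, i.e. roughly the operation sketched by the hypothetical
 * helper below. Passing clear == 0 (as in the "Wa_14011294188" step)
 * therefore only ORs in the requested bits without touching the rest.
 */
static u32 rmw_value_sketch(u32 old, u32 clear, u32 set)
{
	return (old & ~clear) | set;
}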
1668 
1669 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
1670 {
1671     struct i915_power_domains *power_domains = &dev_priv->power_domains;
1672     struct i915_power_well *well;
1673 
1674     if (!HAS_DISPLAY(dev_priv))
1675         return;
1676 
1677     gen9_disable_dc_states(dev_priv);
1678 
1679     /* 1. Disable all display engine functions -> already done */
1680 
1681     /* 2. Disable DBUF */
1682     gen9_dbuf_disable(dev_priv);
1683 
1684     /* 3. Disable CD clock */
1685     intel_cdclk_uninit_hw(dev_priv);
1686 
1687     /*
1688      * 4. Disable Power Well 1 (PG1).
1689      *    The AUX IO power wells are toggled on demand, so they are already
1690      *    disabled at this point.
1691      */
1692     mutex_lock(&power_domains->lock);
1693     well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1694     intel_power_well_disable(dev_priv, well);
1695     mutex_unlock(&power_domains->lock);
1696 
1697     /* 5. Uninitialize the combo PHYs. */
1698     intel_combo_phy_uninit(dev_priv);
1699 }
1700 
1701 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1702 {
1703     struct i915_power_well *cmn_bc =
1704         lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1705     struct i915_power_well *cmn_d =
1706         lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1707 
1708     /*
1709      * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1710      * workaround never ever read DISPLAY_PHY_CONTROL, and
1711      * instead maintain a shadow copy ourselves. Use the actual
1712      * power well state and lane status to reconstruct the
1713      * expected initial value.
1714      */
1715     dev_priv->chv_phy_control =
1716         PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1717         PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1718         PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1719         PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1720         PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1721 
1722     /*
1723      * If all lanes are disabled we leave the override disabled
1724      * with all power down bits cleared to match the state we
1725      * would use after disabling the port. Otherwise enable the
1726      * override and set the lane powerdown bits according to the
1727      * current lane status.
1728      */
1729     if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
1730         u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
1731         unsigned int mask;
1732 
1733         mask = status & DPLL_PORTB_READY_MASK;
1734         if (mask == 0xf)
1735             mask = 0x0;
1736         else
1737             dev_priv->chv_phy_control |=
1738                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
1739 
1740         dev_priv->chv_phy_control |=
1741             PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
1742 
1743         mask = (status & DPLL_PORTC_READY_MASK) >> 4;
1744         if (mask == 0xf)
1745             mask = 0x0;
1746         else
1747             dev_priv->chv_phy_control |=
1748                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
1749 
1750         dev_priv->chv_phy_control |=
1751             PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
1752 
1753         dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
1754 
1755         dev_priv->chv_phy_assert[DPIO_PHY0] = false;
1756     } else {
1757         dev_priv->chv_phy_assert[DPIO_PHY0] = true;
1758     }
1759 
1760     if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
1761         u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
1762         unsigned int mask;
1763 
1764         mask = status & DPLL_PORTD_READY_MASK;
1765 
1766         if (mask == 0xf)
1767             mask = 0x0;
1768         else
1769             dev_priv->chv_phy_control |=
1770                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
1771 
1772         dev_priv->chv_phy_control |=
1773             PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
1774 
1775         dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
1776 
1777         dev_priv->chv_phy_assert[DPIO_PHY1] = false;
1778     } else {
1779         dev_priv->chv_phy_assert[DPIO_PHY1] = true;
1780     }
1781 
1782     drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
1783             dev_priv->chv_phy_control);
1784 
1785     /* Defer application of initial phy_control to enabling the power well */
1786 }
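
/*
 * Illustrative sketch of the shadow-register pattern used above: because
 * reading DISPLAY_PHY_CONTROL can corrupt it, every update is applied to
 * the software copy in dev_priv->chv_phy_control and only that copy is
 * ever written out. chv_phy_control_update() is a hypothetical helper,
 * shown for illustration only.
 */
static void chv_phy_control_update(struct drm_i915_private *dev_priv,
				   u32 clear, u32 set)
{
	dev_priv->chv_phy_control &= ~clear;
	dev_priv->chv_phy_control |= set;

	/* Write the shadow copy out; never read the register back. */
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);
}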
1787 
1788 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
1789 {
1790     struct i915_power_well *cmn =
1791         lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1792     struct i915_power_well *disp2d =
1793         lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
1794 
1795     /* If the display might already be active, skip this */
1796     if (intel_power_well_is_enabled(dev_priv, cmn) &&
1797         intel_power_well_is_enabled(dev_priv, disp2d) &&
1798         intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
1799         return;
1800 
1801     drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
1802 
1803     /* cmnlane needs DPLL registers */
1804     intel_power_well_enable(dev_priv, disp2d);
1805 
1806     /*
1807      * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1808      * Need to assert and de-assert PHY SB reset by gating the
1809      * common lane power, then un-gating it.
1810      * Simply ungating isn't enough to reset the PHY enough to get
1811      * ports and lanes running.
1812      */
1813     intel_power_well_disable(dev_priv, cmn);
1814 }
1815 
1816 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
1817 {
1818     bool ret;
1819 
1820     vlv_punit_get(dev_priv);
1821     ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
1822     vlv_punit_put(dev_priv);
1823 
1824     return ret;
1825 }
1826 
1827 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
1828 {
1829     drm_WARN(&dev_priv->drm,
1830          !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
1831          "VED not power gated\n");
1832 }
1833 
1834 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
1835 {
1836     static const struct pci_device_id isp_ids[] = {
1837         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
1838         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
1839         {}
1840     };
1841 
1842     drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
1843          !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
1844          "ISP not power gated\n");
1845 }
1846 
1847 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
1848 
1849 /**
1850  * intel_power_domains_init_hw - initialize hardware power domain state
1851  * @i915: i915 device instance
1852  * @resume: true if called from a resume code path
1853  *
1854  * This function initializes the hardware power domain state and enables all
1855  * power wells belonging to the INIT power domain. Power wells in other
1856  * domains (and not in the INIT domain) are referenced or disabled by
1857  * intel_modeset_readout_hw_state(). After that the reference count of each
1858  * power well must match its HW enabled state, see
1859  * intel_power_domains_verify_state().
1860  *
1861  * It will return with power domains disabled (to be enabled later by
1862  * intel_power_domains_enable()) and must be paired with
1863  * intel_power_domains_driver_remove().
1864  */
1865 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
1866 {
1867     struct i915_power_domains *power_domains = &i915->power_domains;
1868 
1869     power_domains->initializing = true;
1870 
1871     if (DISPLAY_VER(i915) >= 11) {
1872         icl_display_core_init(i915, resume);
1873     } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
1874         bxt_display_core_init(i915, resume);
1875     } else if (DISPLAY_VER(i915) == 9) {
1876         skl_display_core_init(i915, resume);
1877     } else if (IS_CHERRYVIEW(i915)) {
1878         mutex_lock(&power_domains->lock);
1879         chv_phy_control_init(i915);
1880         mutex_unlock(&power_domains->lock);
1881         assert_isp_power_gated(i915);
1882     } else if (IS_VALLEYVIEW(i915)) {
1883         mutex_lock(&power_domains->lock);
1884         vlv_cmnlane_wa(i915);
1885         mutex_unlock(&power_domains->lock);
1886         assert_ved_power_gated(i915);
1887         assert_isp_power_gated(i915);
1888     } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
1889         hsw_assert_cdclk(i915);
1890         intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1891     } else if (IS_IVYBRIDGE(i915)) {
1892         intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1893     }
1894 
1895     /*
1896      * Keep all power wells enabled for any dependent HW access during
1897      * initialization and to make sure we keep BIOS enabled display HW
1898      * resources powered until display HW readout is complete. We drop
1899      * this reference in intel_power_domains_enable().
1900      */
1901     drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
1902     power_domains->init_wakeref =
1903         intel_display_power_get(i915, POWER_DOMAIN_INIT);
1904 
1905     /* If the user disabled power saving, keep all power wells enabled. */
1906     if (!i915->params.disable_power_well) {
1907         drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
1908         i915->power_domains.disable_wakeref = intel_display_power_get(i915,
1909                                           POWER_DOMAIN_INIT);
1910     }
1911     intel_power_domains_sync_hw(i915);
1912 
1913     power_domains->initializing = false;
1914 }
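
/*
 * The pairing described in the kernel-doc above amounts to the following
 * call ordering (a sketch of the expected usage, error handling omitted):
 *
 *   driver load:    intel_power_domains_init_hw(i915, false);
 *                   ... display HW state readout ...
 *                   intel_power_domains_enable(i915);
 *
 *   driver remove:  intel_power_domains_disable(i915);
 *                   ...
 *                   intel_power_domains_driver_remove(i915);
 */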
1915 
1916 /**
1917  * intel_power_domains_driver_remove - deinitialize hw power domain state
1918  * @i915: i915 device instance
1919  *
1920  * De-initializes the display power domain HW state. It also ensures that the
1921  * device stays powered up so that the driver can be reloaded.
1922  *
1923  * It must be called with power domains already disabled (after a call to
1924  * intel_power_domains_disable()) and must be paired with
1925  * intel_power_domains_init_hw().
1926  */
1927 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
1928 {
1929     intel_wakeref_t wakeref __maybe_unused =
1930         fetch_and_zero(&i915->power_domains.init_wakeref);
1931 
1932     /* Remove the refcount we took to keep power well support disabled. */
1933     if (!i915->params.disable_power_well)
1934         intel_display_power_put(i915, POWER_DOMAIN_INIT,
1935                     fetch_and_zero(&i915->power_domains.disable_wakeref));
1936 
1937     intel_display_power_flush_work_sync(i915);
1938 
1939     intel_power_domains_verify_state(i915);
1940 
1941     /* Keep the power well enabled, but cancel its rpm wakeref. */
1942     intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1943 }
1944 
1945 /**
1946  * intel_power_domains_sanitize_state - sanitize power domains state
1947  * @i915: i915 device instance
1948  *
1949  * Sanitize the power domains state during driver loading and system resume.
1950  * The function will disable all display power wells that the BIOS has
1951  * enabled but that have no user (any user of a power well has already
1952  * taken a reference on it by the time this function is called, after the
1953  * state of all the pipe, encoder, etc. HW resources has been sanitized).
1954  */
1955 void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
1956 {
1957     struct i915_power_domains *power_domains = &i915->power_domains;
1958     struct i915_power_well *power_well;
1959 
1960     mutex_lock(&power_domains->lock);
1961 
1962     for_each_power_well_reverse(i915, power_well) {
1963         if (power_well->desc->always_on || power_well->count ||
1964             !intel_power_well_is_enabled(i915, power_well))
1965             continue;
1966 
1967         drm_dbg_kms(&i915->drm,
1968                 "BIOS left unused %s power well enabled, disabling it\n",
1969                 intel_power_well_name(power_well));
1970         intel_power_well_disable(i915, power_well);
1971     }
1972 
1973     mutex_unlock(&power_domains->lock);
1974 }
1975 
1976 /**
1977  * intel_power_domains_enable - enable toggling of display power wells
1978  * @i915: i915 device instance
1979  *
1980  * Enable the on-demand enabling/disabling of the display power wells. Note that
1981  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
1982  * only at specific points of the display modeset sequence, thus they are not
1983  * affected by the intel_power_domains_enable()/disable() calls. The purpose
1984  * of these functions is to keep the rest of the power wells enabled until the end
1985  * of display HW readout (which will acquire the power references reflecting
1986  * the current HW state).
1987  */
1988 void intel_power_domains_enable(struct drm_i915_private *i915)
1989 {
1990     intel_wakeref_t wakeref __maybe_unused =
1991         fetch_and_zero(&i915->power_domains.init_wakeref);
1992 
1993     intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
1994     intel_power_domains_verify_state(i915);
1995 }
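
/*
 * fetch_and_zero(), used above to hand off init_wakeref exactly once, reads
 * a location and clears it in a single expression. A sketch of its usual
 * shape (see i915_utils.h for the actual definition):
 *
 *   #define fetch_and_zero(ptr) ({		\
 *	typeof(*ptr) __T = *(ptr);		\
 *	*(ptr) = (typeof(*ptr))0;		\
 *	__T;					\
 *   })
 *
 * Because the stored wakeref is zeroed as it is fetched, a second, unpaired
 * put of the same reference cannot happen.
 */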
1996 
1997 /**
1998  * intel_power_domains_disable - disable toggling of display power wells
1999  * @i915: i915 device instance
2000  *
2001  * Disable the on-demand enabling/disabling of the display power wells. See
2002  * intel_power_domains_enable() for which power wells this call controls.
2003  */
2004 void intel_power_domains_disable(struct drm_i915_private *i915)
2005 {
2006     struct i915_power_domains *power_domains = &i915->power_domains;
2007 
2008     drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2009     power_domains->init_wakeref =
2010         intel_display_power_get(i915, POWER_DOMAIN_INIT);
2011 
2012     intel_power_domains_verify_state(i915);
2013 }
2014 
2015 /**
2016  * intel_power_domains_suspend - suspend power domain state
2017  * @i915: i915 device instance
2018  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
2019  *
2020  * This function prepares the hardware power domain state before entering
2021  * system suspend.
2022  *
2023  * It must be called with power domains already disabled (after a call to
2024  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
2025  */
2026 void intel_power_domains_suspend(struct drm_i915_private *i915,
2027                  enum i915_drm_suspend_mode suspend_mode)
2028 {
2029     struct i915_power_domains *power_domains = &i915->power_domains;
2030     intel_wakeref_t wakeref __maybe_unused =
2031         fetch_and_zero(&power_domains->init_wakeref);
2032 
2033     intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2034 
2035     /*
2036      * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2037      * support, don't manually deinit the power domains. This also means
2038      * the DMC firmware will stay active; it will power down any HW
2039      * resources as required and also enable deeper system power states
2040      * that would be blocked if the firmware were inactive.
2041      */
2042     if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
2043         suspend_mode == I915_DRM_SUSPEND_IDLE &&
2044         intel_dmc_has_payload(i915)) {
2045         intel_display_power_flush_work(i915);
2046         intel_power_domains_verify_state(i915);
2047         return;
2048     }
2049 
2050     /*
2051      * Even if power well support was disabled we still want to disable
2052      * power wells if power domains must be deinitialized for suspend.
2053      */
2054     if (!i915->params.disable_power_well)
2055         intel_display_power_put(i915, POWER_DOMAIN_INIT,
2056                     fetch_and_zero(&i915->power_domains.disable_wakeref));
2057 
2058     intel_display_power_flush_work(i915);
2059     intel_power_domains_verify_state(i915);
2060 
2061     if (DISPLAY_VER(i915) >= 11)
2062         icl_display_core_uninit(i915);
2063     else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
2064         bxt_display_core_uninit(i915);
2065     else if (DISPLAY_VER(i915) == 9)
2066         skl_display_core_uninit(i915);
2067 
2068     power_domains->display_core_suspended = true;
2069 }
2070 
2071 /**
2072  * intel_power_domains_resume - resume power domain state
2073  * @i915: i915 device instance
2074  *
2075  * This function resumes the hardware power domain state during system resume.
2076  *
2077  * It will return with power domain support disabled (to be enabled later by
2078  * intel_power_domains_enable()) and must be paired with
2079  * intel_power_domains_suspend().
2080  */
2081 void intel_power_domains_resume(struct drm_i915_private *i915)
2082 {
2083     struct i915_power_domains *power_domains = &i915->power_domains;
2084 
2085     if (power_domains->display_core_suspended) {
2086         intel_power_domains_init_hw(i915, true);
2087         power_domains->display_core_suspended = false;
2088     } else {
2089         drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2090         power_domains->init_wakeref =
2091             intel_display_power_get(i915, POWER_DOMAIN_INIT);
2092     }
2093 
2094     intel_power_domains_verify_state(i915);
2095 }
2096 
2097 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2098 
2099 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
2100 {
2101     struct i915_power_domains *power_domains = &i915->power_domains;
2102     struct i915_power_well *power_well;
2103 
2104     for_each_power_well(i915, power_well) {
2105         enum intel_display_power_domain domain;
2106 
2107         drm_dbg(&i915->drm, "%-25s %d\n",
2108             intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2109 
2110         for_each_power_domain(domain, intel_power_well_domains(power_well))
2111             drm_dbg(&i915->drm, "  %-23s %d\n",
2112                 intel_display_power_domain_str(domain),
2113                 power_domains->domain_use_count[domain]);
2114     }
2115 }
2116 
2117 /**
2118  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2119  * @i915: i915 device instance
2120  *
2121  * Verify if the reference count of each power well matches its HW enabled
2122  * state and the total refcount of the domains it belongs to. This must be
2123  * called after modeset HW state sanitization, which is responsible for
2124  * acquiring reference counts for any power wells in use and disabling the
2125  * ones left on by BIOS but not required by any active output.
2126  */
2127 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2128 {
2129     struct i915_power_domains *power_domains = &i915->power_domains;
2130     struct i915_power_well *power_well;
2131     bool dump_domain_info;
2132 
2133     mutex_lock(&power_domains->lock);
2134 
2135     verify_async_put_domains_state(power_domains);
2136 
2137     dump_domain_info = false;
2138     for_each_power_well(i915, power_well) {
2139         enum intel_display_power_domain domain;
2140         int domains_count;
2141         bool enabled;
2142 
2143         enabled = intel_power_well_is_enabled(i915, power_well);
2144         if ((intel_power_well_refcount(power_well) ||
2145              intel_power_well_is_always_on(power_well)) !=
2146             enabled)
2147             drm_err(&i915->drm,
2148                 "power well %s state mismatch (refcount %d/enabled %d)",
2149                 intel_power_well_name(power_well),
2150                 intel_power_well_refcount(power_well), enabled);
2151 
2152         domains_count = 0;
2153         for_each_power_domain(domain, intel_power_well_domains(power_well))
2154             domains_count += power_domains->domain_use_count[domain];
2155 
2156         if (intel_power_well_refcount(power_well) != domains_count) {
2157             drm_err(&i915->drm,
2158                 "power well %s refcount/domain refcount mismatch "
2159                 "(refcount %d/domains refcount %d)\n",
2160                 intel_power_well_name(power_well),
2161                 intel_power_well_refcount(power_well),
2162                 domains_count);
2163             dump_domain_info = true;
2164         }
2165     }
2166 
2167     if (dump_domain_info) {
2168         static bool dumped;
2169 
2170         if (!dumped) {
2171             intel_power_domains_dump_info(i915);
2172             dumped = true;
2173         }
2174     }
2175 
2176     mutex_unlock(&power_domains->lock);
2177 }
2178 
2179 #else
2180 
2181 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2182 {
2183 }
2184 
2185 #endif
2186 
2187 void intel_display_power_suspend_late(struct drm_i915_private *i915)
2188 {
2189     if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2190         IS_BROXTON(i915)) {
2191         bxt_enable_dc9(i915);
2192     } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2193         hsw_enable_pc8(i915);
2194     }
2195 
2196     /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2197     if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2198         intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2199 }
2200 
2201 void intel_display_power_resume_early(struct drm_i915_private *i915)
2202 {
2203     if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2204         IS_BROXTON(i915)) {
2205         gen9_sanitize_dc_state(i915);
2206         bxt_disable_dc9(i915);
2207     } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2208         hsw_disable_pc8(i915);
2209     }
2210 
2211     /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2212     if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2213         intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
2214 }
2215 
2216 void intel_display_power_suspend(struct drm_i915_private *i915)
2217 {
2218     if (DISPLAY_VER(i915) >= 11) {
2219         icl_display_core_uninit(i915);
2220         bxt_enable_dc9(i915);
2221     } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2222         bxt_display_core_uninit(i915);
2223         bxt_enable_dc9(i915);
2224     } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2225         hsw_enable_pc8(i915);
2226     }
2227 }
2228 
2229 void intel_display_power_resume(struct drm_i915_private *i915)
2230 {
2231     if (DISPLAY_VER(i915) >= 11) {
2232         bxt_disable_dc9(i915);
2233         icl_display_core_init(i915, true);
2234         if (intel_dmc_has_payload(i915)) {
2235             if (i915->dmc.allowed_dc_mask &
2236                 DC_STATE_EN_UPTO_DC6)
2237                 skl_enable_dc6(i915);
2238             else if (i915->dmc.allowed_dc_mask &
2239                  DC_STATE_EN_UPTO_DC5)
2240                 gen9_enable_dc5(i915);
2241         }
2242     } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2243         bxt_disable_dc9(i915);
2244         bxt_display_core_init(i915, true);
2245         if (intel_dmc_has_payload(i915) &&
2246             (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2247             gen9_enable_dc5(i915);
2248     } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2249         hsw_disable_pc8(i915);
2250     }
2251 }
2252 
2253 void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
2254 {
2255     struct i915_power_domains *power_domains = &i915->power_domains;
2256     int i;
2257 
2258     mutex_lock(&power_domains->lock);
2259 
2260     seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2261     for (i = 0; i < power_domains->power_well_count; i++) {
2262         struct i915_power_well *power_well;
2263         enum intel_display_power_domain power_domain;
2264 
2265         power_well = &power_domains->power_wells[i];
2266         seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
2267                intel_power_well_refcount(power_well));
2268 
2269         for_each_power_domain(power_domain, intel_power_well_domains(power_well))
2270             seq_printf(m, "  %-23s %d\n",
2271                    intel_display_power_domain_str(power_domain),
2272                    power_domains->domain_use_count[power_domain]);
2273     }
2274 
2275     mutex_unlock(&power_domains->lock);
2276 }
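
/*
 * The seq_printf() calls above produce a two-column table along these lines
 * (well names and counts below are illustrative only):
 *
 *   Power well/domain         Use count
 *   always-on                 0
 *   power well 1              1
 *     PIPE_A                  1
 *     TRANSCODER_A            0
 */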
2277 
2278 struct intel_ddi_port_domains {
2279     enum port port_start;
2280     enum port port_end;
2281     enum aux_ch aux_ch_start;
2282     enum aux_ch aux_ch_end;
2283 
2284     enum intel_display_power_domain ddi_lanes;
2285     enum intel_display_power_domain ddi_io;
2286     enum intel_display_power_domain aux_legacy_usbc;
2287     enum intel_display_power_domain aux_tbt;
2288 };
2289 
2290 static const struct intel_ddi_port_domains
2291 i9xx_port_domains[] = {
2292     {
2293         .port_start = PORT_A,
2294         .port_end = PORT_F,
2295         .aux_ch_start = AUX_CH_A,
2296         .aux_ch_end = AUX_CH_F,
2297 
2298         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2299         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2300         .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2301         .aux_tbt = POWER_DOMAIN_INVALID,
2302     },
2303 };
2304 
2305 static const struct intel_ddi_port_domains
2306 d11_port_domains[] = {
2307     {
2308         .port_start = PORT_A,
2309         .port_end = PORT_B,
2310         .aux_ch_start = AUX_CH_A,
2311         .aux_ch_end = AUX_CH_B,
2312 
2313         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2314         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2315         .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2316         .aux_tbt = POWER_DOMAIN_INVALID,
2317     }, {
2318         .port_start = PORT_C,
2319         .port_end = PORT_F,
2320         .aux_ch_start = AUX_CH_C,
2321         .aux_ch_end = AUX_CH_F,
2322 
2323         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
2324         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
2325         .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
2326         .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2327     },
2328 };
2329 
2330 static const struct intel_ddi_port_domains
2331 d12_port_domains[] = {
2332     {
2333         .port_start = PORT_A,
2334         .port_end = PORT_C,
2335         .aux_ch_start = AUX_CH_A,
2336         .aux_ch_end = AUX_CH_C,
2337 
2338         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2339         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2340         .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2341         .aux_tbt = POWER_DOMAIN_INVALID,
2342     }, {
2343         .port_start = PORT_TC1,
2344         .port_end = PORT_TC6,
2345         .aux_ch_start = AUX_CH_USBC1,
2346         .aux_ch_end = AUX_CH_USBC6,
2347 
2348         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2349         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2350         .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2351         .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2352     },
2353 };
2354 
2355 static const struct intel_ddi_port_domains
2356 d13_port_domains[] = {
2357     {
2358         .port_start = PORT_A,
2359         .port_end = PORT_C,
2360         .aux_ch_start = AUX_CH_A,
2361         .aux_ch_end = AUX_CH_C,
2362 
2363         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2364         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2365         .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2366         .aux_tbt = POWER_DOMAIN_INVALID,
2367     }, {
2368         .port_start = PORT_TC1,
2369         .port_end = PORT_TC4,
2370         .aux_ch_start = AUX_CH_USBC1,
2371         .aux_ch_end = AUX_CH_USBC4,
2372 
2373         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2374         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2375         .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2376         .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2377     }, {
2378         .port_start = PORT_D_XELPD,
2379         .port_end = PORT_E_XELPD,
2380         .aux_ch_start = AUX_CH_D_XELPD,
2381         .aux_ch_end = AUX_CH_E_XELPD,
2382 
2383         .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
2384         .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
2385         .aux_legacy_usbc = POWER_DOMAIN_AUX_D,
2386         .aux_tbt = POWER_DOMAIN_INVALID,
2387     },
2388 };
2389 
2390 static void
2391 intel_port_domains_for_platform(struct drm_i915_private *i915,
2392                 const struct intel_ddi_port_domains **domains,
2393                 int *domains_size)
2394 {
2395     if (DISPLAY_VER(i915) >= 13) {
2396         *domains = d13_port_domains;
2397         *domains_size = ARRAY_SIZE(d13_port_domains);
2398     } else if (DISPLAY_VER(i915) >= 12) {
2399         *domains = d12_port_domains;
2400         *domains_size = ARRAY_SIZE(d12_port_domains);
2401     } else if (DISPLAY_VER(i915) >= 11) {
2402         *domains = d11_port_domains;
2403         *domains_size = ARRAY_SIZE(d11_port_domains);
2404     } else {
2405         *domains = i9xx_port_domains;
2406         *domains_size = ARRAY_SIZE(i9xx_port_domains);
2407     }
2408 }
2409 
2410 static const struct intel_ddi_port_domains *
2411 intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
2412 {
2413     const struct intel_ddi_port_domains *domains;
2414     int domains_size;
2415     int i;
2416 
2417     intel_port_domains_for_platform(i915, &domains, &domains_size);
2418     for (i = 0; i < domains_size; i++)
2419         if (port >= domains[i].port_start && port <= domains[i].port_end)
2420             return &domains[i];
2421 
2422     return NULL;
2423 }
2424 
2425 enum intel_display_power_domain
2426 intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
2427 {
2428     const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2429 
2430     if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
2431         return POWER_DOMAIN_PORT_DDI_IO_A;
2432 
2433     return domains->ddi_io + (int)(port - domains->port_start);
2434 }
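
/*
 * The "base domain + (port - port_start)" arithmetic above (and in the
 * _lanes/_aux variants around it) assumes that each block of power domain
 * enum values is contiguous and ordered the same way as the matching port
 * or AUX channel range. Illustrative example, under that assumption, for a
 * DISPLAY_VER == 12 platform:
 *
 *   intel_display_power_ddi_io_domain(i915, PORT_TC3)
 *     == POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC3 - PORT_TC1)
 *     == POWER_DOMAIN_PORT_DDI_IO_TC3
 */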
2435 
2436 enum intel_display_power_domain
2437 intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
2438 {
2439     const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2440 
2441     if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
2442         return POWER_DOMAIN_PORT_DDI_LANES_A;
2443 
2444     return domains->ddi_lanes + (int)(port - domains->port_start);
2445 }
2446 
2447 static const struct intel_ddi_port_domains *
2448 intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
2449 {
2450     const struct intel_ddi_port_domains *domains;
2451     int domains_size;
2452     int i;
2453 
2454     intel_port_domains_for_platform(i915, &domains, &domains_size);
2455     for (i = 0; i < domains_size; i++)
2456         if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
2457             return &domains[i];
2458 
2459     return NULL;
2460 }
2461 
2462 enum intel_display_power_domain
2463 intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2464 {
2465     const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2466 
2467     if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
2468         return POWER_DOMAIN_AUX_A;
2469 
2470     return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
2471 }
2472 
2473 enum intel_display_power_domain
2474 intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2475 {
2476     const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2477 
2478     if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
2479         return POWER_DOMAIN_AUX_TBT1;
2480 
2481     return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
2482 }