// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

0006 #include "g4x_dp.h"
0007 #include "i915_drv.h"
0008 #include "intel_de.h"
0009 #include "intel_display_power_well.h"
0010 #include "intel_display_types.h"
0011 #include "intel_dp.h"
0012 #include "intel_dpll.h"
0013 #include "intel_lvds.h"
0014 #include "intel_pps.h"
0015
0016 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
0017 enum pipe pipe);
0018
0019 static void pps_init_delays(struct intel_dp *intel_dp);
0020 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
0021
0022 intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
0023 {
0024 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0025 intel_wakeref_t wakeref;
0026
	/*
	 * See intel_pps_reset_all() why we need a power domain reference here.
	 */
0030 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
0031 mutex_lock(&dev_priv->pps_mutex);
0032
0033 return wakeref;
0034 }
0035
0036 intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
0037 intel_wakeref_t wakeref)
0038 {
0039 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0040
0041 mutex_unlock(&dev_priv->pps_mutex);
0042 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
0043
0044 return 0;
0045 }
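/*
 * A minimal usage sketch: callers take the PPS lock via the
 * with_intel_pps_lock() helper (assumed here to wrap the
 * intel_pps_lock()/intel_pps_unlock() pair above), as done for example in
 * intel_pps_wait_power_cycle() further down in this file:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_pps_lock(intel_dp, wakeref)
 *		wait_panel_power_cycle(intel_dp);
 */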
0046
0047 static void
0048 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
0049 {
0050 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0051 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
0052 enum pipe pipe = intel_dp->pps.pps_pipe;
0053 bool pll_enabled, release_cl_override = false;
0054 enum dpio_phy phy = DPIO_PHY(pipe);
0055 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
0056 u32 DP;
0057
0058 if (drm_WARN(&dev_priv->drm,
0059 intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
0060 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
0061 pipe_name(pipe), dig_port->base.base.base.id,
0062 dig_port->base.base.name))
0063 return;
0064
0065 drm_dbg_kms(&dev_priv->drm,
0066 "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
0067 pipe_name(pipe), dig_port->base.base.base.id,
0068 dig_port->base.base.name);
0069
	/*
	 * Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
0073 DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
0074 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
0075 DP |= DP_PORT_WIDTH(1);
0076 DP |= DP_LINK_TRAIN_PAT_1;
0077
0078 if (IS_CHERRYVIEW(dev_priv))
0079 DP |= DP_PIPE_SEL_CHV(pipe);
0080 else
0081 DP |= DP_PIPE_SEL(pipe);
0082
0083 pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
0084
	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
0089 if (!pll_enabled) {
0090 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
0091 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
0092
0093 if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
0094 drm_err(&dev_priv->drm,
0095 "Failed to force on pll for pipe %c!\n",
0096 pipe_name(pipe));
0097 return;
0098 }
0099 }
0100
	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
0107 intel_de_write(dev_priv, intel_dp->output_reg, DP);
0108 intel_de_posting_read(dev_priv, intel_dp->output_reg);
0109
0110 intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
0111 intel_de_posting_read(dev_priv, intel_dp->output_reg);
0112
0113 intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
0114 intel_de_posting_read(dev_priv, intel_dp->output_reg);
0115
0116 if (!pll_enabled) {
0117 vlv_force_pll_off(dev_priv, pipe);
0118
0119 if (release_cl_override)
0120 chv_phy_powergate_ch(dev_priv, phy, ch, false);
0121 }
0122 }
0123
0124 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
0125 {
0126 struct intel_encoder *encoder;
0127 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
0128
	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
0133 for_each_intel_dp(&dev_priv->drm, encoder) {
0134 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
0135
0136 if (encoder->type == INTEL_OUTPUT_EDP) {
0137 drm_WARN_ON(&dev_priv->drm,
0138 intel_dp->pps.active_pipe != INVALID_PIPE &&
0139 intel_dp->pps.active_pipe !=
0140 intel_dp->pps.pps_pipe);
0141
0142 if (intel_dp->pps.pps_pipe != INVALID_PIPE)
0143 pipes &= ~(1 << intel_dp->pps.pps_pipe);
0144 } else {
0145 drm_WARN_ON(&dev_priv->drm,
0146 intel_dp->pps.pps_pipe != INVALID_PIPE);
0147
0148 if (intel_dp->pps.active_pipe != INVALID_PIPE)
0149 pipes &= ~(1 << intel_dp->pps.active_pipe);
0150 }
0151 }
0152
0153 if (pipes == 0)
0154 return INVALID_PIPE;
0155
0156 return ffs(pipes) - 1;
0157 }
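/*
 * Worked example of the bitmask above: if the eDP port on pipe A already
 * owns its power sequencer, pipes == BIT(PIPE_B) and ffs(pipes) - 1 yields
 * PIPE_B; if both sequencers are claimed, pipes == 0 and INVALID_PIPE is
 * returned.
 */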
0158
0159 static enum pipe
0160 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
0161 {
0162 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0163 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
0164 enum pipe pipe;
0165
0166 lockdep_assert_held(&dev_priv->pps_mutex);
0167
	/* We should never land here with regular DP ports */
0169 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
0170
0171 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
0172 intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
0173
0174 if (intel_dp->pps.pps_pipe != INVALID_PIPE)
0175 return intel_dp->pps.pps_pipe;
0176
0177 pipe = vlv_find_free_pps(dev_priv);
0178
	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
0183 if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
0184 pipe = PIPE_A;
0185
0186 vlv_steal_power_sequencer(dev_priv, pipe);
0187 intel_dp->pps.pps_pipe = pipe;
0188
0189 drm_dbg_kms(&dev_priv->drm,
0190 "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
0191 pipe_name(intel_dp->pps.pps_pipe),
0192 dig_port->base.base.base.id,
0193 dig_port->base.base.name);
0194
	/* init power sequencer on this pipe and port */
0196 pps_init_delays(intel_dp);
0197 pps_init_registers(intel_dp, true);
0198
	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
0203 vlv_power_sequencer_kick(intel_dp);
0204
0205 return intel_dp->pps.pps_pipe;
0206 }
0207
0208 static int
0209 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
0210 {
0211 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0212 struct intel_connector *connector = intel_dp->attached_connector;
0213 int backlight_controller = connector->panel.vbt.backlight.controller;
0214
0215 lockdep_assert_held(&dev_priv->pps_mutex);
0216
	/* We should never land here with regular DP ports */
0218 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
0219
0220 if (!intel_dp->pps.pps_reset)
0221 return backlight_controller;
0222
0223 intel_dp->pps.pps_reset = false;
0224
	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
0229 pps_init_registers(intel_dp, false);
0230
0231 return backlight_controller;
0232 }
0233
0234 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
0235 enum pipe pipe);
0236
0237 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
0238 enum pipe pipe)
0239 {
0240 return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
0241 }
0242
0243 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
0244 enum pipe pipe)
0245 {
0246 return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
0247 }
0248
0249 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
0250 enum pipe pipe)
0251 {
0252 return true;
0253 }
0254
0255 static enum pipe
0256 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
0257 enum port port,
0258 vlv_pipe_check pipe_check)
0259 {
0260 enum pipe pipe;
0261
0262 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
0263 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
0264 PANEL_PORT_SELECT_MASK;
0265
0266 if (port_sel != PANEL_PORT_SELECT_VLV(port))
0267 continue;
0268
0269 if (!pipe_check(dev_priv, pipe))
0270 continue;
0271
0272 return pipe;
0273 }
0274
0275 return INVALID_PIPE;
0276 }
0277
0278 static void
0279 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
0280 {
0281 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0282 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
0283 enum port port = dig_port->base.port;
0284
0285 lockdep_assert_held(&dev_priv->pps_mutex);
0286
	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
0289 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
0290 vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
0292 if (intel_dp->pps.pps_pipe == INVALID_PIPE)
0293 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
0294 vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
0296 if (intel_dp->pps.pps_pipe == INVALID_PIPE)
0297 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
0298 vlv_pipe_any);
0299
	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
0301 if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
0302 drm_dbg_kms(&dev_priv->drm,
0303 "no initial power sequencer for [ENCODER:%d:%s]\n",
0304 dig_port->base.base.base.id,
0305 dig_port->base.base.name);
0306 return;
0307 }
0308
0309 drm_dbg_kms(&dev_priv->drm,
0310 "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
0311 dig_port->base.base.base.id,
0312 dig_port->base.base.name,
0313 pipe_name(intel_dp->pps.pps_pipe));
0314 }
0315
0316 void intel_pps_reset_all(struct drm_i915_private *dev_priv)
0317 {
0318 struct intel_encoder *encoder;
0319
0320 if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
0321 return;
0322
0323 if (!HAS_DISPLAY(dev_priv))
0324 return;
0325
	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power
	 * domain reference get/put must be done while _not_ holding pps_mutex.
	 *
	 * intel_pps_{lock,unlock}() do these steps in the correct order, so
	 * one should use them always.
	 */
0336 for_each_intel_dp(&dev_priv->drm, encoder) {
0337 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
0338
0339 drm_WARN_ON(&dev_priv->drm,
0340 intel_dp->pps.active_pipe != INVALID_PIPE);
0341
0342 if (encoder->type != INTEL_OUTPUT_EDP)
0343 continue;
0344
0345 if (DISPLAY_VER(dev_priv) >= 9)
0346 intel_dp->pps.pps_reset = true;
0347 else
0348 intel_dp->pps.pps_pipe = INVALID_PIPE;
0349 }
0350 }
0351
0352 struct pps_registers {
0353 i915_reg_t pp_ctrl;
0354 i915_reg_t pp_stat;
0355 i915_reg_t pp_on;
0356 i915_reg_t pp_off;
0357 i915_reg_t pp_div;
0358 };
0359
0360 static void intel_pps_get_registers(struct intel_dp *intel_dp,
0361 struct pps_registers *regs)
0362 {
0363 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0364 int pps_idx = 0;
0365
0366 memset(regs, 0, sizeof(*regs));
0367
0368 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
0369 pps_idx = bxt_power_sequencer_idx(intel_dp);
0370 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
0371 pps_idx = vlv_power_sequencer_pipe(intel_dp);
0372
0373 regs->pp_ctrl = PP_CONTROL(pps_idx);
0374 regs->pp_stat = PP_STATUS(pps_idx);
0375 regs->pp_on = PP_ON_DELAYS(pps_idx);
0376 regs->pp_off = PP_OFF_DELAYS(pps_idx);
0377
	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
0379 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
0380 INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
0381 regs->pp_div = INVALID_MMIO_REG;
0382 else
0383 regs->pp_div = PP_DIVISOR(pps_idx);
0384 }
0385
0386 static i915_reg_t
0387 _pp_ctrl_reg(struct intel_dp *intel_dp)
0388 {
0389 struct pps_registers regs;
0390
	intel_pps_get_registers(intel_dp, &regs);
0392
0393 return regs.pp_ctrl;
0394 }
0395
0396 static i915_reg_t
0397 _pp_stat_reg(struct intel_dp *intel_dp)
0398 {
0399 struct pps_registers regs;
0400
	intel_pps_get_registers(intel_dp, &regs);
0402
0403 return regs.pp_stat;
0404 }
0405
0406 static bool edp_have_panel_power(struct intel_dp *intel_dp)
0407 {
0408 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0409
0410 lockdep_assert_held(&dev_priv->pps_mutex);
0411
0412 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
0413 intel_dp->pps.pps_pipe == INVALID_PIPE)
0414 return false;
0415
0416 return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
0417 }
0418
0419 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
0420 {
0421 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0422
0423 lockdep_assert_held(&dev_priv->pps_mutex);
0424
0425 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
0426 intel_dp->pps.pps_pipe == INVALID_PIPE)
0427 return false;
0428
0429 return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
0430 }
0431
0432 void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
0433 {
0434 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0435
0436 if (!intel_dp_is_edp(intel_dp))
0437 return;
0438
0439 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
0440 drm_WARN(&dev_priv->drm, 1,
0441 "eDP powered off while attempting aux channel communication.\n");
0442 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
0443 intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
0444 intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
0445 }
0446 }
0447
0448 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
0449 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
0450
0451 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
0452 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
0453
0454 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
0455 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
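/*
 * These mask/value pairs are consumed by wait_panel_status() below: e.g.
 * wait_panel_on() polls PP_STATUS until
 * (status & IDLE_ON_MASK) == IDLE_ON_VALUE, i.e. the panel is on, no
 * sequencing is in progress, and the sequencer state machine sits idle in
 * the "on" state.
 */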
0456
0457 static void intel_pps_verify_state(struct intel_dp *intel_dp);
0458
0459 static void wait_panel_status(struct intel_dp *intel_dp,
0460 u32 mask,
0461 u32 value)
0462 {
0463 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0464 i915_reg_t pp_stat_reg, pp_ctrl_reg;
0465
0466 lockdep_assert_held(&dev_priv->pps_mutex);
0467
0468 intel_pps_verify_state(intel_dp);
0469
0470 pp_stat_reg = _pp_stat_reg(intel_dp);
0471 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
0472
0473 drm_dbg_kms(&dev_priv->drm,
0474 "mask %08x value %08x status %08x control %08x\n",
0475 mask, value,
0476 intel_de_read(dev_priv, pp_stat_reg),
0477 intel_de_read(dev_priv, pp_ctrl_reg));
0478
0479 if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
0480 mask, value, 5000))
0481 drm_err(&dev_priv->drm,
0482 "Panel status timeout: status %08x control %08x\n",
0483 intel_de_read(dev_priv, pp_stat_reg),
0484 intel_de_read(dev_priv, pp_ctrl_reg));
0485
0486 drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
0487 }
0488
0489 static void wait_panel_on(struct intel_dp *intel_dp)
0490 {
0491 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
0492
0493 drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
0494 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
0495 }
0496
0497 static void wait_panel_off(struct intel_dp *intel_dp)
0498 {
0499 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
0500
0501 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
0502 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
0503 }
0504
0505 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
0506 {
0507 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
0508 ktime_t panel_power_on_time;
0509 s64 panel_power_off_duration;
0510
0511 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
0512
	/*
	 * Take the difference of current time and panel power off time
	 * and then make the panel wait for t11_t12 if needed.
	 */
0515 panel_power_on_time = ktime_get_boottime();
0516 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
0517
	/*
	 * When we disable the VDD override bit last we have to do the manual
	 * wait.
	 */
0520 if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
0521 wait_remaining_ms_from_jiffies(jiffies,
0522 intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
0523
0524 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
0525 }
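/*
 * Example of the arithmetic above: if the panel was switched off 300 ms ago
 * and panel_power_cycle_delay (T11_T12) is 500 ms, the code sleeps for the
 * remaining ~200 ms before polling for the power-cycle idle state.
 */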
0526
0527 void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
0528 {
0529 intel_wakeref_t wakeref;
0530
0531 if (!intel_dp_is_edp(intel_dp))
0532 return;
0533
0534 with_intel_pps_lock(intel_dp, wakeref)
0535 wait_panel_power_cycle(intel_dp);
0536 }
0537
0538 static void wait_backlight_on(struct intel_dp *intel_dp)
0539 {
0540 wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
0541 intel_dp->pps.backlight_on_delay);
0542 }
0543
0544 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
0545 {
0546 wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
0547 intel_dp->pps.backlight_off_delay);
0548 }
0549
/*
 * Read the current pp_control value, unlocking the register if it
 * is locked
 */
0554 static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
0555 {
0556 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0557 u32 control;
0558
0559 lockdep_assert_held(&dev_priv->pps_mutex);
0560
0561 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
0562 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
0563 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
0564 control &= ~PANEL_UNLOCK_MASK;
0565 control |= PANEL_UNLOCK_REGS;
0566 }
0567 return control;
0568 }
0569
/*
 * Must be paired with intel_pps_vdd_off_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
0575 bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
0576 {
0577 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0578 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
0579 u32 pp;
0580 i915_reg_t pp_stat_reg, pp_ctrl_reg;
0581 bool need_to_disable = !intel_dp->pps.want_panel_vdd;
0582
0583 lockdep_assert_held(&dev_priv->pps_mutex);
0584
0585 if (!intel_dp_is_edp(intel_dp))
0586 return false;
0587
0588 cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
0589 intel_dp->pps.want_panel_vdd = true;
0590
0591 if (edp_have_panel_vdd(intel_dp))
0592 return need_to_disable;
0593
0594 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
0595 intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
0596 intel_aux_power_domain(dig_port));
0597
0598 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
0599 dig_port->base.base.base.id,
0600 dig_port->base.base.name);
0601
0602 if (!edp_have_panel_power(intel_dp))
0603 wait_panel_power_cycle(intel_dp);
0604
0605 pp = ilk_get_pp_control(intel_dp);
0606 pp |= EDP_FORCE_VDD;
0607
0608 pp_stat_reg = _pp_stat_reg(intel_dp);
0609 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
0610
0611 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0612 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0613 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
0614 intel_de_read(dev_priv, pp_stat_reg),
0615 intel_de_read(dev_priv, pp_ctrl_reg));
0616
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
0619 if (!edp_have_panel_power(intel_dp)) {
0620 drm_dbg_kms(&dev_priv->drm,
0621 "[ENCODER:%d:%s] panel power wasn't enabled\n",
0622 dig_port->base.base.base.id,
0623 dig_port->base.base.name);
0624 msleep(intel_dp->pps.panel_power_up_delay);
0625 }
0626
0627 return need_to_disable;
0628 }
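/*
 * Sketch of the expected pairing under pps_mutex (the AUX transfer path is
 * the typical caller; the exact caller code is not reproduced here):
 *
 *	bool vdd;
 *
 *	with_intel_pps_lock(intel_dp, wakeref) {
 *		vdd = intel_pps_vdd_on_unlocked(intel_dp);
 *
 *		... access DPCD / panel registers over AUX ...
 *
 *		if (vdd)
 *			intel_pps_vdd_off_unlocked(intel_dp, false);
 *	}
 */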
0629
/*
 * Must be paired with intel_pps_vdd_off() or - to disable
 * both VDD and panel power - intel_pps_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
0636 void intel_pps_vdd_on(struct intel_dp *intel_dp)
0637 {
0638 intel_wakeref_t wakeref;
0639 bool vdd;
0640
0641 if (!intel_dp_is_edp(intel_dp))
0642 return;
0643
0644 vdd = false;
0645 with_intel_pps_lock(intel_dp, wakeref)
0646 vdd = intel_pps_vdd_on_unlocked(intel_dp);
0647 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
0648 dp_to_dig_port(intel_dp)->base.base.base.id,
0649 dp_to_dig_port(intel_dp)->base.base.name);
0650 }
0651
0652 static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
0653 {
0654 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0655 struct intel_digital_port *dig_port =
0656 dp_to_dig_port(intel_dp);
0657 u32 pp;
0658 i915_reg_t pp_stat_reg, pp_ctrl_reg;
0659
0660 lockdep_assert_held(&dev_priv->pps_mutex);
0661
0662 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
0663
0664 if (!edp_have_panel_vdd(intel_dp))
0665 return;
0666
0667 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
0668 dig_port->base.base.base.id,
0669 dig_port->base.base.name);
0670
0671 pp = ilk_get_pp_control(intel_dp);
0672 pp &= ~EDP_FORCE_VDD;
0673
0674 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
0675 pp_stat_reg = _pp_stat_reg(intel_dp);
0676
0677 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0678 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0679
	/* Make sure sequencer is idle before allowing subsequent activity */
0681 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
0682 intel_de_read(dev_priv, pp_stat_reg),
0683 intel_de_read(dev_priv, pp_ctrl_reg));
0684
0685 if ((pp & PANEL_POWER_ON) == 0)
0686 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
0687
0688 intel_display_power_put(dev_priv,
0689 intel_aux_power_domain(dig_port),
0690 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
0691 }
0692
0693 void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
0694 {
0695 intel_wakeref_t wakeref;
0696
0697 if (!intel_dp_is_edp(intel_dp))
0698 return;
0699
0700 cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
0705 with_intel_pps_lock(intel_dp, wakeref)
0706 intel_pps_vdd_off_sync_unlocked(intel_dp);
0707 }
0708
0709 static void edp_panel_vdd_work(struct work_struct *__work)
0710 {
0711 struct intel_pps *pps = container_of(to_delayed_work(__work),
0712 struct intel_pps, panel_vdd_work);
0713 struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
0714 intel_wakeref_t wakeref;
0715
0716 with_intel_pps_lock(intel_dp, wakeref) {
0717 if (!intel_dp->pps.want_panel_vdd)
0718 intel_pps_vdd_off_sync_unlocked(intel_dp);
0719 }
0720 }
0721
0722 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
0723 {
0724 unsigned long delay;
0725
	/*
	 * We may not yet know the real power sequencing delays,
	 * so keep VDD enabled until we're done with init.
	 */
0730 if (intel_dp->pps.initializing)
0731 return;
0732
	/*
	 * Queue the timer to fire a long time from now (relative to the
	 * power down delay) to keep the panel power up across a sequence
	 * of operations.
	 */
0738 delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
0739 schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
0740 }
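/*
 * Example of the delay above: with a 500 ms panel_power_cycle_delay the
 * delayed VDD off fires roughly 2.5 seconds later, so a burst of back to
 * back operations doesn't turn VDD off and on again between each one.
 */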
0741
/*
 * Must be paired with intel_pps_vdd_on_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
0747 void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
0748 {
0749 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0750
0751 lockdep_assert_held(&dev_priv->pps_mutex);
0752
0753 if (!intel_dp_is_edp(intel_dp))
0754 return;
0755
0756 I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
0757 dp_to_dig_port(intel_dp)->base.base.base.id,
0758 dp_to_dig_port(intel_dp)->base.base.name);
0759
0760 intel_dp->pps.want_panel_vdd = false;
0761
0762 if (sync)
0763 intel_pps_vdd_off_sync_unlocked(intel_dp);
0764 else
0765 edp_panel_vdd_schedule_off(intel_dp);
0766 }
0767
0768 void intel_pps_on_unlocked(struct intel_dp *intel_dp)
0769 {
0770 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0771 u32 pp;
0772 i915_reg_t pp_ctrl_reg;
0773
0774 lockdep_assert_held(&dev_priv->pps_mutex);
0775
0776 if (!intel_dp_is_edp(intel_dp))
0777 return;
0778
0779 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
0780 dp_to_dig_port(intel_dp)->base.base.base.id,
0781 dp_to_dig_port(intel_dp)->base.base.name);
0782
0783 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
0784 "[ENCODER:%d:%s] panel power already on\n",
0785 dp_to_dig_port(intel_dp)->base.base.base.id,
0786 dp_to_dig_port(intel_dp)->base.base.name))
0787 return;
0788
0789 wait_panel_power_cycle(intel_dp);
0790
0791 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
0792 pp = ilk_get_pp_control(intel_dp);
0793 if (IS_IRONLAKE(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
0795 pp &= ~PANEL_POWER_RESET;
0796 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0797 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0798 }
0799
0800 pp |= PANEL_POWER_ON;
0801 if (!IS_IRONLAKE(dev_priv))
0802 pp |= PANEL_POWER_RESET;
0803
0804 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0805 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0806
0807 wait_panel_on(intel_dp);
0808 intel_dp->pps.last_power_on = jiffies;
0809
0810 if (IS_IRONLAKE(dev_priv)) {
0811 pp |= PANEL_POWER_RESET;
0812 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0813 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0814 }
0815 }
0816
0817 void intel_pps_on(struct intel_dp *intel_dp)
0818 {
0819 intel_wakeref_t wakeref;
0820
0821 if (!intel_dp_is_edp(intel_dp))
0822 return;
0823
0824 with_intel_pps_lock(intel_dp, wakeref)
0825 intel_pps_on_unlocked(intel_dp);
0826 }
0827
0828 void intel_pps_off_unlocked(struct intel_dp *intel_dp)
0829 {
0830 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0831 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
0832 u32 pp;
0833 i915_reg_t pp_ctrl_reg;
0834
0835 lockdep_assert_held(&dev_priv->pps_mutex);
0836
0837 if (!intel_dp_is_edp(intel_dp))
0838 return;
0839
0840 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
0841 dig_port->base.base.base.id, dig_port->base.base.name);
0842
0843 drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
0844 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
0845 dig_port->base.base.base.id, dig_port->base.base.name);
0846
0847 pp = ilk_get_pp_control(intel_dp);
	/*
	 * We need to switch off panel power _and_ force vdd, for otherwise
	 * some panels get very unhappy and cease to work.
	 */
0850 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
0851 EDP_BLC_ENABLE);
0852
0853 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
0854
0855 intel_dp->pps.want_panel_vdd = false;
0856
0857 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0858 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0859
0860 wait_panel_off(intel_dp);
0861 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
0862
	/* We got a reference when we enabled the VDD. */
0864 intel_display_power_put(dev_priv,
0865 intel_aux_power_domain(dig_port),
0866 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
0867 }
0868
0869 void intel_pps_off(struct intel_dp *intel_dp)
0870 {
0871 intel_wakeref_t wakeref;
0872
0873 if (!intel_dp_is_edp(intel_dp))
0874 return;
0875
0876 with_intel_pps_lock(intel_dp, wakeref)
0877 intel_pps_off_unlocked(intel_dp);
0878 }
0879
/* Enable backlight in the panel power control. */
0881 void intel_pps_backlight_on(struct intel_dp *intel_dp)
0882 {
0883 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0884 intel_wakeref_t wakeref;
0885
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * turning on the backlight.
	 */
0892 wait_backlight_on(intel_dp);
0893
0894 with_intel_pps_lock(intel_dp, wakeref) {
0895 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
0896 u32 pp;
0897
0898 pp = ilk_get_pp_control(intel_dp);
0899 pp |= EDP_BLC_ENABLE;
0900
0901 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0902 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0903 }
0904 }
0905
/* Disable backlight in the panel power control. */
0907 void intel_pps_backlight_off(struct intel_dp *intel_dp)
0908 {
0909 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
0910 intel_wakeref_t wakeref;
0911
0912 if (!intel_dp_is_edp(intel_dp))
0913 return;
0914
0915 with_intel_pps_lock(intel_dp, wakeref) {
0916 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
0917 u32 pp;
0918
0919 pp = ilk_get_pp_control(intel_dp);
0920 pp &= ~EDP_BLC_ENABLE;
0921
0922 intel_de_write(dev_priv, pp_ctrl_reg, pp);
0923 intel_de_posting_read(dev_priv, pp_ctrl_reg);
0924 }
0925
0926 intel_dp->pps.last_backlight_off = jiffies;
0927 edp_wait_backlight_off(intel_dp);
0928 }
0929
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
0934 void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
0935 {
0936 struct drm_i915_private *i915 = to_i915(connector->base.dev);
0937 struct intel_dp *intel_dp = intel_attached_dp(connector);
0938 intel_wakeref_t wakeref;
0939 bool is_enabled;
0940
0941 is_enabled = false;
0942 with_intel_pps_lock(intel_dp, wakeref)
0943 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
0944 if (is_enabled == enable)
0945 return;
0946
0947 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
0948 enable ? "enable" : "disable");
0949
0950 if (enable)
0951 intel_pps_backlight_on(intel_dp);
0952 else
0953 intel_pps_backlight_off(intel_dp);
0954 }
0955
0956 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
0957 {
0958 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
0959 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
0960 enum pipe pipe = intel_dp->pps.pps_pipe;
0961 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
0962
0963 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
0964
0965 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
0966 return;
0967
0968 intel_pps_vdd_off_sync_unlocked(intel_dp);
0969
	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * its WARN_ON on the DP_PORT_EN bit. CHV on the other hand doesn't
	 * seem to mind having the same port selected in multiple power
	 * sequencers, but let's clear the port select always when logically
	 * disconnecting a power sequencer from a port.
	 */
0979 drm_dbg_kms(&dev_priv->drm,
0980 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
0981 pipe_name(pipe), dig_port->base.base.base.id,
0982 dig_port->base.base.name);
0983 intel_de_write(dev_priv, pp_on_reg, 0);
0984 intel_de_posting_read(dev_priv, pp_on_reg);
0985
0986 intel_dp->pps.pps_pipe = INVALID_PIPE;
0987 }
0988
0989 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
0990 enum pipe pipe)
0991 {
0992 struct intel_encoder *encoder;
0993
0994 lockdep_assert_held(&dev_priv->pps_mutex);
0995
0996 for_each_intel_dp(&dev_priv->drm, encoder) {
0997 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
0998
0999 drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
1000 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
1001 pipe_name(pipe), encoder->base.base.id,
1002 encoder->base.name);
1003
1004 if (intel_dp->pps.pps_pipe != pipe)
1005 continue;
1006
1007 drm_dbg_kms(&dev_priv->drm,
1008 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
1009 pipe_name(pipe), encoder->base.base.id,
1010 encoder->base.name);
1011
		/* make sure vdd is off before we steal it */
1013 vlv_detach_power_sequencer(intel_dp);
1014 }
1015 }
1016
1017 void vlv_pps_init(struct intel_encoder *encoder,
1018 const struct intel_crtc_state *crtc_state)
1019 {
1020 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1021 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1022 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1023
1024 lockdep_assert_held(&dev_priv->pps_mutex);
1025
1026 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1027
1028 if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1029 intel_dp->pps.pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
1035 vlv_detach_power_sequencer(intel_dp);
1036 }
1037
	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
1042 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
1043
1044 intel_dp->pps.active_pipe = crtc->pipe;
1045
1046 if (!intel_dp_is_edp(intel_dp))
1047 return;
1048
	/* now it's all ours */
1050 intel_dp->pps.pps_pipe = crtc->pipe;
1051
1052 drm_dbg_kms(&dev_priv->drm,
1053 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
1054 pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
1055 encoder->base.name);
1056
	/* init power sequencer on this pipe and port */
1058 pps_init_delays(intel_dp);
1059 pps_init_registers(intel_dp, true);
1060 }
1061
1062 static void pps_vdd_init(struct intel_dp *intel_dp)
1063 {
1064 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1065 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1066
1067 lockdep_assert_held(&dev_priv->pps_mutex);
1068
1069 if (!edp_have_panel_vdd(intel_dp))
1070 return;
1071
	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
1078 drm_dbg_kms(&dev_priv->drm,
1079 "VDD left on by BIOS, adjusting state tracking\n");
1080 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1081 intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1082 intel_aux_power_domain(dig_port));
1083 }
1084
1085 bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1086 {
1087 intel_wakeref_t wakeref;
1088 bool have_power = false;
1089
1090 with_intel_pps_lock(intel_dp, wakeref) {
1091 have_power = edp_have_panel_power(intel_dp) ||
1092 edp_have_panel_vdd(intel_dp);
1093 }
1094
1095 return have_power;
1096 }
1097
1098 static void pps_init_timestamps(struct intel_dp *intel_dp)
1099 {
1100 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1101 intel_dp->pps.last_power_on = jiffies;
1102 intel_dp->pps.last_backlight_off = jiffies;
1103 }
1104
1105 static void
1106 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1107 {
1108 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1109 u32 pp_on, pp_off, pp_ctl;
1110 struct pps_registers regs;
1111
	intel_pps_get_registers(intel_dp, &regs);
1113
1114 pp_ctl = ilk_get_pp_control(intel_dp);
1115
	/* Ensure PPS is unlocked */
1117 if (!HAS_DDI(dev_priv))
1118 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1119
1120 pp_on = intel_de_read(dev_priv, regs.pp_on);
1121 pp_off = intel_de_read(dev_priv, regs.pp_off);
1122
	/* Pull timing values out of registers */
1124 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1125 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1126 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1127 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1128
1129 if (i915_mmio_reg_valid(regs.pp_div)) {
1130 u32 pp_div;
1131
1132 pp_div = intel_de_read(dev_priv, regs.pp_div);
1133
1134 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1135 } else {
1136 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1137 }
1138 }
1139
1140 static void
1141 intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1142 const struct edp_power_seq *seq)
1143 {
1144 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1145
1146 drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1147 state_name,
1148 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1149 }
1150
1151 static void
1152 intel_pps_verify_state(struct intel_dp *intel_dp)
1153 {
1154 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1155 struct edp_power_seq hw;
1156 struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1157
1158 intel_pps_readout_hw_state(intel_dp, &hw);
1159
1160 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1161 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1162 drm_err(&i915->drm, "PPS state mismatch\n");
1163 intel_pps_dump_state(intel_dp, "sw", sw);
1164 intel_pps_dump_state(intel_dp, "hw", &hw);
1165 }
1166 }
1167
1168 static bool pps_delays_valid(struct edp_power_seq *delays)
1169 {
1170 return delays->t1_t3 || delays->t8 || delays->t9 ||
1171 delays->t10 || delays->t11_t12;
1172 }
1173
1174 static void pps_init_delays_bios(struct intel_dp *intel_dp,
1175 struct edp_power_seq *bios)
1176 {
1177 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1178
1179 lockdep_assert_held(&dev_priv->pps_mutex);
1180
1181 if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
1182 intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
1183
1184 *bios = intel_dp->pps.bios_pps_delays;
1185
1186 intel_pps_dump_state(intel_dp, "bios", bios);
1187 }
1188
1189 static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1190 struct edp_power_seq *vbt)
1191 {
1192 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1193 struct intel_connector *connector = intel_dp->attached_connector;
1194
1195 *vbt = connector->panel.vbt.edp.pps;
1196
1197 if (!pps_delays_valid(vbt))
1198 return;
1199
	/*
	 * On some systems the VBT T12 delay is too short and the panel
	 * occasionally fails to power back on. The quirk bumps the delay
	 * to at least 1300 ms to avoid this.
	 */
1205 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
1206 vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
1207 drm_dbg_kms(&dev_priv->drm,
1208 "Increasing T12 panel delay as per the quirk to %d\n",
1209 vbt->t11_t12);
1210 }
1211
	/*
	 * T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec, too.
	 */
1216 vbt->t11_t12 += 100 * 10;
1217
1218 intel_pps_dump_state(intel_dp, "vbt", vbt);
1219 }
1220
1221 static void pps_init_delays_spec(struct intel_dp *intel_dp,
1222 struct edp_power_seq *spec)
1223 {
1224 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1225
1226 lockdep_assert_held(&dev_priv->pps_mutex);
1227
	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units
	 * of our hw here, which are all in 100usec. */
1230 spec->t1_t3 = 210 * 10;
1231 spec->t8 = 50 * 10;
1232 spec->t9 = 50 * 10;
1233 spec->t10 = 500 * 10;
1234
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec, too. */
1238 spec->t11_t12 = (510 + 100) * 10;
1239
1240 intel_pps_dump_state(intel_dp, "spec", spec);
1241 }
1242
1243 static void pps_init_delays(struct intel_dp *intel_dp)
1244 {
1245 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1246 struct edp_power_seq cur, vbt, spec,
1247 *final = &intel_dp->pps.pps_delays;
1248
1249 lockdep_assert_held(&dev_priv->pps_mutex);
1250
	/* already initialized? */
1252 if (pps_delays_valid(final))
1253 return;
1254
1255 pps_init_delays_bios(intel_dp, &cur);
1256 pps_init_delays_vbt(intel_dp, &vbt);
1257 pps_init_delays_spec(intel_dp, &spec);
1258
	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
1261 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
1262 spec.field : \
1263 max(cur.field, vbt.field))
1264 assign_final(t1_t3);
1265 assign_final(t8);
1266 assign_final(t9);
1267 assign_final(t10);
1268 assign_final(t11_t12);
1269 #undef assign_final
1270
1271 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
1272 intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1273 intel_dp->pps.backlight_on_delay = get_delay(t8);
1274 intel_dp->pps.backlight_off_delay = get_delay(t9);
1275 intel_dp->pps.panel_power_down_delay = get_delay(t10);
1276 intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1277 #undef get_delay
1278
1279 drm_dbg_kms(&dev_priv->drm,
1280 "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1281 intel_dp->pps.panel_power_up_delay,
1282 intel_dp->pps.panel_power_down_delay,
1283 intel_dp->pps.panel_power_cycle_delay);
1284
1285 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
1286 intel_dp->pps.backlight_on_delay,
1287 intel_dp->pps.backlight_off_delay);
1288
	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
1296 final->t8 = 1;
1297 final->t9 = 1;
1298
	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
1303 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1304 }
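/*
 * Worked example of the unit handling above: the eDP spec fallback sets
 * t1_t3 = 210 * 10, i.e. 2100 in the hardware's 100 us units; get_delay()
 * divides by 10, giving a 210 ms panel_power_up_delay. t11_t12 stays in
 * 100 us units but is rounded up to a whole 100 ms step for the hardware.
 */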
1305
1306 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1307 {
1308 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1309 u32 pp_on, pp_off, port_sel = 0;
1310 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
1311 struct pps_registers regs;
1312 enum port port = dp_to_dig_port(intel_dp)->base.port;
1313 const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1314
1315 lockdep_assert_held(&dev_priv->pps_mutex);
1316
	intel_pps_get_registers(intel_dp, &regs);
1318
	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
1331 if (force_disable_vdd) {
1332 u32 pp = ilk_get_pp_control(intel_dp);
1333
1334 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
1335 "Panel power already on\n");
1336
1337 if (pp & EDP_FORCE_VDD)
1338 drm_dbg_kms(&dev_priv->drm,
1339 "VDD already on, disabling first\n");
1340
1341 pp &= ~EDP_FORCE_VDD;
1342
1343 intel_de_write(dev_priv, regs.pp_ctrl, pp);
1344 }
1345
1346 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1347 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1348 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1349 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1350
	/*
	 * Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more.
	 */
1353 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1354 port_sel = PANEL_PORT_SELECT_VLV(port);
1355 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1356 switch (port) {
1357 case PORT_A:
1358 port_sel = PANEL_PORT_SELECT_DPA;
1359 break;
1360 case PORT_C:
1361 port_sel = PANEL_PORT_SELECT_DPC;
1362 break;
1363 case PORT_D:
1364 port_sel = PANEL_PORT_SELECT_DPD;
1365 break;
1366 default:
1367 MISSING_CASE(port);
1368 break;
1369 }
1370 }
1371
1372 pp_on |= port_sel;
1373
1374 intel_de_write(dev_priv, regs.pp_on, pp_on);
1375 intel_de_write(dev_priv, regs.pp_off, pp_off);
1376
	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
1380 if (i915_mmio_reg_valid(regs.pp_div)) {
1381 intel_de_write(dev_priv, regs.pp_div,
1382 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1383 } else {
1384 u32 pp_ctl;
1385
1386 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
1387 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
1388 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
1389 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1390 }
1391
1392 drm_dbg_kms(&dev_priv->drm,
1393 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1394 intel_de_read(dev_priv, regs.pp_on),
1395 intel_de_read(dev_priv, regs.pp_off),
1396 i915_mmio_reg_valid(regs.pp_div) ?
1397 intel_de_read(dev_priv, regs.pp_div) :
1398 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1399 }
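/*
 * Example of the divisor math above, assuming a 24 MHz raw clock reported
 * in kHz: div = 24000 / 1000 = 24, so the reference divider field becomes
 * (100 * 24) / 2 - 1 = 1199, and a 610 ms t11_t12 (6100 in 100 us units)
 * becomes DIV_ROUND_UP(6100, 1000) = 7 in the 100 ms power cycle delay field.
 */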
1400
1401 void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1402 {
1403 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1404 intel_wakeref_t wakeref;
1405
1406 if (!intel_dp_is_edp(intel_dp))
1407 return;
1408
1409 with_intel_pps_lock(intel_dp, wakeref) {
		/*
		 * Reinit the power sequencer also on the resume path, in case
		 * BIOS did something nasty with it.
		 */
1414 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1415 vlv_initial_power_sequencer_setup(intel_dp);
1416
1417 pps_init_delays(intel_dp);
1418 pps_init_registers(intel_dp, false);
1419 pps_vdd_init(intel_dp);
1420
1421 if (edp_have_panel_vdd(intel_dp))
1422 edp_panel_vdd_schedule_off(intel_dp);
1423 }
1424 }
1425
1426 void intel_pps_init(struct intel_dp *intel_dp)
1427 {
1428 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1429 intel_wakeref_t wakeref;
1430
1431 intel_dp->pps.initializing = true;
1432 INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1433
1434 pps_init_timestamps(intel_dp);
1435
1436 with_intel_pps_lock(intel_dp, wakeref) {
1437 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1438 vlv_initial_power_sequencer_setup(intel_dp);
1439
1440 pps_init_delays(intel_dp);
1441 pps_init_registers(intel_dp, false);
1442 pps_vdd_init(intel_dp);
1443 }
1444 }
1445
1446 void intel_pps_init_late(struct intel_dp *intel_dp)
1447 {
1448 intel_wakeref_t wakeref;
1449
1450 with_intel_pps_lock(intel_dp, wakeref) {
		/* Reinit delays after per-panel info has been parsed from VBT */
1452 memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
1453 pps_init_delays(intel_dp);
1454 pps_init_registers(intel_dp, false);
1455
1456 intel_dp->pps.initializing = false;
1457
1458 if (edp_have_panel_vdd(intel_dp))
1459 edp_panel_vdd_schedule_off(intel_dp);
1460 }
1461 }
1462
1463 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1464 {
1465 int pps_num;
1466 int pps_idx;
1467
1468 if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1469 return;
1470
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
1474 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1475 pps_num = 2;
1476 else
1477 pps_num = 1;
1478
1479 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1480 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1481
1482 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1483 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1484 }
1485 }
1486
1487 void intel_pps_setup(struct drm_i915_private *i915)
1488 {
1489 if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1490 i915->pps_mmio_base = PCH_PPS_BASE;
1491 else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1492 i915->pps_mmio_base = VLV_PPS_BASE;
1493 else
1494 i915->pps_mmio_base = PPS_BASE;
1495 }
1496
1497 void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1498 {
1499 i915_reg_t pp_reg;
1500 u32 val;
1501 enum pipe panel_pipe = INVALID_PIPE;
1502 bool locked = true;
1503
1504 if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
1505 return;
1506
1507 if (HAS_PCH_SPLIT(dev_priv)) {
1508 u32 port_sel;
1509
1510 pp_reg = PP_CONTROL(0);
1511 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1512
1513 switch (port_sel) {
1514 case PANEL_PORT_SELECT_LVDS:
1515 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1516 break;
1517 case PANEL_PORT_SELECT_DPA:
1518 g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1519 break;
1520 case PANEL_PORT_SELECT_DPC:
1521 g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1522 break;
1523 case PANEL_PORT_SELECT_DPD:
1524 g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1525 break;
1526 default:
1527 MISSING_CASE(port_sel);
1528 break;
1529 }
1530 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
1532 pp_reg = PP_CONTROL(pipe);
1533 panel_pipe = pipe;
1534 } else {
1535 u32 port_sel;
1536
1537 pp_reg = PP_CONTROL(0);
1538 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1539
1540 drm_WARN_ON(&dev_priv->drm,
1541 port_sel != PANEL_PORT_SELECT_LVDS);
1542 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1543 }
1544
1545 val = intel_de_read(dev_priv, pp_reg);
1546 if (!(val & PANEL_POWER_ON) ||
1547 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1548 locked = false;
1549
1550 I915_STATE_WARN(panel_pipe == pipe && locked,
1551 "panel assertion failure, pipe %c regs locked\n",
1552 pipe_name(pipe));
1553 }