0001
0002
0003
0004
0005
0006 #include "i915_drv.h"
0007 #include "i915_reg.h"
0008 #include "intel_display.h"
0009 #include "intel_display_power_map.h"
0010 #include "intel_display_types.h"
0011 #include "intel_dp_mst.h"
0012 #include "intel_tc.h"
0013 #include "intel_tc_phy_regs.h"
0014
0015 static const char *tc_port_mode_name(enum tc_port_mode mode)
0016 {
0017 static const char * const names[] = {
0018 [TC_PORT_DISCONNECTED] = "disconnected",
0019 [TC_PORT_TBT_ALT] = "tbt-alt",
0020 [TC_PORT_DP_ALT] = "dp-alt",
0021 [TC_PORT_LEGACY] = "legacy",
0022 };
0023
0024 if (WARN_ON(mode >= ARRAY_SIZE(names)))
0025 mode = TC_PORT_DISCONNECTED;
0026
0027 return names[mode];
0028 }
0029
0030 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
0031 enum tc_port_mode mode)
0032 {
0033 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0034 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
0035
0036 return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
0037 }
0038
/* True if the port is a TC port currently in TBT-alt (Thunderbolt) mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
0043
/* True if the port is a TC port currently in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
0048
/* True if the port is a TC port currently in legacy (fixed/static) mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
0053
/*
 * True if blocking the PHY's TCCOLD state requires holding an AUX power
 * domain reference rather than the dedicated TC_COLD_OFF domain: display
 * version 11 legacy ports, and all TC ports on ADL-P.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
		IS_ALDERLAKE_P(i915);
}
0061
/*
 * Return the power domain that must be held to keep the PHY out of TCCOLD
 * while in @mode: TC_COLD_OFF for TBT-alt mode or when TCCOLD blocking
 * doesn't depend on AUX power, otherwise the port's legacy AUX domain.
 */
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
		return POWER_DOMAIN_TC_COLD_OFF;

	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
0072
/*
 * Block TCCOLD for the power domain matching @mode. Returns the acquired
 * wakeref and stores the chosen domain in *@domain so the caller can later
 * release the reference with tc_cold_unblock().
 */
static intel_wakeref_t
tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
		      enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	*domain = tc_cold_get_power_domain(dig_port, mode);

	return intel_display_power_get(i915, *domain);
}
0083
/* Block TCCOLD for the power domain matching the port's current mode. */
static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
{
	return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
}
0089
/*
 * Release a TCCOLD-blocking power reference acquired via tc_cold_block*().
 */
static void
tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
		intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	/*
	 * A zero wakeref means no power reference was acquired, so there is
	 * nothing to release here.
	 */
	if (wakeref == 0)
		return;

	intel_display_power_put(i915, domain, wakeref);
}
0106
0107 static void
0108 assert_tc_cold_blocked(struct intel_digital_port *dig_port)
0109 {
0110 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0111 bool enabled;
0112
0113 enabled = intel_display_power_is_enabled(i915,
0114 tc_cold_get_power_domain(dig_port,
0115 dig_port->tc_mode));
0116 drm_WARN_ON(&i915->drm, !enabled);
0117 }
0118
/*
 * Return the mask of PHY lanes assigned to the display controller for this
 * TC port, extracted from the FIA's DFLEXDPSP register.
 */
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	/* An all-ones read indicates the PHY is in TCCOLD / inaccessible. */
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
0134
0135 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
0136 {
0137 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0138 struct intel_uncore *uncore = &i915->uncore;
0139 u32 pin_mask;
0140
0141 pin_mask = intel_uncore_read(uncore,
0142 PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
0143
0144 drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
0145 assert_tc_cold_blocked(dig_port);
0146
0147 return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
0148 DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
0149 }
0150
/*
 * Return the maximum number of lanes available for the port. Only DP-alt
 * mode is lane-limited by the FIA assignment; all other modes get 4 lanes.
 */
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(dig_port);

	/* Read the lane assignment with the display core powered up. */
	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	/* Translate the assigned-lane bitmask into a usable lane count. */
	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
0182
/*
 * Program the FIA's DFLEXDPMLE1 register with the set of lanes the display
 * engine will use, taking lane reversal into account.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	/* Lane reversal is only expected on legacy-mode ports. */
	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(dig_port);

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	/* Select the lane group; reversal mirrors it to the other end. */
	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}
0221
/*
 * Sanity-check the port's legacy flag against the observed live status and
 * flip the flag if they disagree.
 */
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 valid_hpd_mask;

	/* A legacy port should only report legacy HPD; non-legacy only DP/TBT. */
	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* The live status mismatches the flag: trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
0244
/*
 * Return the mask of TC_PORT_* modes with a live (connected) status on
 * ICL-class platforms, based on the FIA's DFLEXDPSP register and the PCH
 * hotplug ISR bit for legacy detection.
 */
static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	/* All-ones read: PHY is in TCCOLD, so nothing can be connected. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}
0277
/*
 * Return the mask of TC_PORT_* modes with a live (connected) status on
 * ADL platforms, based on the TCSS_DDI_STATUS register and the PCH
 * hotplug ISR bit for legacy detection.
 */
static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
	struct intel_uncore *uncore = &i915->uncore;
	u32 val, mask = 0;

	/*
	 * NOTE(review): unlike the ICL path, this read is not checked for an
	 * all-ones TCCOLD value — presumably the TCSS register stays
	 * accessible here; confirm against the platform programming docs.
	 */
	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
		mask |= BIT(TC_PORT_DP_ALT);
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
		mask |= BIT(TC_PORT_TBT_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}
0306
0307 static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
0308 {
0309 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0310
0311 if (IS_ALDERLAKE_P(i915))
0312 return adl_tc_port_live_status_mask(dig_port);
0313
0314 return icl_tc_port_live_status_mask(dig_port);
0315 }
0316
0317
0318
0319
0320
0321
0322
0323
0324
/*
 * Check whether the PHY mode status is complete on ICL-class platforms,
 * i.e. the FIA reports the PHY ready for the display controller.
 */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
	/* All-ones read: the PHY is in TCCOLD and can't report its status. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
0342
0343
0344
0345
0346
0347
0348
0349
/*
 * Check whether the PHY status is complete on ADL platforms, using the
 * TCSS_DDI_STATUS ready flag.
 */
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	/* All-ones read: the PHY is in TCCOLD and can't report its status. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
0367
0368 static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
0369 {
0370 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0371
0372 if (IS_ALDERLAKE_P(i915))
0373 return adl_tc_phy_status_complete(dig_port);
0374
0375 return icl_tc_phy_status_complete(dig_port);
0376 }
0377
/*
 * Take or release display-controller ownership of the PHY on ICL-class
 * platforms via the FIA's DFLEXDPCSSS "not safe" flag. Returns false if
 * the PHY is inaccessible (TCCOLD).
 */
static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	/* All-ones read: PHY in TCCOLD, the ownership flag can't be changed. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    dig_port->tc_port_name, take ? "take" : "release");

		return false;
	}

	/* Clear-then-set so both take and release paths share the RMW. */
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	return true;
}
0404
0405 static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
0406 bool take)
0407 {
0408 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0409 struct intel_uncore *uncore = &i915->uncore;
0410 enum port port = dig_port->base.port;
0411 u32 val;
0412
0413 val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
0414 if (take)
0415 val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
0416 else
0417 val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
0418 intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
0419
0420 return true;
0421 }
0422
0423 static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
0424 {
0425 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0426
0427 if (IS_ALDERLAKE_P(i915))
0428 return adl_tc_phy_take_ownership(dig_port, take);
0429
0430 return icl_tc_phy_take_ownership(dig_port, take);
0431 }
0432
/*
 * Check whether the display controller owns the PHY on ICL-class platforms
 * (DFLEXDPCSSS "not safe" flag set).
 */
static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	/*
	 * All-ones read: PHY in TCCOLD; report it as owned ("safe mode" is
	 * assumed, matching the debug message below).
	 */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume safe mode\n",
			    dig_port->tc_port_name);
		return true;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
}
0450
0451 static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
0452 {
0453 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0454 struct intel_uncore *uncore = &i915->uncore;
0455 enum port port = dig_port->base.port;
0456 u32 val;
0457
0458 val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
0459 return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
0460 }
0461
0462 static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
0463 {
0464 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0465
0466 if (IS_ALDERLAKE_P(i915))
0467 return adl_tc_phy_is_owned(dig_port);
0468
0469 return icl_tc_phy_is_owned(dig_port);
0470 }
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481
0482
/*
 * Connect the PHY to the display controller, implementing the first part of
 * the TypeC connect flow. TypeC PHYs can be shared between controllers
 * (display, USB, ...), so ownership must be taken through the FIA before
 * use; on any failure the port falls back to TBT-alt mode.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask;
	int max_lanes;

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	/* Ownership is only needed for DP-alt/legacy sinks. */
	live_status_mask = tc_port_live_status_mask(dig_port);
	if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) &&
	    !dig_port->tc_legacy_port) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
			    dig_port->tc_port_name, live_status_mask);
		goto out_set_tbt_alt_mode;
	}

	/* A failed ownership take on a legacy port is unexpected: warn. */
	if (!tc_phy_take_ownership(dig_port, true) &&
	    !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		/* Legacy ports are expected to always have all 4 lanes. */
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Re-check the live state after taking ownership, in case the port
	 * got disconnected in the meantime.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    dig_port->tc_port_name);
		goto out_release_phy;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    dig_port->tc_port_name,
			    max_lanes, required_lanes);
		goto out_release_phy;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_release_phy:
	tc_phy_take_ownership(dig_port, false);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}
0543
0544
0545
0546
0547
/*
 * Disconnect the PHY from the display controller (the TypeC disconnect
 * flow): release ownership if it was held, then mark the port
 * disconnected.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Only these modes hold PHY ownership. */
		tc_phy_take_ownership(dig_port, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		dig_port->tc_mode = TC_PORT_DISCONNECTED;
		fallthrough;
	case TC_PORT_DISCONNECTED:
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}
0564
/*
 * Check whether the PHY is still connected in the port's current mode;
 * used for sanity-checking ports with active links.
 */
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
			    dig_port->tc_port_name);
		/* Without status, only TBT-alt mode counts as connected. */
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	/* On ADL-P TBT-alt mode doesn't depend on PHY ownership. */
	if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
		return true;

	if (!tc_phy_is_owned(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
			    dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}
0589
/*
 * Derive the port's current mode from hardware state (ownership, status,
 * live status); used during state takeover/sanitization.
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	enum tc_port_mode mode;

	/* Without PHY ownership (or with incomplete status) it's TBT-alt. */
	if (!tc_phy_is_owned(dig_port) ||
	    drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		/* Highest set live-status bit selects the mode. */
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		/* TBT-alt live status contradicts held PHY ownership: warn. */
		if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}
0611
0612 static enum tc_port_mode
0613 intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
0614 {
0615 u32 live_status_mask = tc_port_live_status_mask(dig_port);
0616
0617 if (live_status_mask)
0618 return fls(live_status_mask) - 1;
0619
0620 return TC_PORT_TBT_ALT;
0621 }
0622
/*
 * Disconnect the PHY and, unless @force_disconnect, reconnect it in the
 * mode matching the current live status.
 */
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	/* If AUX power isn't needed to block TCCOLD it must be off by now. */
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	icl_tc_phy_disconnect(dig_port);
	if (!force_disconnect)
		icl_tc_phy_connect(dig_port, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(dig_port->tc_mode));
}
0648
/* True if the current mode differs from the mode the live status implies. */
static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}
0653
/*
 * Update the port's mode to match the live status (or force it to
 * disconnected), re-acquiring the long-term TCCOLD-blocking power
 * reference for the new mode. Caller must hold tc_lock.
 */
static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
				      int required_lanes, bool force_disconnect)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wref;
	bool needs_reset = force_disconnect;

	if (!needs_reset) {
		/* Check the hardware state with TCCOLD blocked. */
		wref = tc_cold_block(dig_port, &domain);
		needs_reset = intel_tc_port_needs_reset(dig_port);
		tc_cold_unblock(dig_port, domain, wref);
	}

	if (!needs_reset)
		return;

	/* Block TCCOLD across the mode transition itself. */
	wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);

	intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);

	/* Swap the held long-term reference to the one matching the new mode. */
	tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
			fetch_and_zero(&dig_port->tc_lock_wakeref));
	if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
							  &dig_port->tc_lock_power_domain);

	tc_cold_unblock(dig_port, domain, wref);
}
0685
/*
 * Initialize the link refcount during state takeover; the count is expected
 * to be zero at this point.
 */
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}
0695
/*
 * Sanitize the port's TC state during driver load/resume: take over the
 * mode left by hardware/firmware if there are active links, otherwise
 * disconnect the PHY.
 */
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_encoder *encoder = &dig_port->base;
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);

	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	/* Software state must still be pristine at this point. */
	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);

	tc_cold_wref = tc_cold_block(dig_port, &domain);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY disconnected with %d active link(s)\n",
				    dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		/* Keep TCCOLD blocked for as long as the links stay active. */
		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
							  &dig_port->tc_lock_power_domain);
	} else {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership
		 * is not held (regardless of the sink's connected live
		 * state), so we'll just switch to disconnected mode from it
		 * here without a note.
		 */
		if (dig_port->tc_mode != TC_PORT_TBT_ALT)
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    dig_port->tc_port_name,
				    tc_port_mode_name(dig_port->tc_mode));
		icl_tc_phy_disconnect(dig_port);
	}

	tc_cold_unblock(dig_port, domain, tc_cold_wref);

	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}
0749
0750
0751
0752
0753
0754
0755
0756
0757
0758
0759
/*
 * Return the connected state of the port: a sink counts as connected only
 * if its live status matches the mode the port is currently in.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected;

	intel_tc_port_lock(dig_port);

	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);

	intel_tc_port_unlock(dig_port);

	return is_connected;
}
0774
/*
 * Lock the port's TC state, cancelling any pending delayed PHY disconnect
 * and updating the port mode if no link references are held.
 */
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	mutex_lock(&dig_port->tc_lock);

	cancel_delayed_work(&dig_port->tc_disconnect_phy_work);

	/* The mode is fixed while link references are held. */
	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, required_lanes,
					  false);

	/* While locked the port must be in a valid, owned state. */
	drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(dig_port));
}
0792
/* Lock the port's TC state, requiring only a single lane. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}
0797
0798
0799
0800
0801
0802
0803
0804
0805
/*
 * Delayed-work handler disconnecting an idle PHY, handing its ownership
 * back to the TypeC subsystem. Scheduled from intel_tc_port_unlock().
 */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_digital_port *dig_port =
		container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);

	mutex_lock(&dig_port->tc_lock);

	/* Skip the disconnect if a link reference was taken meanwhile. */
	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, 1, true);

	mutex_unlock(&dig_port->tc_lock);
}
0818
0819
0820
0821
0822
0823
0824
/* Flush the delayed work disconnecting an idle PHY. */
void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&dig_port->tc_disconnect_phy_work);
}
0829
/*
 * Unlock the port's TC state. If the port is connected but no link
 * references remain, schedule a delayed PHY disconnect.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&dig_port->tc_lock);
}
0838
/* True if the port's TC state is locked or a link reference is held. */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}
0844
/*
 * Take a link reference on the port, pinning its current mode until the
 * matching intel_tc_port_put_link().
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}
0852
/* Drop a link reference taken with intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	intel_tc_port_lock(dig_port);
	--dig_port->tc_link_refcount;
	intel_tc_port_unlock(dig_port);

	/*
	 * Flush the delayed disconnect work scheduled by the unlock above,
	 * so the PHY gets disconnected synchronously here once the last
	 * reference is dropped.
	 */
	intel_tc_port_flush_work(dig_port);
}
0867
/*
 * Detect whether the platform uses modular FIA instances, by checking the
 * MODULAR_FIA flag in FIA1's DFLEXDPSP register (with TCCOLD blocked).
 */
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;
	u32 val;

	if (!INTEL_INFO(i915)->display.has_modular_fia)
		return false;

	mutex_lock(&dig_port->tc_lock);
	wakeref = tc_cold_block(dig_port, &domain);
	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
	tc_cold_unblock(dig_port, domain, wakeref);
	mutex_unlock(&dig_port->tc_lock);

	/* An all-ones read would mean the register was inaccessible. */
	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	return val & MODULAR_FIA_MASK;
}
0888
/*
 * Determine which FIA instance and which index within it serve this TC
 * port.
 */
static void
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each modular FIA instance houses 2 TC ports; SoCs with more than
	 * two TC ports have multiple instances. Without modular FIA, the
	 * single FIA1 instance serves all TC ports.
	 */
	if (tc_has_modular_fia(i915, dig_port)) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}
0907
/*
 * Initialize the TC-specific state of a digital port: name, lock, delayed
 * disconnect work, starting mode and FIA parameters.
 */
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_mode = TC_PORT_DISCONNECTED;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}