0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <linux/kernel.h>
0025
0026 #include "i915_drv.h"
0027 #include "i915_irq.h"
0028 #include "intel_display_types.h"
0029 #include "intel_hotplug.h"
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087 enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
0088 enum port port)
0089 {
0090 return HPD_PORT_A + port - PORT_A;
0091 }
0092
0093 #define HPD_STORM_DETECT_PERIOD 1000
0094 #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
0095 #define HPD_RETRY_DELAY 1000
0096
0097 static enum hpd_pin
0098 intel_connector_hpd_pin(struct intel_connector *connector)
0099 {
0100 struct intel_encoder *encoder = intel_attached_encoder(connector);
0101
0102
0103
0104
0105
0106
0107
0108 return encoder ? encoder->hpd_pin : HPD_NONE;
0109 }
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140 static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
0141 enum hpd_pin pin, bool long_hpd)
0142 {
0143 struct i915_hotplug *hpd = &dev_priv->hotplug;
0144 unsigned long start = hpd->stats[pin].last_jiffies;
0145 unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
0146 const int increment = long_hpd ? 10 : 1;
0147 const int threshold = hpd->hpd_storm_threshold;
0148 bool storm = false;
0149
0150 if (!threshold ||
0151 (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
0152 return false;
0153
0154 if (!time_in_range(jiffies, start, end)) {
0155 hpd->stats[pin].last_jiffies = jiffies;
0156 hpd->stats[pin].count = 0;
0157 }
0158
0159 hpd->stats[pin].count += increment;
0160 if (hpd->stats[pin].count > threshold) {
0161 hpd->stats[pin].state = HPD_MARK_DISABLED;
0162 drm_dbg_kms(&dev_priv->drm,
0163 "HPD interrupt storm detected on PIN %d\n", pin);
0164 storm = true;
0165 } else {
0166 drm_dbg_kms(&dev_priv->drm,
0167 "Received HPD interrupt on PIN %d - cnt: %d\n",
0168 pin,
0169 hpd->stats[pin].count);
0170 }
0171
0172 return storm;
0173 }
0174
/*
 * Move every connector whose HPD pin was flagged HPD_MARK_DISABLED by storm
 * detection over from interrupt-driven hotplug to periodic polling, and
 * schedule the delayed work that will re-enable HPD later.
 *
 * Caller must hold dev_priv->irq_lock (asserted below).
 */
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		/* Only connectors currently relying purely on HPD. */
		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		/* Latch the pin as fully disabled and fall back to polling. */
		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue the delayed HPD re-enabling work. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}
0216
/*
 * Delayed work (scheduled HPD_STORM_REENABLE_DELAY after a storm forced
 * polling) that restores interrupt-driven hotplug: connectors get their
 * original ->polled mode back, storm-disabled pins are re-enabled, and the
 * HPD interrupt registers are reprogrammed.
 */
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	/* Hold a runtime PM reference while touching interrupt registers. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		/* Only connectors whose pin was disabled by storm handling. */
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		/* Restore the connector's originally configured poll mode. */
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	}

	/* Reprogram HPD interrupts now that pins are enabled again. */
	intel_hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
0258
0259 enum intel_hotplug_state
0260 intel_encoder_hotplug(struct intel_encoder *encoder,
0261 struct intel_connector *connector)
0262 {
0263 struct drm_device *dev = connector->base.dev;
0264 enum drm_connector_status old_status;
0265 u64 old_epoch_counter;
0266 bool ret = false;
0267
0268 drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
0269 old_status = connector->base.status;
0270 old_epoch_counter = connector->base.epoch_counter;
0271
0272 connector->base.status =
0273 drm_helper_probe_detect(&connector->base, NULL, false);
0274
0275 if (old_epoch_counter != connector->base.epoch_counter)
0276 ret = true;
0277
0278 if (ret) {
0279 drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
0280 connector->base.base.id,
0281 connector->base.name,
0282 drm_get_connector_status_name(old_status),
0283 drm_get_connector_status_name(connector->base.status),
0284 old_epoch_counter,
0285 connector->base.epoch_counter);
0286 return INTEL_HOTPLUG_CHANGED;
0287 }
0288 return INTEL_HOTPLUG_UNCHANGED;
0289 }
0290
0291 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
0292 {
0293 return intel_encoder_is_dig_port(encoder) &&
0294 enc_to_dig_port(encoder)->hpd_pulse != NULL;
0295 }
0296
/*
 * Worker that dispatches pending long/short HPD pulses to the hpd_pulse()
 * handlers of the digital ports. Pulses that a handler does not consume
 * (IRQ_NONE) are forwarded to the generic hotplug work instead.
 */
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	/* Snapshot and clear the pending port masks atomically. */
	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->hotplug.long_port_mask;
	dev_priv->hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->hotplug.short_port_mask;
	dev_priv->hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* Not handled - fall back to the generic hotplug path. */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	/* Hand unconsumed pulses to the generic hotplug work. */
	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
	}
}
0343
0344
0345
0346
0347
0348
0349
0350
0351 void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
0352 {
0353 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
0354
0355 spin_lock_irq(&i915->irq_lock);
0356 i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
0357 spin_unlock_irq(&i915->irq_lock);
0358
0359 queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
0360 }
0361
0362
0363
0364
/*
 * Generic hotplug work: runs the per-encoder ->hotplug() handlers for all
 * pins with pending events or retries, switches stormy pins to polling,
 * sends out the DRM hotplug uevent when something changed, and re-arms
 * itself for pins whose handler asked for a retry.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.hotplug_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;

	mutex_lock(&dev->mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	/* Snapshot and clear the pending event/retry bits atomically. */
	hpd_event_bits = dev_priv->hotplug.event_bits;
	dev_priv->hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->hotplug.retry_bits;
	dev_priv->hotplug.retry_bits = 0;

	/* Move storm-flagged pins over to polling while the lock is held. */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			/* A fresh event resets the retry count; otherwise
			 * this pass is a retry. */
			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);

	/* Pins that changed don't need a retry pass anymore. */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
/*
 * intel_hpd_irq_handler - main HPD irq handler
 * @dev_priv: i915 device
 * @pin_mask: mask of HPD pins that triggered
 * @long_mask: subset of @pin_mask that registered a long pulse
 *
 * Decodes which pins fired, records long/short pulses for encoders that
 * have an hpd_pulse() handler (processed by the dig-port worker), queues
 * generic hotplug processing for the rest, and runs storm detection on
 * every enabled pin.
 *
 * Uses plain spin_lock() rather than spin_lock_irq(); presumably this is
 * always called from the interrupt handler with interrupts already
 * disabled — NOTE(review): confirm against the callers.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * First pass: record long/short pulses per port for encoders that
	 * handle their own HPD pulses, so the dig-port worker can dispatch
	 * them later.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Second pass: per-pin bookkeeping and storm detection. */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * Interrupt on a disabled pin is unexpected unless
			 * the platform is GMCH — presumably such hardware
			 * cannot mask individual HPD pins; NOTE(review):
			 * confirm why GMCH is exempt from the warning.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Pins routed to an hpd_pulse() handler above are not queued
		 * as generic events here; everything else gets an event bit
		 * and is treated as a long pulse for storm accounting.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		/* On a storm, drop the event and let the worker switch the
		 * connector to polling. */
		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/* Reprogram HPD interrupts while still holding the lock. */
	if (storm_detected)
		intel_hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Queue the workers after dropping the lock; both re-take it
	 * themselves as needed.
	 */
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586
0587
0588
0589 void intel_hpd_init(struct drm_i915_private *dev_priv)
0590 {
0591 int i;
0592
0593 if (!HAS_DISPLAY(dev_priv))
0594 return;
0595
0596 for_each_hpd_pin(i) {
0597 dev_priv->hotplug.stats[i].count = 0;
0598 dev_priv->hotplug.stats[i].state = HPD_ENABLED;
0599 }
0600
0601
0602
0603
0604
0605 spin_lock_irq(&dev_priv->irq_lock);
0606 intel_hpd_irq_setup(dev_priv);
0607 spin_unlock_irq(&dev_priv->irq_lock);
0608 }
0609
/*
 * Worker that applies the current hotplug.poll_enabled setting to all
 * connectors: when polling is enabled, HPD-only connectors are switched to
 * connect/disconnect polling; when disabled, each connector's configured
 * poll mode is restored and a hotplug event is generated.
 */
static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.poll_init_work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev->mode_config.mutex);

	/* Sample the flag once; writers use WRITE_ONCE and reschedule us. */
	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		/* Start from the connector's configured poll mode. */
		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);

	/*
	 * When switching polling off, fire a hotplug event — presumably to
	 * pick up any state changes that occurred while we were polling
	 * instead of taking HPD interrupts; TODO confirm.
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(dev);
}
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669 void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
0670 {
0671 if (!HAS_DISPLAY(dev_priv) ||
0672 !INTEL_DISPLAY_ENABLED(dev_priv))
0673 return;
0674
0675 WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
0676
0677
0678
0679
0680
0681
0682
0683 schedule_work(&dev_priv->hotplug.poll_init_work);
0684 }
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701
0702
0703
0704
0705 void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
0706 {
0707 if (!HAS_DISPLAY(dev_priv))
0708 return;
0709
0710 WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
0711 schedule_work(&dev_priv->hotplug.poll_init_work);
0712 }
0713
0714 void intel_hpd_init_work(struct drm_i915_private *dev_priv)
0715 {
0716 INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
0717 i915_hotplug_work_func);
0718 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
0719 INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
0720 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
0721 intel_hpd_irq_storm_reenable_work);
0722 }
0723
/*
 * Cancel all outstanding hotplug work. Pending event/port bits are cleared
 * under the lock first so a worker that is already running sees nothing
 * left to do, then each work item is cancelled synchronously.
 */
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;
	dev_priv->hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
0743
0744 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
0745 {
0746 bool ret = false;
0747
0748 if (pin == HPD_NONE)
0749 return false;
0750
0751 spin_lock_irq(&dev_priv->irq_lock);
0752 if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
0753 dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
0754 ret = true;
0755 }
0756 spin_unlock_irq(&dev_priv->irq_lock);
0757
0758 return ret;
0759 }
0760
0761 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
0762 {
0763 if (pin == HPD_NONE)
0764 return;
0765
0766 spin_lock_irq(&dev_priv->irq_lock);
0767 dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
0768 spin_unlock_irq(&dev_priv->irq_lock);
0769 }