/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the CRTC as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the
 * number of interrupts exceeds a certain threshold, the interrupt is disabled
 * for a while before being re-enabled. The intention is to mitigate issues
 * arising from broken hardware triggering massive amounts of interrupts and
 * grinding the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a Display Port sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func() re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by this routine; any other display,
 * such as HDMI or DVI enabled on the same port, will have proper logic since
 * it will use i915_hotplug_work_func(), where this logic is handled.
 */
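
/*
 * A rough sketch of the flow described above (a summary only; the
 * authoritative details are in the code below):
 *
 *   platform irq handler (i915_irq.c)
 *     -> intel_hpd_irq_handler()          [storm detection/mitigation]
 *         -> i915_digport_work_func()     [DP short pulses, MST long pulses]
 *         -> i915_hotplug_work_func()     [connector detect hooks]
 *             -> drm_kms_helper_hotplug_event()  [uevent to userspace]
 */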

/**
 * intel_hpd_pin_default - return default pin associated with a certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid for, and used by, digital port encoders.
 *
 * Return: the HPD pin associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                   enum port port)
{
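    /* e.g. PORT_C maps to HPD_PORT_A + (PORT_C - PORT_A) == HPD_PORT_C */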
    return HPD_PORT_A + port - PORT_A;
}

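/*
 * All delays below are in milliseconds: the detect period is the window
 * over which per-pin interrupts are counted, the reenable delay is how long
 * a stormed pin stays disabled before the reenable work runs, and the retry
 * delay paces connectors that requested a detect retry.
 */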
#define HPD_STORM_DETECT_PERIOD     1000
#define HPD_STORM_REENABLE_DELAY    (2 * 60 * 1000)
#define HPD_RETRY_DELAY         1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
    struct intel_encoder *encoder = intel_attached_encoder(connector);

    /*
     * MST connectors get their encoder attached dynamically,
     * so we need to make sure we have an encoder here. But since
     * MST encoders have their hpd_pin set to HPD_NONE we don't
     * have to special case them beyond that.
     */
    return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->hotplug.hpd_storm_threshold, which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return: true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
                       enum hpd_pin pin, bool long_hpd)
{
    struct i915_hotplug *hpd = &dev_priv->hotplug;
    unsigned long start = hpd->stats[pin].last_jiffies;
    unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
    const int increment = long_hpd ? 10 : 1;
    const int threshold = hpd->hpd_storm_threshold;
    bool storm = false;

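    /*
     * Illustration, assuming a threshold of 5: a single long IRQ (+10)
     * trips the detector immediately, while it takes six short IRQs
     * (+1 each) within HPD_STORM_DETECT_PERIOD to do the same.
     */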
    if (!threshold ||
        (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
        return false;

    /* Outside the current counting window? Start a fresh one. */
    if (!time_in_range(jiffies, start, end)) {
        hpd->stats[pin].last_jiffies = jiffies;
        hpd->stats[pin].count = 0;
    }

    hpd->stats[pin].count += increment;
    if (hpd->stats[pin].count > threshold) {
        hpd->stats[pin].state = HPD_MARK_DISABLED;
        drm_dbg_kms(&dev_priv->drm,
                "HPD interrupt storm detected on PIN %d\n", pin);
        storm = true;
    } else {
        drm_dbg_kms(&dev_priv->drm,
                "Received HPD interrupt on PIN %d - cnt: %d\n",
                pin,
                hpd->stats[pin].count);
    }

    return storm;
}

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
    struct drm_device *dev = &dev_priv->drm;
    struct drm_connector_list_iter conn_iter;
    struct intel_connector *connector;
    bool hpd_disabled = false;

    lockdep_assert_held(&dev_priv->irq_lock);

    drm_connector_list_iter_begin(dev, &conn_iter);
    for_each_intel_connector_iter(connector, &conn_iter) {
        enum hpd_pin pin;

        if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
            continue;

        pin = intel_connector_hpd_pin(connector);
        if (pin == HPD_NONE ||
            dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
            continue;

        drm_info(&dev_priv->drm,
             "HPD interrupt storm detected on connector %s: "
             "switching from hotplug detection to polling\n",
             connector->base.name);

        dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
        connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
            DRM_CONNECTOR_POLL_DISCONNECT;
        hpd_disabled = true;
    }
    drm_connector_list_iter_end(&conn_iter);

    /* Enable polling and queue hotplug re-enabling. */
    if (hpd_disabled) {
        drm_kms_helper_poll_enable(dev);
        mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
    }
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
    struct drm_i915_private *dev_priv =
        container_of(work, typeof(*dev_priv),
                 hotplug.reenable_work.work);
    struct drm_device *dev = &dev_priv->drm;
    struct drm_connector_list_iter conn_iter;
    struct intel_connector *connector;
    intel_wakeref_t wakeref;
    enum hpd_pin pin;

    wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

    spin_lock_irq(&dev_priv->irq_lock);

    drm_connector_list_iter_begin(dev, &conn_iter);
    for_each_intel_connector_iter(connector, &conn_iter) {
        pin = intel_connector_hpd_pin(connector);
        if (pin == HPD_NONE ||
            dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
            continue;

        if (connector->base.polled != connector->polled)
            drm_dbg(&dev_priv->drm,
                "Reenabling HPD on connector %s\n",
                connector->base.name);
        connector->base.polled = connector->polled;
    }
    drm_connector_list_iter_end(&conn_iter);

    for_each_hpd_pin(pin) {
        if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
            dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
    }

    intel_hpd_irq_setup(dev_priv);

    spin_unlock_irq(&dev_priv->irq_lock);

    intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
              struct intel_connector *connector)
{
    struct drm_device *dev = connector->base.dev;
    enum drm_connector_status old_status;
    u64 old_epoch_counter;
    bool ret = false;

    drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
    old_status = connector->base.status;
    old_epoch_counter = connector->base.epoch_counter;

    connector->base.status =
        drm_helper_probe_detect(&connector->base, NULL, false);

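    /*
     * The DRM probe helpers bump the connector's epoch counter when the
     * detected state changes, so a counter change serves as the
     * "status changed" signal here.
     */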
    if (old_epoch_counter != connector->base.epoch_counter)
        ret = true;

    if (ret) {
        drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
                connector->base.base.id,
                connector->base.name,
                drm_get_connector_status_name(old_status),
                drm_get_connector_status_name(connector->base.status),
                old_epoch_counter,
                connector->base.epoch_counter);
        return INTEL_HOTPLUG_CHANGED;
    }
    return INTEL_HOTPLUG_UNCHANGED;
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
    return intel_encoder_is_dig_port(encoder) &&
        enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
    struct drm_i915_private *dev_priv =
        container_of(work, struct drm_i915_private, hotplug.dig_port_work);
    u32 long_port_mask, short_port_mask;
    struct intel_encoder *encoder;
    u32 old_bits = 0;

    spin_lock_irq(&dev_priv->irq_lock);
    long_port_mask = dev_priv->hotplug.long_port_mask;
    dev_priv->hotplug.long_port_mask = 0;
    short_port_mask = dev_priv->hotplug.short_port_mask;
    dev_priv->hotplug.short_port_mask = 0;
    spin_unlock_irq(&dev_priv->irq_lock);

    for_each_intel_encoder(&dev_priv->drm, encoder) {
        struct intel_digital_port *dig_port;
        enum port port = encoder->port;
        bool long_hpd, short_hpd;
        enum irqreturn ret;

        if (!intel_encoder_has_hpd_pulse(encoder))
            continue;

        long_hpd = long_port_mask & BIT(port);
        short_hpd = short_port_mask & BIT(port);

        if (!long_hpd && !short_hpd)
            continue;

        dig_port = enc_to_dig_port(encoder);

        ret = dig_port->hpd_pulse(dig_port, long_hpd);
        if (ret == IRQ_NONE) {
            /* fall back to old school hpd */
            old_bits |= BIT(encoder->hpd_pin);
        }
    }

    if (old_bits) {
        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.event_bits |= old_bits;
        spin_unlock_irq(&dev_priv->irq_lock);
        queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
    }
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
    struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

    spin_lock_irq(&i915->irq_lock);
    i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
    spin_unlock_irq(&i915->irq_lock);

    queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
    struct drm_i915_private *dev_priv =
        container_of(work, struct drm_i915_private,
                 hotplug.hotplug_work.work);
    struct drm_device *dev = &dev_priv->drm;
    struct drm_connector_list_iter conn_iter;
    struct intel_connector *connector;
    u32 changed = 0, retry = 0;
    u32 hpd_event_bits;
    u32 hpd_retry_bits;

    mutex_lock(&dev->mode_config.mutex);
    drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

    spin_lock_irq(&dev_priv->irq_lock);

    hpd_event_bits = dev_priv->hotplug.event_bits;
    dev_priv->hotplug.event_bits = 0;
    hpd_retry_bits = dev_priv->hotplug.retry_bits;
    dev_priv->hotplug.retry_bits = 0;

    /* Enable polling for connectors which had HPD IRQ storms */
    intel_hpd_irq_storm_switch_to_polling(dev_priv);

    spin_unlock_irq(&dev_priv->irq_lock);

    drm_connector_list_iter_begin(dev, &conn_iter);
    for_each_intel_connector_iter(connector, &conn_iter) {
        enum hpd_pin pin;
        u32 hpd_bit;

        pin = intel_connector_hpd_pin(connector);
        if (pin == HPD_NONE)
            continue;

        hpd_bit = BIT(pin);
        if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
            struct intel_encoder *encoder =
                intel_attached_encoder(connector);

            if (hpd_event_bits & hpd_bit)
                connector->hotplug_retries = 0;
            else
                connector->hotplug_retries++;

            drm_dbg_kms(&dev_priv->drm,
                    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
                    connector->base.name, pin,
                    connector->hotplug_retries);

            switch (encoder->hotplug(encoder, connector)) {
            case INTEL_HOTPLUG_UNCHANGED:
                break;
            case INTEL_HOTPLUG_CHANGED:
                changed |= hpd_bit;
                break;
            case INTEL_HOTPLUG_RETRY:
                retry |= hpd_bit;
                break;
            }
        }
    }
    drm_connector_list_iter_end(&conn_iter);
    mutex_unlock(&dev->mode_config.mutex);

    if (changed)
        drm_kms_helper_hotplug_event(dev);

    /* Remove shared HPD pins that have changed */
    retry &= ~changed;
    if (retry) {
        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.retry_bits |= retry;
        spin_unlock_irq(&dev_priv->irq_lock);

        mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
                 msecs_to_jiffies(HPD_RETRY_DELAY));
    }
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
               u32 pin_mask, u32 long_mask)
{
    struct intel_encoder *encoder;
    bool storm_detected = false;
    bool queue_dig = false, queue_hp = false;
    u32 long_hpd_pulse_mask = 0;
    u32 short_hpd_pulse_mask = 0;
    enum hpd_pin pin;

    if (!pin_mask)
        return;

    spin_lock(&dev_priv->irq_lock);

    /*
     * Determine whether ->hpd_pulse() exists for each pin, and
     * whether we have a short or a long pulse. This is needed
     * as each pin may have up to two encoders (HDMI and DP) and
     * only one of them (DP) will have ->hpd_pulse().
     */
    for_each_intel_encoder(&dev_priv->drm, encoder) {
        enum port port = encoder->port;
        bool long_hpd;

        pin = encoder->hpd_pin;
        if (!(BIT(pin) & pin_mask))
            continue;

        if (!intel_encoder_has_hpd_pulse(encoder))
            continue;

        long_hpd = long_mask & BIT(pin);

        drm_dbg(&dev_priv->drm,
            "digital hpd on [ENCODER:%d:%s] - %s\n",
            encoder->base.base.id, encoder->base.name,
            long_hpd ? "long" : "short");
        queue_dig = true;

        if (long_hpd) {
            long_hpd_pulse_mask |= BIT(pin);
            dev_priv->hotplug.long_port_mask |= BIT(port);
        } else {
            short_hpd_pulse_mask |= BIT(pin);
            dev_priv->hotplug.short_port_mask |= BIT(port);
        }
    }

    /* Now process each pin just once */
    for_each_hpd_pin(pin) {
        bool long_hpd;

        if (!(BIT(pin) & pin_mask))
            continue;

        if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
            /*
             * On GMCH platforms the interrupt mask bits only
             * prevent irq generation, not the setting of the
             * hotplug bits themselves. So only WARN about
             * unexpected interrupts on saner platforms.
             */
            drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
                      "Received HPD interrupt on pin %d although disabled\n",
                      pin);
            continue;
        }

        if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
            continue;

        /*
         * Delegate to ->hpd_pulse() if one of the encoders for this
         * pin has it, otherwise let the hotplug_work deal with this
         * pin directly.
         */
        if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
            long_hpd = long_hpd_pulse_mask & BIT(pin);
        } else {
            dev_priv->hotplug.event_bits |= BIT(pin);
            long_hpd = true;
            queue_hp = true;
        }

        if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
            dev_priv->hotplug.event_bits &= ~BIT(pin);
            storm_detected = true;
            queue_hp = true;
        }
    }

    /*
     * Disable any IRQs that storms were detected on. Polling enablement
     * happens later in our hotplug work.
     */
    if (storm_detected)
        intel_hpd_irq_setup(dev_priv);
    spin_unlock(&dev_priv->irq_lock);

    /*
     * Our hotplug handler can grab modeset locks (by calling down into the
     * fb helpers). Hence it must not be run on our own dev_priv->wq work
     * queue, or the flush_work in the pageflip code will deadlock.
     */
    if (queue_dig)
        queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
    if (queue_hp)
        queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
    int i;

    if (!HAS_DISPLAY(dev_priv))
        return;

    for_each_hpd_pin(i) {
        dev_priv->hotplug.stats[i].count = 0;
        dev_priv->hotplug.stats[i].state = HPD_ENABLED;
    }

    /*
     * Interrupt setup is already guaranteed to be single-threaded, this is
     * just to make the assert_spin_locked checks happy.
     */
    spin_lock_irq(&dev_priv->irq_lock);
    intel_hpd_irq_setup(dev_priv);
    spin_unlock_irq(&dev_priv->irq_lock);
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
    struct drm_i915_private *dev_priv =
        container_of(work, struct drm_i915_private,
                 hotplug.poll_init_work);
    struct drm_device *dev = &dev_priv->drm;
    struct drm_connector_list_iter conn_iter;
    struct intel_connector *connector;
    bool enabled;

    mutex_lock(&dev->mode_config.mutex);

    enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

    drm_connector_list_iter_begin(dev, &conn_iter);
    for_each_intel_connector_iter(connector, &conn_iter) {
        enum hpd_pin pin;

        pin = intel_connector_hpd_pin(connector);
        if (pin == HPD_NONE)
            continue;

        connector->base.polled = connector->polled;

        if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
            connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
                DRM_CONNECTOR_POLL_DISCONNECT;
    }
    drm_connector_list_iter_end(&conn_iter);

    if (enabled)
        drm_kms_helper_poll_enable(dev);

    mutex_unlock(&dev->mode_config.mutex);

    /*
     * We might have missed any hotplug events that happened while we were
     * in the middle of disabling polling.
     */
    if (!enabled)
        drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
    if (!HAS_DISPLAY(dev_priv) ||
        !INTEL_DISPLAY_ENABLED(dev_priv))
        return;

    WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

    /*
     * We might already be holding dev->mode_config.mutex, so do this in a
     * separate worker. There's no issue if we race here, since we always
     * reschedule this worker anyway.
     */
    schedule_work(&dev_priv->hotplug.poll_init_work);
}

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual polling update in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
    if (!HAS_DISPLAY(dev_priv))
        return;

    WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
    schedule_work(&dev_priv->hotplug.poll_init_work);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
    INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
              i915_hotplug_work_func);
    INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
    INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
    INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
              intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
    if (!HAS_DISPLAY(dev_priv))
        return;

    spin_lock_irq(&dev_priv->irq_lock);

    dev_priv->hotplug.long_port_mask = 0;
    dev_priv->hotplug.short_port_mask = 0;
    dev_priv->hotplug.event_bits = 0;
    dev_priv->hotplug.retry_bits = 0;

    spin_unlock_irq(&dev_priv->irq_lock);

    cancel_work_sync(&dev_priv->hotplug.dig_port_work);
    cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
    cancel_work_sync(&dev_priv->hotplug.poll_init_work);
    cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
    bool ret = false;

    if (pin == HPD_NONE)
        return false;

    spin_lock_irq(&dev_priv->irq_lock);
    if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
        dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
        ret = true;
    }
    spin_unlock_irq(&dev_priv->irq_lock);

    return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
    if (pin == HPD_NONE)
        return;

    spin_lock_irq(&dev_priv->irq_lock);
    dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
    spin_unlock_irq(&dev_priv->irq_lock);
}