0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0030
0031 #include <linux/circ_buf.h>
0032 #include <linux/slab.h>
0033 #include <linux/sysrq.h>
0034
0035 #include <drm/drm_drv.h>
0036
0037 #include "display/icl_dsi_regs.h"
0038 #include "display/intel_de.h"
0039 #include "display/intel_display_trace.h"
0040 #include "display/intel_display_types.h"
0041 #include "display/intel_fifo_underrun.h"
0042 #include "display/intel_hotplug.h"
0043 #include "display/intel_lpe_audio.h"
0044 #include "display/intel_psr.h"
0045
0046 #include "gt/intel_breadcrumbs.h"
0047 #include "gt/intel_gt.h"
0048 #include "gt/intel_gt_irq.h"
0049 #include "gt/intel_gt_pm_irq.h"
0050 #include "gt/intel_gt_regs.h"
0051 #include "gt/intel_rps.h"
0052
0053 #include "i915_driver.h"
0054 #include "i915_drv.h"
0055 #include "i915_irq.h"
0056 #include "intel_pm.h"
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
/*
 * Account one device interrupt in the PMU statistics, but only when the
 * handler actually serviced it (res == IRQ_HANDLED).
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * Plain read-modify-write with WRITE_ONCE() only, no atomics:
	 * NOTE(review) this assumes irq_count has a single writer (the
	 * interrupt handlers of this device never run concurrently with
	 * each other) - confirm before relying on the exact count.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
0083
0084 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
0085 typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
0086 enum hpd_pin pin);
0087
/*
 * Per-platform tables mapping each hotplug pin (enum hpd_pin) to the
 * interrupt/status bits that represent it.  A pin left out of a table
 * (implicitly zero) is not wired up on that platform.
 */

/* ILK/SNB north display: only DP A is handled here. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB/HSW north display variant of the DP A bit. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW+ GEN8 DE port interrupt bit for port A. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

/* IBX/CPT-era south display (PCH) trigger bits. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

/* SPT adds port A and port E on top of the CPT-style bits. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

/* GMCH platforms: hotplug *enable* bits in PORT_HOTPLUG_EN. */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

/* GMCH platforms: hotplug *status* bits; G4x SDVO bits differ from i915. */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

/* BXT/GLK use the GEN8 DE port bits for ports A-C. */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

/* GEN11+: each type-C pin can fire as native TC or as Thunderbolt. */
static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

/* ICP+ south display: DDI pins A-C plus six type-C pins. */
static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

/* DG1 south display: four DDI pins and a single DG2-style TC pin. */
static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};
0185
/*
 * Select the hotplug pin tables (north display and, where present, PCH)
 * matching this platform.  Branch order matters: newer platforms are
 * matched first, and GMCH platforms never have a PCH table.
 */
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	/* No south display hotplug without a (functional) PCH, except DG1+. */
	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}
0227
0228 static void
0229 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
0230 {
0231 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
0232
0233 drm_crtc_handle_vblank(&crtc->base);
0234 }
0235
/*
 * Quiesce a gen3+ style interrupt register triple: mask everything (IMR),
 * disable delivery (IER), then clear any latched events (IIR).  The
 * posting reads flush each write before the next step.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR is cleared twice - it can hold a second queued event. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}
0250
/*
 * gen2 variant of gen3_irq_reset(): same IMR/IER/IIR sequence, but the
 * registers are fixed (GEN2_*) and only 16 bits wide.
 */
void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR is cleared twice - it can hold a second queued event. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
0264
0265
0266
0267
/*
 * Sanity check that an IIR register is clear before we enable interrupts.
 * If it is not, warn and force-clear it (double write, as in the reset
 * paths) so initialization still starts from a known state.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
0283
/* gen2 (16-bit GEN2_IIR) variant of gen3_assert_iir_is_zero(). */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm,  1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
0299
/*
 * Program a gen3+ interrupt register triple: verify IIR is clear, enable
 * the requested sources in IER, then unmask them in IMR (posting read to
 * flush before interrupts can fire).
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}
0311
/* gen2 (16-bit, fixed-register) variant of gen3_irq_init(). */
void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
0321
0322
/*
 * Read-modify-write PORT_HOTPLUG_EN: clear @mask and set @bits within it.
 * @bits must be a subset of @mask.  Caller must hold dev_priv->irq_lock.
 */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
/**
 * i915_hotplug_interrupt_update - update PORT_HOTPLUG_EN
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 *
 * Locked wrapper around i915_hotplug_interrupt_update_locked() that takes
 * and releases dev_priv->irq_lock itself.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
0359
0360
0361
0362
0363
0364
0365
/*
 * Update the cached ILK display interrupt mask and write it to DEIMR.
 * @interrupt_mask selects which bits to touch; within that set,
 * @enabled_irq_mask bits are unmasked (enabled) and the rest are masked.
 * Caller must hold dev_priv->irq_lock.
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Only touch the hardware when something changed and irqs are live. */
	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}
0385
/* Unmask @bits in DEIMR. Caller must hold the irq_lock. */
void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

/* Mask @bits in DEIMR. Caller must hold the irq_lock. */
void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
0395
0396
0397
0398
0399
0400
0401
/*
 * Update GEN8_DE_PORT_IMR: within @interrupt_mask, unmask the bits in
 * @enabled_irq_mask and mask the rest.  Unlike the ILK path there is no
 * cached copy, so the current value is read back from the register.
 * Caller must hold dev_priv->irq_lock.
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Avoid a redundant MMIO write + posting read when nothing changed. */
	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}
0427
0428
0429
0430
0431
0432
0433
0434
/*
 * Update the cached per-pipe DE interrupt mask and write it to
 * GEN8_DE_PIPE_IMR(@pipe).  Same mask semantics as bdw_update_port_irq().
 * Caller must hold dev_priv->irq_lock.
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}
0458
/* Unmask @bits in GEN8_DE_PIPE_IMR(@pipe). Caller must hold the irq_lock. */
void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

/* Mask @bits in GEN8_DE_PIPE_IMR(@pipe). Caller must hold the irq_lock. */
void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}
0470
0471
0472
0473
0474
0475
0476
/*
 * Update SDEIMR (south/PCH display interrupt mask): within
 * @interrupt_mask, unmask the bits in @enabled_irq_mask, mask the rest.
 * Caller must hold dev_priv->irq_lock.
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
0495
/* Unmask @bits in SDEIMR. Caller must hold the irq_lock. */
void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

/* Mask @bits in SDEIMR. Caller must hold the irq_lock. */
void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}
0505
/*
 * Derive the PIPESTAT enable bits (upper half of the register) from the
 * currently requested status bits for @pipe.  On most bits the enable is
 * simply status << 16; a few VLV/CHV bits need special treatment below.
 * Caller must hold dev_priv->irq_lock.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Pre-gen5 hardware: the simple shift is always correct. */
	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * The PSR status bits must never be requested through this
	 * interface; refuse (and warn) if a caller tries.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/*
	 * These enable bits don't line up with their status bits at
	 * status << 16, so clear the shifted aliases and set the real
	 * enable bits explicitly where requested.
	 */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
0549
/*
 * Enable the given PIPESTAT status interrupt(s) on @pipe.  Updates the
 * cached mask, recomputes the enable bits, and writes both halves to the
 * register (writing the status bits also clears any latched events).
 * Caller must hold dev_priv->irq_lock.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* Nothing to do if all requested bits are already enabled. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}
0572
/*
 * Disable the given PIPESTAT status interrupt(s) on @pipe; mirror image
 * of i915_enable_pipestat().  Caller must hold dev_priv->irq_lock.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* Nothing to do if none of the requested bits are enabled. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}
0595
0596 static bool i915_has_asle(struct drm_i915_private *dev_priv)
0597 {
0598 if (!dev_priv->opregion.asle)
0599 return false;
0600
0601 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
0602 }
0603
0604
0605
0606
0607
/*
 * Enable the legacy backlight (ASLE) PIPESTAT events used for opregion
 * backlight requests: always on pipe B, and additionally on pipe A for
 * display ver >= 4.  No-op when the platform has no ASLE support.
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
/*
 * Read the hardware frame counter for pre-g4x hardware, where the frame
 * count lives split across two registers (high bits in PIPEFRAME, low
 * bits plus a pixel counter in PIPEFRAMEPIXEL).
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/* 0 tells the DRM core the hardware counter is unusable here. */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert the vblank start scanline to a pixel count ... */
	vbl_start *= htotal;

	/*
	 * ... and pull it back to hsync start, where the counter ticks
	 * over (NOTE(review): inferred from the hsync_start adjustment -
	 * confirm against the hardware documentation).
	 */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High and low register halves cannot be read atomically, so read
	 * high twice and retry until it is stable around the low read.
	 * _fw() reads are used as we hold uncore.lock.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * Combine the halves and bump the count by one once the pixel
	 * counter has passed vblank start, so the value increments at the
	 * start of vblank; result wraps at 24 bits.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
0741
0742 u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
0743 {
0744 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
0745 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
0746 enum pipe pipe = to_intel_crtc(crtc)->pipe;
0747
0748 if (!vblank->max_vblank_count)
0749 return 0;
0750
0751 return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
0752 }
0753
/*
 * Compute how many scanlines have elapsed since the last frame timestamp
 * by comparing the per-pipe frame timestamp register against the free
 * running IVB timestamp counter, converted via pixel clock and htotal.
 */
static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * The two timestamp registers cannot be read atomically, so bracket
	 * the counter read with two frame-timestamp reads and retry until
	 * the frame timestamp is stable (no new frame in between).
	 */
	do {
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	/*
	 * Delta is in timestamp units; clock is in kHz and htotal in
	 * pixels, hence the 1000 * htotal divisor to get scanlines.
	 */
	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}
0792
0793
0794
0795
0796
0797
0798
0799
0800
/*
 * Derive the current scanline from the frame timestamp delta.  The
 * elapsed-scanline count is clamped to the frame and rebased so that 0
 * corresponds to vblank start, matching the scanline counter convention.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
0816
0817
0818
0819
0820
/*
 * Read the current scanline for @crtc.  Caller must hold uncore.lock
 * (all register accesses use the _fw() variants).  Returns 0 for an
 * inactive crtc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	/* Some modes can't use the DSL counter; fall back to timestamps. */
	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;

	/*
	 * Workaround: on DDI platforms DSL can transiently read 0.
	 * If it does, poll briefly (up to 100us) for the value to move
	 * and use the first non-equal reading.  NOTE(review): the exact
	 * hardware erratum this covers is not visible here - see the
	 * upstream commit history for details.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * Rebase by scanline_offset so the returned value follows the
	 * "0 at vblank start" convention.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
0876
/*
 * DRM scanout-position callback: report the current vertical/horizontal
 * scanout position for @_crtc relative to vblank start (negative while
 * in vblank), optionally bracketed by ktime stamps for error estimation.
 * Returns false for a disabled pipe.
 */
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	/*
	 * Pick the scanline counter where available (gen5+, g4x, gen2, or
	 * when the mode explicitly demands it); otherwise fall back to the
	 * pixel counter path below.
	 */
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Keep the register reads and the two timestamps as close
	 * together as possible: irqs off and uncore.lock held so the
	 * _fw() accesses are safe and nothing interleaves.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Timestamp taken immediately before the position read. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * With VRR the vblank stretches; once past vblank start,
		 * estimate the true position from the frame timestamp and
		 * clamp to the frame.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* Direct scanline counter read. */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/*
		 * Pixel counter path: read the raw pixel position and do
		 * all further math in pixels rather than scanlines.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* Convert the scanline quantities to pixel units too. */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * Clamp out-of-range counter values to the last position
		 * in the frame.  NOTE(review): presumably covers counter
		 * readings taken inside vblank where the counter may
		 * exceed vtotal - confirm against hardware docs.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Rebase from the counter's origin to the start of the
		 * active region (the counter starts at hsync start -
		 * NOTE(review): inferred from this adjustment).
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Timestamp taken immediately after the position read. */
	if (etime)
		*etime = ktime_get();

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * Convert to a position relative to vblank start: negative values
	 * count down to the end of vblank, non-negative values are in the
	 * active portion of the frame.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
1009
/*
 * DRM get_vblank_timestamp hook: delegate to the DRM helper with our
 * scanout-position callback doing the hardware-specific work.
 */
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}
1017
/*
 * Locked wrapper around __intel_get_crtc_scanline(): takes uncore.lock
 * (required by the _fw() register reads inside) around the read.
 */
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040 static void ivb_parity_work(struct work_struct *work)
1041 {
1042 struct drm_i915_private *dev_priv =
1043 container_of(work, typeof(*dev_priv), l3_parity.error_work);
1044 struct intel_gt *gt = to_gt(dev_priv);
1045 u32 error_status, row, bank, subbank;
1046 char *parity_event[6];
1047 u32 misccpctl;
1048 u8 slice = 0;
1049
1050
1051
1052
1053
1054 mutex_lock(&dev_priv->drm.struct_mutex);
1055
1056
1057 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1058 goto out;
1059
1060 misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1061 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1062 intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1063
1064 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1065 i915_reg_t reg;
1066
1067 slice--;
1068 if (drm_WARN_ON_ONCE(&dev_priv->drm,
1069 slice >= NUM_L3_SLICES(dev_priv)))
1070 break;
1071
1072 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1073
1074 reg = GEN7_L3CDERRST1(slice);
1075
1076 error_status = intel_uncore_read(&dev_priv->uncore, reg);
1077 row = GEN7_PARITY_ERROR_ROW(error_status);
1078 bank = GEN7_PARITY_ERROR_BANK(error_status);
1079 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1080
1081 intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1082 intel_uncore_posting_read(&dev_priv->uncore, reg);
1083
1084 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1085 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1086 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1087 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1088 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1089 parity_event[5] = NULL;
1090
1091 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1092 KOBJ_CHANGE, parity_event);
1093
1094 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1095 slice, row, bank, subbank);
1096
1097 kfree(parity_event[4]);
1098 kfree(parity_event[3]);
1099 kfree(parity_event[2]);
1100 kfree(parity_event[1]);
1101 }
1102
1103 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
1104
1105 out:
1106 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1107 spin_lock_irq(>->irq_lock);
1108 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1109 spin_unlock_irq(>->irq_lock);
1110
1111 mutex_unlock(&dev_priv->drm.struct_mutex);
1112 }
1113
1114 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1115 {
1116 switch (pin) {
1117 case HPD_PORT_TC1:
1118 case HPD_PORT_TC2:
1119 case HPD_PORT_TC3:
1120 case HPD_PORT_TC4:
1121 case HPD_PORT_TC5:
1122 case HPD_PORT_TC6:
1123 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
1124 default:
1125 return false;
1126 }
1127 }
1128
1129 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1130 {
1131 switch (pin) {
1132 case HPD_PORT_A:
1133 return val & PORTA_HOTPLUG_LONG_DETECT;
1134 case HPD_PORT_B:
1135 return val & PORTB_HOTPLUG_LONG_DETECT;
1136 case HPD_PORT_C:
1137 return val & PORTC_HOTPLUG_LONG_DETECT;
1138 default:
1139 return false;
1140 }
1141 }
1142
/*
 * Report whether the ICP DDI hotplug value @val flags a long pulse on
 * @pin; the long-detect bit position is derived from the pin itself.
 */
static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}
1155
/*
 * Report whether the ICP type-C hotplug value @val flags a long pulse on
 * @pin; the long-detect bit position is derived from the pin itself.
 */
static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}
1170
1171 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1172 {
1173 switch (pin) {
1174 case HPD_PORT_E:
1175 return val & PORTE_HOTPLUG_LONG_DETECT;
1176 default:
1177 return false;
1178 }
1179 }
1180
1181 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1182 {
1183 switch (pin) {
1184 case HPD_PORT_A:
1185 return val & PORTA_HOTPLUG_LONG_DETECT;
1186 case HPD_PORT_B:
1187 return val & PORTB_HOTPLUG_LONG_DETECT;
1188 case HPD_PORT_C:
1189 return val & PORTC_HOTPLUG_LONG_DETECT;
1190 case HPD_PORT_D:
1191 return val & PORTD_HOTPLUG_LONG_DETECT;
1192 default:
1193 return false;
1194 }
1195 }
1196
1197 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1198 {
1199 switch (pin) {
1200 case HPD_PORT_A:
1201 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1202 default:
1203 return false;
1204 }
1205 }
1206
1207 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1208 {
1209 switch (pin) {
1210 case HPD_PORT_B:
1211 return val & PORTB_HOTPLUG_LONG_DETECT;
1212 case HPD_PORT_C:
1213 return val & PORTC_HOTPLUG_LONG_DETECT;
1214 case HPD_PORT_D:
1215 return val & PORTD_HOTPLUG_LONG_DETECT;
1216 default:
1217 return false;
1218 }
1219 }
1220
1221 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1222 {
1223 switch (pin) {
1224 case HPD_PORT_B:
1225 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1226 case HPD_PORT_C:
1227 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1228 case HPD_PORT_D:
1229 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1230 default:
1231 return false;
1232 }
1233 }
1234
1235
1236
1237
1238
1239
1240
1241
/*
 * Translate raw hotplug trigger bits into pin masks.  For every pin whose
 * trigger bit is set in @hotplug_trigger (per the platform table @hpd),
 * set the pin in *@pin_mask; additionally set it in *@long_mask when
 * @long_pulse_detect() reports a long pulse in @dig_hotplug_reg.
 * Both output masks are OR-accumulated, not cleared, by this function.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	/* One bit per pin must fit into the u32 output masks. */
	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);

}
1267
/*
 * Build the mask of hotplug trigger bits (per @hpd[] mapping) for every
 * encoder whose HPD pin is currently in the HPD_ENABLED state.
 */
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}
1280
/*
 * Build the mask of hotplug trigger bits (per @hpd[] mapping) for all
 * encoders, regardless of their current HPD enable state.
 */
static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}
1292
/*
 * Accumulate the platform-specific hotplug enable bits for every
 * encoder's HPD pin, as computed by the @hotplug_enables callback.
 */
static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}
1304
/* GMBUS transfer complete: wake anyone waiting on the GMBUS wait queue. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1309
/*
 * DP AUX transfer done. Note this deliberately wakes the same wait
 * queue as gmbus_irq_handler() — AUX waiters sleep on
 * gmbus_wait_queue as well.
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1314
#if defined(CONFIG_DEBUG_FS)
/*
 * Record one frame's pipe CRC result (up to five CRC words) into the
 * debugfs CRC facility for @pipe. Compiled out without CONFIG_DEBUG_FS.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);

	/*
	 * Discard the first CRC(s) after enabling: one frame always, and a
	 * second one on display ver >= 8 — presumably the hardware needs
	 * extra frames to produce stable values there (NOTE(review):
	 * confirm the exact rationale against the CRC enable code).
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif
1357
/*
 * Flip-done interrupt for @pipe: deliver and clear the pending vblank
 * event attached to the CRTC's current state, under dev->event_lock.
 */
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	/* Consume the event so it is sent at most once. */
	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
1375
/* HSW+: only a single CRC result register per pipe; pad the rest with 0. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
1383
/* IVB: read all five per-pipe CRC result registers and record them. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}
1394
/*
 * i9xx family: record per-pipe CRC results. RES1 exists only on
 * display ver >= 3, RES2 only on ver >= 5 or G4X; older parts get 0.
 */
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
1416
/*
 * Clear all latched PIPESTAT status bits (including FIFO underrun) on
 * every pipe and forget the software-side interrupt masks.
 */
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}
1429
/*
 * Read and acknowledge the per-pipe PIPESTAT status bits indicated by
 * @iir, filling @pipe_stats for later processing. Runs under irq_lock;
 * bails out early if display interrupts are disabled.
 */
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * FIFO underrun status is always collected, even when the
		 * pipe's IIR event bit isn't set; the remaining status
		 * bits are only looked at when the corresponding
		 * per-pipe event was reported in IIR.
		 */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Two writes: the first clears the latched status bits we
		 * just sampled, the second restores the enable mask —
		 * presumably to re-arm the edge-triggered IIR event bit
		 * (NOTE(review): confirm against the PIPESTAT programming
		 * notes in bspec).
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
1495
/*
 * Gen2: dispatch per-pipe PIPESTAT events — vblank, CRC done and FIFO
 * underrun — from the @pipe_stats snapshot taken by the ack path.
 */
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}
1512
/*
 * Gen3/4: dispatch per-pipe PIPESTAT events, plus the legacy backlight
 * (BLC) / opregion ASLE notification gathered across all pipes.
 */
static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* ASLE can come in either via a pipe BLC event or the IIR bit. */
	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}
1536
/*
 * Gen4 (i965): like i915_pipestat_irq_handler() but vblank uses the
 * start-of-vblank status bit and GMBUS events are reported via pipe A's
 * PIPESTAT.
 */
static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	/* GMBUS status lives in pipe A's PIPESTAT on this platform. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
1563
/*
 * VLV/CHV: dispatch per-pipe PIPESTAT events — vblank, plane flip done,
 * CRC done, FIFO underrun — plus GMBUS via pipe A's status.
 */
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
1586
/*
 * Read and clear PORT_HOTPLUG_STAT, accumulating any hotplug/DP-AUX
 * status bits that fired. Returns the accumulated status mask.
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	/* G4X/VLV/CHV additionally report DP AUX completion here. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * New status bits can latch while we're clearing the old ones, so
	 * keep re-reading and re-clearing until the register reads back
	 * zero; give up (with a warning) after 10 attempts so we can't
	 * get stuck here.
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}
1624
/*
 * Process the status mask returned by i9xx_hpd_irq_ack(): translate the
 * hotplug bits into HPD pin events, and signal DP AUX completion on
 * platforms that report it here (G4X/VLV/CHV).
 */
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		/* Same register holds both trigger and long-pulse bits. */
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}
1651
/*
 * Top-level interrupt handler for Valleyview.
 *
 * Samples GT, PM and display IIRs, then acks and dispatches them. The
 * master/IER disable-and-restore dance around the ack sequence is
 * ordering-sensitive; do not reorder the register accesses.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	/* IRQs are synced during runtime suspend, we don't require a wakeref */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Disable the master interrupt and zero VLV_IER (saving the
		 * old value) while we ack the sources, then restore both
		 * below — presumably so events arriving mid-ack re-assert
		 * the interrupt once the master is re-enabled rather than
		 * being lost (NOTE(review): matches the usual VLV theory of
		 * operation; confirm against the original upstream comment).
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/* VLV_IIR is single-buffered: clear it last, after the
		 * secondary status registers have been acked. */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		/* Dispatch the saved events only after re-enabling. */
		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1738
/*
 * Top-level interrupt handler for Cherryview: like the VLV handler but
 * driven by the gen8-style GEN8_MASTER_IRQ register and GT dispatch.
 * The master/IER disable-and-restore around the ack sequence is
 * ordering-sensitive; do not reorder the register accesses.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	/* IRQs are synced during runtime suspend, we don't require a wakeref */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Disable the master interrupt and zero VLV_IER (saving
		 * the old value) while acking, restoring both below so
		 * that events arriving mid-ack re-assert the interrupt
		 * instead of being lost (NOTE(review): same rationale as
		 * valleyview_irq_handler(); confirm against the original
		 * upstream comment).
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/* VLV_IIR is single-buffered: clear it last, after the
		 * secondary status registers have been acked. */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1817
/*
 * PCH (IBX/CPT) hotplug: latch and clear PCH_PORT_HOTPLUG, then convert
 * the triggers into HPD pin events.
 */
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * When no trigger fired we still write the register back, but
	 * with the port status bits masked out so we don't clear status
	 * for events we are not handling this time around (NOTE(review):
	 * presumably working around spurious status assertion on some
	 * PCHs — confirm against the original upstream comment).
	 */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
1849
1850 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1851 {
1852 enum pipe pipe;
1853 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1854
1855 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1856
1857 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1858 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1859 SDE_AUDIO_POWER_SHIFT);
1860 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1861 port_name(port));
1862 }
1863
1864 if (pch_iir & SDE_AUX_MASK)
1865 dp_aux_irq_handler(dev_priv);
1866
1867 if (pch_iir & SDE_GMBUS)
1868 gmbus_irq_handler(dev_priv);
1869
1870 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1871 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1872
1873 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1874 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1875
1876 if (pch_iir & SDE_POISON)
1877 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1878
1879 if (pch_iir & SDE_FDI_MASK) {
1880 for_each_pipe(dev_priv, pipe)
1881 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1882 pipe_name(pipe),
1883 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1884 }
1885
1886 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1887 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1888
1889 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1890 drm_dbg(&dev_priv->drm,
1891 "PCH transcoder CRC error interrupt\n");
1892
1893 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1894 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1895
1896 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1897 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1898 }
1899
/*
 * IVB/HSW north display error interrupt: poison, per-pipe FIFO underrun
 * and CRC-done events, all reported via GEN7_ERR_INT; ack the register
 * at the end.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	/* Write back the sampled bits to clear them. */
	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}
1922
/*
 * CPT south error interrupt: PCH poison and per-transcoder FIFO
 * underruns via SERR_INT; ack the register at the end.
 */
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	/* Write back the sampled bits to clear them. */
	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}
1937
/*
 * Dispatch south display engine (CPT/PPT PCH) interrupts: hotplug,
 * audio power/CP, DP AUX, GMBUS, FDI debug and the SERR error rollup.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	/* Dump per-pipe FDI receiver status for debugging. */
	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
1974
/*
 * ICP+ PCH: handle combo (DDI) and Type-C hotplug triggers from their
 * separate SHOTPLUG_CTL registers, plus GMBUS.
 */
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the latched status bits. */
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_tc_port_hotplug_long_detect);
	}

	/* pin_mask accumulates across both trigger groups. */
	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}
2011
/*
 * SPT+ PCH: handle hotplug triggers split between PCH_PORT_HOTPLUG
 * (ports A-D) and PCH_PORT_HOTPLUG2 (port E), plus GMBUS.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the latched status bits. */
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug2_long_detect);
	}

	/* pin_mask accumulates across both trigger groups. */
	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
2049
/*
 * ILK/IVB CPU-side hotplug (DP A): ack DIGITAL_PORT_HOTPLUG_CNTRL and
 * convert the trigger into HPD pin events.
 */
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Read-then-write-back acks the latched status bits. */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2065
/*
 * ILK/SNB display engine interrupt (DE IIR) dispatch: hotplug, AUX,
 * opregion, per-pipe events, chained PCH interrupts and the ILK-only
 * PCU/RPS event.
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* PCH interrupts are chained through the DE PCH event bit. */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* Ack SDEIIR only after the handler has consumed it. */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
2114
/*
 * IVB/HSW display engine interrupt (DE IIR) dispatch: hotplug, the
 * ERR_INT rollup, AUX, opregion, per-pipe vblank/flip-done, and chained
 * PCH interrupts (CPT-style, unless the PCH is a NOP).
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* PCH interrupts are chained through the DE PCH event bit. */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* Ack SDEIIR only after the handler has consumed it. */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}
2151
2152
2153
2154
2155
2156
2157
2158
2159
/*
 * Top-level interrupt handler for ILK through HSW (pre-gen8 big-core).
 *
 * Disables the DE master bit and SDEIER while acking GT/DE/PM IIRs,
 * restoring them at the end; the ordering of these register accesses is
 * deliberate — do not reorder.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* Disable master interrupt generation while we process IIRs. */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/*
	 * Also mask the south (PCH) interrupt source while we're at it
	 * (skipped on PCH_NOP systems, which have no south display).
	 * NOTE(review): presumably avoids the PCH re-asserting the
	 * shared line mid-ack — confirm against the original comment.
	 */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Ack each IIR before dispatching its events. */
	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	/* PM (RPS) interrupts exist from gen6 onwards. */
	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Re-enable master interrupt generation. */
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}
2229
/*
 * BXT/GLK hotplug: ack PCH_PORT_HOTPLUG and convert the triggers into
 * HPD pin events.
 */
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Read-then-write-back acks the latched status bits. */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2245
/*
 * Gen11+ north display hotplug: Type-C and Thunderbolt triggers arrive
 * via separate HOTPLUG_CTL registers; convert both into HPD pin events.
 */
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the latched status bits. */
		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
		intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tc, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
		intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tbt, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
2282
/*
 * Return the DE port interrupt bits that signal AUX channel completion
 * for the current display version.
 */
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (DISPLAY_VER(dev_priv) >= 13)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			XELPD_DE_PORT_AUX_DDID |
			XELPD_DE_PORT_AUX_DDIE |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	/* Pre-gen12: start from channel A and add what each gen grew. */
	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(dev_priv) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}
2322
2323 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2324 {
2325 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2326 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2327 else if (DISPLAY_VER(dev_priv) >= 11)
2328 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2329 else if (DISPLAY_VER(dev_priv) >= 9)
2330 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2331 else
2332 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2333 }
2334
/*
 * Gen8+ DE MISC interrupt: opregion ASLE (GSE) and eDP PSR events.
 * Complains if the IIR bit fired without any recognizable source.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			/* Gen12+ has per-transcoder PSR IIRs; before that
			 * there is a single EDP_PSR_IIR. */
			if (DISPLAY_VER(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			/* Read-then-write-back acks the latched bits. */
			psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
			intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* Pre-gen12: the single register covers all
			 * encoders, so one pass is enough. */
			if (DISPLAY_VER(dev_priv) < 12)
				break;
		}
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}
2375
/*
 * Gen11 DSI TE (tearing effect) interrupt: figure out which DSI
 * transcoder/pipe the TE belongs to, treat it as a vblank for that
 * pipe, and ack the DSI interrupt identity register.
 */
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val, tmp;

	/* Check whether DSI transcoder 0 is in port sync (dual-link)
	 * mode; that affects which port owns the TE below. */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * Pick the owning port: DSI0 TE (or DSI1 TE while port sync is
	 * enabled) belongs to port A, otherwise port B.
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* TE interrupts are only meaningful in command mode. */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Map the DSI transcoder's input selection to a pipe. */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* Ack the TE in the per-port DSI interrupt identity register. */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
}
2432
2433 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2434 {
2435 if (DISPLAY_VER(i915) >= 9)
2436 return GEN9_PIPE_PLANE1_FLIP_DONE;
2437 else
2438 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2439 }
2440
2441 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2442 {
2443 u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2444
2445 if (DISPLAY_VER(dev_priv) >= 13)
2446 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2447 XELPD_PIPE_HARD_UNDERRUN;
2448
2449 return mask;
2450 }
2451
/*
 * Handle all display engine (DE) interrupt sources indicated by
 * @master_ctl: misc, hotplug, port, per-pipe and south (PCH) interrupts.
 * Each source's IIR is read and acked before its payload is dispatched.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Hotplug triggers live in the DE port IIR on BXT/GLK/BDW. */
			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			/* GMBUS is routed through the DE port on BXT/GLK. */
			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		}
		else
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * NOTE(review): presumably the BDW+ interrupt scheme closed
		 * the SDE handling race seen on older pch-split platforms -
		 * confirm against the platform workaround history.
		 */
		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Spurious PCH indication with nothing to ack; only
			 * worth a debug message, not an error.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
2604
/* Disable top-level GEN8 interrupts and return the pending indications. */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * With the master disabled, sample the level indications for this
	 * interrupt. Indications are cleared by the individual acks; new
	 * ones can still light up during processing and will raise a fresh
	 * interrupt once the master is re-enabled.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
2617
/* Re-enable top-level GEN8 interrupt delivery. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
2622
/*
 * Top-level GEN8 interrupt handler: disables the master interrupt,
 * dispatches the GT and display engine sources, then re-enables it.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom halves), then clear each GT source. */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref. */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}
2654
/*
 * If the master control indicates a GU misc interrupt, read and ack the
 * GU misc IIR. Returns the raw IIR bits (0 when nothing is pending).
 */
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}
2670
2671 static void
2672 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2673 {
2674 if (iir & GEN11_GU_MISC_GSE)
2675 intel_opregion_asle_intr(gt->i915);
2676 }
2677
/* Disable top-level GEN11 interrupts and return the pending indications. */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * With the master disabled, sample the level indications for this
	 * interrupt. Indications are cleared by the individual acks; new
	 * ones can still light up during processing and will raise a fresh
	 * interrupt once the master is re-enabled.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
2690
/* Re-enable top-level GEN11 interrupt delivery. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
2695
/*
 * Handle GEN11+ display interrupts: gate GEN11_DISPLAY_INT_CTL around the
 * shared gen8 DE handler, which understands its bit layout.
 */
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
2714
/*
 * Top-level GEN11 interrupt handler: disables the master interrupt,
 * dispatches GT, display and GU misc sources, then re-enables it.
 */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom halves), then clear each GT source. */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref. */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	/* Ack GU misc before re-enabling the master, handle it after. */
	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
2749
/*
 * Disable the DG1 per-tile master interrupt and return the pending tile
 * indications (already acked), or 0 when nothing is pending.
 */
static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts. */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit. */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}
2766
/* Re-enable the DG1 per-tile master interrupt. */
static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}
2771
2772 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2773 {
2774 struct drm_i915_private * const i915 = arg;
2775 struct intel_gt *gt = to_gt(i915);
2776 void __iomem * const regs = gt->uncore->regs;
2777 u32 master_tile_ctl, master_ctl;
2778 u32 gu_misc_iir;
2779
2780 if (!intel_irqs_enabled(i915))
2781 return IRQ_NONE;
2782
2783 master_tile_ctl = dg1_master_intr_disable(regs);
2784 if (!master_tile_ctl) {
2785 dg1_master_intr_enable(regs);
2786 return IRQ_NONE;
2787 }
2788
2789
2790 if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2791 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2792 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2793 } else {
2794 DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl);
2795 dg1_master_intr_enable(regs);
2796 return IRQ_NONE;
2797 }
2798
2799 gen11_gt_irq_handler(gt, master_ctl);
2800
2801 if (master_ctl & GEN11_DISPLAY_IRQ)
2802 gen11_display_irq_handler(i915);
2803
2804 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2805
2806 dg1_master_intr_enable(regs);
2807
2808 gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2809
2810 pmu_irq_stats(i915, IRQ_HANDLED);
2811
2812 return IRQ_HANDLED;
2813 }
2814
2815
2816
2817
2818 int i8xx_enable_vblank(struct drm_crtc *crtc)
2819 {
2820 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2821 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2822 unsigned long irqflags;
2823
2824 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2825 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2826 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2827
2828 return 0;
2829 }
2830
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Disable render clock gating (via SCPD0) while any vblank
	 * interrupt is enabled; only the first enabler writes the
	 * register, tracked with the vblank_enabled refcount.
	 * NOTE(review): presumably this works around vblank interrupts
	 * failing to wake the device from deeper C-states - confirm.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}
2846
2847 int i965_enable_vblank(struct drm_crtc *crtc)
2848 {
2849 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2850 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2851 unsigned long irqflags;
2852
2853 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2854 i915_enable_pipestat(dev_priv, pipe,
2855 PIPE_START_VBLANK_INTERRUPT_STATUS);
2856 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2857
2858 return 0;
2859 }
2860
2861 int ilk_enable_vblank(struct drm_crtc *crtc)
2862 {
2863 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2864 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2865 unsigned long irqflags;
2866 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2867 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2868
2869 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2870 ilk_enable_display_irq(dev_priv, bit);
2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2872
2873
2874
2875
2876 if (HAS_PSR(dev_priv))
2877 drm_crtc_vblank_restore(crtc);
2878
2879 return 0;
2880 }
2881
2882 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2883 bool enable)
2884 {
2885 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2886 enum port port;
2887 u32 tmp;
2888
2889 if (!(intel_crtc->mode_flags &
2890 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2891 return false;
2892
2893
2894 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2895 port = PORT_B;
2896 else
2897 port = PORT_A;
2898
2899 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2900 if (enable)
2901 tmp &= ~DSI_TE_EVENT;
2902 else
2903 tmp |= DSI_TE_EVENT;
2904
2905 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2906
2907 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2908 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2909
2910 return true;
2911 }
2912
2913 int bdw_enable_vblank(struct drm_crtc *_crtc)
2914 {
2915 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2917 enum pipe pipe = crtc->pipe;
2918 unsigned long irqflags;
2919
2920 if (gen11_dsi_configure_te(crtc, true))
2921 return 0;
2922
2923 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2924 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2925 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2926
2927
2928
2929
2930 if (HAS_PSR(dev_priv))
2931 drm_crtc_vblank_restore(&crtc->base);
2932
2933 return 0;
2934 }
2935
2936
2937
2938
2939 void i8xx_disable_vblank(struct drm_crtc *crtc)
2940 {
2941 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2942 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2943 unsigned long irqflags;
2944
2945 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2946 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2947 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2948 }
2949
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	/* Re-enable render clock gating once the last vblank user is gone. */
	if (--dev_priv->vblank_enabled == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
2959
2960 void i965_disable_vblank(struct drm_crtc *crtc)
2961 {
2962 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2963 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2964 unsigned long irqflags;
2965
2966 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2967 i915_disable_pipestat(dev_priv, pipe,
2968 PIPE_START_VBLANK_INTERRUPT_STATUS);
2969 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2970 }
2971
2972 void ilk_disable_vblank(struct drm_crtc *crtc)
2973 {
2974 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2975 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2976 unsigned long irqflags;
2977 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2978 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2979
2980 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2981 ilk_disable_display_irq(dev_priv, bit);
2982 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2983 }
2984
2985 void bdw_disable_vblank(struct drm_crtc *_crtc)
2986 {
2987 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2988 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2989 enum pipe pipe = crtc->pipe;
2990 unsigned long irqflags;
2991
2992 if (gen11_dsi_configure_te(crtc, false))
2993 return;
2994
2995 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2996 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2997 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2998 }
2999
/* Reset the south display (SDE/PCH) interrupt registers. */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Nothing to do on PCHs without south display interrupts. */
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	/* CPT/LPT also latch south errors in SERR_INT; clear them all. */
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}
3012
/*
 * Reset all VLV/CHV display interrupt state: DPINVGTT status, hotplug,
 * pipestat, and the VLV interrupt registers. Leaves everything masked.
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Clear latched GTT invalidation status bits. */
	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	/* Disable hotplug detection and ack pending hotplug status. */
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	/* ~0u == everything masked. */
	dev_priv->irq_mask = ~0u;
}
3030
/*
 * Program the VLV/CHV display interrupt enables after a reset: pipe CRC
 * and GMBUS pipestat events plus the pipe/port event interrupts.
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS status is reported through pipe A's pipestat register. */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	/* CHV adds a third pipe. */
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* Everything must still be masked from the preceding reset. */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
3061
3062
3063
/* Reset ILK-IVB/HSW display engine, GT and PCH interrupt state. */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	/* ~0u == everything masked. */
	dev_priv->irq_mask = ~0u;

	/* Gen7 latches display fault sources in GEN7_ERR_INT; clear them. */
	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	/* HSW keeps its PSR interrupt state in dedicated registers. */
	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_irq_reset(dev_priv);
}
3083
/* Reset all VLV interrupt state: master, GT and (if enabled) display. */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	/* Posting read flushes the master disable before touching the rest. */
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3096
/* Reset GEN8 display interrupts; pipes with their power well off are skipped. */
static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Mask and clear all PSR interrupts. */
	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* Pipe registers are only accessible while their power well is on. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
}
3116
/* Reset all GEN8 interrupt state: master, GT, display, PCU and PCH. */
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(dev_priv);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);

}
3131
/*
 * Reset GEN11+ display interrupt state: display master enable, PSR
 * registers (per-transcoder on gen12+), per-pipe registers (only those
 * with an enabled power well), port, misc, HPD and south interrupts.
 */
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/*
		 * Gen12+ has per-transcoder PSR registers; only touch those
		 * whose transcoder power domain is currently enabled.
		 */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	/* Pipe registers are only accessible while their power well is on. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}
3174
/* Reset all GEN11 interrupt state: master, GT, display, GU misc and PCU. */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}
3188
/* Reset all DG1 interrupt state; like gen11 but with the tile master. */
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	dg1_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}
3202
/*
 * gen8_irq_power_well_post_enable - re-initialize per-pipe interrupt
 * registers after the pipes in @pipe_mask regained their power well.
 * Vblank, underrun and flip-done are enabled unconditionally on top of
 * the saved de_irq_mask.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 extra_ier = GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Nothing to restore while interrupts are globally disabled. */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
3226
/*
 * gen8_irq_power_well_pre_disable - reset per-pipe interrupt registers
 * before the power well of the pipes in @pipe_mask goes down, and wait
 * for any in-flight handler to finish touching them.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Nothing to tear down while interrupts are globally disabled. */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure no interrupt handler is still running for these pipes. */
	intel_synchronize_irq(dev_priv);
}
3248
/* Reset all CHV interrupt state: master, GT, PCU and (if enabled) display. */
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
	/* Posting read flushes the master disable before touching the rest. */
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3265
/*
 * Per-pin PCH_PORT_HOTPLUG enable bits for IBX-era PCHs; ports B-D also
 * get a 2ms short-pulse duration.
 */
static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		/*
		 * Port A hotplug is only enabled on LPT:LP here; other
		 * PCHs leave it disabled.
		 */
		return HAS_PCH_LPT_LP(i915) ?
			PORTA_HOTPLUG_ENABLE : 0;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE |
			PORTB_PULSE_DURATION_2ms;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE |
			PORTC_PULSE_DURATION_2ms;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE |
			PORTD_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}
3290
/*
 * Program PCH_PORT_HOTPLUG: clear all enable and pulse-duration bits,
 * then set the per-pin enables reported by ibx_hotplug_enables().
 */
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE |
		     PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
3311
3312 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3313 {
3314 u32 hotplug_irqs, enabled_irqs;
3315
3316 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3317 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3318
3319 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3320
3321 ibx_hpd_detection_setup(dev_priv);
3322 }
3323
3324 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3325 enum hpd_pin pin)
3326 {
3327 switch (pin) {
3328 case HPD_PORT_A:
3329 case HPD_PORT_B:
3330 case HPD_PORT_C:
3331 case HPD_PORT_D:
3332 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3333 default:
3334 return 0;
3335 }
3336 }
3337
3338 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3339 enum hpd_pin pin)
3340 {
3341 switch (pin) {
3342 case HPD_PORT_TC1:
3343 case HPD_PORT_TC2:
3344 case HPD_PORT_TC3:
3345 case HPD_PORT_TC4:
3346 case HPD_PORT_TC5:
3347 case HPD_PORT_TC6:
3348 return ICP_TC_HPD_ENABLE(pin);
3349 default:
3350 return 0;
3351 }
3352 }
3353
/* Program SHOTPLUG_CTL_DDI with the requested per-DDI hotplug enables. */
static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
	hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
}
3366
/* Program SHOTPLUG_CTL_TC with the requested per-Type-C hotplug enables. */
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
	hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
}
3381
/* Set up ICP+ south hotplug: filter count, interrupt unmask, detection. */
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	/* Program the hotplug filter count (ICP through TGP only). */
	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_ddi_hpd_detection_setup(dev_priv);
	icp_tc_hpd_detection_setup(dev_priv);
}
3397
3398 static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3399 enum hpd_pin pin)
3400 {
3401 switch (pin) {
3402 case HPD_PORT_TC1:
3403 case HPD_PORT_TC2:
3404 case HPD_PORT_TC3:
3405 case HPD_PORT_TC4:
3406 case HPD_PORT_TC5:
3407 case HPD_PORT_TC6:
3408 return GEN11_HOTPLUG_CTL_ENABLE(pin);
3409 default:
3410 return 0;
3411 }
3412 }
3413
/*
 * DG1 hotplug setup: invert the DDI A-D HPD sense in SOUTH_CHICKEN1,
 * then reuse the ICP setup path for everything else.
 */
static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
	val |= (INVERT_DDIA_HPD |
		INVERT_DDIB_HPD |
		INVERT_DDIC_HPD |
		INVERT_DDID_HPD);
	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);

	icp_hpd_irq_setup(dev_priv);
}
3427
/* Program GEN11_TC_HOTPLUG_CTL with the requested Type-C hotplug enables. */
static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}
3442
/* Program GEN11_TBT_HOTPLUG_CTL with the requested Thunderbolt enables. */
static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}
3457
/* Set up GEN11+ north hotplug interrupts and detection, then the PCH side. */
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
	/* Unmask the requested hotplug irqs, re-mask the non-enabled ones. */
	val &= ~hotplug_irqs;
	val |= ~enabled_irqs & hotplug_irqs;
	intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);

	gen11_tc_hpd_detection_setup(dev_priv);
	gen11_tbt_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}
3478
3479 static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3480 enum hpd_pin pin)
3481 {
3482 switch (pin) {
3483 case HPD_PORT_A:
3484 return PORTA_HOTPLUG_ENABLE;
3485 case HPD_PORT_B:
3486 return PORTB_HOTPLUG_ENABLE;
3487 case HPD_PORT_C:
3488 return PORTC_HOTPLUG_ENABLE;
3489 case HPD_PORT_D:
3490 return PORTD_HOTPLUG_ENABLE;
3491 default:
3492 return 0;
3493 }
3494 }
3495
3496 static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3497 enum hpd_pin pin)
3498 {
3499 switch (pin) {
3500 case HPD_PORT_E:
3501 return PORTE_HOTPLUG_ENABLE;
3502 default:
3503 return 0;
3504 }
3505 }
3506
/*
 * Program SPT+ hotplug detection: CNP chassis clock request duration,
 * then the port A-D and port E hotplug enables.
 */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* CNP needs the chassis clock request duration programmed. */
	if (HAS_PCH_CNP(dev_priv)) {
		val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the requested ports A-D. */
	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);

	/* Port E has its own hotplug register. */
	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
	hotplug &= ~PORTE_HOTPLUG_ENABLE;
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}
3533
/*
 * Unmask the SPT+ PCH hotplug interrupts for the populated HPD pins and
 * then arm the hotplug detection logic.
 */
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	/*
	 * Program the short-pulse filter count before enabling detection
	 * (CNP+ only); exact filter semantics per bspec.
	 */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	/* Unmask only the pins with an encoder attached. */
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}
3548
3549 static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3550 enum hpd_pin pin)
3551 {
3552 switch (pin) {
3553 case HPD_PORT_A:
3554 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3555 DIGITAL_PORTA_PULSE_DURATION_2ms;
3556 default:
3557 return 0;
3558 }
3559 }
3560
/*
 * Enable digital hotplug detection on the CPU side (port A) and set the
 * DP short-pulse duration to 2ms.
 * NOTE(review): the pulse duration field is presumably reserved on later
 * platforms where only the enable bit matters -- confirm against bspec.
 */
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
		     DIGITAL_PORTA_PULSE_DURATION_MASK);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
3576
/*
 * Unmask the CPU-side (north display) hotplug interrupts, arm the CPU
 * detection logic, and then do the same for the PCH via ibx_hpd_irq_setup().
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	/* BDW+ moved the port hotplug bits to the DE_PORT interrupt. */
	if (DISPLAY_VER(dev_priv) >= 8)
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	else
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}
3593
3594 static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3595 enum hpd_pin pin)
3596 {
3597 u32 hotplug;
3598
3599 switch (pin) {
3600 case HPD_PORT_A:
3601 hotplug = PORTA_HOTPLUG_ENABLE;
3602 if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3603 hotplug |= BXT_DDIA_HPD_INVERT;
3604 return hotplug;
3605 case HPD_PORT_B:
3606 hotplug = PORTB_HOTPLUG_ENABLE;
3607 if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3608 hotplug |= BXT_DDIB_HPD_INVERT;
3609 return hotplug;
3610 case HPD_PORT_C:
3611 hotplug = PORTC_HOTPLUG_ENABLE;
3612 if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3613 hotplug |= BXT_DDIC_HPD_INVERT;
3614 return hotplug;
3615 default:
3616 return 0;
3617 }
3618 }
3619
/*
 * Program BXT/GLK hotplug detection: clear all enable and invert bits in
 * PCH_PORT_HOTPLUG, then set them for the pins that currently have an
 * encoder attached (including VBT-driven polarity inversion).
 */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     BXT_DDIA_HPD_INVERT |
		     BXT_DDIB_HPD_INVERT |
		     BXT_DDIC_HPD_INVERT);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
3634
/*
 * Unmask the BXT/GLK hotplug interrupts in the DE_PORT IMR and arm the
 * detection logic.
 */
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	/* Unmask before enabling detection so no edge is missed. */
	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	bxt_hpd_detection_setup(dev_priv);
}
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3659 {
3660 struct intel_uncore *uncore = &dev_priv->uncore;
3661 u32 mask;
3662
3663 if (HAS_PCH_NOP(dev_priv))
3664 return;
3665
3666 if (HAS_PCH_IBX(dev_priv))
3667 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3668 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3669 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3670 else
3671 mask = SDE_GMBUS_CPT;
3672
3673 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3674 }
3675
/*
 * Post-install for ILK..HSW: build the display interrupt masks for the
 * platform generation, initialize the PCH (SDE) and GT interrupts, and
 * finally program the DE IMR/IER.
 */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	/* IVB/HSW use the *_IVB variants of the DE interrupt bits. */
	if (GRAPHICS_VER(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		/* PSR IIR must be clean before unmasking the PSR interrupt. */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	/* Program the PCH and GT before enabling the DE interrupts. */
	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	/* extra_mask bits are enabled in the IER but left masked in IMR. */
	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}
3718
3719 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3720 {
3721 lockdep_assert_held(&dev_priv->irq_lock);
3722
3723 if (dev_priv->display_irqs_enabled)
3724 return;
3725
3726 dev_priv->display_irqs_enabled = true;
3727
3728 if (intel_irqs_enabled(dev_priv)) {
3729 vlv_display_irq_reset(dev_priv);
3730 vlv_display_irq_postinstall(dev_priv);
3731 }
3732 }
3733
3734 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3735 {
3736 lockdep_assert_held(&dev_priv->irq_lock);
3737
3738 if (!dev_priv->display_irqs_enabled)
3739 return;
3740
3741 dev_priv->display_irqs_enabled = false;
3742
3743 if (intel_irqs_enabled(dev_priv))
3744 vlv_display_irq_reset(dev_priv);
3745 }
3746
3747
/*
 * Post-install for VLV: program the GT interrupts, the display interrupts
 * (only when enabled, under irq_lock), and finally flip the master enable.
 */
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Master enable last, with a posting read to flush the write. */
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
3760
/*
 * Program all GEN8+ display engine interrupt registers: per-pipe,
 * DE_PORT (AUX/hotplug/DSI), DE_MISC, and (gen11+) the dedicated HPD
 * registers. Pipes whose power well is off are skipped; their IMR/IER
 * are programmed later when the power well comes up.
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* GSE moved out of DE_MISC after display ver 10. */
	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	/* DSI TE interrupts live in DE_PORT on gen11+. */
	if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	/* vblank/underrun/flip-done are enabled in IER but masked in IMR. */
	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	/*
	 * PSR IIR must be clean before enabling; gen12+ has per-transcoder
	 * PSR registers, which we can only touch when the transcoder's
	 * power domain is enabled.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* Powered-down pipes get programmed on power-well enable. */
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (DISPLAY_VER(dev_priv) >= 11) {
		/* HPD stays fully masked here; intel_hpd_irq_setup() unmasks. */
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
			GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}
3840
3841 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3842 {
3843 struct intel_uncore *uncore = &dev_priv->uncore;
3844 u32 mask = SDE_GMBUS_ICP;
3845
3846 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3847 }
3848
/*
 * Post-install for GEN8..GEN10: PCH first, then GT and display engine,
 * and only then the master interrupt enable.
 */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}
3861
3862 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3863 {
3864 if (!HAS_DISPLAY(dev_priv))
3865 return;
3866
3867 gen8_de_irq_postinstall(dev_priv);
3868
3869 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3870 GEN11_DISPLAY_IRQ_ENABLE);
3871 }
3872
/*
 * Post-install for GEN11: PCH, GT, display, GU_MISC, then the master
 * enable last, flushed with a posting read.
 */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
3890
/*
 * Post-install for DG1+ (tile-based master interrupt): GT and GU_MISC
 * first, display only on SKUs that have one, then the tile master
 * enable, flushed with a posting read.
 */
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		icp_irq_postinstall(dev_priv);
		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}
3911
/*
 * Post-install for CHV: GEN8-style GT interrupts, display interrupts
 * (only when enabled, under irq_lock), then the GEN8 master enable with
 * a posting read to flush it.
 */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
3924
/* Reset all gen2 interrupt state: pipestats, then the GEN2 IMR/IER/IIR. */
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
	/* Everything masked until postinstall runs. */
	dev_priv->irq_mask = ~0u;
}
3934
/*
 * Post-install for gen2: program the error mask, unmask the always-on
 * interrupts, and enable the CRC-done pipestat events.
 */
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	/* Only page table and memory refresh errors are reported. */
	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/*
	 * Interrupt setup is already guaranteed to be single-threaded;
	 * the lock is taken to satisfy the lockdep assertions in
	 * i915_enable_pipestat().
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3966
/*
 * Acknowledge gen2 error interrupts.
 *
 * Returns the raw EIR value in @eir and whatever remained set after the
 * write-to-clear in @eir_stuck. Some EIR bits cannot be cleared except by
 * handling the underlying error, so any stuck bit is masked in the EMR to
 * stop it from re-raising the master error interrupt. The EMR is briefly
 * set to all-ones first -- presumably to force an edge on the masked
 * error condition so a still-pending error is not lost (TODO confirm
 * against bspec).
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	/* EIR is write-to-clear. */
	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/* Mask whatever refused to clear so it stops interrupting us. */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
3996
3997 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3998 u16 eir, u16 eir_stuck)
3999 {
4000 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4001
4002 if (eir_stuck)
4003 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
4004 eir_stuck);
4005 }
4006
/*
 * Acknowledge gen3+ error interrupts; 32-bit counterpart of
 * i8xx_error_irq_ack(). Returns the raw EIR in @eir and the bits that
 * survived the write-to-clear in @eir_stuck; stuck bits are masked in
 * the EMR (after briefly setting it to all-ones -- presumably to force
 * an edge on a still-pending error, TODO confirm against bspec).
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);

	/* EIR is write-to-clear. */
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/* Mask whatever refused to clear so it stops interrupting us. */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
4034
4035 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4036 u32 eir, u32 eir_stuck)
4037 {
4038 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
4039
4040 if (eir_stuck)
4041 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
4042 eir_stuck);
4043 }
4044
/*
 * Top-level interrupt handler for gen2: ack everything first (pipestat,
 * errors, IIR), then dispatch to the engine, error and pipestat handlers.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, no wakeref is needed. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Ack pipestats regardless of iir, as some status bits
		 * might not be signalled there.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4091
/*
 * Reset all gen3 interrupt state: hotplug (where present), pipestats,
 * and the GEN2-style IMR/IER/IIR.
 */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* PORT_HOTPLUG_STAT is write-to-clear. */
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Everything masked until postinstall runs. */
	dev_priv->irq_mask = ~0u;
}
4106
/*
 * Post-install for gen3: program the error mask, unmask the always-on
 * interrupts (plus the port interrupt on hotplug-capable SKUs), and
 * enable the CRC-done and ASLE pipestat events.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Only page table and memory refresh errors are reported. */
	intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
			   I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* ...and unmask in IMR. */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/*
	 * Setup is single-threaded; the lock only satisfies the lockdep
	 * assertions in i915_enable_pipestat().
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
4147
/*
 * Top-level interrupt handler for gen3: ack everything first (hotplug,
 * pipestat, errors, IIR), then dispatch to the engine, error, hotplug
 * and pipestat handlers.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, no wakeref is needed. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Ack pipestats regardless of iir, as some status bits
		 * might not be signalled there.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4202
/*
 * Reset all gen4 interrupt state: hotplug, pipestats, and the GEN2-style
 * IMR/IER/IIR.
 */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* PORT_HOTPLUG_STAT is write-to-clear. */
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Everything masked until postinstall runs. */
	dev_priv->irq_mask = ~0u;
}
4215
/*
 * Post-install for gen4: program the error mask (G4X reports a few extra
 * error sources), unmask the always-on interrupts, and enable the GMBUS,
 * CRC-done and ASLE pipestat events.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* G4X adds a second (video/BSD) engine. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/*
	 * Setup is single-threaded; the lock only satisfies the lockdep
	 * assertions in i915_enable_pipestat().
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
4268
/*
 * Program the gen3/4 hotplug enables in PORT_HOTPLUG_EN for the pins
 * that currently have an encoder attached, plus the CRT detection
 * parameters. Caller must hold dev_priv->irq_lock (this can be called
 * from the interrupt-handling paths).
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);

	/*
	 * CRT detection parameters: 64-sample activation period on G4X,
	 * 50% voltage compare threshold everywhere.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Update only the bits we own; leave the rest untouched. */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
4293
4294 static irqreturn_t i965_irq_handler(int irq, void *arg)
4295 {
4296 struct drm_i915_private *dev_priv = arg;
4297 irqreturn_t ret = IRQ_NONE;
4298
4299 if (!intel_irqs_enabled(dev_priv))
4300 return IRQ_NONE;
4301
4302
4303 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4304
4305 do {
4306 u32 pipe_stats[I915_MAX_PIPES] = {};
4307 u32 eir = 0, eir_stuck = 0;
4308 u32 hotplug_status = 0;
4309 u32 iir;
4310
4311 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4312 if (iir == 0)
4313 break;
4314
4315 ret = IRQ_HANDLED;
4316
4317 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4318 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4319
4320
4321
4322 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4323
4324 if (iir & I915_MASTER_ERROR_INTERRUPT)
4325 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4326
4327 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4328
4329 if (iir & I915_USER_INTERRUPT)
4330 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4331 iir);
4332
4333 if (iir & I915_BSD_USER_INTERRUPT)
4334 intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4335 iir >> 25);
4336
4337 if (iir & I915_MASTER_ERROR_INTERRUPT)
4338 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4339
4340 if (hotplug_status)
4341 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4342
4343 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4344 } while (0);
4345
4346 pmu_irq_stats(dev_priv, IRQ_HANDLED);
4347
4348 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4349
4350 return ret;
4351 }
4352
/* Per-platform hotplug vtable; selected in intel_irq_init(). */
struct intel_hotplug_funcs {
	void (*hpd_irq_setup)(struct drm_i915_private *i915);
};

/* Stamp out one static vtable per platform hpd_irq_setup implementation. */
#define HPD_FUNCS(platform) \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
	.hpd_irq_setup = platform##_hpd_irq_setup, \
}

HPD_FUNCS(i915);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
HPD_FUNCS(icp);
HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS
4370
4371 void intel_hpd_irq_setup(struct drm_i915_private *i915)
4372 {
4373 if (i915->display_irqs_enabled && i915->hotplug_funcs)
4374 i915->hotplug_funcs->hpd_irq_setup(i915);
4375 }
4376
4377
4378
4379
4380
4381
4382
4383
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * Initializes all the irq support: work items, state and the per-platform
 * hotplug vtable. It does not install the interrupt handler itself --
 * that is intel_irq_install()'s job.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;

	/* Everything below is display-only setup. */
	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/*
	 * Display interrupts are on by default everywhere except VLV/CHV,
	 * where they are gated on the display power well and toggled via
	 * valleyview_{enable,disable}_display_irqs().
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	/*
	 * Short-pulse storm detection is enabled only when MST is not
	 * supported, since MST relies on short HPD pulses.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	/* Select the platform hotplug vtable. */
	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->hotplug_funcs = &i915_hpd_funcs;
	} else {
		if (HAS_PCH_DG2(dev_priv))
			dev_priv->hotplug_funcs = &icp_hpd_funcs;
		else if (HAS_PCH_DG1(dev_priv))
			dev_priv->hotplug_funcs = &dg1_hpd_funcs;
		else if (DISPLAY_VER(dev_priv) >= 11)
			dev_priv->hotplug_funcs = &gen11_hpd_funcs;
		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			dev_priv->hotplug_funcs = &bxt_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->hotplug_funcs = &icp_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->hotplug_funcs = &spt_hpd_funcs;
		else
			dev_priv->hotplug_funcs = &ilk_hpd_funcs;
	}
}
4445
4446
4447
4448
4449
4450
4451
4452 void intel_irq_fini(struct drm_i915_private *i915)
4453 {
4454 int i;
4455
4456 for (i = 0; i < MAX_L3_SLICES; ++i)
4457 kfree(i915->l3_parity.remap_info[i]);
4458 }
4459
4460 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4461 {
4462 if (HAS_GMCH(dev_priv)) {
4463 if (IS_CHERRYVIEW(dev_priv))
4464 return cherryview_irq_handler;
4465 else if (IS_VALLEYVIEW(dev_priv))
4466 return valleyview_irq_handler;
4467 else if (GRAPHICS_VER(dev_priv) == 4)
4468 return i965_irq_handler;
4469 else if (GRAPHICS_VER(dev_priv) == 3)
4470 return i915_irq_handler;
4471 else
4472 return i8xx_irq_handler;
4473 } else {
4474 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4475 return dg1_irq_handler;
4476 else if (GRAPHICS_VER(dev_priv) >= 11)
4477 return gen11_irq_handler;
4478 else if (GRAPHICS_VER(dev_priv) >= 8)
4479 return gen8_irq_handler;
4480 else
4481 return ilk_irq_handler;
4482 }
4483 }
4484
/* Dispatch to the platform-specific IRQ reset routine. */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
4509
/* Dispatch to the platform-specific IRQ postinstall routine. */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * Resets the interrupt registers, installs the handler with request_irq()
 * and then runs the platform postinstall to enable the interrupt sources.
 * Ordering matters: the hardware must be fully masked before the handler
 * can fire (IRQF_SHARED means it may be invoked immediately).
 *
 * Returns 0 on success or the negative request_irq() error.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * Mark irqs enabled up front so the handler and the runtime-pm
	 * bookkeeping see a consistent state once the line goes live.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
4573
4574
4575
4576
4577
4578
4579
4580
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * Masks all interrupt sources, releases the interrupt line and cancels
 * any pending hotplug work. Safe to call when interrupts were never
 * installed (returns early).
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	/* Mask everything before giving the line back. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
4603
4604
4605
4606
4607
4608
4609
4610
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * Masks all interrupt sources for runtime suspend and waits for any
 * in-flight handler to finish. The interrupt line itself stays
 * requested; intel_runtime_pm_enable_interrupts() re-arms the sources.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}
4617
4618
4619
4620
4621
4622
4623
4624
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * Re-arms the interrupt sources after runtime resume: full reset to a
 * known state followed by the platform postinstall.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
4631
/* Whether interrupts are currently armed (runtime-pm bookkeeping flag). */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}
4636
/* Wait for all handlers (hard and threaded) of our PCI irq to finish. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
4641
/* Wait only for the hard-irq part of our PCI irq handler to finish. */
void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}