0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #include <linux/module.h>
0029 #include <linux/string_helpers.h>
0030 #include <linux/pm_runtime.h>
0031
0032 #include <drm/drm_atomic_helper.h>
0033 #include <drm/drm_blend.h>
0034 #include <drm/drm_fourcc.h>
0035 #include <drm/drm_plane_helper.h>
0036
0037 #include "display/intel_atomic.h"
0038 #include "display/intel_atomic_plane.h"
0039 #include "display/intel_bw.h"
0040 #include "display/intel_de.h"
0041 #include "display/intel_display_trace.h"
0042 #include "display/intel_display_types.h"
0043 #include "display/intel_fb.h"
0044 #include "display/intel_fbc.h"
0045 #include "display/intel_sprite.h"
0046 #include "display/skl_universal_plane.h"
0047
0048 #include "gt/intel_engine_regs.h"
0049 #include "gt/intel_gt_regs.h"
0050 #include "gt/intel_llc.h"
0051
0052 #include "i915_drv.h"
0053 #include "i915_fixed.h"
0054 #include "i915_irq.h"
0055 #include "intel_mchbar_regs.h"
0056 #include "intel_pcode.h"
0057 #include "intel_pm.h"
0058 #include "vlv_sideband.h"
0059 #include "../../../platform/x86/intel_ips.h"
0060
0061 static void skl_sagv_disable(struct drm_i915_private *dev_priv);
0062
0063 struct drm_i915_clock_gating_funcs {
0064 void (*init_clock_gating)(struct drm_i915_private *i915);
0065 };
0066
0067
0068 struct skl_wm_params {
0069 bool x_tiled, y_tiled;
0070 bool rc_surface;
0071 bool is_planar;
0072 u32 width;
0073 u8 cpp;
0074 u32 plane_pixel_rate;
0075 u32 y_min_scanlines;
0076 u32 plane_bytes_per_line;
0077 uint_fixed_16_16_t plane_blocks_per_line;
0078 uint_fixed_16_16_t y_tile_minimum;
0079 u32 linetime_us;
0080 u32 dbuf_block_size;
0081 };
0082
0083
0084 struct intel_wm_config {
0085 unsigned int num_pipes_active;
0086 bool sprites_enabled;
0087 bool sprites_scaled;
0088 };
0089
0090 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
0091 {
0092 if (HAS_LLC(dev_priv)) {
0093
0094
0095
0096
0097
0098
0099
0100 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
0101 intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
0102 SKL_DE_COMPRESSED_HASH_MODE);
0103 }
0104
0105
0106 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
0107 intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
0108
0109
0110 intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
0111 intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
0112
0113
0114
0115
0116
0117 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
0118 DISP_FBC_MEMORY_WAKE);
0119 }
0120
0121 static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
0122 {
0123 gen9_init_clock_gating(dev_priv);
0124
0125
0126 intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
0127 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
0128
0129
0130
0131
0132
0133 intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
0134 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
0135
0136
0137
0138
0139
0140 intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
0141 PWM1_GATING_DIS | PWM2_GATING_DIS);
0142
0143
0144
0145
0146
0147
0148
0149 intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
0150
0151
0152
0153
0154
0155 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
0156 DISP_FBC_WM_DIS);
0157
0158
0159
0160
0161
0162 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
0163 intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
0164 DPFC_DISABLE_DUMMY0);
0165 }
0166
0167 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
0168 {
0169 gen9_init_clock_gating(dev_priv);
0170
0171
0172
0173
0174
0175
0176 intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
0177 PWM1_GATING_DIS | PWM2_GATING_DIS);
0178 }
0179
0180 static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
0181 {
0182 u32 tmp;
0183
0184 tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
0185
0186 switch (tmp & CLKCFG_FSB_MASK) {
0187 case CLKCFG_FSB_533:
0188 dev_priv->fsb_freq = 533;
0189 break;
0190 case CLKCFG_FSB_800:
0191 dev_priv->fsb_freq = 800;
0192 break;
0193 case CLKCFG_FSB_667:
0194 dev_priv->fsb_freq = 667;
0195 break;
0196 case CLKCFG_FSB_400:
0197 dev_priv->fsb_freq = 400;
0198 break;
0199 }
0200
0201 switch (tmp & CLKCFG_MEM_MASK) {
0202 case CLKCFG_MEM_533:
0203 dev_priv->mem_freq = 533;
0204 break;
0205 case CLKCFG_MEM_667:
0206 dev_priv->mem_freq = 667;
0207 break;
0208 case CLKCFG_MEM_800:
0209 dev_priv->mem_freq = 800;
0210 break;
0211 }
0212
0213
0214 tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
0215 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
0216 }
0217
0218 static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
0219 {
0220 u16 ddrpll, csipll;
0221
0222 ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
0223 csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
0224
0225 switch (ddrpll & 0xff) {
0226 case 0xc:
0227 dev_priv->mem_freq = 800;
0228 break;
0229 case 0x10:
0230 dev_priv->mem_freq = 1066;
0231 break;
0232 case 0x14:
0233 dev_priv->mem_freq = 1333;
0234 break;
0235 case 0x18:
0236 dev_priv->mem_freq = 1600;
0237 break;
0238 default:
0239 drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
0240 ddrpll & 0xff);
0241 dev_priv->mem_freq = 0;
0242 break;
0243 }
0244
0245 switch (csipll & 0x3ff) {
0246 case 0x00c:
0247 dev_priv->fsb_freq = 3200;
0248 break;
0249 case 0x00e:
0250 dev_priv->fsb_freq = 3733;
0251 break;
0252 case 0x010:
0253 dev_priv->fsb_freq = 4266;
0254 break;
0255 case 0x012:
0256 dev_priv->fsb_freq = 4800;
0257 break;
0258 case 0x014:
0259 dev_priv->fsb_freq = 5333;
0260 break;
0261 case 0x016:
0262 dev_priv->fsb_freq = 5866;
0263 break;
0264 case 0x018:
0265 dev_priv->fsb_freq = 6400;
0266 break;
0267 default:
0268 drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
0269 csipll & 0x3ff);
0270 dev_priv->fsb_freq = 0;
0271 break;
0272 }
0273 }
0274
0275 static const struct cxsr_latency cxsr_latency_table[] = {
0276 {1, 0, 800, 400, 3382, 33382, 3983, 33983},
0277 {1, 0, 800, 667, 3354, 33354, 3807, 33807},
0278 {1, 0, 800, 800, 3347, 33347, 3763, 33763},
0279 {1, 1, 800, 667, 6420, 36420, 6873, 36873},
0280 {1, 1, 800, 800, 5902, 35902, 6318, 36318},
0281
0282 {1, 0, 667, 400, 3400, 33400, 4021, 34021},
0283 {1, 0, 667, 667, 3372, 33372, 3845, 33845},
0284 {1, 0, 667, 800, 3386, 33386, 3822, 33822},
0285 {1, 1, 667, 667, 6438, 36438, 6911, 36911},
0286 {1, 1, 667, 800, 5941, 35941, 6377, 36377},
0287
0288 {1, 0, 400, 400, 3472, 33472, 4173, 34173},
0289 {1, 0, 400, 667, 3443, 33443, 3996, 33996},
0290 {1, 0, 400, 800, 3430, 33430, 3946, 33946},
0291 {1, 1, 400, 667, 6509, 36509, 7062, 37062},
0292 {1, 1, 400, 800, 5985, 35985, 6501, 36501},
0293
0294 {0, 0, 800, 400, 3438, 33438, 4065, 34065},
0295 {0, 0, 800, 667, 3410, 33410, 3889, 33889},
0296 {0, 0, 800, 800, 3403, 33403, 3845, 33845},
0297 {0, 1, 800, 667, 6476, 36476, 6955, 36955},
0298 {0, 1, 800, 800, 5958, 35958, 6400, 36400},
0299
0300 {0, 0, 667, 400, 3456, 33456, 4103, 34106},
0301 {0, 0, 667, 667, 3428, 33428, 3927, 33927},
0302 {0, 0, 667, 800, 3443, 33443, 3905, 33905},
0303 {0, 1, 667, 667, 6494, 36494, 6993, 36993},
0304 {0, 1, 667, 800, 5998, 35998, 6460, 36460},
0305
0306 {0, 0, 400, 400, 3528, 33528, 4255, 34255},
0307 {0, 0, 400, 667, 3500, 33500, 4079, 34079},
0308 {0, 0, 400, 800, 3487, 33487, 4029, 34029},
0309 {0, 1, 400, 667, 6566, 36566, 7145, 37145},
0310 {0, 1, 400, 800, 6042, 36042, 6584, 36584},
0311 };
0312
0313 static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
0314 bool is_ddr3,
0315 int fsb,
0316 int mem)
0317 {
0318 const struct cxsr_latency *latency;
0319 int i;
0320
0321 if (fsb == 0 || mem == 0)
0322 return NULL;
0323
0324 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
0325 latency = &cxsr_latency_table[i];
0326 if (is_desktop == latency->is_desktop &&
0327 is_ddr3 == latency->is_ddr3 &&
0328 fsb == latency->fsb_freq && mem == latency->mem_freq)
0329 return latency;
0330 }
0331
0332 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
0333
0334 return NULL;
0335 }
0336
0337 static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
0338 {
0339 u32 val;
0340
0341 vlv_punit_get(dev_priv);
0342
0343 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
0344 if (enable)
0345 val &= ~FORCE_DDR_HIGH_FREQ;
0346 else
0347 val |= FORCE_DDR_HIGH_FREQ;
0348 val &= ~FORCE_DDR_LOW_FREQ;
0349 val |= FORCE_DDR_FREQ_REQ_ACK;
0350 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
0351
0352 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
0353 FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
0354 drm_err(&dev_priv->drm,
0355 "timed out waiting for Punit DDR DVFS request\n");
0356
0357 vlv_punit_put(dev_priv);
0358 }
0359
0360 static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
0361 {
0362 u32 val;
0363
0364 vlv_punit_get(dev_priv);
0365
0366 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
0367 if (enable)
0368 val |= DSP_MAXFIFO_PM5_ENABLE;
0369 else
0370 val &= ~DSP_MAXFIFO_PM5_ENABLE;
0371 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
0372
0373 vlv_punit_put(dev_priv);
0374 }
0375
0376 #define FW_WM(value, plane) \
0377 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
0378
0379 static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
0380 {
0381 bool was_enabled;
0382 u32 val;
0383
0384 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
0385 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
0386 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
0387 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
0388 } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
0389 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
0390 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
0391 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
0392 } else if (IS_PINEVIEW(dev_priv)) {
0393 val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
0394 was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
0395 if (enable)
0396 val |= PINEVIEW_SELF_REFRESH_EN;
0397 else
0398 val &= ~PINEVIEW_SELF_REFRESH_EN;
0399 intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
0400 intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
0401 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
0402 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
0403 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
0404 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
0405 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
0406 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
0407 } else if (IS_I915GM(dev_priv)) {
0408
0409
0410
0411
0412
0413 was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
0414 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
0415 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
0416 intel_uncore_write(&dev_priv->uncore, INSTPM, val);
0417 intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
0418 } else {
0419 return false;
0420 }
0421
0422 trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
0423
0424 drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
0425 str_enabled_disabled(enable),
0426 str_enabled_disabled(was_enabled));
0427
0428 return was_enabled;
0429 }
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468 bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
0469 {
0470 bool ret;
0471
0472 mutex_lock(&dev_priv->wm.wm_mutex);
0473 ret = _intel_set_memory_cxsr(dev_priv, enable);
0474 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
0475 dev_priv->wm.vlv.cxsr = enable;
0476 else if (IS_G4X(dev_priv))
0477 dev_priv->wm.g4x.cxsr = enable;
0478 mutex_unlock(&dev_priv->wm.wm_mutex);
0479
0480 return ret;
0481 }
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497 static const int pessimal_latency_ns = 5000;
0498
0499 #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
0500 ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
0501
0502 static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
0503 {
0504 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
0505 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
0506 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
0507 enum pipe pipe = crtc->pipe;
0508 int sprite0_start, sprite1_start;
0509 u32 dsparb, dsparb2, dsparb3;
0510
0511 switch (pipe) {
0512 case PIPE_A:
0513 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
0514 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
0515 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
0516 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
0517 break;
0518 case PIPE_B:
0519 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
0520 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
0521 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
0522 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
0523 break;
0524 case PIPE_C:
0525 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
0526 dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
0527 sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
0528 sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
0529 break;
0530 default:
0531 MISSING_CASE(pipe);
0532 return;
0533 }
0534
0535 fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
0536 fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
0537 fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
0538 fifo_state->plane[PLANE_CURSOR] = 63;
0539 }
0540
0541 static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
0542 enum i9xx_plane_id i9xx_plane)
0543 {
0544 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
0545 int size;
0546
0547 size = dsparb & 0x7f;
0548 if (i9xx_plane == PLANE_B)
0549 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
0550
0551 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
0552 dsparb, plane_name(i9xx_plane), size);
0553
0554 return size;
0555 }
0556
0557 static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
0558 enum i9xx_plane_id i9xx_plane)
0559 {
0560 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
0561 int size;
0562
0563 size = dsparb & 0x1ff;
0564 if (i9xx_plane == PLANE_B)
0565 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
0566 size >>= 1;
0567
0568 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
0569 dsparb, plane_name(i9xx_plane), size);
0570
0571 return size;
0572 }
0573
0574 static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
0575 enum i9xx_plane_id i9xx_plane)
0576 {
0577 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
0578 int size;
0579
0580 size = dsparb & 0x7f;
0581 size >>= 2;
0582
0583 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
0584 dsparb, plane_name(i9xx_plane), size);
0585
0586 return size;
0587 }
0588
0589
0590 static const struct intel_watermark_params pnv_display_wm = {
0591 .fifo_size = PINEVIEW_DISPLAY_FIFO,
0592 .max_wm = PINEVIEW_MAX_WM,
0593 .default_wm = PINEVIEW_DFT_WM,
0594 .guard_size = PINEVIEW_GUARD_WM,
0595 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
0596 };
0597
0598 static const struct intel_watermark_params pnv_display_hplloff_wm = {
0599 .fifo_size = PINEVIEW_DISPLAY_FIFO,
0600 .max_wm = PINEVIEW_MAX_WM,
0601 .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
0602 .guard_size = PINEVIEW_GUARD_WM,
0603 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
0604 };
0605
0606 static const struct intel_watermark_params pnv_cursor_wm = {
0607 .fifo_size = PINEVIEW_CURSOR_FIFO,
0608 .max_wm = PINEVIEW_CURSOR_MAX_WM,
0609 .default_wm = PINEVIEW_CURSOR_DFT_WM,
0610 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
0611 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
0612 };
0613
0614 static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
0615 .fifo_size = PINEVIEW_CURSOR_FIFO,
0616 .max_wm = PINEVIEW_CURSOR_MAX_WM,
0617 .default_wm = PINEVIEW_CURSOR_DFT_WM,
0618 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
0619 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
0620 };
0621
0622 static const struct intel_watermark_params i965_cursor_wm_info = {
0623 .fifo_size = I965_CURSOR_FIFO,
0624 .max_wm = I965_CURSOR_MAX_WM,
0625 .default_wm = I965_CURSOR_DFT_WM,
0626 .guard_size = 2,
0627 .cacheline_size = I915_FIFO_LINE_SIZE,
0628 };
0629
0630 static const struct intel_watermark_params i945_wm_info = {
0631 .fifo_size = I945_FIFO_SIZE,
0632 .max_wm = I915_MAX_WM,
0633 .default_wm = 1,
0634 .guard_size = 2,
0635 .cacheline_size = I915_FIFO_LINE_SIZE,
0636 };
0637
0638 static const struct intel_watermark_params i915_wm_info = {
0639 .fifo_size = I915_FIFO_SIZE,
0640 .max_wm = I915_MAX_WM,
0641 .default_wm = 1,
0642 .guard_size = 2,
0643 .cacheline_size = I915_FIFO_LINE_SIZE,
0644 };
0645
0646 static const struct intel_watermark_params i830_a_wm_info = {
0647 .fifo_size = I855GM_FIFO_SIZE,
0648 .max_wm = I915_MAX_WM,
0649 .default_wm = 1,
0650 .guard_size = 2,
0651 .cacheline_size = I830_FIFO_LINE_SIZE,
0652 };
0653
0654 static const struct intel_watermark_params i830_bc_wm_info = {
0655 .fifo_size = I855GM_FIFO_SIZE,
0656 .max_wm = I915_MAX_WM/2,
0657 .default_wm = 1,
0658 .guard_size = 2,
0659 .cacheline_size = I830_FIFO_LINE_SIZE,
0660 };
0661
0662 static const struct intel_watermark_params i845_wm_info = {
0663 .fifo_size = I830_FIFO_SIZE,
0664 .max_wm = I915_MAX_WM,
0665 .default_wm = 1,
0666 .guard_size = 2,
0667 .cacheline_size = I830_FIFO_LINE_SIZE,
0668 };
0669
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701
0702
0703 static unsigned int intel_wm_method1(unsigned int pixel_rate,
0704 unsigned int cpp,
0705 unsigned int latency)
0706 {
0707 u64 ret;
0708
0709 ret = mul_u32_u32(pixel_rate, cpp * latency);
0710 ret = DIV_ROUND_UP_ULL(ret, 10000);
0711
0712 return ret;
0713 }
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
0745 static unsigned int intel_wm_method2(unsigned int pixel_rate,
0746 unsigned int htotal,
0747 unsigned int width,
0748 unsigned int cpp,
0749 unsigned int latency)
0750 {
0751 unsigned int ret;
0752
0753
0754
0755
0756
0757 if (WARN_ON_ONCE(htotal == 0))
0758 htotal = 1;
0759
0760 ret = (latency * pixel_rate) / (htotal * 10000);
0761 ret = (ret + 1) * width * cpp;
0762
0763 return ret;
0764 }
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
0784
0785 static unsigned int intel_calculate_wm(int pixel_rate,
0786 const struct intel_watermark_params *wm,
0787 int fifo_size, int cpp,
0788 unsigned int latency_ns)
0789 {
0790 int entries, wm_size;
0791
0792
0793
0794
0795
0796
0797
0798 entries = intel_wm_method1(pixel_rate, cpp,
0799 latency_ns / 100);
0800 entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
0801 wm->guard_size;
0802 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
0803
0804 wm_size = fifo_size - entries;
0805 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
0806
0807
0808 if (wm_size > wm->max_wm)
0809 wm_size = wm->max_wm;
0810 if (wm_size <= 0)
0811 wm_size = wm->default_wm;
0812
0813
0814
0815
0816
0817
0818
0819
0820 if (wm_size <= 8)
0821 wm_size = 8;
0822
0823 return wm_size;
0824 }
0825
0826 static bool is_disabling(int old, int new, int threshold)
0827 {
0828 return old >= threshold && new < threshold;
0829 }
0830
0831 static bool is_enabling(int old, int new, int threshold)
0832 {
0833 return old < threshold && new >= threshold;
0834 }
0835
0836 static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
0837 {
0838 return dev_priv->wm.max_level + 1;
0839 }
0840
0841 static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
0842 const struct intel_plane_state *plane_state)
0843 {
0844 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
0845
0846
0847 if (!crtc_state->hw.active)
0848 return false;
0849
0850
0851
0852
0853
0854
0855
0856
0857
0858 if (plane->id == PLANE_CURSOR)
0859 return plane_state->hw.fb != NULL;
0860 else
0861 return plane_state->uapi.visible;
0862 }
0863
0864 static bool intel_crtc_active(struct intel_crtc *crtc)
0865 {
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879 return crtc && crtc->active && crtc->base.primary->state->fb &&
0880 crtc->config->hw.adjusted_mode.crtc_clock;
0881 }
0882
0883 static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
0884 {
0885 struct intel_crtc *crtc, *enabled = NULL;
0886
0887 for_each_intel_crtc(&dev_priv->drm, crtc) {
0888 if (intel_crtc_active(crtc)) {
0889 if (enabled)
0890 return NULL;
0891 enabled = crtc;
0892 }
0893 }
0894
0895 return enabled;
0896 }
0897
0898 static void pnv_update_wm(struct drm_i915_private *dev_priv)
0899 {
0900 struct intel_crtc *crtc;
0901 const struct cxsr_latency *latency;
0902 u32 reg;
0903 unsigned int wm;
0904
0905 latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
0906 dev_priv->is_ddr3,
0907 dev_priv->fsb_freq,
0908 dev_priv->mem_freq);
0909 if (!latency) {
0910 drm_dbg_kms(&dev_priv->drm,
0911 "Unknown FSB/MEM found, disable CxSR\n");
0912 intel_set_memory_cxsr(dev_priv, false);
0913 return;
0914 }
0915
0916 crtc = single_enabled_crtc(dev_priv);
0917 if (crtc) {
0918 const struct drm_framebuffer *fb =
0919 crtc->base.primary->state->fb;
0920 int pixel_rate = crtc->config->pixel_rate;
0921 int cpp = fb->format->cpp[0];
0922
0923
0924 wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
0925 pnv_display_wm.fifo_size,
0926 cpp, latency->display_sr);
0927 reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
0928 reg &= ~DSPFW_SR_MASK;
0929 reg |= FW_WM(wm, SR);
0930 intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
0931 drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
0932
0933
0934 wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
0935 pnv_display_wm.fifo_size,
0936 4, latency->cursor_sr);
0937 reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
0938 reg &= ~DSPFW_CURSOR_SR_MASK;
0939 reg |= FW_WM(wm, CURSOR_SR);
0940 intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
0941
0942
0943 wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
0944 pnv_display_hplloff_wm.fifo_size,
0945 cpp, latency->display_hpll_disable);
0946 reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
0947 reg &= ~DSPFW_HPLL_SR_MASK;
0948 reg |= FW_WM(wm, HPLL_SR);
0949 intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
0950
0951
0952 wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
0953 pnv_display_hplloff_wm.fifo_size,
0954 4, latency->cursor_hpll_disable);
0955 reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
0956 reg &= ~DSPFW_HPLL_CURSOR_MASK;
0957 reg |= FW_WM(wm, HPLL_CURSOR);
0958 intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
0959 drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
0960
0961 intel_set_memory_cxsr(dev_priv, true);
0962 } else {
0963 intel_set_memory_cxsr(dev_priv, false);
0964 }
0965 }
0966
0967
0968
0969
0970
0971
0972
0973
0974
0975
0976
0977 static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
0978 {
0979 int tlb_miss = fifo_size * 64 - width * cpp * 8;
0980
0981 return max(0, tlb_miss);
0982 }
0983
0984 static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
0985 const struct g4x_wm_values *wm)
0986 {
0987 enum pipe pipe;
0988
0989 for_each_pipe(dev_priv, pipe)
0990 trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
0991
0992 intel_uncore_write(&dev_priv->uncore, DSPFW1,
0993 FW_WM(wm->sr.plane, SR) |
0994 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
0995 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
0996 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
0997 intel_uncore_write(&dev_priv->uncore, DSPFW2,
0998 (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
0999 FW_WM(wm->sr.fbc, FBC_SR) |
1000 FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
1001 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
1002 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
1003 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
1004 intel_uncore_write(&dev_priv->uncore, DSPFW3,
1005 (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
1006 FW_WM(wm->sr.cursor, CURSOR_SR) |
1007 FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
1008 FW_WM(wm->hpll.plane, HPLL_SR));
1009
1010 intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
1011 }
1012
1013 #define FW_WM_VLV(value, plane) \
1014 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
1015
1016 static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
1017 const struct vlv_wm_values *wm)
1018 {
1019 enum pipe pipe;
1020
1021 for_each_pipe(dev_priv, pipe) {
1022 trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
1023
1024 intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
1025 (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
1026 (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
1027 (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
1028 (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
1029 }
1030
1031
1032
1033
1034
1035
1036 intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
1037 intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
1038 intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
1039 intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
1040 intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
1041
1042 intel_uncore_write(&dev_priv->uncore, DSPFW1,
1043 FW_WM(wm->sr.plane, SR) |
1044 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
1045 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
1046 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
1047 intel_uncore_write(&dev_priv->uncore, DSPFW2,
1048 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
1049 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
1050 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
1051 intel_uncore_write(&dev_priv->uncore, DSPFW3,
1052 FW_WM(wm->sr.cursor, CURSOR_SR));
1053
1054 if (IS_CHERRYVIEW(dev_priv)) {
1055 intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
1056 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1057 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1058 intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
1059 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
1060 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
1061 intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
1062 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
1063 FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
1064 intel_uncore_write(&dev_priv->uncore, DSPHOWM,
1065 FW_WM(wm->sr.plane >> 9, SR_HI) |
1066 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
1067 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
1068 FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
1069 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1070 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1071 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1072 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1073 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1074 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1075 } else {
1076 intel_uncore_write(&dev_priv->uncore, DSPFW7,
1077 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1078 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1079 intel_uncore_write(&dev_priv->uncore, DSPHOWM,
1080 FW_WM(wm->sr.plane >> 9, SR_HI) |
1081 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1082 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1083 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1084 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1085 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1086 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1087 }
1088
1089 intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
1090 }
1091
1092 #undef FW_WM_VLV
1093
1094 static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
1095 {
1096
1097 dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
1098 dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
1099 dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
1100
1101 dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
1102 }
1103
1104 static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
1105 {
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120 switch (plane_id) {
1121 case PLANE_CURSOR:
1122 return 63;
1123 case PLANE_PRIMARY:
1124 return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
1125 case PLANE_SPRITE0:
1126 return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
1127 default:
1128 MISSING_CASE(plane_id);
1129 return 0;
1130 }
1131 }
1132
1133 static int g4x_fbc_fifo_size(int level)
1134 {
1135 switch (level) {
1136 case G4X_WM_LEVEL_SR:
1137 return 7;
1138 case G4X_WM_LEVEL_HPLL:
1139 return 15;
1140 default:
1141 MISSING_CASE(level);
1142 return 0;
1143 }
1144 }
1145
1146 static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
1147 const struct intel_plane_state *plane_state,
1148 int level)
1149 {
1150 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1151 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1152 const struct drm_display_mode *pipe_mode =
1153 &crtc_state->hw.pipe_mode;
1154 unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
1155 unsigned int pixel_rate, htotal, cpp, width, wm;
1156
1157 if (latency == 0)
1158 return USHRT_MAX;
1159
1160 if (!intel_wm_plane_visible(crtc_state, plane_state))
1161 return 0;
1162
1163 cpp = plane_state->hw.fb->format->cpp[0];
1164
1165
1166
1167
1168
1169
1170
1171
1172 if (plane->id == PLANE_PRIMARY &&
1173 level != G4X_WM_LEVEL_NORMAL)
1174 cpp = max(cpp, 4u);
1175
1176 pixel_rate = crtc_state->pixel_rate;
1177 htotal = pipe_mode->crtc_htotal;
1178 width = drm_rect_width(&plane_state->uapi.src) >> 16;
1179
1180 if (plane->id == PLANE_CURSOR) {
1181 wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
1182 } else if (plane->id == PLANE_PRIMARY &&
1183 level == G4X_WM_LEVEL_NORMAL) {
1184 wm = intel_wm_method1(pixel_rate, cpp, latency);
1185 } else {
1186 unsigned int small, large;
1187
1188 small = intel_wm_method1(pixel_rate, cpp, latency);
1189 large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
1190
1191 wm = min(small, large);
1192 }
1193
1194 wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
1195 width, cpp);
1196
1197 wm = DIV_ROUND_UP(wm, 64) + 2;
1198
1199 return min_t(unsigned int, wm, USHRT_MAX);
1200 }
1201
1202 static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1203 int level, enum plane_id plane_id, u16 value)
1204 {
1205 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1206 bool dirty = false;
1207
1208 for (; level < intel_wm_num_levels(dev_priv); level++) {
1209 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1210
1211 dirty |= raw->plane[plane_id] != value;
1212 raw->plane[plane_id] = value;
1213 }
1214
1215 return dirty;
1216 }
1217
1218 static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
1219 int level, u16 value)
1220 {
1221 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1222 bool dirty = false;
1223
1224
1225 level = max(level, G4X_WM_LEVEL_SR);
1226
1227 for (; level < intel_wm_num_levels(dev_priv); level++) {
1228 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1229
1230 dirty |= raw->fbc != value;
1231 raw->fbc = value;
1232 }
1233
1234 return dirty;
1235 }
1236
1237 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
1238 const struct intel_plane_state *plane_state,
1239 u32 pri_val);
1240
1241 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1242 const struct intel_plane_state *plane_state)
1243 {
1244 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1245 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1246 int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1247 enum plane_id plane_id = plane->id;
1248 bool dirty = false;
1249 int level;
1250
1251 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1252 dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1253 if (plane_id == PLANE_PRIMARY)
1254 dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
1255 goto out;
1256 }
1257
1258 for (level = 0; level < num_levels; level++) {
1259 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1260 int wm, max_wm;
1261
1262 wm = g4x_compute_wm(crtc_state, plane_state, level);
1263 max_wm = g4x_plane_fifo_size(plane_id, level);
1264
1265 if (wm > max_wm)
1266 break;
1267
1268 dirty |= raw->plane[plane_id] != wm;
1269 raw->plane[plane_id] = wm;
1270
1271 if (plane_id != PLANE_PRIMARY ||
1272 level == G4X_WM_LEVEL_NORMAL)
1273 continue;
1274
1275 wm = ilk_compute_fbc_wm(crtc_state, plane_state,
1276 raw->plane[plane_id]);
1277 max_wm = g4x_fbc_fifo_size(level);
1278
1279
1280
1281
1282
1283 if (wm > max_wm)
1284 wm = USHRT_MAX;
1285
1286 dirty |= raw->fbc != wm;
1287 raw->fbc = wm;
1288 }
1289
1290
1291 dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1292
1293 if (plane_id == PLANE_PRIMARY)
1294 dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
1295
1296 out:
1297 if (dirty) {
1298 drm_dbg_kms(&dev_priv->drm,
1299 "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
1300 plane->base.name,
1301 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
1302 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
1303 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
1304
1305 if (plane_id == PLANE_PRIMARY)
1306 drm_dbg_kms(&dev_priv->drm,
1307 "FBC watermarks: SR=%d, HPLL=%d\n",
1308 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
1309 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
1310 }
1311
1312 return dirty;
1313 }
1314
1315 static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1316 enum plane_id plane_id, int level)
1317 {
1318 const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1319
1320 return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
1321 }
1322
1323 static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
1324 int level)
1325 {
1326 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1327
1328 if (level > dev_priv->wm.max_level)
1329 return false;
1330
1331 return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1332 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1333 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1334 }
1335
1336
1337 static void g4x_invalidate_wms(struct intel_crtc *crtc,
1338 struct g4x_wm_state *wm_state, int level)
1339 {
1340 if (level <= G4X_WM_LEVEL_NORMAL) {
1341 enum plane_id plane_id;
1342
1343 for_each_plane_id_on_crtc(crtc, plane_id)
1344 wm_state->wm.plane[plane_id] = USHRT_MAX;
1345 }
1346
1347 if (level <= G4X_WM_LEVEL_SR) {
1348 wm_state->cxsr = false;
1349 wm_state->sr.cursor = USHRT_MAX;
1350 wm_state->sr.plane = USHRT_MAX;
1351 wm_state->sr.fbc = USHRT_MAX;
1352 }
1353
1354 if (level <= G4X_WM_LEVEL_HPLL) {
1355 wm_state->hpll_en = false;
1356 wm_state->hpll.cursor = USHRT_MAX;
1357 wm_state->hpll.plane = USHRT_MAX;
1358 wm_state->hpll.fbc = USHRT_MAX;
1359 }
1360 }
1361
1362 static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
1363 int level)
1364 {
1365 if (level < G4X_WM_LEVEL_SR)
1366 return false;
1367
1368 if (level >= G4X_WM_LEVEL_SR &&
1369 wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
1370 return false;
1371
1372 if (level >= G4X_WM_LEVEL_HPLL &&
1373 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
1374 return false;
1375
1376 return true;
1377 }
1378
1379 static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
1380 struct intel_crtc *crtc)
1381 {
1382 struct intel_crtc_state *crtc_state =
1383 intel_atomic_get_new_crtc_state(state, crtc);
1384 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
1385 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1386 const struct g4x_pipe_wm *raw;
1387 const struct intel_plane_state *old_plane_state;
1388 const struct intel_plane_state *new_plane_state;
1389 struct intel_plane *plane;
1390 enum plane_id plane_id;
1391 int i, level;
1392 unsigned int dirty = 0;
1393
1394 for_each_oldnew_intel_plane_in_state(state, plane,
1395 old_plane_state,
1396 new_plane_state, i) {
1397 if (new_plane_state->hw.crtc != &crtc->base &&
1398 old_plane_state->hw.crtc != &crtc->base)
1399 continue;
1400
1401 if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
1402 dirty |= BIT(plane->id);
1403 }
1404
1405 if (!dirty)
1406 return 0;
1407
1408 level = G4X_WM_LEVEL_NORMAL;
1409 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1410 goto out;
1411
1412 raw = &crtc_state->wm.g4x.raw[level];
1413 for_each_plane_id_on_crtc(crtc, plane_id)
1414 wm_state->wm.plane[plane_id] = raw->plane[plane_id];
1415
1416 level = G4X_WM_LEVEL_SR;
1417 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1418 goto out;
1419
1420 raw = &crtc_state->wm.g4x.raw[level];
1421 wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
1422 wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
1423 wm_state->sr.fbc = raw->fbc;
1424
1425 wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
1426
1427 level = G4X_WM_LEVEL_HPLL;
1428 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1429 goto out;
1430
1431 raw = &crtc_state->wm.g4x.raw[level];
1432 wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
1433 wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
1434 wm_state->hpll.fbc = raw->fbc;
1435
1436 wm_state->hpll_en = wm_state->cxsr;
1437
1438 level++;
1439
1440 out:
1441 if (level == G4X_WM_LEVEL_NORMAL)
1442 return -EINVAL;
1443
1444
1445 g4x_invalidate_wms(crtc, wm_state, level);
1446
1447
1448
1449
1450
1451
1452
1453
1454 wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
1455
1456 return 0;
1457 }
1458
1459 static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
1460 struct intel_crtc *crtc)
1461 {
1462 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1463 struct intel_crtc_state *new_crtc_state =
1464 intel_atomic_get_new_crtc_state(state, crtc);
1465 const struct intel_crtc_state *old_crtc_state =
1466 intel_atomic_get_old_crtc_state(state, crtc);
1467 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1468 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1469 const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1470 enum plane_id plane_id;
1471
1472 if (!new_crtc_state->hw.active ||
1473 drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
1474 *intermediate = *optimal;
1475
1476 intermediate->cxsr = false;
1477 intermediate->hpll_en = false;
1478 goto out;
1479 }
1480
1481 intermediate->cxsr = optimal->cxsr && active->cxsr &&
1482 !new_crtc_state->disable_cxsr;
1483 intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1484 !new_crtc_state->disable_cxsr;
1485 intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1486
1487 for_each_plane_id_on_crtc(crtc, plane_id) {
1488 intermediate->wm.plane[plane_id] =
1489 max(optimal->wm.plane[plane_id],
1490 active->wm.plane[plane_id]);
1491
1492 drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
1493 g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1494 }
1495
1496 intermediate->sr.plane = max(optimal->sr.plane,
1497 active->sr.plane);
1498 intermediate->sr.cursor = max(optimal->sr.cursor,
1499 active->sr.cursor);
1500 intermediate->sr.fbc = max(optimal->sr.fbc,
1501 active->sr.fbc);
1502
1503 intermediate->hpll.plane = max(optimal->hpll.plane,
1504 active->hpll.plane);
1505 intermediate->hpll.cursor = max(optimal->hpll.cursor,
1506 active->hpll.cursor);
1507 intermediate->hpll.fbc = max(optimal->hpll.fbc,
1508 active->hpll.fbc);
1509
1510 drm_WARN_ON(&dev_priv->drm,
1511 (intermediate->sr.plane >
1512 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1513 intermediate->sr.cursor >
1514 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1515 intermediate->cxsr);
1516 drm_WARN_ON(&dev_priv->drm,
1517 (intermediate->sr.plane >
1518 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1519 intermediate->sr.cursor >
1520 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1521 intermediate->hpll_en);
1522
1523 drm_WARN_ON(&dev_priv->drm,
1524 intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
1525 intermediate->fbc_en && intermediate->cxsr);
1526 drm_WARN_ON(&dev_priv->drm,
1527 intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
1528 intermediate->fbc_en && intermediate->hpll_en);
1529
1530 out:
1531
1532
1533
1534
1535 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1536 new_crtc_state->wm.need_postvbl_update = true;
1537
1538 return 0;
1539 }
1540
1541 static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1542 struct g4x_wm_values *wm)
1543 {
1544 struct intel_crtc *crtc;
1545 int num_active_pipes = 0;
1546
1547 wm->cxsr = true;
1548 wm->hpll_en = true;
1549 wm->fbc_en = true;
1550
1551 for_each_intel_crtc(&dev_priv->drm, crtc) {
1552 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1553
1554 if (!crtc->active)
1555 continue;
1556
1557 if (!wm_state->cxsr)
1558 wm->cxsr = false;
1559 if (!wm_state->hpll_en)
1560 wm->hpll_en = false;
1561 if (!wm_state->fbc_en)
1562 wm->fbc_en = false;
1563
1564 num_active_pipes++;
1565 }
1566
1567 if (num_active_pipes != 1) {
1568 wm->cxsr = false;
1569 wm->hpll_en = false;
1570 wm->fbc_en = false;
1571 }
1572
1573 for_each_intel_crtc(&dev_priv->drm, crtc) {
1574 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1575 enum pipe pipe = crtc->pipe;
1576
1577 wm->pipe[pipe] = wm_state->wm;
1578 if (crtc->active && wm->cxsr)
1579 wm->sr = wm_state->sr;
1580 if (crtc->active && wm->hpll_en)
1581 wm->hpll = wm_state->hpll;
1582 }
1583 }
1584
1585 static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1586 {
1587 struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1588 struct g4x_wm_values new_wm = {};
1589
1590 g4x_merge_wm(dev_priv, &new_wm);
1591
1592 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1593 return;
1594
1595 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1596 _intel_set_memory_cxsr(dev_priv, false);
1597
1598 g4x_write_wm_values(dev_priv, &new_wm);
1599
1600 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1601 _intel_set_memory_cxsr(dev_priv, true);
1602
1603 *old_wm = new_wm;
1604 }
1605
1606 static void g4x_initial_watermarks(struct intel_atomic_state *state,
1607 struct intel_crtc *crtc)
1608 {
1609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1610 const struct intel_crtc_state *crtc_state =
1611 intel_atomic_get_new_crtc_state(state, crtc);
1612
1613 mutex_lock(&dev_priv->wm.wm_mutex);
1614 crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1615 g4x_program_watermarks(dev_priv);
1616 mutex_unlock(&dev_priv->wm.wm_mutex);
1617 }
1618
1619 static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1620 struct intel_crtc *crtc)
1621 {
1622 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1623 const struct intel_crtc_state *crtc_state =
1624 intel_atomic_get_new_crtc_state(state, crtc);
1625
1626 if (!crtc_state->wm.need_postvbl_update)
1627 return;
1628
1629 mutex_lock(&dev_priv->wm.wm_mutex);
1630 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1631 g4x_program_watermarks(dev_priv);
1632 mutex_unlock(&dev_priv->wm.wm_mutex);
1633 }
1634
1635
1636 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
1637 unsigned int htotal,
1638 unsigned int width,
1639 unsigned int cpp,
1640 unsigned int latency)
1641 {
1642 unsigned int ret;
1643
1644 ret = intel_wm_method2(pixel_rate, htotal,
1645 width, cpp, latency);
1646 ret = DIV_ROUND_UP(ret, 64);
1647
1648 return ret;
1649 }
1650
1651 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1652 {
1653
1654 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1655
1656 dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1657
1658 if (IS_CHERRYVIEW(dev_priv)) {
1659 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1660 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1661
1662 dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1663 }
1664 }
1665
1666 static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1667 const struct intel_plane_state *plane_state,
1668 int level)
1669 {
1670 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1671 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1672 const struct drm_display_mode *pipe_mode =
1673 &crtc_state->hw.pipe_mode;
1674 unsigned int pixel_rate, htotal, cpp, width, wm;
1675
1676 if (dev_priv->wm.pri_latency[level] == 0)
1677 return USHRT_MAX;
1678
1679 if (!intel_wm_plane_visible(crtc_state, plane_state))
1680 return 0;
1681
1682 cpp = plane_state->hw.fb->format->cpp[0];
1683 pixel_rate = crtc_state->pixel_rate;
1684 htotal = pipe_mode->crtc_htotal;
1685 width = drm_rect_width(&plane_state->uapi.src) >> 16;
1686
1687 if (plane->id == PLANE_CURSOR) {
1688
1689
1690
1691
1692
1693
1694 wm = 63;
1695 } else {
1696 wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
1697 dev_priv->wm.pri_latency[level] * 10);
1698 }
1699
1700 return min_t(unsigned int, wm, USHRT_MAX);
1701 }
1702
1703 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1704 {
1705 return (active_planes & (BIT(PLANE_SPRITE0) |
1706 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1707 }
1708
1709 static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
1710 {
1711 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1712 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1713 const struct g4x_pipe_wm *raw =
1714 &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1715 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1716 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1717 int num_active_planes = hweight8(active_planes);
1718 const int fifo_size = 511;
1719 int fifo_extra, fifo_left = fifo_size;
1720 int sprite0_fifo_extra = 0;
1721 unsigned int total_rate;
1722 enum plane_id plane_id;
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732 if (vlv_need_sprite0_fifo_workaround(active_planes))
1733 sprite0_fifo_extra = 1;
1734
1735 total_rate = raw->plane[PLANE_PRIMARY] +
1736 raw->plane[PLANE_SPRITE0] +
1737 raw->plane[PLANE_SPRITE1] +
1738 sprite0_fifo_extra;
1739
1740 if (total_rate > fifo_size)
1741 return -EINVAL;
1742
1743 if (total_rate == 0)
1744 total_rate = 1;
1745
1746 for_each_plane_id_on_crtc(crtc, plane_id) {
1747 unsigned int rate;
1748
1749 if ((active_planes & BIT(plane_id)) == 0) {
1750 fifo_state->plane[plane_id] = 0;
1751 continue;
1752 }
1753
1754 rate = raw->plane[plane_id];
1755 fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
1756 fifo_left -= fifo_state->plane[plane_id];
1757 }
1758
1759 fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
1760 fifo_left -= sprite0_fifo_extra;
1761
1762 fifo_state->plane[PLANE_CURSOR] = 63;
1763
1764 fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
1765
1766
1767 for_each_plane_id_on_crtc(crtc, plane_id) {
1768 int plane_extra;
1769
1770 if (fifo_left == 0)
1771 break;
1772
1773 if ((active_planes & BIT(plane_id)) == 0)
1774 continue;
1775
1776 plane_extra = min(fifo_extra, fifo_left);
1777 fifo_state->plane[plane_id] += plane_extra;
1778 fifo_left -= plane_extra;
1779 }
1780
1781 drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
1782
1783
1784 if (active_planes == 0) {
1785 drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
1786 fifo_state->plane[PLANE_PRIMARY] = fifo_left;
1787 }
1788
1789 return 0;
1790 }
1791
1792
1793 static void vlv_invalidate_wms(struct intel_crtc *crtc,
1794 struct vlv_wm_state *wm_state, int level)
1795 {
1796 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1797
1798 for (; level < intel_wm_num_levels(dev_priv); level++) {
1799 enum plane_id plane_id;
1800
1801 for_each_plane_id_on_crtc(crtc, plane_id)
1802 wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1803
1804 wm_state->sr[level].cursor = USHRT_MAX;
1805 wm_state->sr[level].plane = USHRT_MAX;
1806 }
1807 }
1808
1809 static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1810 {
1811 if (wm > fifo_size)
1812 return USHRT_MAX;
1813 else
1814 return fifo_size - wm;
1815 }
1816
1817
1818
1819
1820
1821 static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1822 int level, enum plane_id plane_id, u16 value)
1823 {
1824 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1825 int num_levels = intel_wm_num_levels(dev_priv);
1826 bool dirty = false;
1827
1828 for (; level < num_levels; level++) {
1829 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1830
1831 dirty |= raw->plane[plane_id] != value;
1832 raw->plane[plane_id] = value;
1833 }
1834
1835 return dirty;
1836 }
1837
1838 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1839 const struct intel_plane_state *plane_state)
1840 {
1841 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1842 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1843 enum plane_id plane_id = plane->id;
1844 int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1845 int level;
1846 bool dirty = false;
1847
1848 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1849 dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1850 goto out;
1851 }
1852
1853 for (level = 0; level < num_levels; level++) {
1854 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1855 int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1856 int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1857
1858 if (wm > max_wm)
1859 break;
1860
1861 dirty |= raw->plane[plane_id] != wm;
1862 raw->plane[plane_id] = wm;
1863 }
1864
1865
1866 dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1867
1868 out:
1869 if (dirty)
1870 drm_dbg_kms(&dev_priv->drm,
1871 "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1872 plane->base.name,
1873 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1874 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1875 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1876
1877 return dirty;
1878 }
1879
1880 static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1881 enum plane_id plane_id, int level)
1882 {
1883 const struct g4x_pipe_wm *raw =
1884 &crtc_state->wm.vlv.raw[level];
1885 const struct vlv_fifo_state *fifo_state =
1886 &crtc_state->wm.vlv.fifo_state;
1887
1888 return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1889 }
1890
1891 static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1892 {
1893 return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1894 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1895 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1896 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1897 }
1898
1899 static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
1900 struct intel_crtc *crtc)
1901 {
1902 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1903 struct intel_crtc_state *crtc_state =
1904 intel_atomic_get_new_crtc_state(state, crtc);
1905 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1906 const struct vlv_fifo_state *fifo_state =
1907 &crtc_state->wm.vlv.fifo_state;
1908 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1909 int num_active_planes = hweight8(active_planes);
1910 bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
1911 const struct intel_plane_state *old_plane_state;
1912 const struct intel_plane_state *new_plane_state;
1913 struct intel_plane *plane;
1914 enum plane_id plane_id;
1915 int level, ret, i;
1916 unsigned int dirty = 0;
1917
1918 for_each_oldnew_intel_plane_in_state(state, plane,
1919 old_plane_state,
1920 new_plane_state, i) {
1921 if (new_plane_state->hw.crtc != &crtc->base &&
1922 old_plane_state->hw.crtc != &crtc->base)
1923 continue;
1924
1925 if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
1926 dirty |= BIT(plane->id);
1927 }
1928
1929
1930
1931
1932
1933
1934
1935 if (needs_modeset)
1936 crtc_state->fifo_changed = true;
1937
1938 if (!dirty)
1939 return 0;
1940
1941
1942 if (dirty & ~BIT(PLANE_CURSOR)) {
1943 const struct intel_crtc_state *old_crtc_state =
1944 intel_atomic_get_old_crtc_state(state, crtc);
1945 const struct vlv_fifo_state *old_fifo_state =
1946 &old_crtc_state->wm.vlv.fifo_state;
1947
1948 ret = vlv_compute_fifo(crtc_state);
1949 if (ret)
1950 return ret;
1951
1952 if (needs_modeset ||
1953 memcmp(old_fifo_state, fifo_state,
1954 sizeof(*fifo_state)) != 0)
1955 crtc_state->fifo_changed = true;
1956 }
1957
1958
1959 wm_state->num_levels = intel_wm_num_levels(dev_priv);
1960
1961
1962
1963
1964
1965 wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
1966
1967 for (level = 0; level < wm_state->num_levels; level++) {
1968 const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1969 const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
1970
1971 if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
1972 break;
1973
1974 for_each_plane_id_on_crtc(crtc, plane_id) {
1975 wm_state->wm[level].plane[plane_id] =
1976 vlv_invert_wm_value(raw->plane[plane_id],
1977 fifo_state->plane[plane_id]);
1978 }
1979
1980 wm_state->sr[level].plane =
1981 vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
1982 raw->plane[PLANE_SPRITE0],
1983 raw->plane[PLANE_SPRITE1]),
1984 sr_fifo_size);
1985
1986 wm_state->sr[level].cursor =
1987 vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
1988 63);
1989 }
1990
1991 if (level == 0)
1992 return -EINVAL;
1993
1994
1995 wm_state->num_levels = level;
1996
1997
1998 vlv_invalidate_wms(crtc, wm_state, level);
1999
2000 return 0;
2001 }
2002
2003 #define VLV_FIFO(plane, value) \
2004 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
2005
2006 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
2007 struct intel_crtc *crtc)
2008 {
2009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2010 struct intel_uncore *uncore = &dev_priv->uncore;
2011 const struct intel_crtc_state *crtc_state =
2012 intel_atomic_get_new_crtc_state(state, crtc);
2013 const struct vlv_fifo_state *fifo_state =
2014 &crtc_state->wm.vlv.fifo_state;
2015 int sprite0_start, sprite1_start, fifo_size;
2016 u32 dsparb, dsparb2, dsparb3;
2017
2018 if (!crtc_state->fifo_changed)
2019 return;
2020
2021 sprite0_start = fifo_state->plane[PLANE_PRIMARY];
2022 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
2023 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
2024
2025 drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
2026 drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
2027
2028 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039 spin_lock(&uncore->lock);
2040
2041 switch (crtc->pipe) {
2042 case PIPE_A:
2043 dsparb = intel_uncore_read_fw(uncore, DSPARB);
2044 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2045
2046 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
2047 VLV_FIFO(SPRITEB, 0xff));
2048 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
2049 VLV_FIFO(SPRITEB, sprite1_start));
2050
2051 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
2052 VLV_FIFO(SPRITEB_HI, 0x1));
2053 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
2054 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
2055
2056 intel_uncore_write_fw(uncore, DSPARB, dsparb);
2057 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2058 break;
2059 case PIPE_B:
2060 dsparb = intel_uncore_read_fw(uncore, DSPARB);
2061 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2062
2063 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
2064 VLV_FIFO(SPRITED, 0xff));
2065 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
2066 VLV_FIFO(SPRITED, sprite1_start));
2067
2068 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
2069 VLV_FIFO(SPRITED_HI, 0xff));
2070 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
2071 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
2072
2073 intel_uncore_write_fw(uncore, DSPARB, dsparb);
2074 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2075 break;
2076 case PIPE_C:
2077 dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
2078 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2079
2080 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
2081 VLV_FIFO(SPRITEF, 0xff));
2082 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
2083 VLV_FIFO(SPRITEF, sprite1_start));
2084
2085 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
2086 VLV_FIFO(SPRITEF_HI, 0xff));
2087 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
2088 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
2089
2090 intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
2091 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2092 break;
2093 default:
2094 break;
2095 }
2096
2097 intel_uncore_posting_read_fw(uncore, DSPARB);
2098
2099 spin_unlock(&uncore->lock);
2100 }
2101
2102 #undef VLV_FIFO
2103
2104 static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
2105 struct intel_crtc *crtc)
2106 {
2107 struct intel_crtc_state *new_crtc_state =
2108 intel_atomic_get_new_crtc_state(state, crtc);
2109 const struct intel_crtc_state *old_crtc_state =
2110 intel_atomic_get_old_crtc_state(state, crtc);
2111 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2112 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2113 const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
2114 int level;
2115
2116 if (!new_crtc_state->hw.active ||
2117 drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
2118 *intermediate = *optimal;
2119
2120 intermediate->cxsr = false;
2121 goto out;
2122 }
2123
2124 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2125 intermediate->cxsr = optimal->cxsr && active->cxsr &&
2126 !new_crtc_state->disable_cxsr;
2127
2128 for (level = 0; level < intermediate->num_levels; level++) {
2129 enum plane_id plane_id;
2130
2131 for_each_plane_id_on_crtc(crtc, plane_id) {
2132 intermediate->wm[level].plane[plane_id] =
2133 min(optimal->wm[level].plane[plane_id],
2134 active->wm[level].plane[plane_id]);
2135 }
2136
2137 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2138 active->sr[level].plane);
2139 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2140 active->sr[level].cursor);
2141 }
2142
2143 vlv_invalidate_wms(crtc, intermediate, level);
2144
2145 out:
2146
2147
2148
2149
2150 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2151 new_crtc_state->wm.need_postvbl_update = true;
2152
2153 return 0;
2154 }
2155
2156 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2157 struct vlv_wm_values *wm)
2158 {
2159 struct intel_crtc *crtc;
2160 int num_active_pipes = 0;
2161
2162 wm->level = dev_priv->wm.max_level;
2163 wm->cxsr = true;
2164
2165 for_each_intel_crtc(&dev_priv->drm, crtc) {
2166 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2167
2168 if (!crtc->active)
2169 continue;
2170
2171 if (!wm_state->cxsr)
2172 wm->cxsr = false;
2173
2174 num_active_pipes++;
2175 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2176 }
2177
2178 if (num_active_pipes != 1)
2179 wm->cxsr = false;
2180
2181 if (num_active_pipes > 1)
2182 wm->level = VLV_WM_LEVEL_PM2;
2183
2184 for_each_intel_crtc(&dev_priv->drm, crtc) {
2185 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2186 enum pipe pipe = crtc->pipe;
2187
2188 wm->pipe[pipe] = wm_state->wm[wm->level];
2189 if (crtc->active && wm->cxsr)
2190 wm->sr = wm_state->sr[wm->level];
2191
2192 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2193 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2194 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2195 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2196 }
2197 }
2198
2199 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2200 {
2201 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2202 struct vlv_wm_values new_wm = {};
2203
2204 vlv_merge_wm(dev_priv, &new_wm);
2205
2206 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2207 return;
2208
2209 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2210 chv_set_memory_dvfs(dev_priv, false);
2211
2212 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2213 chv_set_memory_pm5(dev_priv, false);
2214
2215 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2216 _intel_set_memory_cxsr(dev_priv, false);
2217
2218 vlv_write_wm_values(dev_priv, &new_wm);
2219
2220 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2221 _intel_set_memory_cxsr(dev_priv, true);
2222
2223 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2224 chv_set_memory_pm5(dev_priv, true);
2225
2226 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2227 chv_set_memory_dvfs(dev_priv, true);
2228
2229 *old_wm = new_wm;
2230 }
2231
2232 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2233 struct intel_crtc *crtc)
2234 {
2235 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2236 const struct intel_crtc_state *crtc_state =
2237 intel_atomic_get_new_crtc_state(state, crtc);
2238
2239 mutex_lock(&dev_priv->wm.wm_mutex);
2240 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2241 vlv_program_watermarks(dev_priv);
2242 mutex_unlock(&dev_priv->wm.wm_mutex);
2243 }
2244
2245 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2246 struct intel_crtc *crtc)
2247 {
2248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2249 const struct intel_crtc_state *crtc_state =
2250 intel_atomic_get_new_crtc_state(state, crtc);
2251
2252 if (!crtc_state->wm.need_postvbl_update)
2253 return;
2254
2255 mutex_lock(&dev_priv->wm.wm_mutex);
2256 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2257 vlv_program_watermarks(dev_priv);
2258 mutex_unlock(&dev_priv->wm.wm_mutex);
2259 }
2260
2261 static void i965_update_wm(struct drm_i915_private *dev_priv)
2262 {
2263 struct intel_crtc *crtc;
2264 int srwm = 1;
2265 int cursor_sr = 16;
2266 bool cxsr_enabled;
2267
2268
2269 crtc = single_enabled_crtc(dev_priv);
2270 if (crtc) {
2271
2272 static const int sr_latency_ns = 12000;
2273 const struct drm_display_mode *pipe_mode =
2274 &crtc->config->hw.pipe_mode;
2275 const struct drm_framebuffer *fb =
2276 crtc->base.primary->state->fb;
2277 int pixel_rate = crtc->config->pixel_rate;
2278 int htotal = pipe_mode->crtc_htotal;
2279 int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
2280 int cpp = fb->format->cpp[0];
2281 int entries;
2282
2283 entries = intel_wm_method2(pixel_rate, htotal,
2284 width, cpp, sr_latency_ns / 100);
2285 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2286 srwm = I965_FIFO_SIZE - entries;
2287 if (srwm < 0)
2288 srwm = 1;
2289 srwm &= 0x1ff;
2290 drm_dbg_kms(&dev_priv->drm,
2291 "self-refresh entries: %d, wm: %d\n",
2292 entries, srwm);
2293
2294 entries = intel_wm_method2(pixel_rate, htotal,
2295 crtc->base.cursor->state->crtc_w, 4,
2296 sr_latency_ns / 100);
2297 entries = DIV_ROUND_UP(entries,
2298 i965_cursor_wm_info.cacheline_size) +
2299 i965_cursor_wm_info.guard_size;
2300
2301 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2302 if (cursor_sr > i965_cursor_wm_info.max_wm)
2303 cursor_sr = i965_cursor_wm_info.max_wm;
2304
2305 drm_dbg_kms(&dev_priv->drm,
2306 "self-refresh watermark: display plane %d "
2307 "cursor %d\n", srwm, cursor_sr);
2308
2309 cxsr_enabled = true;
2310 } else {
2311 cxsr_enabled = false;
2312
2313 intel_set_memory_cxsr(dev_priv, false);
2314 }
2315
2316 drm_dbg_kms(&dev_priv->drm,
2317 "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2318 srwm);
2319
2320
2321 intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
2322 FW_WM(8, CURSORB) |
2323 FW_WM(8, PLANEB) |
2324 FW_WM(8, PLANEA));
2325 intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
2326 FW_WM(8, PLANEC_OLD));
2327
2328 intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2329
2330 if (cxsr_enabled)
2331 intel_set_memory_cxsr(dev_priv, true);
2332 }
2333
2334 #undef FW_WM
2335
2336 static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
2337 enum i9xx_plane_id i9xx_plane)
2338 {
2339 struct intel_plane *plane;
2340
2341 for_each_intel_plane(&i915->drm, plane) {
2342 if (plane->id == PLANE_PRIMARY &&
2343 plane->i9xx_plane == i9xx_plane)
2344 return intel_crtc_for_pipe(i915, plane->pipe);
2345 }
2346
2347 return NULL;
2348 }
2349
2350 static void i9xx_update_wm(struct drm_i915_private *dev_priv)
2351 {
2352 const struct intel_watermark_params *wm_info;
2353 u32 fwater_lo;
2354 u32 fwater_hi;
2355 int cwm, srwm = 1;
2356 int fifo_size;
2357 int planea_wm, planeb_wm;
2358 struct intel_crtc *crtc;
2359
2360 if (IS_I945GM(dev_priv))
2361 wm_info = &i945_wm_info;
2362 else if (DISPLAY_VER(dev_priv) != 2)
2363 wm_info = &i915_wm_info;
2364 else
2365 wm_info = &i830_a_wm_info;
2366
2367 if (DISPLAY_VER(dev_priv) == 2)
2368 fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
2369 else
2370 fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
2371 crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
2372 if (intel_crtc_active(crtc)) {
2373 const struct drm_framebuffer *fb =
2374 crtc->base.primary->state->fb;
2375 int cpp;
2376
2377 if (DISPLAY_VER(dev_priv) == 2)
2378 cpp = 4;
2379 else
2380 cpp = fb->format->cpp[0];
2381
2382 planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
2383 wm_info, fifo_size, cpp,
2384 pessimal_latency_ns);
2385 } else {
2386 planea_wm = fifo_size - wm_info->guard_size;
2387 if (planea_wm > (long)wm_info->max_wm)
2388 planea_wm = wm_info->max_wm;
2389 }
2390
2391 if (DISPLAY_VER(dev_priv) == 2)
2392 wm_info = &i830_bc_wm_info;
2393
2394 if (DISPLAY_VER(dev_priv) == 2)
2395 fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
2396 else
2397 fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
2398 crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
2399 if (intel_crtc_active(crtc)) {
2400 const struct drm_framebuffer *fb =
2401 crtc->base.primary->state->fb;
2402 int cpp;
2403
2404 if (DISPLAY_VER(dev_priv) == 2)
2405 cpp = 4;
2406 else
2407 cpp = fb->format->cpp[0];
2408
2409 planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
2410 wm_info, fifo_size, cpp,
2411 pessimal_latency_ns);
2412 } else {
2413 planeb_wm = fifo_size - wm_info->guard_size;
2414 if (planeb_wm > (long)wm_info->max_wm)
2415 planeb_wm = wm_info->max_wm;
2416 }
2417
2418 drm_dbg_kms(&dev_priv->drm,
2419 "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2420
2421 crtc = single_enabled_crtc(dev_priv);
2422 if (IS_I915GM(dev_priv) && crtc) {
2423 struct drm_i915_gem_object *obj;
2424
2425 obj = intel_fb_obj(crtc->base.primary->state->fb);
2426
2427
2428 if (!i915_gem_object_is_tiled(obj))
2429 crtc = NULL;
2430 }
2431
2432
2433
2434
2435 cwm = 2;
2436
2437
2438 intel_set_memory_cxsr(dev_priv, false);
2439
2440
2441 if (HAS_FW_BLC(dev_priv) && crtc) {
2442
2443 static const int sr_latency_ns = 6000;
2444 const struct drm_display_mode *pipe_mode =
2445 &crtc->config->hw.pipe_mode;
2446 const struct drm_framebuffer *fb =
2447 crtc->base.primary->state->fb;
2448 int pixel_rate = crtc->config->pixel_rate;
2449 int htotal = pipe_mode->crtc_htotal;
2450 int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
2451 int cpp;
2452 int entries;
2453
2454 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2455 cpp = 4;
2456 else
2457 cpp = fb->format->cpp[0];
2458
2459 entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
2460 sr_latency_ns / 100);
2461 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2462 drm_dbg_kms(&dev_priv->drm,
2463 "self-refresh entries: %d\n", entries);
2464 srwm = wm_info->fifo_size - entries;
2465 if (srwm < 0)
2466 srwm = 1;
2467
2468 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2469 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
2470 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2471 else
2472 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
2473 }
2474
2475 drm_dbg_kms(&dev_priv->drm,
2476 "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2477 planea_wm, planeb_wm, cwm, srwm);
2478
2479 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2480 fwater_hi = (cwm & 0x1f);
2481
2482
2483 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2484 fwater_hi = fwater_hi | (1 << 8);
2485
2486 intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2487 intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
2488
2489 if (crtc)
2490 intel_set_memory_cxsr(dev_priv, true);
2491 }
2492
2493 static void i845_update_wm(struct drm_i915_private *dev_priv)
2494 {
2495 struct intel_crtc *crtc;
2496 u32 fwater_lo;
2497 int planea_wm;
2498
2499 crtc = single_enabled_crtc(dev_priv);
2500 if (crtc == NULL)
2501 return;
2502
2503 planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
2504 &i845_wm_info,
2505 i845_get_fifo_size(dev_priv, PLANE_A),
2506 4, pessimal_latency_ns);
2507 fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
2508 fwater_lo |= (3<<8) | planea_wm;
2509
2510 drm_dbg_kms(&dev_priv->drm,
2511 "Setting FIFO watermarks - A: %d\n", planea_wm);
2512
2513 intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2514 }
2515
2516
2517 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2518 unsigned int cpp,
2519 unsigned int latency)
2520 {
2521 unsigned int ret;
2522
2523 ret = intel_wm_method1(pixel_rate, cpp, latency);
2524 ret = DIV_ROUND_UP(ret, 64) + 2;
2525
2526 return ret;
2527 }
2528
2529
2530 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2531 unsigned int htotal,
2532 unsigned int width,
2533 unsigned int cpp,
2534 unsigned int latency)
2535 {
2536 unsigned int ret;
2537
2538 ret = intel_wm_method2(pixel_rate, htotal,
2539 width, cpp, latency);
2540 ret = DIV_ROUND_UP(ret, 64) + 2;
2541
2542 return ret;
2543 }
2544
2545 static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2546 {
2547
2548
2549
2550
2551
2552
2553 if (WARN_ON(!cpp))
2554 return 0;
2555 if (WARN_ON(!horiz_pixels))
2556 return 0;
2557
2558 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2559 }
2560
2561 struct ilk_wm_maximums {
2562 u16 pri;
2563 u16 spr;
2564 u16 cur;
2565 u16 fbc;
2566 };
2567
2568
2569
2570
2571
2572 static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
2573 const struct intel_plane_state *plane_state,
2574 u32 mem_value, bool is_lp)
2575 {
2576 u32 method1, method2;
2577 int cpp;
2578
2579 if (mem_value == 0)
2580 return U32_MAX;
2581
2582 if (!intel_wm_plane_visible(crtc_state, plane_state))
2583 return 0;
2584
2585 cpp = plane_state->hw.fb->format->cpp[0];
2586
2587 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2588
2589 if (!is_lp)
2590 return method1;
2591
2592 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2593 crtc_state->hw.pipe_mode.crtc_htotal,
2594 drm_rect_width(&plane_state->uapi.src) >> 16,
2595 cpp, mem_value);
2596
2597 return min(method1, method2);
2598 }
2599
2600
2601
2602
2603
2604 static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2605 const struct intel_plane_state *plane_state,
2606 u32 mem_value)
2607 {
2608 u32 method1, method2;
2609 int cpp;
2610
2611 if (mem_value == 0)
2612 return U32_MAX;
2613
2614 if (!intel_wm_plane_visible(crtc_state, plane_state))
2615 return 0;
2616
2617 cpp = plane_state->hw.fb->format->cpp[0];
2618
2619 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2620 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2621 crtc_state->hw.pipe_mode.crtc_htotal,
2622 drm_rect_width(&plane_state->uapi.src) >> 16,
2623 cpp, mem_value);
2624 return min(method1, method2);
2625 }
2626
2627
2628
2629
2630
2631 static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
2632 const struct intel_plane_state *plane_state,
2633 u32 mem_value)
2634 {
2635 int cpp;
2636
2637 if (mem_value == 0)
2638 return U32_MAX;
2639
2640 if (!intel_wm_plane_visible(crtc_state, plane_state))
2641 return 0;
2642
2643 cpp = plane_state->hw.fb->format->cpp[0];
2644
2645 return ilk_wm_method2(crtc_state->pixel_rate,
2646 crtc_state->hw.pipe_mode.crtc_htotal,
2647 drm_rect_width(&plane_state->uapi.src) >> 16,
2648 cpp, mem_value);
2649 }
2650
2651
2652 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2653 const struct intel_plane_state *plane_state,
2654 u32 pri_val)
2655 {
2656 int cpp;
2657
2658 if (!intel_wm_plane_visible(crtc_state, plane_state))
2659 return 0;
2660
2661 cpp = plane_state->hw.fb->format->cpp[0];
2662
2663 return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
2664 cpp);
2665 }
2666
2667 static unsigned int
2668 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2669 {
2670 if (DISPLAY_VER(dev_priv) >= 8)
2671 return 3072;
2672 else if (DISPLAY_VER(dev_priv) >= 7)
2673 return 768;
2674 else
2675 return 512;
2676 }
2677
2678 static unsigned int
2679 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2680 int level, bool is_sprite)
2681 {
2682 if (DISPLAY_VER(dev_priv) >= 8)
2683
2684 return level == 0 ? 255 : 2047;
2685 else if (DISPLAY_VER(dev_priv) >= 7)
2686
2687 return level == 0 ? 127 : 1023;
2688 else if (!is_sprite)
2689
2690 return level == 0 ? 127 : 511;
2691 else
2692
2693 return level == 0 ? 63 : 255;
2694 }
2695
2696 static unsigned int
2697 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2698 {
2699 if (DISPLAY_VER(dev_priv) >= 7)
2700 return level == 0 ? 63 : 255;
2701 else
2702 return level == 0 ? 31 : 63;
2703 }
2704
2705 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2706 {
2707 if (DISPLAY_VER(dev_priv) >= 8)
2708 return 31;
2709 else
2710 return 15;
2711 }
2712
2713
2714 static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2715 int level,
2716 const struct intel_wm_config *config,
2717 enum intel_ddb_partitioning ddb_partitioning,
2718 bool is_sprite)
2719 {
2720 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2721
2722
2723 if (is_sprite && !config->sprites_enabled)
2724 return 0;
2725
2726
2727 if (level == 0 || config->num_pipes_active > 1) {
2728 fifo_size /= INTEL_NUM_PIPES(dev_priv);
2729
2730
2731
2732
2733
2734
2735 if (DISPLAY_VER(dev_priv) <= 6)
2736 fifo_size /= 2;
2737 }
2738
2739 if (config->sprites_enabled) {
2740
2741 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2742 if (is_sprite)
2743 fifo_size *= 5;
2744 fifo_size /= 6;
2745 } else {
2746 fifo_size /= 2;
2747 }
2748 }
2749
2750
2751 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2752 }
2753
2754
2755 static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2756 int level,
2757 const struct intel_wm_config *config)
2758 {
2759
2760 if (level > 0 && config->num_pipes_active > 1)
2761 return 64;
2762
2763
2764 return ilk_cursor_wm_reg_max(dev_priv, level);
2765 }
2766
2767 static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2768 int level,
2769 const struct intel_wm_config *config,
2770 enum intel_ddb_partitioning ddb_partitioning,
2771 struct ilk_wm_maximums *max)
2772 {
2773 max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2774 max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2775 max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2776 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2777 }
2778
2779 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2780 int level,
2781 struct ilk_wm_maximums *max)
2782 {
2783 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2784 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2785 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2786 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2787 }
2788
2789 static bool ilk_validate_wm_level(int level,
2790 const struct ilk_wm_maximums *max,
2791 struct intel_wm_level *result)
2792 {
2793 bool ret;
2794
2795
2796 if (!result->enable)
2797 return false;
2798
2799 result->enable = result->pri_val <= max->pri &&
2800 result->spr_val <= max->spr &&
2801 result->cur_val <= max->cur;
2802
2803 ret = result->enable;
2804
2805
2806
2807
2808
2809
2810 if (level == 0 && !result->enable) {
2811 if (result->pri_val > max->pri)
2812 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2813 level, result->pri_val, max->pri);
2814 if (result->spr_val > max->spr)
2815 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2816 level, result->spr_val, max->spr);
2817 if (result->cur_val > max->cur)
2818 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2819 level, result->cur_val, max->cur);
2820
2821 result->pri_val = min_t(u32, result->pri_val, max->pri);
2822 result->spr_val = min_t(u32, result->spr_val, max->spr);
2823 result->cur_val = min_t(u32, result->cur_val, max->cur);
2824 result->enable = true;
2825 }
2826
2827 return ret;
2828 }
2829
2830 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2831 const struct intel_crtc *crtc,
2832 int level,
2833 struct intel_crtc_state *crtc_state,
2834 const struct intel_plane_state *pristate,
2835 const struct intel_plane_state *sprstate,
2836 const struct intel_plane_state *curstate,
2837 struct intel_wm_level *result)
2838 {
2839 u16 pri_latency = dev_priv->wm.pri_latency[level];
2840 u16 spr_latency = dev_priv->wm.spr_latency[level];
2841 u16 cur_latency = dev_priv->wm.cur_latency[level];
2842
2843
2844 if (level > 0) {
2845 pri_latency *= 5;
2846 spr_latency *= 5;
2847 cur_latency *= 5;
2848 }
2849
2850 if (pristate) {
2851 result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
2852 pri_latency, level);
2853 result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
2854 }
2855
2856 if (sprstate)
2857 result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
2858
2859 if (curstate)
2860 result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
2861
2862 result->enable = true;
2863 }
2864
2865 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2866 u16 wm[])
2867 {
2868 struct intel_uncore *uncore = &dev_priv->uncore;
2869
2870 if (DISPLAY_VER(dev_priv) >= 9) {
2871 u32 val;
2872 int ret, i;
2873 int level, max_level = ilk_wm_max_level(dev_priv);
2874 int mult = IS_DG2(dev_priv) ? 2 : 1;
2875
2876
2877 val = 0;
2878 ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
2879 &val, NULL);
2880
2881 if (ret) {
2882 drm_err(&dev_priv->drm,
2883 "SKL Mailbox read error = %d\n", ret);
2884 return;
2885 }
2886
2887 wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2888 wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2889 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2890 wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2891 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2892 wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2893 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2894
2895
2896 val = 1;
2897 ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
2898 &val, NULL);
2899 if (ret) {
2900 drm_err(&dev_priv->drm,
2901 "SKL Mailbox read error = %d\n", ret);
2902 return;
2903 }
2904
2905 wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2906 wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2907 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2908 wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2909 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2910 wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2911 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2912
2913
2914
2915
2916
2917
2918 for (level = 1; level <= max_level; level++) {
2919 if (wm[level] == 0) {
2920 for (i = level + 1; i <= max_level; i++)
2921 wm[i] = 0;
2922
2923 max_level = level - 1;
2924
2925 break;
2926 }
2927 }
2928
2929
2930
2931
2932
2933
2934
2935
2936 if (wm[0] == 0) {
2937 u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
2938
2939 for (level = 0; level <= max_level; level++)
2940 wm[level] += adjust;
2941 }
2942
2943
2944
2945
2946
2947
2948
2949 if (dev_priv->dram_info.wm_lv_0_adjust_needed)
2950 wm[0] += 1;
2951 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2952 u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
2953
2954 wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
2955 if (wm[0] == 0)
2956 wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
2957 wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
2958 wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
2959 wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
2960 wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
2961 } else if (DISPLAY_VER(dev_priv) >= 6) {
2962 u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
2963
2964 wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
2965 wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
2966 wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
2967 wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
2968 } else if (DISPLAY_VER(dev_priv) >= 5) {
2969 u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
2970
2971
2972 wm[0] = 7;
2973 wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
2974 wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
2975 } else {
2976 MISSING_CASE(INTEL_DEVID(dev_priv));
2977 }
2978 }
2979
2980 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2981 u16 wm[5])
2982 {
2983
2984 if (DISPLAY_VER(dev_priv) == 5)
2985 wm[0] = 13;
2986 }
2987
2988 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2989 u16 wm[5])
2990 {
2991
2992 if (DISPLAY_VER(dev_priv) == 5)
2993 wm[0] = 13;
2994 }
2995
2996 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2997 {
2998
2999 if (HAS_HW_SAGV_WM(dev_priv))
3000 return 5;
3001 else if (DISPLAY_VER(dev_priv) >= 9)
3002 return 7;
3003 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3004 return 4;
3005 else if (DISPLAY_VER(dev_priv) >= 6)
3006 return 3;
3007 else
3008 return 2;
3009 }
3010
3011 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
3012 const char *name,
3013 const u16 wm[])
3014 {
3015 int level, max_level = ilk_wm_max_level(dev_priv);
3016
3017 for (level = 0; level <= max_level; level++) {
3018 unsigned int latency = wm[level];
3019
3020 if (latency == 0) {
3021 drm_dbg_kms(&dev_priv->drm,
3022 "%s WM%d latency not provided\n",
3023 name, level);
3024 continue;
3025 }
3026
3027
3028
3029
3030
3031 if (DISPLAY_VER(dev_priv) >= 9)
3032 latency *= 10;
3033 else if (level > 0)
3034 latency *= 5;
3035
3036 drm_dbg_kms(&dev_priv->drm,
3037 "%s WM%d latency %u (%u.%u usec)\n", name, level,
3038 wm[level], latency / 10, latency % 10);
3039 }
3040 }
3041
3042 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
3043 u16 wm[5], u16 min)
3044 {
3045 int level, max_level = ilk_wm_max_level(dev_priv);
3046
3047 if (wm[0] >= min)
3048 return false;
3049
3050 wm[0] = max(wm[0], min);
3051 for (level = 1; level <= max_level; level++)
3052 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
3053
3054 return true;
3055 }
3056
3057 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3058 {
3059 bool changed;
3060
3061
3062
3063
3064
3065 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
3066 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
3067 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3068
3069 if (!changed)
3070 return;
3071
3072 drm_dbg_kms(&dev_priv->drm,
3073 "WM latency values increased to avoid potential underruns\n");
3074 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3075 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3076 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3077 }
3078
3079 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3080 {
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092 if (dev_priv->wm.pri_latency[3] == 0 &&
3093 dev_priv->wm.spr_latency[3] == 0 &&
3094 dev_priv->wm.cur_latency[3] == 0)
3095 return;
3096
3097 dev_priv->wm.pri_latency[3] = 0;
3098 dev_priv->wm.spr_latency[3] = 0;
3099 dev_priv->wm.cur_latency[3] = 0;
3100
3101 drm_dbg_kms(&dev_priv->drm,
3102 "LP3 watermarks disabled due to potential for lost interrupts\n");
3103 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3104 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3105 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3106 }
3107
3108 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3109 {
3110 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3111
3112 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3113 sizeof(dev_priv->wm.pri_latency));
3114 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3115 sizeof(dev_priv->wm.pri_latency));
3116
3117 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3118 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3119
3120 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3121 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3122 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3123
3124 if (DISPLAY_VER(dev_priv) == 6) {
3125 snb_wm_latency_quirk(dev_priv);
3126 snb_wm_lp3_irq_quirk(dev_priv);
3127 }
3128 }
3129
3130 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3131 {
3132 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3133 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3134 }
3135
3136 static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3137 struct intel_pipe_wm *pipe_wm)
3138 {
3139
3140 const struct intel_wm_config config = {
3141 .num_pipes_active = 1,
3142 .sprites_enabled = pipe_wm->sprites_enabled,
3143 .sprites_scaled = pipe_wm->sprites_scaled,
3144 };
3145 struct ilk_wm_maximums max;
3146
3147
3148 ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3149
3150
3151 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3152 drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
3153 return false;
3154 }
3155
3156 return true;
3157 }
3158
3159
3160 static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
3161 struct intel_crtc *crtc)
3162 {
3163 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3164 struct intel_crtc_state *crtc_state =
3165 intel_atomic_get_new_crtc_state(state, crtc);
3166 struct intel_pipe_wm *pipe_wm;
3167 struct intel_plane *plane;
3168 const struct intel_plane_state *plane_state;
3169 const struct intel_plane_state *pristate = NULL;
3170 const struct intel_plane_state *sprstate = NULL;
3171 const struct intel_plane_state *curstate = NULL;
3172 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3173 struct ilk_wm_maximums max;
3174
3175 pipe_wm = &crtc_state->wm.ilk.optimal;
3176
3177 intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
3178 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
3179 pristate = plane_state;
3180 else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
3181 sprstate = plane_state;
3182 else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
3183 curstate = plane_state;
3184 }
3185
3186 pipe_wm->pipe_enabled = crtc_state->hw.active;
3187 pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
3188 pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
3189
3190 usable_level = max_level;
3191
3192
3193 if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3194 usable_level = 1;
3195
3196
3197 if (pipe_wm->sprites_scaled)
3198 usable_level = 0;
3199
3200 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3201 ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
3202 pristate, sprstate, curstate, &pipe_wm->wm[0]);
3203
3204 if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3205 return -EINVAL;
3206
3207 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3208
3209 for (level = 1; level <= usable_level; level++) {
3210 struct intel_wm_level *wm = &pipe_wm->wm[level];
3211
3212 ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
3213 pristate, sprstate, curstate, wm);
3214
3215
3216
3217
3218
3219
3220 if (!ilk_validate_wm_level(level, &max, wm)) {
3221 memset(wm, 0, sizeof(*wm));
3222 break;
3223 }
3224 }
3225
3226 return 0;
3227 }
3228
3229
3230
3231
3232
3233
3234 static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
3235 struct intel_crtc *crtc)
3236 {
3237 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3238 struct intel_crtc_state *new_crtc_state =
3239 intel_atomic_get_new_crtc_state(state, crtc);
3240 const struct intel_crtc_state *old_crtc_state =
3241 intel_atomic_get_old_crtc_state(state, crtc);
3242 struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
3243 const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
3244 int level, max_level = ilk_wm_max_level(dev_priv);
3245
3246
3247
3248
3249
3250
3251 *a = new_crtc_state->wm.ilk.optimal;
3252 if (!new_crtc_state->hw.active ||
3253 drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) ||
3254 state->skip_intermediate_wm)
3255 return 0;
3256
3257 a->pipe_enabled |= b->pipe_enabled;
3258 a->sprites_enabled |= b->sprites_enabled;
3259 a->sprites_scaled |= b->sprites_scaled;
3260
3261 for (level = 0; level <= max_level; level++) {
3262 struct intel_wm_level *a_wm = &a->wm[level];
3263 const struct intel_wm_level *b_wm = &b->wm[level];
3264
3265 a_wm->enable &= b_wm->enable;
3266 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3267 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3268 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3269 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3270 }
3271
3272
3273
3274
3275
3276
3277
3278 if (!ilk_validate_pipe_wm(dev_priv, a))
3279 return -EINVAL;
3280
3281
3282
3283
3284
3285 if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
3286 new_crtc_state->wm.need_postvbl_update = true;
3287
3288 return 0;
3289 }
3290
3291
3292
3293
3294 static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3295 int level,
3296 struct intel_wm_level *ret_wm)
3297 {
3298 const struct intel_crtc *crtc;
3299
3300 ret_wm->enable = true;
3301
3302 for_each_intel_crtc(&dev_priv->drm, crtc) {
3303 const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
3304 const struct intel_wm_level *wm = &active->wm[level];
3305
3306 if (!active->pipe_enabled)
3307 continue;
3308
3309
3310
3311
3312
3313
3314 if (!wm->enable)
3315 ret_wm->enable = false;
3316
3317 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3318 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3319 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3320 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3321 }
3322 }
3323
3324
3325
3326
3327 static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3328 const struct intel_wm_config *config,
3329 const struct ilk_wm_maximums *max,
3330 struct intel_pipe_wm *merged)
3331 {
3332 int level, max_level = ilk_wm_max_level(dev_priv);
3333 int last_enabled_level = max_level;
3334
3335
3336 if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3337 config->num_pipes_active > 1)
3338 last_enabled_level = 0;
3339
3340
3341 merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
3342
3343
3344 for (level = 1; level <= max_level; level++) {
3345 struct intel_wm_level *wm = &merged->wm[level];
3346
3347 ilk_merge_wm_level(dev_priv, level, wm);
3348
3349 if (level > last_enabled_level)
3350 wm->enable = false;
3351 else if (!ilk_validate_wm_level(level, max, wm))
3352
3353 last_enabled_level = level - 1;
3354
3355
3356
3357
3358
3359 if (wm->fbc_val > max->fbc) {
3360 if (wm->enable)
3361 merged->fbc_wm_enabled = false;
3362 wm->fbc_val = 0;
3363 }
3364 }
3365
3366
3367 if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
3368 dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
3369 for (level = 2; level <= max_level; level++) {
3370 struct intel_wm_level *wm = &merged->wm[level];
3371
3372 wm->enable = false;
3373 }
3374 }
3375 }
3376
3377 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3378 {
3379
3380 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3381 }
3382
3383
3384 static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3385 int level)
3386 {
3387 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3388 return 2 * level;
3389 else
3390 return dev_priv->wm.pri_latency[level];
3391 }
3392
3393 static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3394 const struct intel_pipe_wm *merged,
3395 enum intel_ddb_partitioning partitioning,
3396 struct ilk_wm_values *results)
3397 {
3398 struct intel_crtc *crtc;
3399 int level, wm_lp;
3400
3401 results->enable_fbc_wm = merged->fbc_wm_enabled;
3402 results->partitioning = partitioning;
3403
3404
3405 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3406 const struct intel_wm_level *r;
3407
3408 level = ilk_wm_lp_to_level(wm_lp, merged);
3409
3410 r = &merged->wm[level];
3411
3412
3413
3414
3415
3416 results->wm_lp[wm_lp - 1] =
3417 WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
3418 WM_LP_PRIMARY(r->pri_val) |
3419 WM_LP_CURSOR(r->cur_val);
3420
3421 if (r->enable)
3422 results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
3423
3424 if (DISPLAY_VER(dev_priv) >= 8)
3425 results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
3426 else
3427 results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
3428
3429 results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
3430
3431
3432
3433
3434
3435 if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
3436 drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
3437 results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
3438 }
3439 }
3440
3441
3442 for_each_intel_crtc(&dev_priv->drm, crtc) {
3443 enum pipe pipe = crtc->pipe;
3444 const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
3445 const struct intel_wm_level *r = &pipe_wm->wm[0];
3446
3447 if (drm_WARN_ON(&dev_priv->drm, !r->enable))
3448 continue;
3449
3450 results->wm_pipe[pipe] =
3451 WM0_PIPE_PRIMARY(r->pri_val) |
3452 WM0_PIPE_SPRITE(r->spr_val) |
3453 WM0_PIPE_CURSOR(r->cur_val);
3454 }
3455 }
3456
3457
3458
3459 static struct intel_pipe_wm *
3460 ilk_find_best_result(struct drm_i915_private *dev_priv,
3461 struct intel_pipe_wm *r1,
3462 struct intel_pipe_wm *r2)
3463 {
3464 int level, max_level = ilk_wm_max_level(dev_priv);
3465 int level1 = 0, level2 = 0;
3466
3467 for (level = 1; level <= max_level; level++) {
3468 if (r1->wm[level].enable)
3469 level1 = level;
3470 if (r2->wm[level].enable)
3471 level2 = level;
3472 }
3473
3474 if (level1 == level2) {
3475 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3476 return r2;
3477 else
3478 return r1;
3479 } else if (level1 > level2) {
3480 return r1;
3481 } else {
3482 return r2;
3483 }
3484 }
3485
3486
3487 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3488 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3489 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3490 #define WM_DIRTY_FBC (1 << 24)
3491 #define WM_DIRTY_DDB (1 << 25)
3492
3493 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3494 const struct ilk_wm_values *old,
3495 const struct ilk_wm_values *new)
3496 {
3497 unsigned int dirty = 0;
3498 enum pipe pipe;
3499 int wm_lp;
3500
3501 for_each_pipe(dev_priv, pipe) {
3502 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3503 dirty |= WM_DIRTY_PIPE(pipe);
3504
3505 dirty |= WM_DIRTY_LP_ALL;
3506 }
3507 }
3508
3509 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3510 dirty |= WM_DIRTY_FBC;
3511
3512 dirty |= WM_DIRTY_LP_ALL;
3513 }
3514
3515 if (old->partitioning != new->partitioning) {
3516 dirty |= WM_DIRTY_DDB;
3517
3518 dirty |= WM_DIRTY_LP_ALL;
3519 }
3520
3521
3522 if (dirty & WM_DIRTY_LP_ALL)
3523 return dirty;
3524
3525
3526 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3527 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3528 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3529 break;
3530 }
3531
3532
3533 for (; wm_lp <= 3; wm_lp++)
3534 dirty |= WM_DIRTY_LP(wm_lp);
3535
3536 return dirty;
3537 }
3538
3539 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3540 unsigned int dirty)
3541 {
3542 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3543 bool changed = false;
3544
3545 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
3546 previous->wm_lp[2] &= ~WM_LP_ENABLE;
3547 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
3548 changed = true;
3549 }
3550 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
3551 previous->wm_lp[1] &= ~WM_LP_ENABLE;
3552 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
3553 changed = true;
3554 }
3555 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
3556 previous->wm_lp[0] &= ~WM_LP_ENABLE;
3557 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
3558 changed = true;
3559 }
3560
3561
3562
3563
3564
3565
3566 return changed;
3567 }
3568
3569
3570
3571
3572
3573 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3574 struct ilk_wm_values *results)
3575 {
3576 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3577 unsigned int dirty;
3578 u32 val;
3579
3580 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3581 if (!dirty)
3582 return;
3583
3584 _ilk_disable_lp_wm(dev_priv, dirty);
3585
3586 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3587 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
3588 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3589 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
3590 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3591 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
3592
3593 if (dirty & WM_DIRTY_DDB) {
3594 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3595 val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
3596 if (results->partitioning == INTEL_DDB_PART_1_2)
3597 val &= ~WM_MISC_DATA_PARTITION_5_6;
3598 else
3599 val |= WM_MISC_DATA_PARTITION_5_6;
3600 intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
3601 } else {
3602 val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
3603 if (results->partitioning == INTEL_DDB_PART_1_2)
3604 val &= ~DISP_DATA_PARTITION_5_6;
3605 else
3606 val |= DISP_DATA_PARTITION_5_6;
3607 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
3608 }
3609 }
3610
3611 if (dirty & WM_DIRTY_FBC) {
3612 val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
3613 if (results->enable_fbc_wm)
3614 val &= ~DISP_FBC_WM_DIS;
3615 else
3616 val |= DISP_FBC_WM_DIS;
3617 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
3618 }
3619
3620 if (dirty & WM_DIRTY_LP(1) &&
3621 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3622 intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
3623
3624 if (DISPLAY_VER(dev_priv) >= 7) {
3625 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3626 intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
3627 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3628 intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
3629 }
3630
3631 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3632 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
3633 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3634 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
3635 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3636 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
3637
3638 dev_priv->wm.hw = *results;
3639 }
3640
3641 bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
3642 {
3643 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3644 }
3645
3646 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
3647 {
3648 u8 enabled_slices = 0;
3649 enum dbuf_slice slice;
3650
3651 for_each_dbuf_slice(dev_priv, slice) {
3652 if (intel_uncore_read(&dev_priv->uncore,
3653 DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
3654 enabled_slices |= BIT(slice);
3655 }
3656
3657 return enabled_slices;
3658 }
3659
3660
3661
3662
3663
3664 static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3665 {
3666 return DISPLAY_VER(dev_priv) == 9;
3667 }
3668
3669 static bool
3670 intel_has_sagv(struct drm_i915_private *dev_priv)
3671 {
3672 return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
3673 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3674 }
3675
3676 static u32
3677 intel_sagv_block_time(struct drm_i915_private *dev_priv)
3678 {
3679 if (DISPLAY_VER(dev_priv) >= 12) {
3680 u32 val = 0;
3681 int ret;
3682
3683 ret = snb_pcode_read(&dev_priv->uncore,
3684 GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
3685 &val, NULL);
3686 if (ret) {
3687 drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n");
3688 return 0;
3689 }
3690
3691 return val;
3692 } else if (DISPLAY_VER(dev_priv) == 11) {
3693 return 10;
3694 } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) {
3695 return 30;
3696 } else {
3697 return 0;
3698 }
3699 }
3700
3701 static void intel_sagv_init(struct drm_i915_private *i915)
3702 {
3703 if (!intel_has_sagv(i915))
3704 i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
3705
3706
3707
3708
3709
3710 if (DISPLAY_VER(i915) < 11)
3711 skl_sagv_disable(i915);
3712
3713 drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN);
3714
3715 i915->sagv_block_time_us = intel_sagv_block_time(i915);
3716
3717 drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
3718 str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us);
3719
3720
3721 if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX,
3722 "Excessive SAGV block time %u, ignoring\n",
3723 i915->sagv_block_time_us))
3724 i915->sagv_block_time_us = 0;
3725
3726 if (!intel_has_sagv(i915))
3727 i915->sagv_block_time_us = 0;
3728 }
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741 static void skl_sagv_enable(struct drm_i915_private *dev_priv)
3742 {
3743 int ret;
3744
3745 if (!intel_has_sagv(dev_priv))
3746 return;
3747
3748 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3749 return;
3750
3751 drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
3752 ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
3753 GEN9_SAGV_ENABLE);
3754
3755
3756
3757
3758
3759
3760
3761 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3762 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3763 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3764 return;
3765 } else if (ret < 0) {
3766 drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
3767 return;
3768 }
3769
3770 dev_priv->sagv_status = I915_SAGV_ENABLED;
3771 }
3772
3773 static void skl_sagv_disable(struct drm_i915_private *dev_priv)
3774 {
3775 int ret;
3776
3777 if (!intel_has_sagv(dev_priv))
3778 return;
3779
3780 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3781 return;
3782
3783 drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
3784
3785 ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
3786 GEN9_SAGV_DISABLE,
3787 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3788 1);
3789
3790
3791
3792
3793 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3794 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3795 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3796 return;
3797 } else if (ret < 0) {
3798 drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
3799 return;
3800 }
3801
3802 dev_priv->sagv_status = I915_SAGV_DISABLED;
3803 }
3804
3805 static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
3806 {
3807 struct drm_i915_private *i915 = to_i915(state->base.dev);
3808 const struct intel_bw_state *new_bw_state =
3809 intel_atomic_get_new_bw_state(state);
3810
3811 if (!new_bw_state)
3812 return;
3813
3814 if (!intel_can_enable_sagv(i915, new_bw_state))
3815 skl_sagv_disable(i915);
3816 }
3817
3818 static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
3819 {
3820 struct drm_i915_private *i915 = to_i915(state->base.dev);
3821 const struct intel_bw_state *new_bw_state =
3822 intel_atomic_get_new_bw_state(state);
3823
3824 if (!new_bw_state)
3825 return;
3826
3827 if (intel_can_enable_sagv(i915, new_bw_state))
3828 skl_sagv_enable(i915);
3829 }
3830
3831 static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
3832 {
3833 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3834 const struct intel_bw_state *old_bw_state =
3835 intel_atomic_get_old_bw_state(state);
3836 const struct intel_bw_state *new_bw_state =
3837 intel_atomic_get_new_bw_state(state);
3838 u16 old_mask, new_mask;
3839
3840 if (!new_bw_state)
3841 return;
3842
3843 old_mask = old_bw_state->qgv_points_mask;
3844 new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
3845
3846 if (old_mask == new_mask)
3847 return;
3848
3849 WARN_ON(!new_bw_state->base.changed);
3850
3851 drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
3852 old_mask, new_mask);
3853
3854
3855
3856
3857
3858
3859
3860 icl_pcode_restrict_qgv_points(dev_priv, new_mask);
3861 }
3862
3863 static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
3864 {
3865 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3866 const struct intel_bw_state *old_bw_state =
3867 intel_atomic_get_old_bw_state(state);
3868 const struct intel_bw_state *new_bw_state =
3869 intel_atomic_get_new_bw_state(state);
3870 u16 old_mask, new_mask;
3871
3872 if (!new_bw_state)
3873 return;
3874
3875 old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
3876 new_mask = new_bw_state->qgv_points_mask;
3877
3878 if (old_mask == new_mask)
3879 return;
3880
3881 WARN_ON(!new_bw_state->base.changed);
3882
3883 drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
3884 old_mask, new_mask);
3885
3886
3887
3888
3889
3890
3891
3892 icl_pcode_restrict_qgv_points(dev_priv, new_mask);
3893 }
3894
3895 void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
3896 {
3897 struct drm_i915_private *i915 = to_i915(state->base.dev);
3898
3899
3900
3901
3902
3903
3904
3905
3906 if (!intel_has_sagv(i915))
3907 return;
3908
3909 if (DISPLAY_VER(i915) >= 11)
3910 icl_sagv_pre_plane_update(state);
3911 else
3912 skl_sagv_pre_plane_update(state);
3913 }
3914
3915 void intel_sagv_post_plane_update(struct intel_atomic_state *state)
3916 {
3917 struct drm_i915_private *i915 = to_i915(state->base.dev);
3918
3919
3920
3921
3922
3923
3924
3925
3926 if (!intel_has_sagv(i915))
3927 return;
3928
3929 if (DISPLAY_VER(i915) >= 11)
3930 icl_sagv_post_plane_update(state);
3931 else
3932 skl_sagv_post_plane_update(state);
3933 }
3934
3935 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3936 {
3937 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3938 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3939 enum plane_id plane_id;
3940 int max_level = INT_MAX;
3941
3942 if (!intel_has_sagv(dev_priv))
3943 return false;
3944
3945 if (!crtc_state->hw.active)
3946 return true;
3947
3948 if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
3949 return false;
3950
3951 for_each_plane_id_on_crtc(crtc, plane_id) {
3952 const struct skl_plane_wm *wm =
3953 &crtc_state->wm.skl.optimal.planes[plane_id];
3954 int level;
3955
3956
3957 if (!wm->wm[0].enable)
3958 continue;
3959
3960
3961 for (level = ilk_wm_max_level(dev_priv);
3962 !wm->wm[level].enable; --level)
3963 { }
3964
3965
3966 max_level = min(level, max_level);
3967 }
3968
3969
3970 if (max_level == INT_MAX)
3971 return true;
3972
3973 for_each_plane_id_on_crtc(crtc, plane_id) {
3974 const struct skl_plane_wm *wm =
3975 &crtc_state->wm.skl.optimal.planes[plane_id];
3976
3977
3978
3979
3980
3981 if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
3982 return false;
3983 }
3984
3985 return true;
3986 }
3987
3988 static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3989 {
3990 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3991 enum plane_id plane_id;
3992
3993 if (!crtc_state->hw.active)
3994 return true;
3995
3996 for_each_plane_id_on_crtc(crtc, plane_id) {
3997 const struct skl_plane_wm *wm =
3998 &crtc_state->wm.skl.optimal.planes[plane_id];
3999
4000 if (wm->wm[0].enable && !wm->sagv.wm0.enable)
4001 return false;
4002 }
4003
4004 return true;
4005 }
4006
4007 static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
4008 {
4009 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4010 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4011
4012 if (DISPLAY_VER(dev_priv) >= 12)
4013 return tgl_crtc_can_enable_sagv(crtc_state);
4014 else
4015 return skl_crtc_can_enable_sagv(crtc_state);
4016 }
4017
4018 bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
4019 const struct intel_bw_state *bw_state)
4020 {
4021 if (DISPLAY_VER(dev_priv) < 11 &&
4022 bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
4023 return false;
4024
4025 return bw_state->pipe_sagv_reject == 0;
4026 }
4027
4028 static int intel_compute_sagv_mask(struct intel_atomic_state *state)
4029 {
4030 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4031 int ret;
4032 struct intel_crtc *crtc;
4033 struct intel_crtc_state *new_crtc_state;
4034 struct intel_bw_state *new_bw_state = NULL;
4035 const struct intel_bw_state *old_bw_state = NULL;
4036 int i;
4037
4038 for_each_new_intel_crtc_in_state(state, crtc,
4039 new_crtc_state, i) {
4040 new_bw_state = intel_atomic_get_bw_state(state);
4041 if (IS_ERR(new_bw_state))
4042 return PTR_ERR(new_bw_state);
4043
4044 old_bw_state = intel_atomic_get_old_bw_state(state);
4045
4046 if (intel_crtc_can_enable_sagv(new_crtc_state))
4047 new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
4048 else
4049 new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
4050 }
4051
4052 if (!new_bw_state)
4053 return 0;
4054
4055 new_bw_state->active_pipes =
4056 intel_calc_active_pipes(state, old_bw_state->active_pipes);
4057
4058 if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
4059 ret = intel_atomic_lock_global_state(&new_bw_state->base);
4060 if (ret)
4061 return ret;
4062 }
4063
4064 if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
4065 intel_can_enable_sagv(dev_priv, old_bw_state)) {
4066 ret = intel_atomic_serialize_global_state(&new_bw_state->base);
4067 if (ret)
4068 return ret;
4069 } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
4070 ret = intel_atomic_lock_global_state(&new_bw_state->base);
4071 if (ret)
4072 return ret;
4073 }
4074
4075 for_each_new_intel_crtc_in_state(state, crtc,
4076 new_crtc_state, i) {
4077 struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
4078
4079
4080
4081
4082
4083
4084
4085 pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
4086 DISPLAY_VER(dev_priv) >= 12 &&
4087 intel_can_enable_sagv(dev_priv, new_bw_state);
4088 }
4089
4090 return 0;
4091 }
4092
4093 static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
4094 u16 start, u16 end)
4095 {
4096 entry->start = start;
4097 entry->end = end;
4098
4099 return end;
4100 }
4101
4102 static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
4103 {
4104 return INTEL_INFO(dev_priv)->display.dbuf.size /
4105 hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask);
4106 }
4107
4108 static void
4109 skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
4110 struct skl_ddb_entry *ddb)
4111 {
4112 int slice_size = intel_dbuf_slice_size(dev_priv);
4113
4114 if (!slice_mask) {
4115 ddb->start = 0;
4116 ddb->end = 0;
4117 return;
4118 }
4119
4120 ddb->start = (ffs(slice_mask) - 1) * slice_size;
4121 ddb->end = fls(slice_mask) * slice_size;
4122
4123 WARN_ON(ddb->start >= ddb->end);
4124 WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size);
4125 }
4126
4127 static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
4128 {
4129 struct skl_ddb_entry ddb;
4130
4131 if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
4132 slice_mask = BIT(DBUF_S1);
4133 else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
4134 slice_mask = BIT(DBUF_S3);
4135
4136 skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
4137
4138 return ddb.start;
4139 }
4140
4141 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
4142 const struct skl_ddb_entry *entry)
4143 {
4144 int slice_size = intel_dbuf_slice_size(dev_priv);
4145 enum dbuf_slice start_slice, end_slice;
4146 u8 slice_mask = 0;
4147
4148 if (!skl_ddb_entry_size(entry))
4149 return 0;
4150
4151 start_slice = entry->start / slice_size;
4152 end_slice = (entry->end - 1) / slice_size;
4153
4154
4155
4156
4157
4158 while (start_slice <= end_slice) {
4159 slice_mask |= BIT(start_slice);
4160 start_slice++;
4161 }
4162
4163 return slice_mask;
4164 }
4165
4166 static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
4167 {
4168 const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4169 int hdisplay, vdisplay;
4170
4171 if (!crtc_state->hw.active)
4172 return 0;
4173
4174
4175
4176
4177
4178
4179 drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
4180
4181 return hdisplay;
4182 }
4183
4184 static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
4185 enum pipe for_pipe,
4186 unsigned int *weight_start,
4187 unsigned int *weight_end,
4188 unsigned int *weight_total)
4189 {
4190 struct drm_i915_private *dev_priv =
4191 to_i915(dbuf_state->base.state->base.dev);
4192 enum pipe pipe;
4193
4194 *weight_start = 0;
4195 *weight_end = 0;
4196 *weight_total = 0;
4197
4198 for_each_pipe(dev_priv, pipe) {
4199 int weight = dbuf_state->weight[pipe];
4200
4201
4202
4203
4204
4205
4206
4207
4208 if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
4209 continue;
4210
4211 *weight_total += weight;
4212 if (pipe < for_pipe) {
4213 *weight_start += weight;
4214 *weight_end += weight;
4215 } else if (pipe == for_pipe) {
4216 *weight_end += weight;
4217 }
4218 }
4219 }
4220
4221 static int
4222 skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
4223 {
4224 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4225 unsigned int weight_total, weight_start, weight_end;
4226 const struct intel_dbuf_state *old_dbuf_state =
4227 intel_atomic_get_old_dbuf_state(state);
4228 struct intel_dbuf_state *new_dbuf_state =
4229 intel_atomic_get_new_dbuf_state(state);
4230 struct intel_crtc_state *crtc_state;
4231 struct skl_ddb_entry ddb_slices;
4232 enum pipe pipe = crtc->pipe;
4233 unsigned int mbus_offset = 0;
4234 u32 ddb_range_size;
4235 u32 dbuf_slice_mask;
4236 u32 start, end;
4237 int ret;
4238
4239 if (new_dbuf_state->weight[pipe] == 0) {
4240 skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
4241 goto out;
4242 }
4243
4244 dbuf_slice_mask = new_dbuf_state->slices[pipe];
4245
4246 skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
4247 mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
4248 ddb_range_size = skl_ddb_entry_size(&ddb_slices);
4249
4250 intel_crtc_dbuf_weights(new_dbuf_state, pipe,
4251 &weight_start, &weight_end, &weight_total);
4252
4253 start = ddb_range_size * weight_start / weight_total;
4254 end = ddb_range_size * weight_end / weight_total;
4255
4256 skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
4257 ddb_slices.start - mbus_offset + start,
4258 ddb_slices.start - mbus_offset + end);
4259
4260 out:
4261 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
4262 skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
4263 &new_dbuf_state->ddb[pipe]))
4264 return 0;
4265
4266 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
4267 if (ret)
4268 return ret;
4269
4270 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
4271 if (IS_ERR(crtc_state))
4272 return PTR_ERR(crtc_state);
4273
4274
4275
4276
4277
4278 crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
4279 crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
4280
4281 drm_dbg_kms(&dev_priv->drm,
4282 "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
4283 crtc->base.base.id, crtc->base.name,
4284 old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
4285 old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
4286 new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
4287 old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
4288
4289 return 0;
4290 }
4291
4292 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4293 int width, const struct drm_format_info *format,
4294 u64 modifier, unsigned int rotation,
4295 u32 plane_pixel_rate, struct skl_wm_params *wp,
4296 int color_plane);
4297
4298 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
4299 struct intel_plane *plane,
4300 int level,
4301 unsigned int latency,
4302 const struct skl_wm_params *wp,
4303 const struct skl_wm_level *result_prev,
4304 struct skl_wm_level *result );
4305
4306 static unsigned int
4307 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
4308 int num_active)
4309 {
4310 struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
4311 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4312 int level, max_level = ilk_wm_max_level(dev_priv);
4313 struct skl_wm_level wm = {};
4314 int ret, min_ddb_alloc = 0;
4315 struct skl_wm_params wp;
4316
4317 ret = skl_compute_wm_params(crtc_state, 256,
4318 drm_format_info(DRM_FORMAT_ARGB8888),
4319 DRM_FORMAT_MOD_LINEAR,
4320 DRM_MODE_ROTATE_0,
4321 crtc_state->pixel_rate, &wp, 0);
4322 drm_WARN_ON(&dev_priv->drm, ret);
4323
4324 for (level = 0; level <= max_level; level++) {
4325 unsigned int latency = dev_priv->wm.skl_latency[level];
4326
4327 skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
4328 if (wm.min_ddb_alloc == U16_MAX)
4329 break;
4330
4331 min_ddb_alloc = wm.min_ddb_alloc;
4332 }
4333
4334 return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
4335 }
4336
4337 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
4338 {
4339 skl_ddb_entry_init(entry,
4340 REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
4341 REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
4342 if (entry->end)
4343 entry->end++;
4344 }
4345
4346 static void
4347 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
4348 const enum pipe pipe,
4349 const enum plane_id plane_id,
4350 struct skl_ddb_entry *ddb,
4351 struct skl_ddb_entry *ddb_y)
4352 {
4353 u32 val;
4354
4355
4356 if (plane_id == PLANE_CURSOR) {
4357 val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
4358 skl_ddb_entry_init_from_hw(ddb, val);
4359 return;
4360 }
4361
4362 val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
4363 skl_ddb_entry_init_from_hw(ddb, val);
4364
4365 if (DISPLAY_VER(dev_priv) >= 11)
4366 return;
4367
4368 val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
4369 skl_ddb_entry_init_from_hw(ddb_y, val);
4370 }
4371
4372 static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
4373 struct skl_ddb_entry *ddb,
4374 struct skl_ddb_entry *ddb_y)
4375 {
4376 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4377 enum intel_display_power_domain power_domain;
4378 enum pipe pipe = crtc->pipe;
4379 intel_wakeref_t wakeref;
4380 enum plane_id plane_id;
4381
4382 power_domain = POWER_DOMAIN_PIPE(pipe);
4383 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4384 if (!wakeref)
4385 return;
4386
4387 for_each_plane_id_on_crtc(crtc, plane_id)
4388 skl_ddb_get_hw_plane_state(dev_priv, pipe,
4389 plane_id,
4390 &ddb[plane_id],
4391 &ddb_y[plane_id]);
4392
4393 intel_display_power_put(dev_priv, power_domain, wakeref);
4394 }
4395
4396 struct dbuf_slice_conf_entry {
4397 u8 active_pipes;
4398 u8 dbuf_mask[I915_MAX_PIPES];
4399 bool join_mbus;
4400 };
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412 static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
4413
4414 {
4415 {
4416 .active_pipes = BIT(PIPE_A),
4417 .dbuf_mask = {
4418 [PIPE_A] = BIT(DBUF_S1),
4419 },
4420 },
4421 {
4422 .active_pipes = BIT(PIPE_B),
4423 .dbuf_mask = {
4424 [PIPE_B] = BIT(DBUF_S1),
4425 },
4426 },
4427 {
4428 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4429 .dbuf_mask = {
4430 [PIPE_A] = BIT(DBUF_S1),
4431 [PIPE_B] = BIT(DBUF_S2),
4432 },
4433 },
4434 {
4435 .active_pipes = BIT(PIPE_C),
4436 .dbuf_mask = {
4437 [PIPE_C] = BIT(DBUF_S2),
4438 },
4439 },
4440 {
4441 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4442 .dbuf_mask = {
4443 [PIPE_A] = BIT(DBUF_S1),
4444 [PIPE_C] = BIT(DBUF_S2),
4445 },
4446 },
4447 {
4448 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4449 .dbuf_mask = {
4450 [PIPE_B] = BIT(DBUF_S1),
4451 [PIPE_C] = BIT(DBUF_S2),
4452 },
4453 },
4454 {
4455 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4456 .dbuf_mask = {
4457 [PIPE_A] = BIT(DBUF_S1),
4458 [PIPE_B] = BIT(DBUF_S1),
4459 [PIPE_C] = BIT(DBUF_S2),
4460 },
4461 },
4462 {}
4463 };
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475 static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
4476
4477 {
4478 {
4479 .active_pipes = BIT(PIPE_A),
4480 .dbuf_mask = {
4481 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4482 },
4483 },
4484 {
4485 .active_pipes = BIT(PIPE_B),
4486 .dbuf_mask = {
4487 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4488 },
4489 },
4490 {
4491 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4492 .dbuf_mask = {
4493 [PIPE_A] = BIT(DBUF_S2),
4494 [PIPE_B] = BIT(DBUF_S1),
4495 },
4496 },
4497 {
4498 .active_pipes = BIT(PIPE_C),
4499 .dbuf_mask = {
4500 [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
4501 },
4502 },
4503 {
4504 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4505 .dbuf_mask = {
4506 [PIPE_A] = BIT(DBUF_S1),
4507 [PIPE_C] = BIT(DBUF_S2),
4508 },
4509 },
4510 {
4511 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4512 .dbuf_mask = {
4513 [PIPE_B] = BIT(DBUF_S1),
4514 [PIPE_C] = BIT(DBUF_S2),
4515 },
4516 },
4517 {
4518 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4519 .dbuf_mask = {
4520 [PIPE_A] = BIT(DBUF_S1),
4521 [PIPE_B] = BIT(DBUF_S1),
4522 [PIPE_C] = BIT(DBUF_S2),
4523 },
4524 },
4525 {
4526 .active_pipes = BIT(PIPE_D),
4527 .dbuf_mask = {
4528 [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
4529 },
4530 },
4531 {
4532 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4533 .dbuf_mask = {
4534 [PIPE_A] = BIT(DBUF_S1),
4535 [PIPE_D] = BIT(DBUF_S2),
4536 },
4537 },
4538 {
4539 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4540 .dbuf_mask = {
4541 [PIPE_B] = BIT(DBUF_S1),
4542 [PIPE_D] = BIT(DBUF_S2),
4543 },
4544 },
4545 {
4546 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4547 .dbuf_mask = {
4548 [PIPE_A] = BIT(DBUF_S1),
4549 [PIPE_B] = BIT(DBUF_S1),
4550 [PIPE_D] = BIT(DBUF_S2),
4551 },
4552 },
4553 {
4554 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4555 .dbuf_mask = {
4556 [PIPE_C] = BIT(DBUF_S1),
4557 [PIPE_D] = BIT(DBUF_S2),
4558 },
4559 },
4560 {
4561 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4562 .dbuf_mask = {
4563 [PIPE_A] = BIT(DBUF_S1),
4564 [PIPE_C] = BIT(DBUF_S2),
4565 [PIPE_D] = BIT(DBUF_S2),
4566 },
4567 },
4568 {
4569 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4570 .dbuf_mask = {
4571 [PIPE_B] = BIT(DBUF_S1),
4572 [PIPE_C] = BIT(DBUF_S2),
4573 [PIPE_D] = BIT(DBUF_S2),
4574 },
4575 },
4576 {
4577 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4578 .dbuf_mask = {
4579 [PIPE_A] = BIT(DBUF_S1),
4580 [PIPE_B] = BIT(DBUF_S1),
4581 [PIPE_C] = BIT(DBUF_S2),
4582 [PIPE_D] = BIT(DBUF_S2),
4583 },
4584 },
4585 {}
4586 };
4587
4588 static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
4589 {
4590 .active_pipes = BIT(PIPE_A),
4591 .dbuf_mask = {
4592 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4593 },
4594 },
4595 {
4596 .active_pipes = BIT(PIPE_B),
4597 .dbuf_mask = {
4598 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4599 },
4600 },
4601 {
4602 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4603 .dbuf_mask = {
4604 [PIPE_A] = BIT(DBUF_S1),
4605 [PIPE_B] = BIT(DBUF_S2),
4606 },
4607 },
4608 {
4609 .active_pipes = BIT(PIPE_C),
4610 .dbuf_mask = {
4611 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4612 },
4613 },
4614 {
4615 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4616 .dbuf_mask = {
4617 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4618 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4619 },
4620 },
4621 {
4622 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4623 .dbuf_mask = {
4624 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4625 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4626 },
4627 },
4628 {
4629 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4630 .dbuf_mask = {
4631 [PIPE_A] = BIT(DBUF_S1),
4632 [PIPE_B] = BIT(DBUF_S2),
4633 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4634 },
4635 },
4636 {
4637 .active_pipes = BIT(PIPE_D),
4638 .dbuf_mask = {
4639 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4640 },
4641 },
4642 {
4643 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4644 .dbuf_mask = {
4645 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4646 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4647 },
4648 },
4649 {
4650 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4651 .dbuf_mask = {
4652 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4653 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4654 },
4655 },
4656 {
4657 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4658 .dbuf_mask = {
4659 [PIPE_A] = BIT(DBUF_S1),
4660 [PIPE_B] = BIT(DBUF_S2),
4661 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4662 },
4663 },
4664 {
4665 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4666 .dbuf_mask = {
4667 [PIPE_C] = BIT(DBUF_S3),
4668 [PIPE_D] = BIT(DBUF_S4),
4669 },
4670 },
4671 {
4672 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4673 .dbuf_mask = {
4674 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4675 [PIPE_C] = BIT(DBUF_S3),
4676 [PIPE_D] = BIT(DBUF_S4),
4677 },
4678 },
4679 {
4680 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4681 .dbuf_mask = {
4682 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4683 [PIPE_C] = BIT(DBUF_S3),
4684 [PIPE_D] = BIT(DBUF_S4),
4685 },
4686 },
4687 {
4688 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4689 .dbuf_mask = {
4690 [PIPE_A] = BIT(DBUF_S1),
4691 [PIPE_B] = BIT(DBUF_S2),
4692 [PIPE_C] = BIT(DBUF_S3),
4693 [PIPE_D] = BIT(DBUF_S4),
4694 },
4695 },
4696 {}
4697 };
4698
4699 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
4700
4701
4702
4703
4704 {
4705 .active_pipes = BIT(PIPE_A),
4706 .dbuf_mask = {
4707 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
4708 },
4709 .join_mbus = true,
4710 },
4711 {
4712 .active_pipes = BIT(PIPE_B),
4713 .dbuf_mask = {
4714 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
4715 },
4716 .join_mbus = true,
4717 },
4718 {
4719 .active_pipes = BIT(PIPE_A),
4720 .dbuf_mask = {
4721 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4722 },
4723 .join_mbus = false,
4724 },
4725 {
4726 .active_pipes = BIT(PIPE_B),
4727 .dbuf_mask = {
4728 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4729 },
4730 .join_mbus = false,
4731 },
4732 {
4733 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4734 .dbuf_mask = {
4735 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4736 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4737 },
4738 },
4739 {
4740 .active_pipes = BIT(PIPE_C),
4741 .dbuf_mask = {
4742 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4743 },
4744 },
4745 {
4746 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4747 .dbuf_mask = {
4748 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4749 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4750 },
4751 },
4752 {
4753 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4754 .dbuf_mask = {
4755 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4756 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4757 },
4758 },
4759 {
4760 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4761 .dbuf_mask = {
4762 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4763 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4764 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4765 },
4766 },
4767 {
4768 .active_pipes = BIT(PIPE_D),
4769 .dbuf_mask = {
4770 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4771 },
4772 },
4773 {
4774 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4775 .dbuf_mask = {
4776 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4777 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4778 },
4779 },
4780 {
4781 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4782 .dbuf_mask = {
4783 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4784 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4785 },
4786 },
4787 {
4788 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4789 .dbuf_mask = {
4790 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4791 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4792 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4793 },
4794 },
4795 {
4796 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4797 .dbuf_mask = {
4798 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4799 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4800 },
4801 },
4802 {
4803 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4804 .dbuf_mask = {
4805 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4806 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4807 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4808 },
4809 },
4810 {
4811 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4812 .dbuf_mask = {
4813 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4814 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4815 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4816 },
4817 },
4818 {
4819 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4820 .dbuf_mask = {
4821 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4822 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4823 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4824 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4825 },
4826 },
4827 {}
4828
4829 };
4830
4831 static bool check_mbus_joined(u8 active_pipes,
4832 const struct dbuf_slice_conf_entry *dbuf_slices)
4833 {
4834 int i;
4835
4836 for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
4837 if (dbuf_slices[i].active_pipes == active_pipes)
4838 return dbuf_slices[i].join_mbus;
4839 }
4840 return false;
4841 }
4842
4843 static bool adlp_check_mbus_joined(u8 active_pipes)
4844 {
4845 return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
4846 }
4847
4848 static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
4849 const struct dbuf_slice_conf_entry *dbuf_slices)
4850 {
4851 int i;
4852
4853 for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
4854 if (dbuf_slices[i].active_pipes == active_pipes &&
4855 dbuf_slices[i].join_mbus == join_mbus)
4856 return dbuf_slices[i].dbuf_mask[pipe];
4857 }
4858 return 0;
4859 }
4860
4861
4862
4863
4864
4865
4866 static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4867 {
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4881 icl_allowed_dbufs);
4882 }
4883
4884 static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4885 {
4886 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4887 tgl_allowed_dbufs);
4888 }
4889
4890 static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4891 {
4892 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4893 adlp_allowed_dbufs);
4894 }
4895
4896 static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4897 {
4898 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4899 dg2_allowed_dbufs);
4900 }
4901
4902 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
4903 {
4904 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4905 enum pipe pipe = crtc->pipe;
4906
4907 if (IS_DG2(dev_priv))
4908 return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4909 else if (IS_ALDERLAKE_P(dev_priv))
4910 return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4911 else if (DISPLAY_VER(dev_priv) == 12)
4912 return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4913 else if (DISPLAY_VER(dev_priv) == 11)
4914 return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4915
4916
4917
4918
4919 return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
4920 }
4921
4922 static bool
4923 use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
4924 struct intel_plane *plane)
4925 {
4926 struct drm_i915_private *i915 = to_i915(plane->base.dev);
4927
4928 return DISPLAY_VER(i915) >= 13 &&
4929 crtc_state->uapi.async_flip &&
4930 plane->async_flip;
4931 }
4932
4933 static u64
4934 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
4935 {
4936 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4937 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4938 enum plane_id plane_id;
4939 u64 data_rate = 0;
4940
4941 for_each_plane_id_on_crtc(crtc, plane_id) {
4942 if (plane_id == PLANE_CURSOR)
4943 continue;
4944
4945 data_rate += crtc_state->rel_data_rate[plane_id];
4946
4947 if (DISPLAY_VER(i915) < 11)
4948 data_rate += crtc_state->rel_data_rate_y[plane_id];
4949 }
4950
4951 return data_rate;
4952 }
4953
4954 static const struct skl_wm_level *
4955 skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
4956 enum plane_id plane_id,
4957 int level)
4958 {
4959 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
4960
4961 if (level == 0 && pipe_wm->use_sagv_wm)
4962 return &wm->sagv.wm0;
4963
4964 return &wm->wm[level];
4965 }
4966
4967 static const struct skl_wm_level *
4968 skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
4969 enum plane_id plane_id)
4970 {
4971 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
4972
4973 if (pipe_wm->use_sagv_wm)
4974 return &wm->sagv.trans_wm;
4975
4976 return &wm->trans_wm;
4977 }
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991 static void
4992 skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
4993 {
4994 if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
4995 memset(wm, 0, sizeof(*wm));
4996 }
4997
4998 static void
4999 skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
5000 const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
5001 {
5002 if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
5003 uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
5004 memset(wm, 0, sizeof(*wm));
5005 memset(uv_wm, 0, sizeof(*uv_wm));
5006 }
5007 }
5008
5009 static bool icl_need_wm1_wa(struct drm_i915_private *i915,
5010 enum plane_id plane_id)
5011 {
5012
5013
5014
5015
5016
5017 return DISPLAY_VER(i915) == 11 ||
5018 (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
5019 }
5020
5021 struct skl_plane_ddb_iter {
5022 u64 data_rate;
5023 u16 start, size;
5024 };
5025
5026 static void
5027 skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
5028 struct skl_ddb_entry *ddb,
5029 const struct skl_wm_level *wm,
5030 u64 data_rate)
5031 {
5032 u16 size, extra = 0;
5033
5034 if (data_rate) {
5035 extra = min_t(u16, iter->size,
5036 DIV64_U64_ROUND_UP(iter->size * data_rate,
5037 iter->data_rate));
5038 iter->size -= extra;
5039 iter->data_rate -= data_rate;
5040 }
5041
5042
5043
5044
5045
5046
5047 size = wm->min_ddb_alloc + extra;
5048 if (size)
5049 iter->start = skl_ddb_entry_init(ddb, iter->start,
5050 iter->start + size);
5051 }
5052
5053 static int
5054 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
5055 struct intel_crtc *crtc)
5056 {
5057 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5058 struct intel_crtc_state *crtc_state =
5059 intel_atomic_get_new_crtc_state(state, crtc);
5060 const struct intel_dbuf_state *dbuf_state =
5061 intel_atomic_get_new_dbuf_state(state);
5062 const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
5063 int num_active = hweight8(dbuf_state->active_pipes);
5064 struct skl_plane_ddb_iter iter;
5065 enum plane_id plane_id;
5066 u16 cursor_size;
5067 u32 blocks;
5068 int level;
5069
5070
5071 memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
5072 memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
5073
5074 if (!crtc_state->hw.active)
5075 return 0;
5076
5077 iter.start = alloc->start;
5078 iter.size = skl_ddb_entry_size(alloc);
5079 if (iter.size == 0)
5080 return 0;
5081
5082
5083 cursor_size = skl_cursor_allocation(crtc_state, num_active);
5084 iter.size -= cursor_size;
5085 skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
5086 alloc->end - cursor_size, alloc->end);
5087
5088 iter.data_rate = skl_total_relative_data_rate(crtc_state);
5089
5090
5091
5092
5093
5094 for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
5095 blocks = 0;
5096 for_each_plane_id_on_crtc(crtc, plane_id) {
5097 const struct skl_plane_wm *wm =
5098 &crtc_state->wm.skl.optimal.planes[plane_id];
5099
5100 if (plane_id == PLANE_CURSOR) {
5101 const struct skl_ddb_entry *ddb =
5102 &crtc_state->wm.skl.plane_ddb[plane_id];
5103
5104 if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
5105 drm_WARN_ON(&dev_priv->drm,
5106 wm->wm[level].min_ddb_alloc != U16_MAX);
5107 blocks = U32_MAX;
5108 break;
5109 }
5110 continue;
5111 }
5112
5113 blocks += wm->wm[level].min_ddb_alloc;
5114 blocks += wm->uv_wm[level].min_ddb_alloc;
5115 }
5116
5117 if (blocks <= iter.size) {
5118 iter.size -= blocks;
5119 break;
5120 }
5121 }
5122
5123 if (level < 0) {
5124 drm_dbg_kms(&dev_priv->drm,
5125 "Requested display configuration exceeds system DDB limitations");
5126 drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
5127 blocks, iter.size);
5128 return -EINVAL;
5129 }
5130
5131
5132 if (iter.data_rate == 0)
5133 iter.size = 0;
5134
5135
5136
5137
5138
5139
5140 for_each_plane_id_on_crtc(crtc, plane_id) {
5141 struct skl_ddb_entry *ddb =
5142 &crtc_state->wm.skl.plane_ddb[plane_id];
5143 struct skl_ddb_entry *ddb_y =
5144 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5145 const struct skl_plane_wm *wm =
5146 &crtc_state->wm.skl.optimal.planes[plane_id];
5147
5148 if (plane_id == PLANE_CURSOR)
5149 continue;
5150
5151 if (DISPLAY_VER(dev_priv) < 11 &&
5152 crtc_state->nv12_planes & BIT(plane_id)) {
5153 skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
5154 crtc_state->rel_data_rate_y[plane_id]);
5155 skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
5156 crtc_state->rel_data_rate[plane_id]);
5157 } else {
5158 skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
5159 crtc_state->rel_data_rate[plane_id]);
5160 }
5161 }
5162 drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
5163
5164
5165
5166
5167
5168
5169
5170 for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
5171 for_each_plane_id_on_crtc(crtc, plane_id) {
5172 const struct skl_ddb_entry *ddb =
5173 &crtc_state->wm.skl.plane_ddb[plane_id];
5174 const struct skl_ddb_entry *ddb_y =
5175 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5176 struct skl_plane_wm *wm =
5177 &crtc_state->wm.skl.optimal.planes[plane_id];
5178
5179 if (DISPLAY_VER(dev_priv) < 11 &&
5180 crtc_state->nv12_planes & BIT(plane_id))
5181 skl_check_nv12_wm_level(&wm->wm[level],
5182 &wm->uv_wm[level],
5183 ddb_y, ddb);
5184 else
5185 skl_check_wm_level(&wm->wm[level], ddb);
5186
5187 if (icl_need_wm1_wa(dev_priv, plane_id) &&
5188 level == 1 && wm->wm[0].enable) {
5189 wm->wm[level].blocks = wm->wm[0].blocks;
5190 wm->wm[level].lines = wm->wm[0].lines;
5191 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
5192 }
5193 }
5194 }
5195
5196
5197
5198
5199
5200 for_each_plane_id_on_crtc(crtc, plane_id) {
5201 const struct skl_ddb_entry *ddb =
5202 &crtc_state->wm.skl.plane_ddb[plane_id];
5203 const struct skl_ddb_entry *ddb_y =
5204 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5205 struct skl_plane_wm *wm =
5206 &crtc_state->wm.skl.optimal.planes[plane_id];
5207
5208 if (DISPLAY_VER(dev_priv) < 11 &&
5209 crtc_state->nv12_planes & BIT(plane_id)) {
5210 skl_check_wm_level(&wm->trans_wm, ddb_y);
5211 } else {
5212 WARN_ON(skl_ddb_entry_size(ddb_y));
5213
5214 skl_check_wm_level(&wm->trans_wm, ddb);
5215 }
5216
5217 skl_check_wm_level(&wm->sagv.wm0, ddb);
5218 skl_check_wm_level(&wm->sagv.trans_wm, ddb);
5219 }
5220
5221 return 0;
5222 }
5223
5224
5225
5226
5227
5228
5229
5230 static uint_fixed_16_16_t
5231 skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
5232 u8 cpp, u32 latency, u32 dbuf_block_size)
5233 {
5234 u32 wm_intermediate_val;
5235 uint_fixed_16_16_t ret;
5236
5237 if (latency == 0)
5238 return FP_16_16_MAX;
5239
5240 wm_intermediate_val = latency * pixel_rate * cpp;
5241 ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
5242
5243 if (DISPLAY_VER(dev_priv) >= 10)
5244 ret = add_fixed16_u32(ret, 1);
5245
5246 return ret;
5247 }
5248
5249 static uint_fixed_16_16_t
5250 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
5251 uint_fixed_16_16_t plane_blocks_per_line)
5252 {
5253 u32 wm_intermediate_val;
5254 uint_fixed_16_16_t ret;
5255
5256 if (latency == 0)
5257 return FP_16_16_MAX;
5258
5259 wm_intermediate_val = latency * pixel_rate;
5260 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
5261 pipe_htotal * 1000);
5262 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
5263 return ret;
5264 }
5265
5266 static uint_fixed_16_16_t
5267 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
5268 {
5269 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5270 u32 pixel_rate;
5271 u32 crtc_htotal;
5272 uint_fixed_16_16_t linetime_us;
5273
5274 if (!crtc_state->hw.active)
5275 return u32_to_fixed16(0);
5276
5277 pixel_rate = crtc_state->pixel_rate;
5278
5279 if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
5280 return u32_to_fixed16(0);
5281
5282 crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
5283 linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
5284
5285 return linetime_us;
5286 }
5287
5288 static int
5289 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
5290 int width, const struct drm_format_info *format,
5291 u64 modifier, unsigned int rotation,
5292 u32 plane_pixel_rate, struct skl_wm_params *wp,
5293 int color_plane)
5294 {
5295 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5296 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5297 u32 interm_pbpl;
5298
5299
5300 if (color_plane == 1 &&
5301 !intel_format_info_is_yuv_semiplanar(format, modifier)) {
5302 drm_dbg_kms(&dev_priv->drm,
5303 "Non planar format have single plane\n");
5304 return -EINVAL;
5305 }
5306
5307 wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
5308 modifier == I915_FORMAT_MOD_4_TILED ||
5309 modifier == I915_FORMAT_MOD_Yf_TILED ||
5310 modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
5311 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
5312 wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
5313 wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
5314 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
5315 wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
5316
5317 wp->width = width;
5318 if (color_plane == 1 && wp->is_planar)
5319 wp->width /= 2;
5320
5321 wp->cpp = format->cpp[color_plane];
5322 wp->plane_pixel_rate = plane_pixel_rate;
5323
5324 if (DISPLAY_VER(dev_priv) >= 11 &&
5325 modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
5326 wp->dbuf_block_size = 256;
5327 else
5328 wp->dbuf_block_size = 512;
5329
5330 if (drm_rotation_90_or_270(rotation)) {
5331 switch (wp->cpp) {
5332 case 1:
5333 wp->y_min_scanlines = 16;
5334 break;
5335 case 2:
5336 wp->y_min_scanlines = 8;
5337 break;
5338 case 4:
5339 wp->y_min_scanlines = 4;
5340 break;
5341 default:
5342 MISSING_CASE(wp->cpp);
5343 return -EINVAL;
5344 }
5345 } else {
5346 wp->y_min_scanlines = 4;
5347 }
5348
5349 if (skl_needs_memory_bw_wa(dev_priv))
5350 wp->y_min_scanlines *= 2;
5351
5352 wp->plane_bytes_per_line = wp->width * wp->cpp;
5353 if (wp->y_tiled) {
5354 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
5355 wp->y_min_scanlines,
5356 wp->dbuf_block_size);
5357
5358 if (DISPLAY_VER(dev_priv) >= 10)
5359 interm_pbpl++;
5360
5361 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
5362 wp->y_min_scanlines);
5363 } else {
5364 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
5365 wp->dbuf_block_size);
5366
5367 if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10)
5368 interm_pbpl++;
5369
5370 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
5371 }
5372
5373 wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
5374 wp->plane_blocks_per_line);
5375
5376 wp->linetime_us = fixed16_to_u32_round_up(
5377 intel_get_linetime_us(crtc_state));
5378
5379 return 0;
5380 }
5381
5382 static int
5383 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
5384 const struct intel_plane_state *plane_state,
5385 struct skl_wm_params *wp, int color_plane)
5386 {
5387 const struct drm_framebuffer *fb = plane_state->hw.fb;
5388 int width;
5389
5390
5391
5392
5393
5394
5395 width = drm_rect_width(&plane_state->uapi.src) >> 16;
5396
5397 return skl_compute_wm_params(crtc_state, width,
5398 fb->format, fb->modifier,
5399 plane_state->hw.rotation,
5400 intel_plane_pixel_rate(crtc_state, plane_state),
5401 wp, color_plane);
5402 }
5403
5404 static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
5405 {
5406 if (DISPLAY_VER(dev_priv) >= 10)
5407 return true;
5408
5409
5410 return level > 0;
5411 }
5412
5413 static int skl_wm_max_lines(struct drm_i915_private *dev_priv)
5414 {
5415 if (DISPLAY_VER(dev_priv) >= 13)
5416 return 255;
5417 else
5418 return 31;
5419 }
5420
5421 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
5422 struct intel_plane *plane,
5423 int level,
5424 unsigned int latency,
5425 const struct skl_wm_params *wp,
5426 const struct skl_wm_level *result_prev,
5427 struct skl_wm_level *result )
5428 {
5429 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5430 uint_fixed_16_16_t method1, method2;
5431 uint_fixed_16_16_t selected_result;
5432 u32 blocks, lines, min_ddb_alloc = 0;
5433
5434 if (latency == 0 ||
5435 (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
5436
5437 result->min_ddb_alloc = U16_MAX;
5438 return;
5439 }
5440
5441
5442
5443
5444
5445 if ((IS_KABYLAKE(dev_priv) ||
5446 IS_COFFEELAKE(dev_priv) ||
5447 IS_COMETLAKE(dev_priv)) &&
5448 dev_priv->ipc_enabled)
5449 latency += 4;
5450
5451 if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
5452 latency += 15;
5453
5454 method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
5455 wp->cpp, latency, wp->dbuf_block_size);
5456 method2 = skl_wm_method2(wp->plane_pixel_rate,
5457 crtc_state->hw.pipe_mode.crtc_htotal,
5458 latency,
5459 wp->plane_blocks_per_line);
5460
5461 if (wp->y_tiled) {
5462 selected_result = max_fixed16(method2, wp->y_tile_minimum);
5463 } else {
5464 if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
5465 wp->dbuf_block_size < 1) &&
5466 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
5467 selected_result = method2;
5468 } else if (latency >= wp->linetime_us) {
5469 if (DISPLAY_VER(dev_priv) == 9)
5470 selected_result = min_fixed16(method1, method2);
5471 else
5472 selected_result = method2;
5473 } else {
5474 selected_result = method1;
5475 }
5476 }
5477
5478 blocks = fixed16_to_u32_round_up(selected_result) + 1;
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495 if (skl_wm_has_lines(dev_priv, level))
5496 blocks = max(blocks,
5497 fixed16_to_u32_round_up(wp->plane_blocks_per_line));
5498 lines = div_round_up_fixed16(selected_result,
5499 wp->plane_blocks_per_line);
5500
5501 if (DISPLAY_VER(dev_priv) == 9) {
5502
5503 if (level == 0 && wp->rc_surface)
5504 blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
5505
5506
5507 if (level >= 1 && level <= 7) {
5508 if (wp->y_tiled) {
5509 blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
5510 lines += wp->y_min_scanlines;
5511 } else {
5512 blocks++;
5513 }
5514
5515
5516
5517
5518
5519
5520
5521 if (result_prev->blocks > blocks)
5522 blocks = result_prev->blocks;
5523 }
5524 }
5525
5526 if (DISPLAY_VER(dev_priv) >= 11) {
5527 if (wp->y_tiled) {
5528 int extra_lines;
5529
5530 if (lines % wp->y_min_scanlines == 0)
5531 extra_lines = wp->y_min_scanlines;
5532 else
5533 extra_lines = wp->y_min_scanlines * 2 -
5534 lines % wp->y_min_scanlines;
5535
5536 min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
5537 wp->plane_blocks_per_line);
5538 } else {
5539 min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
5540 }
5541 }
5542
5543 if (!skl_wm_has_lines(dev_priv, level))
5544 lines = 0;
5545
5546 if (lines > skl_wm_max_lines(dev_priv)) {
5547
5548 result->min_ddb_alloc = U16_MAX;
5549 return;
5550 }
5551
5552
5553
5554
5555
5556
5557
5558 result->blocks = blocks;
5559 result->lines = lines;
5560
5561 result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
5562 result->enable = true;
5563
5564 if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us)
5565 result->can_sagv = latency >= dev_priv->sagv_block_time_us;
5566 }
5567
5568 static void
5569 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
5570 struct intel_plane *plane,
5571 const struct skl_wm_params *wm_params,
5572 struct skl_wm_level *levels)
5573 {
5574 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5575 int level, max_level = ilk_wm_max_level(dev_priv);
5576 struct skl_wm_level *result_prev = &levels[0];
5577
5578 for (level = 0; level <= max_level; level++) {
5579 struct skl_wm_level *result = &levels[level];
5580 unsigned int latency = dev_priv->wm.skl_latency[level];
5581
5582 skl_compute_plane_wm(crtc_state, plane, level, latency,
5583 wm_params, result_prev, result);
5584
5585 result_prev = result;
5586 }
5587 }
5588
5589 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
5590 struct intel_plane *plane,
5591 const struct skl_wm_params *wm_params,
5592 struct skl_plane_wm *plane_wm)
5593 {
5594 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5595 struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
5596 struct skl_wm_level *levels = plane_wm->wm;
5597 unsigned int latency = 0;
5598
5599 if (dev_priv->sagv_block_time_us)
5600 latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0];
5601
5602 skl_compute_plane_wm(crtc_state, plane, 0, latency,
5603 wm_params, &levels[0],
5604 sagv_wm);
5605 }
5606
5607 static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
5608 struct skl_wm_level *trans_wm,
5609 const struct skl_wm_level *wm0,
5610 const struct skl_wm_params *wp)
5611 {
5612 u16 trans_min, trans_amount, trans_y_tile_min;
5613 u16 wm0_blocks, trans_offset, blocks;
5614
5615
5616 if (!dev_priv->ipc_enabled)
5617 return;
5618
5619
5620
5621
5622
5623 if (DISPLAY_VER(dev_priv) == 9)
5624 return;
5625
5626 if (DISPLAY_VER(dev_priv) >= 11)
5627 trans_min = 4;
5628 else
5629 trans_min = 14;
5630
5631
5632 if (DISPLAY_VER(dev_priv) == 10)
5633 trans_amount = 0;
5634 else
5635 trans_amount = 10;
5636
5637 trans_offset = trans_min + trans_amount;
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649 wm0_blocks = wm0->blocks - 1;
5650
5651 if (wp->y_tiled) {
5652 trans_y_tile_min =
5653 (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
5654 blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
5655 } else {
5656 blocks = wm0_blocks + trans_offset;
5657 }
5658 blocks++;
5659
5660
5661
5662
5663
5664
5665 trans_wm->blocks = blocks;
5666 trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
5667 trans_wm->enable = true;
5668 }
5669
5670 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
5671 const struct intel_plane_state *plane_state,
5672 struct intel_plane *plane, int color_plane)
5673 {
5674 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5675 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5676 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
5677 struct skl_wm_params wm_params;
5678 int ret;
5679
5680 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5681 &wm_params, color_plane);
5682 if (ret)
5683 return ret;
5684
5685 skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
5686
5687 skl_compute_transition_wm(dev_priv, &wm->trans_wm,
5688 &wm->wm[0], &wm_params);
5689
5690 if (DISPLAY_VER(dev_priv) >= 12) {
5691 tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
5692
5693 skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
5694 &wm->sagv.wm0, &wm_params);
5695 }
5696
5697 return 0;
5698 }
5699
5700 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5701 const struct intel_plane_state *plane_state,
5702 struct intel_plane *plane)
5703 {
5704 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
5705 struct skl_wm_params wm_params;
5706 int ret;
5707
5708 wm->is_planar = true;
5709
5710
5711 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5712 &wm_params, 1);
5713 if (ret)
5714 return ret;
5715
5716 skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
5717
5718 return 0;
5719 }
5720
5721 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
5722 const struct intel_plane_state *plane_state)
5723 {
5724 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5725 enum plane_id plane_id = plane->id;
5726 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
5727 const struct drm_framebuffer *fb = plane_state->hw.fb;
5728 int ret;
5729
5730 memset(wm, 0, sizeof(*wm));
5731
5732 if (!intel_wm_plane_visible(crtc_state, plane_state))
5733 return 0;
5734
5735 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5736 plane, 0);
5737 if (ret)
5738 return ret;
5739
5740 if (fb->format->is_yuv && fb->format->num_planes > 1) {
5741 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
5742 plane);
5743 if (ret)
5744 return ret;
5745 }
5746
5747 return 0;
5748 }
5749
5750 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
5751 const struct intel_plane_state *plane_state)
5752 {
5753 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5754 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5755 enum plane_id plane_id = plane->id;
5756 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
5757 int ret;
5758
5759
5760 if (plane_state->planar_slave)
5761 return 0;
5762
5763 memset(wm, 0, sizeof(*wm));
5764
5765 if (plane_state->planar_linked_plane) {
5766 const struct drm_framebuffer *fb = plane_state->hw.fb;
5767
5768 drm_WARN_ON(&dev_priv->drm,
5769 !intel_wm_plane_visible(crtc_state, plane_state));
5770 drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
5771 fb->format->num_planes == 1);
5772
5773 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5774 plane_state->planar_linked_plane, 0);
5775 if (ret)
5776 return ret;
5777
5778 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5779 plane, 1);
5780 if (ret)
5781 return ret;
5782 } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
5783 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5784 plane, 0);
5785 if (ret)
5786 return ret;
5787 }
5788
5789 return 0;
5790 }
5791
5792 static int skl_build_pipe_wm(struct intel_atomic_state *state,
5793 struct intel_crtc *crtc)
5794 {
5795 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5796 struct intel_crtc_state *crtc_state =
5797 intel_atomic_get_new_crtc_state(state, crtc);
5798 const struct intel_plane_state *plane_state;
5799 struct intel_plane *plane;
5800 int ret, i;
5801
5802 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5803
5804
5805
5806
5807
5808 if (plane->pipe != crtc->pipe)
5809 continue;
5810
5811 if (DISPLAY_VER(dev_priv) >= 11)
5812 ret = icl_build_plane_wm(crtc_state, plane_state);
5813 else
5814 ret = skl_build_plane_wm(crtc_state, plane_state);
5815 if (ret)
5816 return ret;
5817 }
5818
5819 crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
5820
5821 return 0;
5822 }
5823
5824 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5825 i915_reg_t reg,
5826 const struct skl_ddb_entry *entry)
5827 {
5828 if (entry->end)
5829 intel_de_write_fw(dev_priv, reg,
5830 PLANE_BUF_END(entry->end - 1) |
5831 PLANE_BUF_START(entry->start));
5832 else
5833 intel_de_write_fw(dev_priv, reg, 0);
5834 }
5835
5836 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5837 i915_reg_t reg,
5838 const struct skl_wm_level *level)
5839 {
5840 u32 val = 0;
5841
5842 if (level->enable)
5843 val |= PLANE_WM_EN;
5844 if (level->ignore_lines)
5845 val |= PLANE_WM_IGNORE_LINES;
5846 val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
5847 val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
5848
5849 intel_de_write_fw(dev_priv, reg, val);
5850 }
5851
5852 void skl_write_plane_wm(struct intel_plane *plane,
5853 const struct intel_crtc_state *crtc_state)
5854 {
5855 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5856 int level, max_level = ilk_wm_max_level(dev_priv);
5857 enum plane_id plane_id = plane->id;
5858 enum pipe pipe = plane->pipe;
5859 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5860 const struct skl_ddb_entry *ddb =
5861 &crtc_state->wm.skl.plane_ddb[plane_id];
5862 const struct skl_ddb_entry *ddb_y =
5863 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5864
5865 for (level = 0; level <= max_level; level++)
5866 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
5867 skl_plane_wm_level(pipe_wm, plane_id, level));
5868
5869 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5870 skl_plane_trans_wm(pipe_wm, plane_id));
5871
5872 if (HAS_HW_SAGV_WM(dev_priv)) {
5873 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5874
5875 skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
5876 &wm->sagv.wm0);
5877 skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
5878 &wm->sagv.trans_wm);
5879 }
5880
5881 skl_ddb_entry_write(dev_priv,
5882 PLANE_BUF_CFG(pipe, plane_id), ddb);
5883
5884 if (DISPLAY_VER(dev_priv) < 11)
5885 skl_ddb_entry_write(dev_priv,
5886 PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
5887 }
5888
5889 void skl_write_cursor_wm(struct intel_plane *plane,
5890 const struct intel_crtc_state *crtc_state)
5891 {
5892 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5893 int level, max_level = ilk_wm_max_level(dev_priv);
5894 enum plane_id plane_id = plane->id;
5895 enum pipe pipe = plane->pipe;
5896 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5897 const struct skl_ddb_entry *ddb =
5898 &crtc_state->wm.skl.plane_ddb[plane_id];
5899
5900 for (level = 0; level <= max_level; level++)
5901 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5902 skl_plane_wm_level(pipe_wm, plane_id, level));
5903
5904 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
5905 skl_plane_trans_wm(pipe_wm, plane_id));
5906
5907 if (HAS_HW_SAGV_WM(dev_priv)) {
5908 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5909
5910 skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
5911 &wm->sagv.wm0);
5912 skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
5913 &wm->sagv.trans_wm);
5914 }
5915
5916 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
5917 }
5918
5919 static bool skl_wm_level_equals(const struct skl_wm_level *l1,
5920 const struct skl_wm_level *l2)
5921 {
5922 return l1->enable == l2->enable &&
5923 l1->ignore_lines == l2->ignore_lines &&
5924 l1->lines == l2->lines &&
5925 l1->blocks == l2->blocks;
5926 }
5927
5928 static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5929 const struct skl_plane_wm *wm1,
5930 const struct skl_plane_wm *wm2)
5931 {
5932 int level, max_level = ilk_wm_max_level(dev_priv);
5933
5934 for (level = 0; level <= max_level; level++) {
5935
5936
5937
5938
5939
5940 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
5941 return false;
5942 }
5943
5944 return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
5945 skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
5946 skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
5947 }
5948
5949 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5950 const struct skl_ddb_entry *b)
5951 {
5952 return a->start < b->end && b->start < a->end;
5953 }
5954
5955 static void skl_ddb_entry_union(struct skl_ddb_entry *a,
5956 const struct skl_ddb_entry *b)
5957 {
5958 if (a->end && b->end) {
5959 a->start = min(a->start, b->start);
5960 a->end = max(a->end, b->end);
5961 } else if (b->end) {
5962 a->start = b->start;
5963 a->end = b->end;
5964 }
5965 }
5966
5967 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5968 const struct skl_ddb_entry *entries,
5969 int num_entries, int ignore_idx)
5970 {
5971 int i;
5972
5973 for (i = 0; i < num_entries; i++) {
5974 if (i != ignore_idx &&
5975 skl_ddb_entries_overlap(ddb, &entries[i]))
5976 return true;
5977 }
5978
5979 return false;
5980 }
5981
5982 static int
5983 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
5984 struct intel_crtc_state *new_crtc_state)
5985 {
5986 struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
5987 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5988 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5989 struct intel_plane *plane;
5990
5991 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5992 struct intel_plane_state *plane_state;
5993 enum plane_id plane_id = plane->id;
5994
5995 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
5996 &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
5997 skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
5998 &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
5999 continue;
6000
6001 plane_state = intel_atomic_get_plane_state(state, plane);
6002 if (IS_ERR(plane_state))
6003 return PTR_ERR(plane_state);
6004
6005 new_crtc_state->update_planes |= BIT(plane_id);
6006 }
6007
6008 return 0;
6009 }
6010
6011 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
6012 {
6013 struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
6014 u8 enabled_slices;
6015 enum pipe pipe;
6016
6017
6018
6019
6020
6021 enabled_slices = BIT(DBUF_S1);
6022
6023 for_each_pipe(dev_priv, pipe)
6024 enabled_slices |= dbuf_state->slices[pipe];
6025
6026 return enabled_slices;
6027 }
6028
6029 static int
6030 skl_compute_ddb(struct intel_atomic_state *state)
6031 {
6032 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6033 const struct intel_dbuf_state *old_dbuf_state;
6034 struct intel_dbuf_state *new_dbuf_state = NULL;
6035 const struct intel_crtc_state *old_crtc_state;
6036 struct intel_crtc_state *new_crtc_state;
6037 struct intel_crtc *crtc;
6038 int ret, i;
6039
6040 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6041 new_dbuf_state = intel_atomic_get_dbuf_state(state);
6042 if (IS_ERR(new_dbuf_state))
6043 return PTR_ERR(new_dbuf_state);
6044
6045 old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
6046 break;
6047 }
6048
6049 if (!new_dbuf_state)
6050 return 0;
6051
6052 new_dbuf_state->active_pipes =
6053 intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
6054
6055 if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
6056 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
6057 if (ret)
6058 return ret;
6059 }
6060
6061 if (HAS_MBUS_JOINING(dev_priv))
6062 new_dbuf_state->joined_mbus =
6063 adlp_check_mbus_joined(new_dbuf_state->active_pipes);
6064
6065 for_each_intel_crtc(&dev_priv->drm, crtc) {
6066 enum pipe pipe = crtc->pipe;
6067
6068 new_dbuf_state->slices[pipe] =
6069 skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
6070 new_dbuf_state->joined_mbus);
6071
6072 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
6073 continue;
6074
6075 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
6076 if (ret)
6077 return ret;
6078 }
6079
6080 new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
6081
6082 if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
6083 old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
6084 ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
6085 if (ret)
6086 return ret;
6087
6088 if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
6089
6090 ret = intel_modeset_all_pipes(state);
6091 if (ret)
6092 return ret;
6093 }
6094
6095 drm_dbg_kms(&dev_priv->drm,
6096 "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
6097 old_dbuf_state->enabled_slices,
6098 new_dbuf_state->enabled_slices,
6099 INTEL_INFO(dev_priv)->display.dbuf.slice_mask,
6100 str_yes_no(old_dbuf_state->joined_mbus),
6101 str_yes_no(new_dbuf_state->joined_mbus));
6102 }
6103
6104 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6105 enum pipe pipe = crtc->pipe;
6106
6107 new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
6108
6109 if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
6110 continue;
6111
6112 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
6113 if (ret)
6114 return ret;
6115 }
6116
6117 for_each_intel_crtc(&dev_priv->drm, crtc) {
6118 ret = skl_crtc_allocate_ddb(state, crtc);
6119 if (ret)
6120 return ret;
6121 }
6122
6123 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6124 new_crtc_state, i) {
6125 ret = skl_crtc_allocate_plane_ddb(state, crtc);
6126 if (ret)
6127 return ret;
6128
6129 ret = skl_ddb_add_affected_planes(old_crtc_state,
6130 new_crtc_state);
6131 if (ret)
6132 return ret;
6133 }
6134
6135 return 0;
6136 }
6137
6138 static char enast(bool enable)
6139 {
6140 return enable ? '*' : ' ';
6141 }
6142
6143 static void
6144 skl_print_wm_changes(struct intel_atomic_state *state)
6145 {
6146 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6147 const struct intel_crtc_state *old_crtc_state;
6148 const struct intel_crtc_state *new_crtc_state;
6149 struct intel_plane *plane;
6150 struct intel_crtc *crtc;
6151 int i;
6152
6153 if (!drm_debug_enabled(DRM_UT_KMS))
6154 return;
6155
6156 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6157 new_crtc_state, i) {
6158 const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
6159
6160 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
6161 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
6162
6163 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6164 enum plane_id plane_id = plane->id;
6165 const struct skl_ddb_entry *old, *new;
6166
6167 old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
6168 new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
6169
6170 if (skl_ddb_entry_equal(old, new))
6171 continue;
6172
6173 drm_dbg_kms(&dev_priv->drm,
6174 "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
6175 plane->base.base.id, plane->base.name,
6176 old->start, old->end, new->start, new->end,
6177 skl_ddb_entry_size(old), skl_ddb_entry_size(new));
6178 }
6179
6180 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6181 enum plane_id plane_id = plane->id;
6182 const struct skl_plane_wm *old_wm, *new_wm;
6183
6184 old_wm = &old_pipe_wm->planes[plane_id];
6185 new_wm = &new_pipe_wm->planes[plane_id];
6186
6187 if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
6188 continue;
6189
6190 drm_dbg_kms(&dev_priv->drm,
6191 "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
6192 " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
6193 plane->base.base.id, plane->base.name,
6194 enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
6195 enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
6196 enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
6197 enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
6198 enast(old_wm->trans_wm.enable),
6199 enast(old_wm->sagv.wm0.enable),
6200 enast(old_wm->sagv.trans_wm.enable),
6201 enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
6202 enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
6203 enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
6204 enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
6205 enast(new_wm->trans_wm.enable),
6206 enast(new_wm->sagv.wm0.enable),
6207 enast(new_wm->sagv.trans_wm.enable));
6208
6209 drm_dbg_kms(&dev_priv->drm,
6210 "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
6211 " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
6212 plane->base.base.id, plane->base.name,
6213 enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
6214 enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
6215 enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
6216 enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
6217 enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
6218 enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
6219 enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
6220 enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
6221 enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
6222 enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
6223 enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
6224 enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
6225 enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
6226 enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
6227 enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
6228 enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
6229 enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
6230 enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
6231 enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
6232 enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
6233 enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
6234 enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
6235
6236 drm_dbg_kms(&dev_priv->drm,
6237 "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
6238 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
6239 plane->base.base.id, plane->base.name,
6240 old_wm->wm[0].blocks, old_wm->wm[1].blocks,
6241 old_wm->wm[2].blocks, old_wm->wm[3].blocks,
6242 old_wm->wm[4].blocks, old_wm->wm[5].blocks,
6243 old_wm->wm[6].blocks, old_wm->wm[7].blocks,
6244 old_wm->trans_wm.blocks,
6245 old_wm->sagv.wm0.blocks,
6246 old_wm->sagv.trans_wm.blocks,
6247 new_wm->wm[0].blocks, new_wm->wm[1].blocks,
6248 new_wm->wm[2].blocks, new_wm->wm[3].blocks,
6249 new_wm->wm[4].blocks, new_wm->wm[5].blocks,
6250 new_wm->wm[6].blocks, new_wm->wm[7].blocks,
6251 new_wm->trans_wm.blocks,
6252 new_wm->sagv.wm0.blocks,
6253 new_wm->sagv.trans_wm.blocks);
6254
6255 drm_dbg_kms(&dev_priv->drm,
6256 "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
6257 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
6258 plane->base.base.id, plane->base.name,
6259 old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
6260 old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
6261 old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
6262 old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
6263 old_wm->trans_wm.min_ddb_alloc,
6264 old_wm->sagv.wm0.min_ddb_alloc,
6265 old_wm->sagv.trans_wm.min_ddb_alloc,
6266 new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
6267 new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
6268 new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
6269 new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
6270 new_wm->trans_wm.min_ddb_alloc,
6271 new_wm->sagv.wm0.min_ddb_alloc,
6272 new_wm->sagv.trans_wm.min_ddb_alloc);
6273 }
6274 }
6275 }
6276
6277 static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
6278 const struct skl_pipe_wm *old_pipe_wm,
6279 const struct skl_pipe_wm *new_pipe_wm)
6280 {
6281 struct drm_i915_private *i915 = to_i915(plane->base.dev);
6282 int level, max_level = ilk_wm_max_level(i915);
6283
6284 for (level = 0; level <= max_level; level++) {
6285
6286
6287
6288
6289
6290 if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
6291 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
6292 return false;
6293 }
6294
6295 if (HAS_HW_SAGV_WM(i915)) {
6296 const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
6297 const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
6298
6299 if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
6300 !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
6301 return false;
6302 }
6303
6304 return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
6305 skl_plane_trans_wm(new_pipe_wm, plane->id));
6306 }
6307
6308
6309
6310
6311
6312
6313
6314
6315
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
6326
6327
6328
6329
6330 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
6331 struct intel_crtc *crtc)
6332 {
6333 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6334 const struct intel_crtc_state *old_crtc_state =
6335 intel_atomic_get_old_crtc_state(state, crtc);
6336 struct intel_crtc_state *new_crtc_state =
6337 intel_atomic_get_new_crtc_state(state, crtc);
6338 struct intel_plane *plane;
6339
6340 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6341 struct intel_plane_state *plane_state;
6342 enum plane_id plane_id = plane->id;
6343
6344
6345
6346
6347
6348
6349
6350
6351
6352 if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
6353 skl_plane_selected_wm_equals(plane,
6354 &old_crtc_state->wm.skl.optimal,
6355 &new_crtc_state->wm.skl.optimal))
6356 continue;
6357
6358 plane_state = intel_atomic_get_plane_state(state, plane);
6359 if (IS_ERR(plane_state))
6360 return PTR_ERR(plane_state);
6361
6362 new_crtc_state->update_planes |= BIT(plane_id);
6363 }
6364
6365 return 0;
6366 }
6367
6368 static int
6369 skl_compute_wm(struct intel_atomic_state *state)
6370 {
6371 struct intel_crtc *crtc;
6372 struct intel_crtc_state *new_crtc_state;
6373 int ret, i;
6374
6375 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6376 ret = skl_build_pipe_wm(state, crtc);
6377 if (ret)
6378 return ret;
6379 }
6380
6381 ret = skl_compute_ddb(state);
6382 if (ret)
6383 return ret;
6384
6385 ret = intel_compute_sagv_mask(state);
6386 if (ret)
6387 return ret;
6388
6389
6390
6391
6392
6393
6394 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6395 ret = skl_wm_add_affected_planes(state, crtc);
6396 if (ret)
6397 return ret;
6398 }
6399
6400 skl_print_wm_changes(state);
6401
6402 return 0;
6403 }
6404
6405 static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
6406 struct intel_wm_config *config)
6407 {
6408 struct intel_crtc *crtc;
6409
6410
6411 for_each_intel_crtc(&dev_priv->drm, crtc) {
6412 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
6413
6414 if (!wm->pipe_enabled)
6415 continue;
6416
6417 config->sprites_enabled |= wm->sprites_enabled;
6418 config->sprites_scaled |= wm->sprites_scaled;
6419 config->num_pipes_active++;
6420 }
6421 }
6422
6423 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
6424 {
6425 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
6426 struct ilk_wm_maximums max;
6427 struct intel_wm_config config = {};
6428 struct ilk_wm_values results = {};
6429 enum intel_ddb_partitioning partitioning;
6430
6431 ilk_compute_wm_config(dev_priv, &config);
6432
6433 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
6434 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
6435
6436
6437 if (DISPLAY_VER(dev_priv) >= 7 &&
6438 config.num_pipes_active == 1 && config.sprites_enabled) {
6439 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
6440 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
6441
6442 best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
6443 } else {
6444 best_lp_wm = &lp_wm_1_2;
6445 }
6446
6447 partitioning = (best_lp_wm == &lp_wm_1_2) ?
6448 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
6449
6450 ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
6451
6452 ilk_write_wm_values(dev_priv, &results);
6453 }
6454
6455 static void ilk_initial_watermarks(struct intel_atomic_state *state,
6456 struct intel_crtc *crtc)
6457 {
6458 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6459 const struct intel_crtc_state *crtc_state =
6460 intel_atomic_get_new_crtc_state(state, crtc);
6461
6462 mutex_lock(&dev_priv->wm.wm_mutex);
6463 crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
6464 ilk_program_watermarks(dev_priv);
6465 mutex_unlock(&dev_priv->wm.wm_mutex);
6466 }
6467
6468 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
6469 struct intel_crtc *crtc)
6470 {
6471 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6472 const struct intel_crtc_state *crtc_state =
6473 intel_atomic_get_new_crtc_state(state, crtc);
6474
6475 if (!crtc_state->wm.need_postvbl_update)
6476 return;
6477
6478 mutex_lock(&dev_priv->wm.wm_mutex);
6479 crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
6480 ilk_program_watermarks(dev_priv);
6481 mutex_unlock(&dev_priv->wm.wm_mutex);
6482 }
6483
6484 static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
6485 {
6486 level->enable = val & PLANE_WM_EN;
6487 level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
6488 level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
6489 level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
6490 }
6491
6492 static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
6493 struct skl_pipe_wm *out)
6494 {
6495 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6496 enum pipe pipe = crtc->pipe;
6497 int level, max_level;
6498 enum plane_id plane_id;
6499 u32 val;
6500
6501 max_level = ilk_wm_max_level(dev_priv);
6502
6503 for_each_plane_id_on_crtc(crtc, plane_id) {
6504 struct skl_plane_wm *wm = &out->planes[plane_id];
6505
6506 for (level = 0; level <= max_level; level++) {
6507 if (plane_id != PLANE_CURSOR)
6508 val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
6509 else
6510 val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
6511
6512 skl_wm_level_from_reg_val(val, &wm->wm[level]);
6513 }
6514
6515 if (plane_id != PLANE_CURSOR)
6516 val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
6517 else
6518 val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
6519
6520 skl_wm_level_from_reg_val(val, &wm->trans_wm);
6521
6522 if (HAS_HW_SAGV_WM(dev_priv)) {
6523 if (plane_id != PLANE_CURSOR)
6524 val = intel_uncore_read(&dev_priv->uncore,
6525 PLANE_WM_SAGV(pipe, plane_id));
6526 else
6527 val = intel_uncore_read(&dev_priv->uncore,
6528 CUR_WM_SAGV(pipe));
6529
6530 skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
6531
6532 if (plane_id != PLANE_CURSOR)
6533 val = intel_uncore_read(&dev_priv->uncore,
6534 PLANE_WM_SAGV_TRANS(pipe, plane_id));
6535 else
6536 val = intel_uncore_read(&dev_priv->uncore,
6537 CUR_WM_SAGV_TRANS(pipe));
6538
6539 skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
6540 } else if (DISPLAY_VER(dev_priv) >= 12) {
6541 wm->sagv.wm0 = wm->wm[0];
6542 wm->sagv.trans_wm = wm->trans_wm;
6543 }
6544 }
6545 }
6546
6547 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
6548 {
6549 struct intel_dbuf_state *dbuf_state =
6550 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
6551 struct intel_crtc *crtc;
6552
6553 if (HAS_MBUS_JOINING(dev_priv))
6554 dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
6555
6556 for_each_intel_crtc(&dev_priv->drm, crtc) {
6557 struct intel_crtc_state *crtc_state =
6558 to_intel_crtc_state(crtc->base.state);
6559 enum pipe pipe = crtc->pipe;
6560 unsigned int mbus_offset;
6561 enum plane_id plane_id;
6562 u8 slices;
6563
6564 memset(&crtc_state->wm.skl.optimal, 0,
6565 sizeof(crtc_state->wm.skl.optimal));
6566 if (crtc_state->hw.active)
6567 skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
6568 crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
6569
6570 memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
6571
6572 for_each_plane_id_on_crtc(crtc, plane_id) {
6573 struct skl_ddb_entry *ddb =
6574 &crtc_state->wm.skl.plane_ddb[plane_id];
6575 struct skl_ddb_entry *ddb_y =
6576 &crtc_state->wm.skl.plane_ddb_y[plane_id];
6577
6578 if (!crtc_state->hw.active)
6579 continue;
6580
6581 skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
6582 plane_id, ddb, ddb_y);
6583
6584 skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
6585 skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
6586 }
6587
6588 dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
6589
6590
6591
6592
6593
6594 slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
6595 dbuf_state->joined_mbus);
6596 mbus_offset = mbus_ddb_offset(dev_priv, slices);
6597 crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
6598 crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
6599
6600
6601 dbuf_state->slices[pipe] =
6602 skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
6603
6604 drm_dbg_kms(&dev_priv->drm,
6605 "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
6606 crtc->base.base.id, crtc->base.name,
6607 dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
6608 dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
6609 str_yes_no(dbuf_state->joined_mbus));
6610 }
6611
6612 dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
6613 }
6614
6615 static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
6616 {
6617 const struct intel_dbuf_state *dbuf_state =
6618 to_intel_dbuf_state(i915->dbuf.obj.state);
6619 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
6620 struct intel_crtc *crtc;
6621
6622 for_each_intel_crtc(&i915->drm, crtc) {
6623 const struct intel_crtc_state *crtc_state =
6624 to_intel_crtc_state(crtc->base.state);
6625
6626 entries[crtc->pipe] = crtc_state->wm.skl.ddb;
6627 }
6628
6629 for_each_intel_crtc(&i915->drm, crtc) {
6630 const struct intel_crtc_state *crtc_state =
6631 to_intel_crtc_state(crtc->base.state);
6632 u8 slices;
6633
6634 slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
6635 dbuf_state->joined_mbus);
6636 if (dbuf_state->slices[crtc->pipe] & ~slices)
6637 return true;
6638
6639 if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
6640 I915_MAX_PIPES, crtc->pipe))
6641 return true;
6642 }
6643
6644 return false;
6645 }
6646
6647 void skl_wm_sanitize(struct drm_i915_private *i915)
6648 {
6649 struct intel_crtc *crtc;
6650
6651
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662 if (!skl_dbuf_is_misconfigured(i915))
6663 return;
6664
6665 drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
6666
6667 for_each_intel_crtc(&i915->drm, crtc) {
6668 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6669 const struct intel_plane_state *plane_state =
6670 to_intel_plane_state(plane->base.state);
6671 struct intel_crtc_state *crtc_state =
6672 to_intel_crtc_state(crtc->base.state);
6673
6674 if (plane_state->uapi.visible)
6675 intel_plane_disable_noatomic(crtc, plane);
6676
6677 drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
6678
6679 memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
6680 }
6681 }
6682
6683 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
6684 {
6685 struct drm_device *dev = crtc->base.dev;
6686 struct drm_i915_private *dev_priv = to_i915(dev);
6687 struct ilk_wm_values *hw = &dev_priv->wm.hw;
6688 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
6689 struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
6690 enum pipe pipe = crtc->pipe;
6691
6692 hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
6693
6694 memset(active, 0, sizeof(*active));
6695
6696 active->pipe_enabled = crtc->active;
6697
6698 if (active->pipe_enabled) {
6699 u32 tmp = hw->wm_pipe[pipe];
6700
6701
6702
6703
6704
6705
6706
6707 active->wm[0].enable = true;
6708 active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
6709 active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
6710 active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
6711 } else {
6712 int level, max_level = ilk_wm_max_level(dev_priv);
6713
6714
6715
6716
6717
6718
6719 for (level = 0; level <= max_level; level++)
6720 active->wm[level].enable = true;
6721 }
6722
6723 crtc->wm.active.ilk = *active;
6724 }
6725
6726 #define _FW_WM(value, plane) \
6727 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
6728 #define _FW_WM_VLV(value, plane) \
6729 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
6730
6731 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
6732 struct g4x_wm_values *wm)
6733 {
6734 u32 tmp;
6735
6736 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
6737 wm->sr.plane = _FW_WM(tmp, SR);
6738 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6739 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
6740 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
6741
6742 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
6743 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
6744 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
6745 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
6746 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
6747 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6748 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
6749
6750 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
6751 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
6752 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6753 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
6754 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
6755 }
6756
6757 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
6758 struct vlv_wm_values *wm)
6759 {
6760 enum pipe pipe;
6761 u32 tmp;
6762
6763 for_each_pipe(dev_priv, pipe) {
6764 tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
6765
6766 wm->ddl[pipe].plane[PLANE_PRIMARY] =
6767 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6768 wm->ddl[pipe].plane[PLANE_CURSOR] =
6769 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6770 wm->ddl[pipe].plane[PLANE_SPRITE0] =
6771 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6772 wm->ddl[pipe].plane[PLANE_SPRITE1] =
6773 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6774 }
6775
6776 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
6777 wm->sr.plane = _FW_WM(tmp, SR);
6778 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6779 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
6780 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
6781
6782 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
6783 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
6784 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6785 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
6786
6787 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
6788 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6789
6790 if (IS_CHERRYVIEW(dev_priv)) {
6791 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
6792 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6793 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6794
6795 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
6796 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
6797 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
6798
6799 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
6800 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
6801 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
6802
6803 tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
6804 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6805 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
6806 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
6807 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
6808 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6809 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6810 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6811 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6812 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6813 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6814 } else {
6815 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
6816 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6817 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6818
6819 tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
6820 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6821 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6822 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6823 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6824 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6825 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6826 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6827 }
6828 }
6829
6830 #undef _FW_WM
6831 #undef _FW_WM_VLV
6832
6833 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
6834 {
6835 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
6836 struct intel_crtc *crtc;
6837
6838 g4x_read_wm_values(dev_priv, wm);
6839
6840 wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
6841
6842 for_each_intel_crtc(&dev_priv->drm, crtc) {
6843 struct intel_crtc_state *crtc_state =
6844 to_intel_crtc_state(crtc->base.state);
6845 struct g4x_wm_state *active = &crtc->wm.active.g4x;
6846 struct g4x_pipe_wm *raw;
6847 enum pipe pipe = crtc->pipe;
6848 enum plane_id plane_id;
6849 int level, max_level;
6850
6851 active->cxsr = wm->cxsr;
6852 active->hpll_en = wm->hpll_en;
6853 active->fbc_en = wm->fbc_en;
6854
6855 active->sr = wm->sr;
6856 active->hpll = wm->hpll;
6857
6858 for_each_plane_id_on_crtc(crtc, plane_id) {
6859 active->wm.plane[plane_id] =
6860 wm->pipe[pipe].plane[plane_id];
6861 }
6862
6863 if (wm->cxsr && wm->hpll_en)
6864 max_level = G4X_WM_LEVEL_HPLL;
6865 else if (wm->cxsr)
6866 max_level = G4X_WM_LEVEL_SR;
6867 else
6868 max_level = G4X_WM_LEVEL_NORMAL;
6869
6870 level = G4X_WM_LEVEL_NORMAL;
6871 raw = &crtc_state->wm.g4x.raw[level];
6872 for_each_plane_id_on_crtc(crtc, plane_id)
6873 raw->plane[plane_id] = active->wm.plane[plane_id];
6874
6875 level = G4X_WM_LEVEL_SR;
6876 if (level > max_level)
6877 goto out;
6878
6879 raw = &crtc_state->wm.g4x.raw[level];
6880 raw->plane[PLANE_PRIMARY] = active->sr.plane;
6881 raw->plane[PLANE_CURSOR] = active->sr.cursor;
6882 raw->plane[PLANE_SPRITE0] = 0;
6883 raw->fbc = active->sr.fbc;
6884
6885 level = G4X_WM_LEVEL_HPLL;
6886 if (level > max_level)
6887 goto out;
6888
6889 raw = &crtc_state->wm.g4x.raw[level];
6890 raw->plane[PLANE_PRIMARY] = active->hpll.plane;
6891 raw->plane[PLANE_CURSOR] = active->hpll.cursor;
6892 raw->plane[PLANE_SPRITE0] = 0;
6893 raw->fbc = active->hpll.fbc;
6894
6895 level++;
6896 out:
6897 for_each_plane_id_on_crtc(crtc, plane_id)
6898 g4x_raw_plane_wm_set(crtc_state, level,
6899 plane_id, USHRT_MAX);
6900 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
6901
6902 crtc_state->wm.g4x.optimal = *active;
6903 crtc_state->wm.g4x.intermediate = *active;
6904
6905 drm_dbg_kms(&dev_priv->drm,
6906 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
6907 pipe_name(pipe),
6908 wm->pipe[pipe].plane[PLANE_PRIMARY],
6909 wm->pipe[pipe].plane[PLANE_CURSOR],
6910 wm->pipe[pipe].plane[PLANE_SPRITE0]);
6911 }
6912
6913 drm_dbg_kms(&dev_priv->drm,
6914 "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
6915 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
6916 drm_dbg_kms(&dev_priv->drm,
6917 "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
6918 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
6919 drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
6920 str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
6921 str_yes_no(wm->fbc_en));
6922 }
6923
6924 void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
6925 {
6926 struct intel_plane *plane;
6927 struct intel_crtc *crtc;
6928
6929 mutex_lock(&dev_priv->wm.wm_mutex);
6930
6931 for_each_intel_plane(&dev_priv->drm, plane) {
6932 struct intel_crtc *crtc =
6933 intel_crtc_for_pipe(dev_priv, plane->pipe);
6934 struct intel_crtc_state *crtc_state =
6935 to_intel_crtc_state(crtc->base.state);
6936 struct intel_plane_state *plane_state =
6937 to_intel_plane_state(plane->base.state);
6938 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
6939 enum plane_id plane_id = plane->id;
6940 int level;
6941
6942 if (plane_state->uapi.visible)
6943 continue;
6944
6945 for (level = 0; level < 3; level++) {
6946 struct g4x_pipe_wm *raw =
6947 &crtc_state->wm.g4x.raw[level];
6948
6949 raw->plane[plane_id] = 0;
6950 wm_state->wm.plane[plane_id] = 0;
6951 }
6952
6953 if (plane_id == PLANE_PRIMARY) {
6954 for (level = 0; level < 3; level++) {
6955 struct g4x_pipe_wm *raw =
6956 &crtc_state->wm.g4x.raw[level];
6957 raw->fbc = 0;
6958 }
6959
6960 wm_state->sr.fbc = 0;
6961 wm_state->hpll.fbc = 0;
6962 wm_state->fbc_en = false;
6963 }
6964 }
6965
6966 for_each_intel_crtc(&dev_priv->drm, crtc) {
6967 struct intel_crtc_state *crtc_state =
6968 to_intel_crtc_state(crtc->base.state);
6969
6970 crtc_state->wm.g4x.intermediate =
6971 crtc_state->wm.g4x.optimal;
6972 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
6973 }
6974
6975 g4x_program_watermarks(dev_priv);
6976
6977 mutex_unlock(&dev_priv->wm.wm_mutex);
6978 }
6979
6980 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
6981 {
6982 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
6983 struct intel_crtc *crtc;
6984 u32 val;
6985
6986 vlv_read_wm_values(dev_priv, wm);
6987
6988 wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
6989 wm->level = VLV_WM_LEVEL_PM2;
6990
6991 if (IS_CHERRYVIEW(dev_priv)) {
6992 vlv_punit_get(dev_priv);
6993
6994 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
6995 if (val & DSP_MAXFIFO_PM5_ENABLE)
6996 wm->level = VLV_WM_LEVEL_PM5;
6997
6998
6999
7000
7001
7002
7003
7004
7005
7006
7007 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
7008 val |= FORCE_DDR_FREQ_REQ_ACK;
7009 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
7010
7011 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
7012 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
7013 drm_dbg_kms(&dev_priv->drm,
7014 "Punit not acking DDR DVFS request, "
7015 "assuming DDR DVFS is disabled\n");
7016 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
7017 } else {
7018 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
7019 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
7020 wm->level = VLV_WM_LEVEL_DDR_DVFS;
7021 }
7022
7023 vlv_punit_put(dev_priv);
7024 }
7025
7026 for_each_intel_crtc(&dev_priv->drm, crtc) {
7027 struct intel_crtc_state *crtc_state =
7028 to_intel_crtc_state(crtc->base.state);
7029 struct vlv_wm_state *active = &crtc->wm.active.vlv;
7030 const struct vlv_fifo_state *fifo_state =
7031 &crtc_state->wm.vlv.fifo_state;
7032 enum pipe pipe = crtc->pipe;
7033 enum plane_id plane_id;
7034 int level;
7035
7036 vlv_get_fifo_size(crtc_state);
7037
7038 active->num_levels = wm->level + 1;
7039 active->cxsr = wm->cxsr;
7040
7041 for (level = 0; level < active->num_levels; level++) {
7042 struct g4x_pipe_wm *raw =
7043 &crtc_state->wm.vlv.raw[level];
7044
7045 active->sr[level].plane = wm->sr.plane;
7046 active->sr[level].cursor = wm->sr.cursor;
7047
7048 for_each_plane_id_on_crtc(crtc, plane_id) {
7049 active->wm[level].plane[plane_id] =
7050 wm->pipe[pipe].plane[plane_id];
7051
7052 raw->plane[plane_id] =
7053 vlv_invert_wm_value(active->wm[level].plane[plane_id],
7054 fifo_state->plane[plane_id]);
7055 }
7056 }
7057
7058 for_each_plane_id_on_crtc(crtc, plane_id)
7059 vlv_raw_plane_wm_set(crtc_state, level,
7060 plane_id, USHRT_MAX);
7061 vlv_invalidate_wms(crtc, active, level);
7062
7063 crtc_state->wm.vlv.optimal = *active;
7064 crtc_state->wm.vlv.intermediate = *active;
7065
7066 drm_dbg_kms(&dev_priv->drm,
7067 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
7068 pipe_name(pipe),
7069 wm->pipe[pipe].plane[PLANE_PRIMARY],
7070 wm->pipe[pipe].plane[PLANE_CURSOR],
7071 wm->pipe[pipe].plane[PLANE_SPRITE0],
7072 wm->pipe[pipe].plane[PLANE_SPRITE1]);
7073 }
7074
7075 drm_dbg_kms(&dev_priv->drm,
7076 "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
7077 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
7078 }
7079
7080 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
7081 {
7082 struct intel_plane *plane;
7083 struct intel_crtc *crtc;
7084
7085 mutex_lock(&dev_priv->wm.wm_mutex);
7086
7087 for_each_intel_plane(&dev_priv->drm, plane) {
7088 struct intel_crtc *crtc =
7089 intel_crtc_for_pipe(dev_priv, plane->pipe);
7090 struct intel_crtc_state *crtc_state =
7091 to_intel_crtc_state(crtc->base.state);
7092 struct intel_plane_state *plane_state =
7093 to_intel_plane_state(plane->base.state);
7094 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
7095 const struct vlv_fifo_state *fifo_state =
7096 &crtc_state->wm.vlv.fifo_state;
7097 enum plane_id plane_id = plane->id;
7098 int level;
7099
7100 if (plane_state->uapi.visible)
7101 continue;
7102
7103 for (level = 0; level < wm_state->num_levels; level++) {
7104 struct g4x_pipe_wm *raw =
7105 &crtc_state->wm.vlv.raw[level];
7106
7107 raw->plane[plane_id] = 0;
7108
7109 wm_state->wm[level].plane[plane_id] =
7110 vlv_invert_wm_value(raw->plane[plane_id],
7111 fifo_state->plane[plane_id]);
7112 }
7113 }
7114
7115 for_each_intel_crtc(&dev_priv->drm, crtc) {
7116 struct intel_crtc_state *crtc_state =
7117 to_intel_crtc_state(crtc->base.state);
7118
7119 crtc_state->wm.vlv.intermediate =
7120 crtc_state->wm.vlv.optimal;
7121 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
7122 }
7123
7124 vlv_program_watermarks(dev_priv);
7125
7126 mutex_unlock(&dev_priv->wm.wm_mutex);
7127 }
7128
7129
7130
7131
7132
7133 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
7134 {
7135 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM_LP_ENABLE);
7136 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM_LP_ENABLE);
7137 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM_LP_ENABLE);
7138
7139
7140
7141
7142
7143 }
7144
7145 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
7146 {
7147 struct ilk_wm_values *hw = &dev_priv->wm.hw;
7148 struct intel_crtc *crtc;
7149
7150 ilk_init_lp_watermarks(dev_priv);
7151
7152 for_each_intel_crtc(&dev_priv->drm, crtc)
7153 ilk_pipe_wm_get_hw_state(crtc);
7154
7155 hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
7156 hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
7157 hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
7158
7159 hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
7160 if (DISPLAY_VER(dev_priv) >= 7) {
7161 hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
7162 hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
7163 }
7164
7165 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
7166 hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
7167 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
7168 else if (IS_IVYBRIDGE(dev_priv))
7169 hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
7170 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
7171
7172 hw->enable_fbc_wm =
7173 !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
7174 }
7175
7176 void intel_wm_state_verify(struct intel_crtc *crtc,
7177 struct intel_crtc_state *new_crtc_state)
7178 {
7179 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7180 struct skl_hw_state {
7181 struct skl_ddb_entry ddb[I915_MAX_PLANES];
7182 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
7183 struct skl_pipe_wm wm;
7184 } *hw;
7185 const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
7186 int level, max_level = ilk_wm_max_level(dev_priv);
7187 struct intel_plane *plane;
7188 u8 hw_enabled_slices;
7189
7190 if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
7191 return;
7192
7193 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
7194 if (!hw)
7195 return;
7196
7197 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
7198
7199 skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
7200
7201 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
7202
7203 if (DISPLAY_VER(dev_priv) >= 11 &&
7204 hw_enabled_slices != dev_priv->dbuf.enabled_slices)
7205 drm_err(&dev_priv->drm,
7206 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
7207 dev_priv->dbuf.enabled_slices,
7208 hw_enabled_slices);
7209
7210 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7211 const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
7212 const struct skl_wm_level *hw_wm_level, *sw_wm_level;
7213
7214
7215 for (level = 0; level <= max_level; level++) {
7216 hw_wm_level = &hw->wm.planes[plane->id].wm[level];
7217 sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
7218
7219 if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
7220 continue;
7221
7222 drm_err(&dev_priv->drm,
7223 "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
7224 plane->base.base.id, plane->base.name, level,
7225 sw_wm_level->enable,
7226 sw_wm_level->blocks,
7227 sw_wm_level->lines,
7228 hw_wm_level->enable,
7229 hw_wm_level->blocks,
7230 hw_wm_level->lines);
7231 }
7232
7233 hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
7234 sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
7235
7236 if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
7237 drm_err(&dev_priv->drm,
7238 "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
7239 plane->base.base.id, plane->base.name,
7240 sw_wm_level->enable,
7241 sw_wm_level->blocks,
7242 sw_wm_level->lines,
7243 hw_wm_level->enable,
7244 hw_wm_level->blocks,
7245 hw_wm_level->lines);
7246 }
7247
7248 hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
7249 sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
7250
7251 if (HAS_HW_SAGV_WM(dev_priv) &&
7252 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
7253 drm_err(&dev_priv->drm,
7254 "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
7255 plane->base.base.id, plane->base.name,
7256 sw_wm_level->enable,
7257 sw_wm_level->blocks,
7258 sw_wm_level->lines,
7259 hw_wm_level->enable,
7260 hw_wm_level->blocks,
7261 hw_wm_level->lines);
7262 }
7263
7264 hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
7265 sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
7266
7267 if (HAS_HW_SAGV_WM(dev_priv) &&
7268 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
7269 drm_err(&dev_priv->drm,
7270 "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
7271 plane->base.base.id, plane->base.name,
7272 sw_wm_level->enable,
7273 sw_wm_level->blocks,
7274 sw_wm_level->lines,
7275 hw_wm_level->enable,
7276 hw_wm_level->blocks,
7277 hw_wm_level->lines);
7278 }
7279
7280
7281 hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
7282 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
7283
7284 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
7285 drm_err(&dev_priv->drm,
7286 "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
7287 plane->base.base.id, plane->base.name,
7288 sw_ddb_entry->start, sw_ddb_entry->end,
7289 hw_ddb_entry->start, hw_ddb_entry->end);
7290 }
7291 }
7292
7293 kfree(hw);
7294 }
7295
7296 void intel_enable_ipc(struct drm_i915_private *dev_priv)
7297 {
7298 u32 val;
7299
7300 if (!HAS_IPC(dev_priv))
7301 return;
7302
7303 val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
7304
7305 if (dev_priv->ipc_enabled)
7306 val |= DISP_IPC_ENABLE;
7307 else
7308 val &= ~DISP_IPC_ENABLE;
7309
7310 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
7311 }
7312
7313 static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
7314 {
7315
7316 if (IS_SKYLAKE(dev_priv))
7317 return false;
7318
7319
7320 if (IS_KABYLAKE(dev_priv) ||
7321 IS_COFFEELAKE(dev_priv) ||
7322 IS_COMETLAKE(dev_priv))
7323 return dev_priv->dram_info.symmetric_memory;
7324
7325 return true;
7326 }
7327
7328 void intel_init_ipc(struct drm_i915_private *dev_priv)
7329 {
7330 if (!HAS_IPC(dev_priv))
7331 return;
7332
7333 dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
7334
7335 intel_enable_ipc(dev_priv);
7336 }
7337
7338 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
7339 {
7340
7341
7342
7343
7344
7345 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
7346 }
7347
7348 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
7349 {
7350 enum pipe pipe;
7351
7352 for_each_pipe(dev_priv, pipe) {
7353 intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
7354 intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
7355 DISP_TRICKLE_FEED_DISABLE);
7356
7357 intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
7358 intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
7359 }
7360 }
7361
7362 static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
7363 {
7364 u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7365
7366
7367
7368
7369
7370 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
7371 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
7372 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
7373
7374 intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
7375 MARIUNIT_CLOCK_GATE_DISABLE |
7376 SVSMUNIT_CLOCK_GATE_DISABLE);
7377 intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
7378 VFMUNIT_CLOCK_GATE_DISABLE);
7379
7380
7381
7382
7383
7384
7385
7386
7387 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7388 (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7389 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7390 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
7391 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
7392 (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
7393 DISP_FBC_WM_DIS));
7394
7395
7396
7397
7398
7399
7400
7401
7402 if (IS_IRONLAKE_M(dev_priv)) {
7403
7404 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
7405 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
7406 ILK_FBCQ_DIS);
7407 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7408 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7409 ILK_DPARB_GATE);
7410 }
7411
7412 intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
7413
7414 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7415 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7416 ILK_ELPIN_409_SELECT);
7417
7418 g4x_disable_trickle_feed(dev_priv);
7419
7420 ibx_init_clock_gating(dev_priv);
7421 }
7422
7423 static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
7424 {
7425 enum pipe pipe;
7426 u32 val;
7427
7428
7429
7430
7431
7432
7433 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
7434 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
7435 PCH_CPUNIT_CLOCK_GATE_DISABLE);
7436 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
7437 DPLS_EDP_PPS_FIX_DIS);
7438
7439
7440
7441 for_each_pipe(dev_priv, pipe) {
7442 val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
7443 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
7444 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7445 if (dev_priv->vbt.fdi_rx_polarity_inverted)
7446 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7447 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
7448 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
7449 intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
7450 }
7451
7452 for_each_pipe(dev_priv, pipe) {
7453 intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
7454 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7455 }
7456 }
7457
7458 static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
7459 {
7460 u32 tmp;
7461
7462 tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
7463 if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12)
7464 drm_dbg_kms(&dev_priv->drm,
7465 "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
7466 tmp);
7467 }
7468
7469 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
7470 {
7471 u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7472
7473 intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
7474
7475 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7476 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7477 ILK_ELPIN_409_SELECT);
7478
7479 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
7480 intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
7481 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7482 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7483
7484
7485
7486
7487
7488
7489
7490
7491
7492
7493
7494
7495
7496
7497 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
7498 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
7499 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
7500
7501
7502
7503
7504
7505
7506
7507
7508
7509
7510
7511
7512 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
7513 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
7514 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7515 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7516 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7517 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7518 intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
7519 intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
7520 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
7521 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
7522
7523 g4x_disable_trickle_feed(dev_priv);
7524
7525 cpt_init_clock_gating(dev_priv);
7526
7527 gen6_check_mch_setup(dev_priv);
7528 }
7529
7530 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
7531 {
7532
7533
7534
7535
7536 if (HAS_PCH_LPT_LP(dev_priv))
7537 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
7538 intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
7539 PCH_LP_PARTITION_LEVEL_DISABLE);
7540
7541
7542 intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
7543 intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
7544 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7545 }
7546
7547 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
7548 {
7549 if (HAS_PCH_LPT_LP(dev_priv)) {
7550 u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
7551
7552 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7553 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
7554 }
7555 }
7556
7557 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7558 int general_prio_credits,
7559 int high_prio_credits)
7560 {
7561 u32 misccpctl;
7562 u32 val;
7563
7564
7565 misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
7566 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7567
7568 val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
7569 val &= ~L3_PRIO_CREDITS_MASK;
7570 val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
7571 val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
7572 intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
7573
7574
7575
7576
7577
7578 intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
7579 udelay(1);
7580 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
7581 }
7582
7583 static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
7584 {
7585
7586 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
7587 DPFC_CHICKEN_COMP_DUMMY_PIXEL);
7588
7589
7590 intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
7591 0, ICL_DELAY_PMRSP);
7592 }
7593
7594 static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
7595 {
7596
7597 if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
7598 IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
7599 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
7600 DPFC_CHICKEN_COMP_DUMMY_PIXEL);
7601
7602
7603 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
7604 intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
7605 TGL_VRH_GATING_DIS);
7606
7607
7608 if (DISPLAY_VER(dev_priv) == 12)
7609 intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
7610 CLKREQ_POLICY_MEM_UP_OVRD, 0);
7611 }
7612
7613 static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
7614 {
7615 gen12lp_init_clock_gating(dev_priv);
7616
7617
7618 intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
7619
7620
7621 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
7622 }
7623
7624 static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
7625 {
7626 gen12lp_init_clock_gating(dev_priv);
7627
7628
7629 if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
7630 intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
7631 DPT_GATING_DIS);
7632 }
7633
7634 static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
7635 {
7636
7637 if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
7638 intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
7639 }
7640
7641 static void dg2_init_clock_gating(struct drm_i915_private *i915)
7642 {
7643
7644 intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
7645 SGSI_SIDECLK_DIS);
7646
7647
7648
7649
7650
7651 if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
7652 intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
7653 SGR_DIS | SGGI_DIS);
7654 }
7655
7656 static void pvc_init_clock_gating(struct drm_i915_private *dev_priv)
7657 {
7658
7659 if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
7660 intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
7661
7662
7663 if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
7664 intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
7665 }
7666
7667 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
7668 {
7669 if (!HAS_PCH_CNP(dev_priv))
7670 return;
7671
7672
7673 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
7674 CNP_PWM_CGE_GATING_DISABLE);
7675 }
7676
7677 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
7678 {
7679 cnp_init_clock_gating(dev_priv);
7680 gen9_init_clock_gating(dev_priv);
7681
7682
7683 intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
7684 FBC_LLC_FULLY_OPEN);
7685
7686
7687
7688
7689
7690 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
7691 DISP_FBC_WM_DIS);
7692
7693
7694
7695
7696
7697 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
7698 intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
7699 DPFC_NUKE_ON_ANY_MODIFICATION);
7700 }
7701
7702 static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
7703 {
7704 gen9_init_clock_gating(dev_priv);
7705
7706
7707 intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
7708 FBC_LLC_FULLY_OPEN);
7709
7710
7711 if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
7712 intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
7713 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7714
7715
7716 if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
7717 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
7718 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7719
7720
7721
7722
7723
7724 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
7725 DISP_FBC_WM_DIS);
7726
7727
7728
7729
7730
7731 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
7732 intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
7733 DPFC_NUKE_ON_ANY_MODIFICATION);
7734 }
7735
7736 static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
7737 {
7738 gen9_init_clock_gating(dev_priv);
7739
7740
7741 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
7742 ~GEN7_DOP_CLOCK_GATE_ENABLE);
7743
7744
7745 intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
7746 FBC_LLC_FULLY_OPEN);
7747
7748
7749
7750
7751
7752 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
7753 DISP_FBC_WM_DIS);
7754
7755
7756
7757
7758
7759 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
7760 intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
7761 DPFC_NUKE_ON_ANY_MODIFICATION);
7762
7763
7764
7765
7766
7767 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
7768 intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
7769 DPFC_DISABLE_DUMMY0);
7770 }
7771
7772 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
7773 {
7774 enum pipe pipe;
7775
7776
7777 intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
7778 intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
7779 HSW_FBCQ_DIS);
7780
7781
7782 intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7783
7784
7785 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
7786 intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
7787
7788 for_each_pipe(dev_priv, pipe) {
7789
7790 intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
7791 intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
7792 BDW_DPRS_MASK_VBLANK_SRD);
7793 }
7794
7795
7796
7797 intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
7798 intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
7799 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7800
7801 intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
7802 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7803
7804
7805 intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
7806 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7807
7808
7809 gen8_set_l3sqc_credits(dev_priv, 30, 2);
7810
7811
7812 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
7813 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7814
7815 lpt_init_clock_gating(dev_priv);
7816
7817
7818
7819
7820
7821
7822 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
7823 intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
7824 }
7825
7826 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
7827 {
7828
7829 intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
7830 intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
7831 HSW_FBCQ_DIS);
7832
7833
7834 intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7835 intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7836 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7837
7838
7839 intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7840
7841 lpt_init_clock_gating(dev_priv);
7842 }
7843
7844 static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
7845 {
7846 u32 snpcr;
7847
7848 intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7849
7850
7851 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
7852 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
7853 ILK_FBCQ_DIS);
7854
7855
7856 intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
7857 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7858 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7859
7860 if (IS_IVB_GT1(dev_priv))
7861 intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
7862 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7863 else {
7864
7865 intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
7866 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7867 intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
7868 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7869 }
7870
7871
7872
7873
7874
7875 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
7876 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7877
7878
7879 intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7880 intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7881 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7882
7883 g4x_disable_trickle_feed(dev_priv);
7884
7885 snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
7886 snpcr &= ~GEN6_MBC_SNPCR_MASK;
7887 snpcr |= GEN6_MBC_SNPCR_MED;
7888 intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
7889
7890 if (!HAS_PCH_NOP(dev_priv))
7891 cpt_init_clock_gating(dev_priv);
7892
7893 gen6_check_mch_setup(dev_priv);
7894 }
7895
7896 static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
7897 {
7898
7899 intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
7900 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7901 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7902
7903
7904 intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
7905 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7906
7907
7908 intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7909 intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7910 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7911
7912
7913
7914
7915
7916 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
7917 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7918
7919
7920
7921
7922 intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
7923 intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
7924
7925
7926
7927
7928
7929
7930 intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7931 }
7932
7933 static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
7934 {
7935
7936
7937 intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
7938 intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
7939 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7940
7941
7942 intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
7943 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7944
7945
7946 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
7947 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7948
7949
7950 intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
7951 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7952
7953
7954
7955
7956
7957
7958 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7959 }
7960
7961 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7962 {
7963 u32 dspclk_gate;
7964
7965 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
7966 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7967 GS_UNIT_CLOCK_GATE_DISABLE |
7968 CL_UNIT_CLOCK_GATE_DISABLE);
7969 intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
7970 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7971 OVRUNIT_CLOCK_GATE_DISABLE |
7972 OVCUNIT_CLOCK_GATE_DISABLE;
7973 if (IS_GM45(dev_priv))
7974 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7975 intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
7976
7977 g4x_disable_trickle_feed(dev_priv);
7978 }
7979
7980 static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
7981 {
7982 struct intel_uncore *uncore = &dev_priv->uncore;
7983
7984 intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7985 intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
7986 intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
7987 intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
7988 intel_uncore_write16(uncore, DEUC, 0);
7989 intel_uncore_write(uncore,
7990 MI_ARB_STATE,
7991 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7992 }
7993
7994 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
7995 {
7996 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7997 I965_RCC_CLOCK_GATE_DISABLE |
7998 I965_RCPB_CLOCK_GATE_DISABLE |
7999 I965_ISC_CLOCK_GATE_DISABLE |
8000 I965_FBC_CLOCK_GATE_DISABLE);
8001 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
8002 intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
8003 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
8004 }
8005
8006 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
8007 {
8008 u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
8009
8010 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8011 DSTATE_DOT_CLOCK_GATING;
8012 intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
8013
8014 if (IS_PINEVIEW(dev_priv))
8015 intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
8016 _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
8017
8018
8019 intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
8020 _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
8021
8022
8023 intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
8024
8025
8026 intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
8027
8028 intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
8029 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
8030 }
8031
8032 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
8033 {
8034 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
8035
8036
8037 intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
8038 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
8039
8040 intel_uncore_write(&dev_priv->uncore, MEM_MODE,
8041 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
8042
8043
8044
8045
8046
8047
8048
8049
8050 intel_uncore_write(&dev_priv->uncore, SCPD0,
8051 _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
8052 }
8053
8054 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
8055 {
8056 intel_uncore_write(&dev_priv->uncore, MEM_MODE,
8057 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
8058 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
8059 }
8060
8061 void intel_init_clock_gating(struct drm_i915_private *dev_priv)
8062 {
8063 dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
8064 }
8065
8066 void intel_suspend_hw(struct drm_i915_private *dev_priv)
8067 {
8068 if (HAS_PCH_LPT(dev_priv))
8069 lpt_suspend_hw(dev_priv);
8070 }
8071
8072 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
8073 {
8074 drm_dbg_kms(&dev_priv->drm,
8075 "No clock gating settings or workarounds applied.\n");
8076 }
8077
8078 #define CG_FUNCS(platform) \
8079 static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
8080 .init_clock_gating = platform##_init_clock_gating, \
8081 }
8082
8083 CG_FUNCS(pvc);
8084 CG_FUNCS(dg2);
8085 CG_FUNCS(xehpsdv);
8086 CG_FUNCS(adlp);
8087 CG_FUNCS(dg1);
8088 CG_FUNCS(gen12lp);
8089 CG_FUNCS(icl);
8090 CG_FUNCS(cfl);
8091 CG_FUNCS(skl);
8092 CG_FUNCS(kbl);
8093 CG_FUNCS(bxt);
8094 CG_FUNCS(glk);
8095 CG_FUNCS(bdw);
8096 CG_FUNCS(chv);
8097 CG_FUNCS(hsw);
8098 CG_FUNCS(ivb);
8099 CG_FUNCS(vlv);
8100 CG_FUNCS(gen6);
8101 CG_FUNCS(ilk);
8102 CG_FUNCS(g4x);
8103 CG_FUNCS(i965gm);
8104 CG_FUNCS(i965g);
8105 CG_FUNCS(gen3);
8106 CG_FUNCS(i85x);
8107 CG_FUNCS(i830);
8108 CG_FUNCS(nop);
8109 #undef CG_FUNCS
8110
8111
8112
8113
8114
8115
8116
8117
8118
8119
8120 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
8121 {
8122 if (IS_PONTEVECCHIO(dev_priv))
8123 dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
8124 else if (IS_DG2(dev_priv))
8125 dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
8126 else if (IS_XEHPSDV(dev_priv))
8127 dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
8128 else if (IS_ALDERLAKE_P(dev_priv))
8129 dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
8130 else if (IS_DG1(dev_priv))
8131 dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
8132 else if (GRAPHICS_VER(dev_priv) == 12)
8133 dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
8134 else if (GRAPHICS_VER(dev_priv) == 11)
8135 dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
8136 else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
8137 dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
8138 else if (IS_SKYLAKE(dev_priv))
8139 dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
8140 else if (IS_KABYLAKE(dev_priv))
8141 dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
8142 else if (IS_BROXTON(dev_priv))
8143 dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
8144 else if (IS_GEMINILAKE(dev_priv))
8145 dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
8146 else if (IS_BROADWELL(dev_priv))
8147 dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
8148 else if (IS_CHERRYVIEW(dev_priv))
8149 dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
8150 else if (IS_HASWELL(dev_priv))
8151 dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
8152 else if (IS_IVYBRIDGE(dev_priv))
8153 dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
8154 else if (IS_VALLEYVIEW(dev_priv))
8155 dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
8156 else if (GRAPHICS_VER(dev_priv) == 6)
8157 dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
8158 else if (GRAPHICS_VER(dev_priv) == 5)
8159 dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
8160 else if (IS_G4X(dev_priv))
8161 dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
8162 else if (IS_I965GM(dev_priv))
8163 dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
8164 else if (IS_I965G(dev_priv))
8165 dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
8166 else if (GRAPHICS_VER(dev_priv) == 3)
8167 dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
8168 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
8169 dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
8170 else if (GRAPHICS_VER(dev_priv) == 2)
8171 dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
8172 else {
8173 MISSING_CASE(INTEL_DEVID(dev_priv));
8174 dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
8175 }
8176 }
8177
8178 static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
8179 .compute_global_watermarks = skl_compute_wm,
8180 };
8181
8182 static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
8183 .compute_pipe_wm = ilk_compute_pipe_wm,
8184 .compute_intermediate_wm = ilk_compute_intermediate_wm,
8185 .initial_watermarks = ilk_initial_watermarks,
8186 .optimize_watermarks = ilk_optimize_watermarks,
8187 };
8188
8189 static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
8190 .compute_pipe_wm = vlv_compute_pipe_wm,
8191 .compute_intermediate_wm = vlv_compute_intermediate_wm,
8192 .initial_watermarks = vlv_initial_watermarks,
8193 .optimize_watermarks = vlv_optimize_watermarks,
8194 .atomic_update_watermarks = vlv_atomic_update_fifo,
8195 };
8196
8197 static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
8198 .compute_pipe_wm = g4x_compute_pipe_wm,
8199 .compute_intermediate_wm = g4x_compute_intermediate_wm,
8200 .initial_watermarks = g4x_initial_watermarks,
8201 .optimize_watermarks = g4x_optimize_watermarks,
8202 };
8203
8204 static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
8205 .update_wm = pnv_update_wm,
8206 };
8207
8208 static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
8209 .update_wm = i965_update_wm,
8210 };
8211
8212 static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
8213 .update_wm = i9xx_update_wm,
8214 };
8215
8216 static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
8217 .update_wm = i845_update_wm,
8218 };
8219
8220 static const struct drm_i915_wm_disp_funcs nop_funcs = {
8221 };
8222
8223
8224 void intel_init_pm(struct drm_i915_private *dev_priv)
8225 {
8226
8227 if (IS_PINEVIEW(dev_priv))
8228 pnv_get_mem_freq(dev_priv);
8229 else if (GRAPHICS_VER(dev_priv) == 5)
8230 ilk_get_mem_freq(dev_priv);
8231
8232 intel_sagv_init(dev_priv);
8233
8234
8235 if (DISPLAY_VER(dev_priv) >= 9) {
8236 skl_setup_wm_latency(dev_priv);
8237 dev_priv->wm_disp = &skl_wm_funcs;
8238 } else if (HAS_PCH_SPLIT(dev_priv)) {
8239 ilk_setup_wm_latency(dev_priv);
8240
8241 if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
8242 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
8243 (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
8244 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
8245 dev_priv->wm_disp = &ilk_wm_funcs;
8246 } else {
8247 drm_dbg_kms(&dev_priv->drm,
8248 "Failed to read display plane latency. "
8249 "Disable CxSR\n");
8250 dev_priv->wm_disp = &nop_funcs;
8251 }
8252 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
8253 vlv_setup_wm_latency(dev_priv);
8254 dev_priv->wm_disp = &vlv_wm_funcs;
8255 } else if (IS_G4X(dev_priv)) {
8256 g4x_setup_wm_latency(dev_priv);
8257 dev_priv->wm_disp = &g4x_wm_funcs;
8258 } else if (IS_PINEVIEW(dev_priv)) {
8259 if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
8260 dev_priv->is_ddr3,
8261 dev_priv->fsb_freq,
8262 dev_priv->mem_freq)) {
8263 drm_info(&dev_priv->drm,
8264 "failed to find known CxSR latency "
8265 "(found ddr%s fsb freq %d, mem freq %d), "
8266 "disabling CxSR\n",
8267 (dev_priv->is_ddr3 == 1) ? "3" : "2",
8268 dev_priv->fsb_freq, dev_priv->mem_freq);
8269
8270 intel_set_memory_cxsr(dev_priv, false);
8271 dev_priv->wm_disp = &nop_funcs;
8272 } else
8273 dev_priv->wm_disp = &pnv_wm_funcs;
8274 } else if (DISPLAY_VER(dev_priv) == 4) {
8275 dev_priv->wm_disp = &i965_wm_funcs;
8276 } else if (DISPLAY_VER(dev_priv) == 3) {
8277 dev_priv->wm_disp = &i9xx_wm_funcs;
8278 } else if (DISPLAY_VER(dev_priv) == 2) {
8279 if (INTEL_NUM_PIPES(dev_priv) == 1)
8280 dev_priv->wm_disp = &i845_wm_funcs;
8281 else
8282 dev_priv->wm_disp = &i9xx_wm_funcs;
8283 } else {
8284 drm_err(&dev_priv->drm,
8285 "unexpected fall-through in %s\n", __func__);
8286 dev_priv->wm_disp = &nop_funcs;
8287 }
8288 }
8289
8290 void intel_pm_setup(struct drm_i915_private *dev_priv)
8291 {
8292 dev_priv->runtime_pm.suspended = false;
8293 atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
8294 }
8295
8296 static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
8297 {
8298 struct intel_dbuf_state *dbuf_state;
8299
8300 dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
8301 if (!dbuf_state)
8302 return NULL;
8303
8304 return &dbuf_state->base;
8305 }
8306
8307 static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
8308 struct intel_global_state *state)
8309 {
8310 kfree(state);
8311 }
8312
8313 static const struct intel_global_state_funcs intel_dbuf_funcs = {
8314 .atomic_duplicate_state = intel_dbuf_duplicate_state,
8315 .atomic_destroy_state = intel_dbuf_destroy_state,
8316 };
8317
8318 struct intel_dbuf_state *
8319 intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
8320 {
8321 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8322 struct intel_global_state *dbuf_state;
8323
8324 dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
8325 if (IS_ERR(dbuf_state))
8326 return ERR_CAST(dbuf_state);
8327
8328 return to_intel_dbuf_state(dbuf_state);
8329 }
8330
8331 int intel_dbuf_init(struct drm_i915_private *dev_priv)
8332 {
8333 struct intel_dbuf_state *dbuf_state;
8334
8335 dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
8336 if (!dbuf_state)
8337 return -ENOMEM;
8338
8339 intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
8340 &dbuf_state->base, &intel_dbuf_funcs);
8341
8342 return 0;
8343 }
8344
8345
8346
8347
8348
8349 static void update_mbus_pre_enable(struct intel_atomic_state *state)
8350 {
8351 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8352 u32 mbus_ctl, dbuf_min_tracker_val;
8353 enum dbuf_slice slice;
8354 const struct intel_dbuf_state *dbuf_state =
8355 intel_atomic_get_new_dbuf_state(state);
8356
8357 if (!HAS_MBUS_JOINING(dev_priv))
8358 return;
8359
8360
8361
8362
8363
8364 if (dbuf_state->joined_mbus) {
8365 mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
8366 MBUS_JOIN_PIPE_SELECT_NONE;
8367 dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
8368 } else {
8369 mbus_ctl = MBUS_HASHING_MODE_2x2 |
8370 MBUS_JOIN_PIPE_SELECT_NONE;
8371 dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
8372 }
8373
8374 intel_de_rmw(dev_priv, MBUS_CTL,
8375 MBUS_HASHING_MODE_MASK | MBUS_JOIN |
8376 MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
8377
8378 for_each_dbuf_slice(dev_priv, slice)
8379 intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
8380 DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
8381 dbuf_min_tracker_val);
8382 }
8383
8384 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
8385 {
8386 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8387 const struct intel_dbuf_state *new_dbuf_state =
8388 intel_atomic_get_new_dbuf_state(state);
8389 const struct intel_dbuf_state *old_dbuf_state =
8390 intel_atomic_get_old_dbuf_state(state);
8391
8392 if (!new_dbuf_state ||
8393 ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
8394 && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
8395 return;
8396
8397 WARN_ON(!new_dbuf_state->base.changed);
8398
8399 update_mbus_pre_enable(state);
8400 gen9_dbuf_slices_update(dev_priv,
8401 old_dbuf_state->enabled_slices |
8402 new_dbuf_state->enabled_slices);
8403 }
8404
8405 void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
8406 {
8407 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8408 const struct intel_dbuf_state *new_dbuf_state =
8409 intel_atomic_get_new_dbuf_state(state);
8410 const struct intel_dbuf_state *old_dbuf_state =
8411 intel_atomic_get_old_dbuf_state(state);
8412
8413 if (!new_dbuf_state ||
8414 ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
8415 && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
8416 return;
8417
8418 WARN_ON(!new_dbuf_state->base.changed);
8419
8420 gen9_dbuf_slices_update(dev_priv,
8421 new_dbuf_state->enabled_slices);
8422 }
8423
8424 void intel_mbus_dbox_update(struct intel_atomic_state *state)
8425 {
8426 struct drm_i915_private *i915 = to_i915(state->base.dev);
8427 const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
8428 const struct intel_crtc_state *new_crtc_state;
8429 const struct intel_crtc *crtc;
8430 u32 val = 0;
8431 int i;
8432
8433 if (DISPLAY_VER(i915) < 11)
8434 return;
8435
8436 new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
8437 old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
8438 if (!new_dbuf_state ||
8439 (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
8440 new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
8441 return;
8442
8443 if (DISPLAY_VER(i915) >= 12) {
8444 val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
8445 val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
8446 val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
8447 }
8448
8449
8450 if (IS_ALDERLAKE_P(i915))
8451 val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
8452 MBUS_DBOX_A_CREDIT(4);
8453 else
8454 val |= MBUS_DBOX_A_CREDIT(2);
8455
8456 if (IS_ALDERLAKE_P(i915)) {
8457 val |= MBUS_DBOX_BW_CREDIT(2);
8458 val |= MBUS_DBOX_B_CREDIT(8);
8459 } else if (DISPLAY_VER(i915) >= 12) {
8460 val |= MBUS_DBOX_BW_CREDIT(2);
8461 val |= MBUS_DBOX_B_CREDIT(12);
8462 } else {
8463 val |= MBUS_DBOX_BW_CREDIT(1);
8464 val |= MBUS_DBOX_B_CREDIT(8);
8465 }
8466
8467 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8468 if (!new_crtc_state->hw.active ||
8469 !intel_crtc_needs_modeset(new_crtc_state))
8470 continue;
8471
8472 intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val);
8473 }
8474 }