0001
0002
0003
0004
0005
0006 #include <linux/string_helpers.h>
0007 #include <linux/kernel.h>
0008
0009 #include <drm/drm_print.h>
0010
0011 #include "i915_drv.h"
0012 #include "i915_reg.h"
0013 #include "i915_trace.h"
0014 #include "i915_utils.h"
0015 #include "intel_pm.h"
0016 #include "vlv_suspend.h"
0017
0018 #include "gt/intel_gt_regs.h"
0019
struct vlv_s0ix_state {
	/*
	 * Snapshot of the Gunit register state taken before entering S0ix and
	 * written back on resume. Each field caches the value of the
	 * identically-named register; see vlv_save_gunit_s0ix_state() /
	 * vlv_restore_gunit_s0ix_state() for the exact register list.
	 */

	/* GAM state: watermarks, arbitration, pending-TLB and HWSP registers */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC unit */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP: clock gating and reset control */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM: RP/RC6 power management state */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* GT interrupt state and scratch registers */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Gunit-Display CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
/*
 * Save the Gunit register state into i915->vlv_s0ix_state so it can be
 * written back by vlv_restore_gunit_s0ix_state() after exiting S0ix.
 * The groups below mirror the field groups in struct vlv_s0ix_state.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
{
	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
	struct intel_uncore *uncore = &i915->uncore;
	int i;

	/* The state is only allocated on VLV (see vlv_suspend_init()). */
	if (!s)
		return;

	/* GAM */
	s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
	s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
	s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));

	s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
	s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
	s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
	s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);

	/* MBC */
	s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
	s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
	s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);

	/* GCP */
	s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
	s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
	s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
	s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
	s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
	s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);

	/* GPM */
	s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
	s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
	s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
	s->ecobus = intel_uncore_read(uncore, ECOBUS);
	s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
	s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
	s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
	s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
	s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);

	/* GT interrupt state */
	s->gt_imr = intel_uncore_read(uncore, GTIMR);
	s->gt_ier = intel_uncore_read(uncore, GTIER);
	s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
	s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain */
	s->tilectl = intel_uncore_read(uncore, TILECTL);
	s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
	s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);

	/* Gunit-Display CZ domain */
	s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
	s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
	s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
	s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);

	/*
	 * NOTE(review): other units (e.g. DFT, SARB, GAC, PCI config space)
	 * are deliberately not saved here — presumably they either retain
	 * their state across S0ix or are reprogrammed elsewhere on resume;
	 * confirm against the platform documentation before extending.
	 */
}
0192
/*
 * Write the Gunit register state captured by vlv_save_gunit_s0ix_state()
 * back to the hardware after exiting S0ix. The write order mirrors the
 * save order; the GTLC wake/survivability registers are merged rather than
 * blindly overwritten, see below.
 */
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
{
	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;
	int i;

	/* The state is only allocated on VLV (see vlv_suspend_init()). */
	if (!s)
		return;

	/* GAM */
	intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
	intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	/* ARB_MODE is a masked register: set all mask bits so every field sticks */
	intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
	intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
	intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);

	intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC */
	intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
	intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
	intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);

	/* GCP */
	intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
	intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
	intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
	intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
	intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
	intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);

	/* GPM */
	intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
	intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
	intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
	intel_uncore_write(uncore, ECOBUS, s->ecobus);
	intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
	intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
	intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);

	/* GT interrupt state */
	intel_uncore_write(uncore, GTIMR, s->gt_imr);
	intel_uncore_write(uncore, GTIER, s->gt_ier);
	intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
	intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain */
	intel_uncore_write(uncore, TILECTL, s->tilectl);
	intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);

	/*
	 * Preserve the current GT allow-wake and GFX force-clock bits instead
	 * of restoring the saved ones: those two bits are driven by the
	 * suspend/resume sequence itself (vlv_allow_gt_wake() and
	 * vlv_force_gfx_clock()) and must not be clobbered mid-sequence.
	 */
	val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);

	val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);

	intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain */
	intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
	intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
	intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
	intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
0283
/*
 * Poll VLV_GTLC_PW_STATUS until (status & mask) == val, or a 3 ms timeout.
 *
 * Returns 0 on success, a negative error code (from wait_for()) on timeout.
 */
static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/*
	 * Use the sleeping wait_for() loop with untraced reads rather than a
	 * busy spin: polling PW_STATUS at high frequency is undesirable here,
	 * and tracing every poll iteration would be noise. RC6 state
	 * transitions are expected to complete within the 3 ms timeout —
	 * NOTE(review): the exact HW latency bound isn't visible in this
	 * file, confirm against the RPS setup code.
	 */
	ret = wait_for(((reg_value =
			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
		       == val, 3);

	/* Just trace the final value once, in place of the untraced polls. */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}
0307
0308 static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
0309 {
0310 struct intel_uncore *uncore = &i915->uncore;
0311 u32 val;
0312 int err;
0313
0314 val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
0315 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
0316 if (force_on)
0317 val |= VLV_GFX_CLK_FORCE_ON_BIT;
0318 intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
0319
0320 if (!force_on)
0321 return 0;
0322
0323 err = intel_wait_for_register(uncore,
0324 VLV_GTLC_SURVIVABILITY_REG,
0325 VLV_GFX_CLK_STATUS_BIT,
0326 VLV_GFX_CLK_STATUS_BIT,
0327 20);
0328 if (err)
0329 drm_err(&i915->drm,
0330 "timeout waiting for GFX clock force-on (%08x)\n",
0331 intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));
0332
0333 return err;
0334 }
0335
0336 static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
0337 {
0338 struct intel_uncore *uncore = &i915->uncore;
0339 u32 mask;
0340 u32 val;
0341 int err;
0342
0343 val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
0344 val &= ~VLV_GTLC_ALLOWWAKEREQ;
0345 if (allow)
0346 val |= VLV_GTLC_ALLOWWAKEREQ;
0347 intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
0348 intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
0349
0350 mask = VLV_GTLC_ALLOWWAKEACK;
0351 val = allow ? mask : 0;
0352
0353 err = vlv_wait_for_pw_status(i915, mask, val);
0354 if (err)
0355 drm_err(&i915->drm, "timeout disabling GT waking\n");
0356
0357 return err;
0358 }
0359
0360 static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
0361 bool wait_for_on)
0362 {
0363 u32 mask;
0364 u32 val;
0365
0366 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
0367 val = wait_for_on ? mask : 0;
0368
0369
0370
0371
0372
0373
0374
0375
0376 if (vlv_wait_for_pw_status(dev_priv, mask, val))
0377 drm_dbg(&dev_priv->drm,
0378 "timeout waiting for GT wells to go %s\n",
0379 str_on_off(wait_for_on));
0380 }
0381
0382 static void vlv_check_no_gt_access(struct drm_i915_private *i915)
0383 {
0384 struct intel_uncore *uncore = &i915->uncore;
0385
0386 if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
0387 return;
0388
0389 drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
0390 intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
0391 }
0392
/*
 * Prepare the GT side of VLV/CHV for S0ix: wait for the power wells to turn
 * off, block further GT wake requests and save the Gunit register state so
 * vlv_resume_prepare() can restore it. No-op on other platforms.
 *
 * Returns 0 on success or a negative error code; on failure GT waking is
 * re-enabled and the GFX clock force is released before returning.
 */
int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return 0;

	/*
	 * The well state is advisory only, so a timeout here is logged by
	 * vlv_wait_for_gt_wells() but not treated as a hard failure.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	/* Both context-exists bits are expected to still be set at this point. */
	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	drm_WARN_ON(&dev_priv->drm,
		    (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	/* Keep the GFX clock forced on while the Gunit state is saved. */
	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing. */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
0437
/*
 * Undo vlv_suspend_complete(): restore the saved Gunit state, re-enable GT
 * waking and (on runtime resume) re-apply clock gating. No-op on platforms
 * other than VLV/CHV.
 *
 * Returns 0 on success, otherwise the FIRST error encountered; later steps
 * are still attempted so the hardware is left in the best possible state.
 */
int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
{
	int err;
	int ret;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return 0;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. 'ret' keeps the first failure, 'err' the
	 * current step's result.
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}
0470
0471 int vlv_suspend_init(struct drm_i915_private *i915)
0472 {
0473 if (!IS_VALLEYVIEW(i915))
0474 return 0;
0475
0476
0477 i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
0478 GFP_KERNEL);
0479 if (!i915->vlv_s0ix_state)
0480 return -ENOMEM;
0481
0482 return 0;
0483 }
0484
0485 void vlv_suspend_cleanup(struct drm_i915_private *i915)
0486 {
0487 if (!i915->vlv_s0ix_state)
0488 return;
0489
0490 kfree(i915->vlv_s0ix_state);
0491 i915->vlv_s0ix_state = NULL;
0492 }