0039 #include "i915_drv.h"
0040 #include "i915_reg.h"
0041 #include "gvt.h"
0042 #include "i915_pvinfo.h"
0043 #include "intel_mchbar_regs.h"
0044 #include "display/intel_display_types.h"
0045 #include "display/intel_dmc_regs.h"
0046 #include "display/intel_fbc.h"
0047 #include "display/vlv_dsi_pll_regs.h"
0048 #include "gt/intel_gt_regs.h"
0049
0050
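/* Panel power sequencer registers on the PCH, defined here by raw offset */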
0051 #define PCH_PP_STATUS _MMIO(0xc7200)
0052 #define PCH_PP_CONTROL _MMIO(0xc7204)
0053 #define PCH_PP_ON_DELAYS _MMIO(0xc7208)
0054 #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
0055 #define PCH_PP_DIVISOR _MMIO(0xc7210)
0056
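/* Map the host platform to the device flags (D_BDW, D_SKL, ...) used to match MMIO handlers */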
0057 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
0058 {
0059 struct drm_i915_private *i915 = gvt->gt->i915;
0060
0061 if (IS_BROADWELL(i915))
0062 return D_BDW;
0063 else if (IS_SKYLAKE(i915))
0064 return D_SKL;
0065 else if (IS_KABYLAKE(i915))
0066 return D_KBL;
0067 else if (IS_BROXTON(i915))
0068 return D_BXT;
0069 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
0070 return D_CFL;
0071
0072 return 0;
0073 }
0074
0075 static bool intel_gvt_match_device(struct intel_gvt *gvt,
0076 unsigned long device)
0077 {
0078 return intel_gvt_get_device_type(gvt) & device;
0079 }
0080
0081 static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
0082 void *p_data, unsigned int bytes)
0083 {
0084 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
0085 }
0086
0087 static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
0088 void *p_data, unsigned int bytes)
0089 {
0090 memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
0091 }
0092
0093 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
0094 unsigned int offset)
0095 {
0096 struct intel_gvt_mmio_info *e;
0097
0098 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
0099 if (e->offset == offset)
0100 return e;
0101 }
0102 return NULL;
0103 }
0104
0105 static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
0106 u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
0107 gvt_mmio_func read, gvt_mmio_func write)
0108 {
0109 struct intel_gvt_mmio_info *p;
0110 u32 start, end, i;
0111
0112 if (!intel_gvt_match_device(gvt, device))
0113 return 0;
0114
0115 if (WARN_ON(!IS_ALIGNED(offset, 4)))
0116 return -EINVAL;
0117
0118 start = offset;
0119 end = offset + size;
0120
0121 for (i = start; i < end; i += 4) {
0122 p = intel_gvt_find_mmio_info(gvt, i);
0123 if (!p) {
0124 WARN(1, "assign a handler to a non-tracked mmio %x\n",
0125 i);
0126 return -ENODEV;
0127 }
0128 p->ro_mask = ro_mask;
0129 gvt->mmio.mmio_attribute[i / 4] = flags;
0130 if (read)
0131 p->read = read;
0132 if (write)
0133 p->write = write;
0134 }
0135 return 0;
0136 }
0137
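/**
 * intel_gvt_render_mmio_to_engine - convert a render MMIO offset to its engine
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns the engine whose MMIO page contains @offset, or NULL if the offset
 * does not fall into any engine's MMIO range.
 */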
0146 const struct intel_engine_cs *
0147 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
0148 {
0149 struct intel_engine_cs *engine;
0150 enum intel_engine_id id;
0151
0152 offset &= ~GENMASK(11, 0);
0153 for_each_engine(engine, gvt->gt, id)
0154 if (engine->mmio_base == offset)
0155 return engine;
0156
0157 return NULL;
0158 }
0159
0160 #define offset_to_fence_num(offset) \
0161 ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
0162
0163 #define fence_num_to_offset(num) \
0164 (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
0165
0166
0167 void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
0168 {
0169 switch (reason) {
0170 case GVT_FAILSAFE_UNSUPPORTED_GUEST:
0171 pr_err("Detected your guest driver doesn't support GVT-g.\n");
0172 break;
0173 case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
0174 pr_err("Graphics resource is not enough for the guest\n");
0175 break;
0176 case GVT_FAILSAFE_GUEST_ERR:
0177 pr_err("GVT Internal error for the guest\n");
0178 break;
0179 default:
0180 break;
0181 }
0182 pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
0183 vgpu->failsafe = true;
0184 }
0185
0186 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
0187 unsigned int fence_num, void *p_data, unsigned int bytes)
0188 {
0189 unsigned int max_fence = vgpu_fence_sz(vgpu);
0190
0191 if (fence_num >= max_fence) {
0192 gvt_vgpu_err("access oob fence reg %d/%d\n",
0193 fence_num, max_fence);
0194
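/*
 * When a guest touches an out-of-range fence register before it has
 * accessed the PVINFO page, treat it as a guest without GVT-g support
 * and switch the vGPU into failsafe mode.
 */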
0199 if (!vgpu->pv_notified)
0200 enter_failsafe_mode(vgpu,
0201 GVT_FAILSAFE_UNSUPPORTED_GUEST);
0202
0203 memset(p_data, 0, bytes);
0204 return -EINVAL;
0205 }
0206 return 0;
0207 }
0208
0209 static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
0210 unsigned int offset, void *p_data, unsigned int bytes)
0211 {
0212 u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
0213
0214 if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
0215 if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
0216 gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
0217 else if (!ips)
0218 gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
0219 else {
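/*
 * 64K IPS can only be enabled or disabled for all engines at once on a
 * vGPU; reject partial settings since we cannot tell which engine the
 * shadow PPGTT will be bound to.
 */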
0224 gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
0225 ips);
0226 return -EINVAL;
0227 }
0228 }
0229
0230 write_vreg(vgpu, offset, p_data, bytes);
0231 return 0;
0232 }
0233
0234 static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
0235 void *p_data, unsigned int bytes)
0236 {
0237 int ret;
0238
0239 ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
0240 p_data, bytes);
0241 if (ret)
0242 return ret;
0243 read_vreg(vgpu, off, p_data, bytes);
0244 return 0;
0245 }
0246
0247 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
0248 void *p_data, unsigned int bytes)
0249 {
0250 struct intel_gvt *gvt = vgpu->gvt;
0251 unsigned int fence_num = offset_to_fence_num(off);
0252 int ret;
0253
0254 ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
0255 if (ret)
0256 return ret;
0257 write_vreg(vgpu, off, p_data, bytes);
0258
0259 mmio_hw_access_pre(gvt->gt);
0260 intel_vgpu_write_fence(vgpu, fence_num,
0261 vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
0262 mmio_hw_access_post(gvt->gt);
0263 return 0;
0264 }
0265
0266 #define CALC_MODE_MASK_REG(old, new) \
0267 (((new) & GENMASK(31, 16)) \
0268 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
0269 | ((new) & ((new) >> 16))))
0270
0271 static int mul_force_wake_write(struct intel_vgpu *vgpu,
0272 unsigned int offset, void *p_data, unsigned int bytes)
0273 {
0274 u32 old, new;
0275 u32 ack_reg_offset;
0276
0277 old = vgpu_vreg(vgpu, offset);
0278 new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
0279
0280 if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
0281 switch (offset) {
0282 case FORCEWAKE_RENDER_GEN9_REG:
0283 ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
0284 break;
0285 case FORCEWAKE_GT_GEN9_REG:
0286 ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
0287 break;
0288 case FORCEWAKE_MEDIA_GEN9_REG:
0289 ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
0290 break;
0291 default:
0292
0293 gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
0294 return -EINVAL;
0295 }
0296 } else {
0297 ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
0298 }
0299
0300 vgpu_vreg(vgpu, offset) = new;
0301 vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
0302 return 0;
0303 }
0304
0305 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
0306 void *p_data, unsigned int bytes)
0307 {
0308 intel_engine_mask_t engine_mask = 0;
0309 u32 data;
0310
0311 write_vreg(vgpu, offset, p_data, bytes);
0312 data = vgpu_vreg(vgpu, offset);
0313
0314 if (data & GEN6_GRDOM_FULL) {
0315 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
0316 engine_mask = ALL_ENGINES;
0317 } else {
0318 if (data & GEN6_GRDOM_RENDER) {
0319 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
0320 engine_mask |= BIT(RCS0);
0321 }
0322 if (data & GEN6_GRDOM_MEDIA) {
0323 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
0324 engine_mask |= BIT(VCS0);
0325 }
0326 if (data & GEN6_GRDOM_BLT) {
0327 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
0328 engine_mask |= BIT(BCS0);
0329 }
0330 if (data & GEN6_GRDOM_VECS) {
0331 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
0332 engine_mask |= BIT(VECS0);
0333 }
0334 if (data & GEN8_GRDOM_MEDIA2) {
0335 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
0336 engine_mask |= BIT(VCS1);
0337 }
0338 if (data & GEN9_GRDOM_GUC) {
0339 gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
0340 vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
0341 }
0342 engine_mask &= vgpu->gvt->gt->info.engine_mask;
0343 }
0344
0345
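/* vgpu_lock is already held by the MMIO emulation path */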
0346 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
0347
0348
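/* the guest treats the reset bits clearing as the ack, so clear them now */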
0349 vgpu_vreg(vgpu, offset) = 0;
0350
0351 return 0;
0352 }
0353
0354 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
0355 void *p_data, unsigned int bytes)
0356 {
0357 return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
0358 }
0359
0360 static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
0361 void *p_data, unsigned int bytes)
0362 {
0363 return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
0364 }
0365
0366 static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
0367 unsigned int offset, void *p_data, unsigned int bytes)
0368 {
0369 write_vreg(vgpu, offset, p_data, bytes);
0370
0371 if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
0372 vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
0373 vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
0374 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
0375 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
0376
0377 } else
0378 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
0379 ~(PP_ON | PP_SEQUENCE_POWER_DOWN
0380 | PP_CYCLE_DELAY_ACTIVE);
0381 return 0;
0382 }
0383
0384 static int transconf_mmio_write(struct intel_vgpu *vgpu,
0385 unsigned int offset, void *p_data, unsigned int bytes)
0386 {
0387 write_vreg(vgpu, offset, p_data, bytes);
0388
0389 if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
0390 vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
0391 else
0392 vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
0393 return 0;
0394 }
0395
0396 static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
0397 void *p_data, unsigned int bytes)
0398 {
0399 write_vreg(vgpu, offset, p_data, bytes);
0400
0401 if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
0402 vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
0403 else
0404 vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
0405
0406 if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
0407 vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
0408 else
0409 vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
0410
0411 return 0;
0412 }
0413
0414 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
0415 void *p_data, unsigned int bytes)
0416 {
0417 switch (offset) {
0418 case 0xe651c:
0419 case 0xe661c:
0420 case 0xe671c:
0421 case 0xe681c:
0422 vgpu_vreg(vgpu, offset) = 1 << 17;
0423 break;
0424 case 0xe6c04:
0425 vgpu_vreg(vgpu, offset) = 0x3;
0426 break;
0427 case 0xe6e1c:
0428 vgpu_vreg(vgpu, offset) = 0x2f << 16;
0429 break;
0430 default:
0431 return -EINVAL;
0432 }
0433
0434 read_vreg(vgpu, offset, p_data, bytes);
0435 return 0;
0436 }
0437
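/* Per-platform helpers that derive the DP link rate programmed by the guest */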
0454 static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
0455 {
0456 u32 dp_br = 0;
0457 u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));
0458
0459 switch (ddi_pll_sel) {
0460 case PORT_CLK_SEL_LCPLL_2700:
0461 dp_br = 270000 * 2;
0462 break;
0463 case PORT_CLK_SEL_LCPLL_1350:
0464 dp_br = 135000 * 2;
0465 break;
0466 case PORT_CLK_SEL_LCPLL_810:
0467 dp_br = 81000 * 2;
0468 break;
0469 case PORT_CLK_SEL_SPLL:
0470 {
0471 switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
0472 case SPLL_FREQ_810MHz:
0473 dp_br = 81000 * 2;
0474 break;
0475 case SPLL_FREQ_1350MHz:
0476 dp_br = 135000 * 2;
0477 break;
0478 case SPLL_FREQ_2700MHz:
0479 dp_br = 270000 * 2;
0480 break;
0481 default:
0482 gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
0483 vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
0484 break;
0485 }
0486 break;
0487 }
0488 case PORT_CLK_SEL_WRPLL1:
0489 case PORT_CLK_SEL_WRPLL2:
0490 {
0491 u32 wrpll_ctl;
0492 int refclk, n, p, r;
0493
0494 if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
0495 wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
0496 else
0497 wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));
0498
0499 switch (wrpll_ctl & WRPLL_REF_MASK) {
0500 case WRPLL_REF_PCH_SSC:
0501 refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
0502 break;
0503 case WRPLL_REF_LCPLL:
0504 refclk = 2700000;
0505 break;
0506 default:
0507 gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
0508 vgpu->id, port_name(port), wrpll_ctl);
0509 goto out;
0510 }
0511
0512 r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
0513 p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
0514 n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
0515
0516 dp_br = (refclk * n / 10) / (p * r) * 2;
0517 break;
0518 }
0519 default:
0520 gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
0521 vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
0522 break;
0523 }
0524
0525 out:
0526 return dp_br;
0527 }
0528
0529 static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
0530 {
0531 u32 dp_br = 0;
0532 int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
0533 enum dpio_phy phy = DPIO_PHY0;
0534 enum dpio_channel ch = DPIO_CH0;
0535 struct dpll clock = {0};
0536 u32 temp;
0537
0538
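/* fixed port to PHY/channel mapping on Broxton */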
0539 switch (port) {
0540 case PORT_A:
0541 phy = DPIO_PHY1;
0542 ch = DPIO_CH0;
0543 break;
0544 case PORT_B:
0545 phy = DPIO_PHY0;
0546 ch = DPIO_CH0;
0547 break;
0548 case PORT_C:
0549 phy = DPIO_PHY0;
0550 ch = DPIO_CH1;
0551 break;
0552 default:
0553 gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
0554 goto out;
0555 }
0556
0557 temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
0558 if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
0559 gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
0560 vgpu->id, port_name(port), temp);
0561 goto out;
0562 }
0563
0564 clock.m1 = 2;
0565 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
0566 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
0567 if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
0568 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
0569 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
0570 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
0571 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
0572 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
0573 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
0574 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
0575 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
0576 clock.m = clock.m1 * clock.m2;
0577 clock.p = clock.p1 * clock.p2 * 5;
0578
0579 if (clock.n == 0 || clock.p == 0) {
0580 gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
0581 goto out;
0582 }
0583
0584 clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
0585 clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
0586
0587 dp_br = clock.dot;
0588
0589 out:
0590 return dp_br;
0591 }
0592
0593 static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
0594 {
0595 u32 dp_br = 0;
0596 enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;
0597
0598
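/* find which DPLL is routed to this port via DPLL_CTRL2 */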
0599 if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
0600 (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
0601 dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
0602 DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
0603 DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
0604 } else {
0605 gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
0606 vgpu->id, port_name(port));
0607 return dp_br;
0608 }
0609
0610
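/* translate the DPLL_CTRL1 link rate field into a DP bit rate */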
0611 switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
0612 DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
0613 DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
0614 case DPLL_CTRL1_LINK_RATE_810:
0615 dp_br = 81000 * 2;
0616 break;
0617 case DPLL_CTRL1_LINK_RATE_1080:
0618 dp_br = 108000 * 2;
0619 break;
0620 case DPLL_CTRL1_LINK_RATE_1350:
0621 dp_br = 135000 * 2;
0622 break;
0623 case DPLL_CTRL1_LINK_RATE_1620:
0624 dp_br = 162000 * 2;
0625 break;
0626 case DPLL_CTRL1_LINK_RATE_2160:
0627 dp_br = 216000 * 2;
0628 break;
0629 case DPLL_CTRL1_LINK_RATE_2700:
0630 dp_br = 270000 * 2;
0631 break;
0632 default:
0633 dp_br = 0;
0634 gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
0635 vgpu->id, port_name(port), dpll_id);
0636 }
0637
0638 return dp_br;
0639 }
0640
0641 static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
0642 {
0643 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
0644 enum port port;
0645 u32 dp_br, link_m, link_n, htotal, vtotal;
0646
0647
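/* the vGPU display only uses TRANSCODER_A; find the port its DDI function drives */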
0648 port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
0649 TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
0650 if (port != PORT_B && port != PORT_D) {
0651 gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
0652 return;
0653 }
0654
0655
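/* derive the DP link rate with the platform specific helper */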
0656 if (IS_BROADWELL(dev_priv))
0657 dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
0658 else if (IS_BROXTON(dev_priv))
0659 dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
0660 else
0661 dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
0662
0663
0664 link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
0665 link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
0666
0667
0668 htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
0669 vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
0670
0671 if (dp_br && link_n && htotal && vtotal) {
0672 u64 pixel_clk = 0;
0673 u32 new_rate = 0;
0674 u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
0675
0676
0677 pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
0678 pixel_clk *= MSEC_PER_SEC;
0679
0680
0681 new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));
0682
0683 if (*old_rate != new_rate)
0684 *old_rate = new_rate;
0685
0686 gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
0687 vgpu->id, pipe_name(PIPE_A), new_rate);
0688 }
0689 }
0690
0691 static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
0692 void *p_data, unsigned int bytes)
0693 {
0694 u32 data;
0695
0696 write_vreg(vgpu, offset, p_data, bytes);
0697 data = vgpu_vreg(vgpu, offset);
0698
0699 if (data & PIPECONF_ENABLE) {
0700 vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
0701 vgpu_update_refresh_rate(vgpu);
0702 vgpu_update_vblank_emulation(vgpu, true);
0703 } else {
0704 vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
0705 vgpu_update_vblank_emulation(vgpu, false);
0706 }
0707 return 0;
0708 }
0709
0710
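/* kept sorted by offset in ascending order, in_whitelist() binary-searches it */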
0711 static i915_reg_t force_nonpriv_white_list[] = {
0712 _MMIO(0xd80),
0713 GEN9_CS_DEBUG_MODE1,
0714 GEN9_CTX_PREEMPT_REG,
0715 CL_PRIMITIVES_COUNT,
0716 PS_INVOCATION_COUNT,
0717 PS_DEPTH_COUNT,
0718 GEN8_CS_CHICKEN1,
0719 _MMIO(0x2690),
0720 _MMIO(0x2694),
0721 _MMIO(0x2698),
0722 _MMIO(0x2754),
0723 _MMIO(0x28a0),
0724 _MMIO(0x4de0),
0725 _MMIO(0x4de4),
0726 _MMIO(0x4dfc),
0727 GEN7_COMMON_SLICE_CHICKEN1,
0728 _MMIO(0x7014),
0729 HDC_CHICKEN0,
0730 GEN8_HDC_CHICKEN1,
0731 _MMIO(0x7700),
0732 _MMIO(0x7704),
0733 _MMIO(0x7708),
0734 _MMIO(0x770c),
0735 _MMIO(0x83a8),
0736 _MMIO(0xb110),
0737 GEN8_L3SQCREG4,
0738 _MMIO(0xe100),
0739 _MMIO(0xe18c),
0740 _MMIO(0xe48c),
0741 _MMIO(0xe5f4),
0742 _MMIO(0x64844),
0743 };
0744
0745
0746 static inline bool in_whitelist(u32 reg)
0747 {
0748 int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
0749 i915_reg_t *array = force_nonpriv_white_list;
0750
0751 while (left < right) {
0752 int mid = (left + right)/2;
0753
0754 if (reg > array[mid].reg)
0755 left = mid + 1;
0756 else if (reg < array[mid].reg)
0757 right = mid;
0758 else
0759 return true;
0760 }
0761 return false;
0762 }
0763
0764 static int force_nonpriv_write(struct intel_vgpu *vgpu,
0765 unsigned int offset, void *p_data, unsigned int bytes)
0766 {
0767 u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
0768 const struct intel_engine_cs *engine =
0769 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
0770
0771 if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
0772 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
0773 vgpu->id, offset, bytes);
0774 return -EINVAL;
0775 }
0776
0777 if (!in_whitelist(reg_nonpriv) &&
0778 reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
0779 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
0780 vgpu->id, reg_nonpriv, offset);
0781 } else
0782 intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
0783
0784 return 0;
0785 }
0786
0787 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
0788 void *p_data, unsigned int bytes)
0789 {
0790 write_vreg(vgpu, offset, p_data, bytes);
0791
0792 if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
0793 vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
0794 } else {
0795 vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
0796 if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
0797 vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
0798 &= ~DP_TP_STATUS_AUTOTRAIN_DONE;
0799 }
0800 return 0;
0801 }
0802
0803 static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
0804 unsigned int offset, void *p_data, unsigned int bytes)
0805 {
0806 vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
0807 return 0;
0808 }
0809
0810 #define FDI_LINK_TRAIN_PATTERN1 0
0811 #define FDI_LINK_TRAIN_PATTERN2 1
0812
0813 static int fdi_auto_training_started(struct intel_vgpu *vgpu)
0814 {
0815 u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
0816 u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
0817 u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
0818
0819 if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
0820 (rx_ctl & FDI_RX_ENABLE) &&
0821 (rx_ctl & FDI_AUTO_TRAINING) &&
0822 (tx_ctl & DP_TP_CTL_ENABLE) &&
0823 (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
0824 return 1;
0825 else
0826 return 0;
0827 }
0828
0829 static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
0830 enum pipe pipe, unsigned int train_pattern)
0831 {
0832 i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
0833 unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
0834 unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
0835 unsigned int fdi_iir_check_bits;
0836
0837 fdi_rx_imr = FDI_RX_IMR(pipe);
0838 fdi_tx_ctl = FDI_TX_CTL(pipe);
0839 fdi_rx_ctl = FDI_RX_CTL(pipe);
0840
0841 if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
0842 fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
0843 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
0844 fdi_iir_check_bits = FDI_RX_BIT_LOCK;
0845 } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
0846 fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
0847 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
0848 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
0849 } else {
0850 gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
0851 return -EINVAL;
0852 }
0853
0854 fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
0855 fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
0856
0857
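/* the matching FDI_RX interrupt is masked, so training cannot be reported done */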
0858 if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
0859 return 0;
0860
0861 if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
0862 == fdi_tx_check_bits)
0863 && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
0864 == fdi_rx_check_bits))
0865 return 1;
0866 else
0867 return 0;
0868 }
0869
0870 #define INVALID_INDEX (~0U)
0871
0872 static unsigned int calc_index(unsigned int offset, unsigned int start,
0873 unsigned int next, unsigned int end, i915_reg_t i915_end)
0874 {
0875 unsigned int range = next - start;
0876
0877 if (!end)
0878 end = i915_mmio_reg_offset(i915_end);
0879 if (offset < start || offset > end)
0880 return INVALID_INDEX;
0881 offset -= start;
0882 return offset / range;
0883 }
0884
0885 #define FDI_RX_CTL_TO_PIPE(offset) \
0886 calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
0887
0888 #define FDI_TX_CTL_TO_PIPE(offset) \
0889 calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
0890
0891 #define FDI_RX_IMR_TO_PIPE(offset) \
0892 calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
0893
0894 static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
0895 unsigned int offset, void *p_data, unsigned int bytes)
0896 {
0897 i915_reg_t fdi_rx_iir;
0898 unsigned int index;
0899 int ret;
0900
0901 if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
0902 index = FDI_RX_CTL_TO_PIPE(offset);
0903 else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
0904 index = FDI_TX_CTL_TO_PIPE(offset);
0905 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
0906 index = FDI_RX_IMR_TO_PIPE(offset);
0907 else {
0908 gvt_vgpu_err("Unsupported registers %x\n", offset);
0909 return -EINVAL;
0910 }
0911
0912 write_vreg(vgpu, offset, p_data, bytes);
0913
0914 fdi_rx_iir = FDI_RX_IIR(index);
0915
0916 ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
0917 if (ret < 0)
0918 return ret;
0919 if (ret)
0920 vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
0921
0922 ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
0923 if (ret < 0)
0924 return ret;
0925 if (ret)
0926 vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
0927
0928 if (offset == _FDI_RXA_CTL)
0929 if (fdi_auto_training_started(vgpu))
0930 vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
0931 DP_TP_STATUS_AUTOTRAIN_DONE;
0932 return 0;
0933 }
0934
0935 #define DP_TP_CTL_TO_PORT(offset) \
0936 calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
0937
0938 static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
0939 void *p_data, unsigned int bytes)
0940 {
0941 i915_reg_t status_reg;
0942 unsigned int index;
0943 u32 data;
0944
0945 write_vreg(vgpu, offset, p_data, bytes);
0946
0947 index = DP_TP_CTL_TO_PORT(offset);
0948 data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
0949 if (data == 0x2) {
0950 status_reg = DP_TP_STATUS(index);
0951 vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
0952 }
0953 return 0;
0954 }
0955
0956 static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
0957 unsigned int offset, void *p_data, unsigned int bytes)
0958 {
0959 u32 reg_val;
0960 u32 sticky_mask;
0961
0962 reg_val = *((u32 *)p_data);
0963 sticky_mask = GENMASK(27, 26) | (1 << 24);
0964
0965 vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
0966 (vgpu_vreg(vgpu, offset) & sticky_mask);
0967 vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
0968 return 0;
0969 }
0970
0971 static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
0972 unsigned int offset, void *p_data, unsigned int bytes)
0973 {
0974 u32 data;
0975
0976 write_vreg(vgpu, offset, p_data, bytes);
0977 data = vgpu_vreg(vgpu, offset);
0978
0979 if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
0980 vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
0981 return 0;
0982 }
0983
0984 static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
0985 unsigned int offset, void *p_data, unsigned int bytes)
0986 {
0987 u32 data;
0988
0989 write_vreg(vgpu, offset, p_data, bytes);
0990 data = vgpu_vreg(vgpu, offset);
0991
0992 if (data & FDI_MPHY_IOSFSB_RESET_CTL)
0993 vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
0994 else
0995 vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
0996 return 0;
0997 }
0998
0999 #define DSPSURF_TO_PIPE(offset) \
1000 calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
1001
1002 static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1003 void *p_data, unsigned int bytes)
1004 {
1005 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1006 u32 pipe = DSPSURF_TO_PIPE(offset);
1007 int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
1008
1009 write_vreg(vgpu, offset, p_data, bytes);
1010 vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1011
1012 vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1013
1014 if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
1015 intel_vgpu_trigger_virtual_event(vgpu, event);
1016 else
1017 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1018
1019 return 0;
1020 }
1021
1022 #define SPRSURF_TO_PIPE(offset) \
1023 calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
1024
1025 static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1026 void *p_data, unsigned int bytes)
1027 {
1028 u32 pipe = SPRSURF_TO_PIPE(offset);
1029 int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
1030
1031 write_vreg(vgpu, offset, p_data, bytes);
1032 vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1033
1034 if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
1035 intel_vgpu_trigger_virtual_event(vgpu, event);
1036 else
1037 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1038
1039 return 0;
1040 }
1041
1042 static int reg50080_mmio_write(struct intel_vgpu *vgpu,
1043 unsigned int offset, void *p_data,
1044 unsigned int bytes)
1045 {
1046 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1047 enum pipe pipe = REG_50080_TO_PIPE(offset);
1048 enum plane_id plane = REG_50080_TO_PLANE(offset);
1049 int event = SKL_FLIP_EVENT(pipe, plane);
1050
1051 write_vreg(vgpu, offset, p_data, bytes);
1052 if (plane == PLANE_PRIMARY) {
1053 vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1054 vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1055 } else {
1056 vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1057 }
1058
1059 if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
1060 intel_vgpu_trigger_virtual_event(vgpu, event);
1061 else
1062 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1063
1064 return 0;
1065 }
1066
1067 static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
1068 unsigned int reg)
1069 {
1070 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1071 enum intel_gvt_event_type event;
1072
1073 if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
1074 event = AUX_CHANNEL_A;
1075 else if (reg == _PCH_DPB_AUX_CH_CTL ||
1076 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
1077 event = AUX_CHANNEL_B;
1078 else if (reg == _PCH_DPC_AUX_CH_CTL ||
1079 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
1080 event = AUX_CHANNEL_C;
1081 else if (reg == _PCH_DPD_AUX_CH_CTL ||
1082 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
1083 event = AUX_CHANNEL_D;
1084 else {
1085 drm_WARN_ON(&dev_priv->drm, true);
1086 return -EINVAL;
1087 }
1088
1089 intel_vgpu_trigger_virtual_event(vgpu, event);
1090 return 0;
1091 }
1092
1093 static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
1094 unsigned int reg, int len, bool data_valid)
1095 {
1096
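/* complete the transaction: set DONE, clear busy and receive-error status */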
1097 value |= DP_AUX_CH_CTL_DONE;
1098 value &= ~DP_AUX_CH_CTL_SEND_BUSY;
1099 value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
1100
1101 if (data_valid)
1102 value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
1103 else
1104 value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
1105
1106
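/* update the received message size field (bits 23:20) */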
1107 value &= ~(0xf << 20);
1108 value |= (len << 20);
1109 vgpu_vreg(vgpu, reg) = value;
1110
1111 if (value & DP_AUX_CH_CTL_INTERRUPT)
1112 return trigger_aux_channel_interrupt(vgpu, reg);
1113 return 0;
1114 }
1115
1116 static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
1117 u8 t)
1118 {
1119 if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
1120
1121
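/* training pattern 1: report clock recovery done on all lanes */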
1122 dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
1123
1124 dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
1125 } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
1126 DPCD_TRAINING_PATTERN_2) {
1127
1128
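/* training pattern 2: report channel EQ, symbol lock and inter-lane alignment done */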
1129 dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
1130 dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
1131
1132 dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
1133 dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
1134
1135 dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
1136 DPCD_INTERLANE_ALIGN_DONE;
1137 } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
1138 DPCD_LINK_TRAINING_DISABLED) {
1139
1140
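/* link training disabled: report the sink as synchronized */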
1141 dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
1142 }
1143 }
1144
1145 #define _REG_HSW_DP_AUX_CH_CTL(dp) \
1146 ((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
1147
1148 #define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
1149
1150 #define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
1151
1152 #define dpy_is_valid_port(port) \
1153 (((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
1154
1155 static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
1156 unsigned int offset, void *p_data, unsigned int bytes)
1157 {
1158 struct intel_vgpu_display *display = &vgpu->display;
1159 int msg, addr, ctrl, op, len;
1160 int port_index = OFFSET_TO_DP_AUX_PORT(offset);
1161 struct intel_vgpu_dpcd_data *dpcd = NULL;
1162 struct intel_vgpu_port *port = NULL;
1163 u32 data;
1164
1165 if (!dpy_is_valid_port(port_index)) {
1166 gvt_vgpu_err("Unsupported DP port access!\n");
1167 return 0;
1168 }
1169
1170 write_vreg(vgpu, offset, p_data, bytes);
1171 data = vgpu_vreg(vgpu, offset);
1172
1173 if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
1174 && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
1175
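/* not the AUX_CH_CTL register of this port on SKL+, nothing to emulate */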
1176 return 0;
1177 } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
1178 offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
1179
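/* a write to one of the AUX data registers, nothing to emulate */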
1180 return 0;
1181 }
1182
1183 if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
1184
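/* the guest only wants to clear the sticky status bits */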
1185 vgpu_vreg(vgpu, offset) = 0;
1186 return 0;
1187 }
1188
1189 port = &display->ports[port_index];
1190 dpcd = port->dpcd;
1191
1192
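/* decode the AUX message header (address, opcode, length) from the first data register */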
1193 msg = vgpu_vreg(vgpu, offset + 4);
1194 addr = (msg >> 8) & 0xffff;
1195 ctrl = (msg >> 24) & 0xff;
1196 len = msg & 0xff;
1197 op = ctrl >> 4;
1198
1199 if (op == GVT_AUX_NATIVE_WRITE) {
1200 int t;
1201 u8 buf[16];
1202
1203 if ((addr + len + 1) >= DPCD_SIZE) {
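/*
 * The write falls outside the virtual DPCD; reply with a native NAK and
 * complete the transaction.
 */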
1213 vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
1214 dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
1215 return 0;
1216 }
1217
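/* the 4 byte header plus (len + 1) data bytes must fit in the AUX burst */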
1223 if ((len + 1 + 4) > AUX_BURST_SIZE) {
1224 gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
1225 return -EINVAL;
1226 }
1227
1228
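/* unpack the write payload from the AUX data registers into buf[] */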
1229 for (t = 0; t < 4; t++) {
1230 u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
1231
1232 buf[t * 4] = (r >> 24) & 0xff;
1233 buf[t * 4 + 1] = (r >> 16) & 0xff;
1234 buf[t * 4 + 2] = (r >> 8) & 0xff;
1235 buf[t * 4 + 3] = r & 0xff;
1236 }
1237
1238
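/* copy the payload into the virtual DPCD and emulate link-training writes */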
1239 if (dpcd && dpcd->data_valid) {
1240 for (t = 0; t <= len; t++) {
1241 int p = addr + t;
1242
1243 dpcd->data[p] = buf[t];
1244
1245 if (p == DPCD_TRAINING_PATTERN_SET)
1246 dp_aux_ch_ctl_link_training(dpcd,
1247 buf[t]);
1248 }
1249 }
1250
1251
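/* reply with a native ACK and complete the transaction */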
1252 vgpu_vreg(vgpu, offset + 4) = 0;
1253 dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
1254 dpcd && dpcd->data_valid);
1255 return 0;
1256 }
1257
1258 if (op == GVT_AUX_NATIVE_READ) {
1259 int idx, i, ret = 0;
1260
1261 if ((addr + len + 1) >= DPCD_SIZE) {
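/*
 * The read falls outside the virtual DPCD; reply with an ACK and
 * all-zero data, then complete the transaction.
 */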
1271 vgpu_vreg(vgpu, offset + 4) = 0;
1272 vgpu_vreg(vgpu, offset + 8) = 0;
1273 vgpu_vreg(vgpu, offset + 12) = 0;
1274 vgpu_vreg(vgpu, offset + 16) = 0;
1275 vgpu_vreg(vgpu, offset + 20) = 0;
1276
1277 dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1278 true);
1279 return 0;
1280 }
1281
1282 for (idx = 1; idx <= 5; idx++) {
1283
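/* clear the AUX data registers before building the reply */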
1284 vgpu_vreg(vgpu, offset + 4 * idx) = 0;
1285 }
1286
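/* the ACK byte plus (len + 1) data bytes must fit in the AUX burst */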
1290 if ((len + 2) > AUX_BURST_SIZE) {
1291 gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
1292 return -EINVAL;
1293 }
1294
1295
1296
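/* pack the reply: byte 0 stays 0 (native ACK), DPCD data follows */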
1297 if (dpcd && dpcd->data_valid) {
1298 for (i = 1; i <= (len + 1); i++) {
1299 int t;
1300
1301 t = dpcd->data[addr + i - 1];
1302 t <<= (24 - 8 * (i % 4));
1303 ret |= t;
1304
1305 if ((i % 4 == 3) || (i == (len + 1))) {
1306 vgpu_vreg(vgpu, offset +
1307 (i / 4 + 1) * 4) = ret;
1308 ret = 0;
1309 }
1310 }
1311 }
1312 dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1313 dpcd && dpcd->data_valid);
1314 return 0;
1315 }
1316
1317
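/* i2c-over-AUX access: forward it to the GVT i2c emulation */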
1318 intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
1319
1320 if (data & DP_AUX_CH_CTL_INTERRUPT)
1321 trigger_aux_channel_interrupt(vgpu, offset);
1322 return 0;
1323 }
1324
1325 static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
1326 void *p_data, unsigned int bytes)
1327 {
1328 *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
1329 write_vreg(vgpu, offset, p_data, bytes);
1330 return 0;
1331 }
1332
1333 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1334 void *p_data, unsigned int bytes)
1335 {
1336 bool vga_disable;
1337
1338 write_vreg(vgpu, offset, p_data, bytes);
1339 vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
1340
1341 gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
1342 vga_disable ? "Disable" : "Enable");
1343 return 0;
1344 }
1345
1346 static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
1347 unsigned int sbi_offset)
1348 {
1349 struct intel_vgpu_display *display = &vgpu->display;
1350 int num = display->sbi.number;
1351 int i;
1352
1353 for (i = 0; i < num; ++i)
1354 if (display->sbi.registers[i].offset == sbi_offset)
1355 break;
1356
1357 if (i == num)
1358 return 0;
1359
1360 return display->sbi.registers[i].value;
1361 }
1362
1363 static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
1364 unsigned int offset, u32 value)
1365 {
1366 struct intel_vgpu_display *display = &vgpu->display;
1367 int num = display->sbi.number;
1368 int i;
1369
1370 for (i = 0; i < num; ++i) {
1371 if (display->sbi.registers[i].offset == offset)
1372 break;
1373 }
1374
1375 if (i == num) {
1376 if (num == SBI_REG_MAX) {
1377 gvt_vgpu_err("SBI caching meets maximum limits\n");
1378 return;
1379 }
1380 display->sbi.number++;
1381 }
1382
1383 display->sbi.registers[i].offset = offset;
1384 display->sbi.registers[i].value = value;
1385 }
1386
1387 static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1388 void *p_data, unsigned int bytes)
1389 {
1390 if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1391 SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
1392 unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1393 SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
1394 vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
1395 sbi_offset);
1396 }
1397 read_vreg(vgpu, offset, p_data, bytes);
1398 return 0;
1399 }
1400
1401 static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1402 void *p_data, unsigned int bytes)
1403 {
1404 u32 data;
1405
1406 write_vreg(vgpu, offset, p_data, bytes);
1407 data = vgpu_vreg(vgpu, offset);
1408
1409 data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
1410 data |= SBI_READY;
1411
1412 data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
1413 data |= SBI_RESPONSE_SUCCESS;
1414
1415 vgpu_vreg(vgpu, offset) = data;
1416
1417 if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1418 SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
1419 unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1420 SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
1421
1422 write_virtual_sbi_register(vgpu, sbi_offset,
1423 vgpu_vreg_t(vgpu, SBI_DATA));
1424 }
1425 return 0;
1426 }
1427
1428 #define _vgtif_reg(x) \
1429 (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
1430
1431 static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1432 void *p_data, unsigned int bytes)
1433 {
1434 bool invalid_read = false;
1435
1436 read_vreg(vgpu, offset, p_data, bytes);
1437
1438 switch (offset) {
1439 case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
1440 if (offset + bytes > _vgtif_reg(vgt_id) + 4)
1441 invalid_read = true;
1442 break;
1443 case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
1444 _vgtif_reg(avail_rs.fence_num):
1445 if (offset + bytes >
1446 _vgtif_reg(avail_rs.fence_num) + 4)
1447 invalid_read = true;
1448 break;
1449 case 0x78010:
1450 case 0x7881c:
1451 break;
1452 default:
1453 invalid_read = true;
1454 break;
1455 }
1456 if (invalid_read)
1457 gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
1458 offset, bytes, *(u32 *)p_data);
1459 vgpu->pv_notified = true;
1460 return 0;
1461 }
1462
1463 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1464 {
1465 enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1466 struct intel_vgpu_mm *mm;
1467 u64 *pdps;
1468
1469 pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
1470
1471 switch (notification) {
1472 case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
1473 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1474 fallthrough;
1475 case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
1476 mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
1477 return PTR_ERR_OR_ZERO(mm);
1478 case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
1479 case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
1480 return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
1481 case VGT_G2V_EXECLIST_CONTEXT_CREATE:
1482 case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
1483 case 1:
1484 break;
1485 default:
1486 gvt_vgpu_err("Invalid PV notification %d\n", notification);
1487 }
1488 return 0;
1489 }
1490
1491 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
1492 {
1493 struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
1494 char *env[3] = {NULL, NULL, NULL};
1495 char vmid_str[20];
1496 char display_ready_str[20];
1497
1498 snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
1499 env[0] = display_ready_str;
1500
1501 snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
1502 env[1] = vmid_str;
1503
1504 return kobject_uevent_env(kobj, KOBJ_ADD, env);
1505 }
1506
1507 static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1508 void *p_data, unsigned int bytes)
1509 {
1510 u32 data = *(u32 *)p_data;
1511 bool invalid_write = false;
1512
1513 switch (offset) {
1514 case _vgtif_reg(display_ready):
1515 send_display_ready_uevent(vgpu, data ? 1 : 0);
1516 break;
1517 case _vgtif_reg(g2v_notify):
1518 handle_g2v_notification(vgpu, data);
1519 break;
1520
1521 case _vgtif_reg(cursor_x_hot):
1522 case _vgtif_reg(cursor_y_hot):
1523 case _vgtif_reg(pdp[0].lo):
1524 case _vgtif_reg(pdp[0].hi):
1525 case _vgtif_reg(pdp[1].lo):
1526 case _vgtif_reg(pdp[1].hi):
1527 case _vgtif_reg(pdp[2].lo):
1528 case _vgtif_reg(pdp[2].hi):
1529 case _vgtif_reg(pdp[3].lo):
1530 case _vgtif_reg(pdp[3].hi):
1531 case _vgtif_reg(execlist_context_descriptor_lo):
1532 case _vgtif_reg(execlist_context_descriptor_hi):
1533 break;
1534 case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
1535 invalid_write = true;
1536 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1537 break;
1538 default:
1539 invalid_write = true;
1540 gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
1541 offset, bytes, data);
1542 break;
1543 }
1544
1545 if (!invalid_write)
1546 write_vreg(vgpu, offset, p_data, bytes);
1547
1548 return 0;
1549 }
1550
1551 static int pf_write(struct intel_vgpu *vgpu,
1552 unsigned int offset, void *p_data, unsigned int bytes)
1553 {
1554 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1555 u32 val = *(u32 *)p_data;
1556
1557 if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
1558 offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
1559 offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
1560 drm_WARN_ONCE(&i915->drm, true,
1561 "VM(%d): guest is trying to scale a plane\n",
1562 vgpu->id);
1563 return 0;
1564 }
1565
1566 return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
1567 }
1568
1569 static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
1570 unsigned int offset, void *p_data, unsigned int bytes)
1571 {
1572 write_vreg(vgpu, offset, p_data, bytes);
1573
1574 if (vgpu_vreg(vgpu, offset) &
1575 HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
1576 vgpu_vreg(vgpu, offset) |=
1577 HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1578 else
1579 vgpu_vreg(vgpu, offset) &=
1580 ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1581 return 0;
1582 }
1583
1584 static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
1585 unsigned int offset, void *p_data, unsigned int bytes)
1586 {
1587 write_vreg(vgpu, offset, p_data, bytes);
1588
1589 if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
1590 vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
1591 else
1592 vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
1593
1594 return 0;
1595 }
1596
1597 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
1598 unsigned int offset, void *p_data, unsigned int bytes)
1599 {
1600 write_vreg(vgpu, offset, p_data, bytes);
1601
1602 if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
1603 vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
1604 return 0;
1605 }
1606
1607 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
1608 void *p_data, unsigned int bytes)
1609 {
1610 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1611 u32 mode;
1612
1613 write_vreg(vgpu, offset, p_data, bytes);
1614 mode = vgpu_vreg(vgpu, offset);
1615
1616 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
1617 drm_WARN_ONCE(&i915->drm, 1,
1618 "VM(%d): iGVT-g doesn't support GuC\n",
1619 vgpu->id);
1620 return 0;
1621 }
1622
1623 return 0;
1624 }
1625
1626 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
1627 void *p_data, unsigned int bytes)
1628 {
1629 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1630 u32 trtte = *(u32 *)p_data;
1631
1632 if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
1633 drm_WARN(&i915->drm, 1,
1634 "VM(%d): Use physical address for TRTT!\n",
1635 vgpu->id);
1636 return -EINVAL;
1637 }
1638 write_vreg(vgpu, offset, p_data, bytes);
1639
1640 return 0;
1641 }
1642
1643 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
1644 void *p_data, unsigned int bytes)
1645 {
1646 write_vreg(vgpu, offset, p_data, bytes);
1647 return 0;
1648 }
1649
1650 static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
1651 void *p_data, unsigned int bytes)
1652 {
1653 u32 v = 0;
1654
1655 if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
1656 v |= (1 << 0);
1657
1658 if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
1659 v |= (1 << 8);
1660
1661 if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
1662 v |= (1 << 16);
1663
1664 if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
1665 v |= (1 << 24);
1666
1667 vgpu_vreg(vgpu, offset) = v;
1668
1669 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1670 }
1671
1672 static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1673 void *p_data, unsigned int bytes)
1674 {
1675 u32 value = *(u32 *)p_data;
1676 u32 cmd = value & 0xff;
1677 u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
1678
1679 switch (cmd) {
1680 case GEN9_PCODE_READ_MEM_LATENCY:
1681 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1682 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1683 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1684 IS_COMETLAKE(vgpu->gvt->gt->i915)) {
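/* canned memory latency values, as read from a Skylake part */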
1690 if (!*data0)
1691 *data0 = 0x1e1a1100;
1692 else
1693 *data0 = 0x61514b3d;
1694 } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
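/* canned memory latency values, as read from a Broxton part */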
1700 if (!*data0)
1701 *data0 = 0x16080707;
1702 else
1703 *data0 = 0x16161616;
1704 }
1705 break;
1706 case SKL_PCODE_CDCLK_CONTROL:
1707 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1708 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1709 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1710 IS_COMETLAKE(vgpu->gvt->gt->i915))
1711 *data0 = SKL_CDCLK_READY_FOR_CHANGE;
1712 break;
1713 case GEN6_PCODE_READ_RC6VIDS:
1714 *data0 |= 0x1;
1715 break;
1716 }
1717
1718 gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
1719 vgpu->id, value, *data0);
1720
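/*
 * PCODE_READY clear means the mailbox is ready for another access and
 * PCODE_ERROR_MASK clear means no error occurred; since no real pcode
 * access happens here, always report success.
 */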
1726 value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
1727 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1728 }
1729
1730 static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
1731 void *p_data, unsigned int bytes)
1732 {
1733 u32 value = *(u32 *)p_data;
1734 const struct intel_engine_cs *engine =
1735 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1736
1737 if (value != 0 &&
1738 !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
1739 gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
1740 offset, value);
1741 return -EINVAL;
1742 }
1743
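/*
 * Track the HWSP address the guest programs for each engine; it is needed
 * to emulate context status updates at the location the guest expects.
 */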
1749 if (unlikely(!engine)) {
1750 gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
1751 offset);
1752 return -EINVAL;
1753 }
1754 vgpu->hws_pga[engine->id] = value;
1755 gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
1756 vgpu->id, value, offset);
1757
1758 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1759 }
1760
1761 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
1762 unsigned int offset, void *p_data, unsigned int bytes)
1763 {
1764 u32 v = *(u32 *)p_data;
1765
1766 if (IS_BROXTON(vgpu->gvt->gt->i915))
1767 v &= (1 << 31) | (1 << 29);
1768 else
1769 v &= (1 << 31) | (1 << 29) | (1 << 9) |
1770 (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
1771 v |= (v >> 1);
1772
1773 return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
1774 }
1775
1776 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1777 void *p_data, unsigned int bytes)
1778 {
1779 u32 v = *(u32 *)p_data;
1780
1781
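/* only the PLL enable bit is honored; the lock bit simply mirrors it */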
1782 v &= (1 << 31) | (1 << 30);
1783 v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
1784
1785 vgpu_vreg(vgpu, offset) = v;
1786
1787 return 0;
1788 }
1789
1790 static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1791 unsigned int offset, void *p_data, unsigned int bytes)
1792 {
1793 u32 v = *(u32 *)p_data;
1794
1795 if (v & BXT_DE_PLL_PLL_ENABLE)
1796 v |= BXT_DE_PLL_LOCK;
1797
1798 vgpu_vreg(vgpu, offset) = v;
1799
1800 return 0;
1801 }
1802
1803 static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1804 unsigned int offset, void *p_data, unsigned int bytes)
1805 {
1806 u32 v = *(u32 *)p_data;
1807
1808 if (v & PORT_PLL_ENABLE)
1809 v |= PORT_PLL_LOCK;
1810
1811 vgpu_vreg(vgpu, offset) = v;
1812
1813 return 0;
1814 }
1815
1816 static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1817 unsigned int offset, void *p_data, unsigned int bytes)
1818 {
1819 u32 v = *(u32 *)p_data;
1820 u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
1821
1822 switch (offset) {
1823 case _PHY_CTL_FAMILY_EDP:
1824 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1825 break;
1826 case _PHY_CTL_FAMILY_DDI:
1827 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1828 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1829 break;
1830 }
1831
1832 vgpu_vreg(vgpu, offset) = v;
1833
1834 return 0;
1835 }
1836
1837 static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
1838 unsigned int offset, void *p_data, unsigned int bytes)
1839 {
1840 u32 v = vgpu_vreg(vgpu, offset);
1841
1842 v &= ~UNIQUE_TRANGE_EN_METHOD;
1843
1844 vgpu_vreg(vgpu, offset) = v;
1845
1846 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1847 }
1848
1849 static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
1850 unsigned int offset, void *p_data, unsigned int bytes)
1851 {
1852 u32 v = *(u32 *)p_data;
1853
1854 if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
1855 vgpu_vreg(vgpu, offset - 0x600) = v;
1856 vgpu_vreg(vgpu, offset - 0x800) = v;
1857 } else {
1858 vgpu_vreg(vgpu, offset - 0x400) = v;
1859 vgpu_vreg(vgpu, offset - 0x600) = v;
1860 }
1861
1862 vgpu_vreg(vgpu, offset) = v;
1863
1864 return 0;
1865 }
1866
1867 static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1868 unsigned int offset, void *p_data, unsigned int bytes)
1869 {
1870 u32 v = *(u32 *)p_data;
1871
1872 if (v & BIT(0)) {
1873 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
1874 ~PHY_RESERVED;
1875 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
1876 PHY_POWER_GOOD;
1877 }
1878
1879 if (v & BIT(1)) {
1880 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
1881 ~PHY_RESERVED;
1882 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
1883 PHY_POWER_GOOD;
1884 }
1885
1886
1887 vgpu_vreg(vgpu, offset) = v;
1888
1889 return 0;
1890 }
1891
1892 static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1893 unsigned int offset, void *p_data, unsigned int bytes)
1894 {
1895 vgpu_vreg(vgpu, offset) = 0;
1896 return 0;
1897 }
1898
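/*
 * Broxton: ignore the guest-programmed value and expose a fixed, mostly
 * snooped PPAT configuration instead.
 */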
1909 static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
1910 void *p_data, unsigned int bytes)
1911 {
1912 u64 pat =
1913 GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1914 GEN8_PPAT(1, 0) |
1915 GEN8_PPAT(2, 0) |
1916 GEN8_PPAT(3, CHV_PPAT_SNOOP) |
1917 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1918 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1919 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1920 GEN8_PPAT(7, CHV_PPAT_SNOOP);
1921
1922 vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
1923
1924 return 0;
1925 }
1926
1927 static int guc_status_read(struct intel_vgpu *vgpu,
1928 unsigned int offset, void *p_data,
1929 unsigned int bytes)
1930 {
1931
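/* report the current status, then clear GS_MIA_IN_RESET for subsequent reads */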
1932 read_vreg(vgpu, offset, p_data, bytes);
1933 vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
1934 return 0;
1935 }
1936
1937 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1938 unsigned int offset, void *p_data, unsigned int bytes)
1939 {
1940 struct intel_gvt *gvt = vgpu->gvt;
1941 const struct intel_engine_cs *engine =
1942 intel_gvt_render_mmio_to_engine(gvt, offset);
1943
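/*
 * Read the value from hardware when:
 * a. the offset is not an engine MMIO, or
 * b. this vGPU currently owns the engine on the hardware, or
 * c. the offset is an engine timestamp register.
 */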
1951 if (!engine ||
1952 vgpu == gvt->scheduler.engine_owner[engine->id] ||
1953 offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
1954 offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
1955 mmio_hw_access_pre(gvt->gt);
1956 vgpu_vreg(vgpu, offset) =
1957 intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1958 mmio_hw_access_post(gvt->gt);
1959 }
1960
1961 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1962 }
1963
1964 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1965 void *p_data, unsigned int bytes)
1966 {
1967 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1968 const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1969 struct intel_vgpu_execlist *execlist;
1970 u32 data = *(u32 *)p_data;
1971 int ret = 0;
1972
1973 if (drm_WARN_ON(&i915->drm, !engine))
1974 return -EINVAL;
1975
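/*
 * An ELSP write means the engines are in use, i.e. the device is in D0;
 * clear d3_entered here so a later vGPU reset does not wrongly skip the
 * PPGTT invalidation it would otherwise perform.
 */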
1988 if (vgpu->d3_entered)
1989 vgpu->d3_entered = false;
1990
1991 execlist = &vgpu->submission.execlist[engine->id];
1992
1993 execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
1994 if (execlist->elsp_dwords.index == 3) {
1995 ret = intel_vgpu_submit_execlist(vgpu, engine);
1996 if (ret)
1997 gvt_vgpu_err("failed to submit workload on ring %s\n",
1998 engine->name);
1999 }
2000
2001 ++execlist->elsp_dwords.index;
2002 execlist->elsp_dwords.index &= 0x3;
2003 return ret;
2004 }
2005
2006 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2007 void *p_data, unsigned int bytes)
2008 {
2009 u32 data = *(u32 *)p_data;
2010 const struct intel_engine_cs *engine =
2011 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2012 bool enable_execlist;
2013 int ret;
2014
2015 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
2016 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2017 IS_COMETLAKE(vgpu->gvt->gt->i915))
2018 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
2019 write_vreg(vgpu, offset, p_data, bytes);
2020
2021 if (IS_MASKED_BITS_ENABLED(data, 1)) {
2022 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2023 return 0;
2024 }
2025
2026 if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2027 IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
2028 IS_MASKED_BITS_ENABLED(data, 2)) {
2029 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2030 return 0;
2031 }
2032
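/*
 * If the guest enables PPGTT or execlists without ever touching the
 * PVINFO page, treat it as a non GVT-aware guest and enter failsafe mode.
 */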
2037 if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
2038 IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
2039 !vgpu->pv_notified) {
2040 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2041 return 0;
2042 }
2043 if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
2044 IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
2045 enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
2046
2047 gvt_dbg_core("EXECLIST %s on ring %s\n",
2048 (enable_execlist ? "enabling" : "disabling"),
2049 engine->name);
2050
2051 if (!enable_execlist)
2052 return 0;
2053
2054 ret = intel_vgpu_select_submission_ops(vgpu,
2055 engine->mask,
2056 INTEL_VGPU_EXECLIST_SUBMISSION);
2057 if (ret)
2058 return ret;
2059
2060 intel_vgpu_start_schedule(vgpu);
2061 }
2062 return 0;
2063 }
2064
2065 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
2066 unsigned int offset, void *p_data, unsigned int bytes)
2067 {
2068 unsigned int id = 0;
2069
2070 write_vreg(vgpu, offset, p_data, bytes);
2071 vgpu_vreg(vgpu, offset) = 0;
2072
2073 switch (offset) {
2074 case 0x4260:
2075 id = RCS0;
2076 break;
2077 case 0x4264:
2078 id = VCS0;
2079 break;
2080 case 0x4268:
2081 id = VCS1;
2082 break;
2083 case 0x426c:
2084 id = BCS0;
2085 break;
2086 case 0x4270:
2087 id = VECS0;
2088 break;
2089 default:
2090 return -EINVAL;
2091 }
2092 set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
2093
2094 return 0;
2095 }
2096
2097 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
2098 unsigned int offset, void *p_data, unsigned int bytes)
2099 {
2100 u32 data;
2101
2102 write_vreg(vgpu, offset, p_data, bytes);
2103 data = vgpu_vreg(vgpu, offset);
2104
2105 if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
2106 data |= RESET_CTL_READY_TO_RESET;
2107 else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
2108 data &= ~RESET_CTL_READY_TO_RESET;
2109
2110 vgpu_vreg(vgpu, offset) = data;
2111 return 0;
2112 }
2113
2114 static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
2115 unsigned int offset, void *p_data,
2116 unsigned int bytes)
2117 {
2118 u32 data = *(u32 *)p_data;
2119
2120 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
2121 write_vreg(vgpu, offset, p_data, bytes);
2122
2123 if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
2124 IS_MASKED_BITS_ENABLED(data, 0x8))
2125 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2126
2127 return 0;
2128 }
2129
2130 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
2131 ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2132 s, f, am, rm, d, r, w); \
2133 if (ret) \
2134 return ret; \
2135 } while (0)
2136
2137 #define MMIO_DH(reg, d, r, w) \
2138 MMIO_F(reg, 4, 0, 0, 0, d, r, w)
2139
2140 #define MMIO_DFH(reg, d, f, r, w) \
2141 MMIO_F(reg, 4, f, 0, 0, d, r, w)
2142
2143 #define MMIO_GM(reg, d, r, w) \
2144 MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
2145
2146 #define MMIO_GM_RDR(reg, d, r, w) \
2147 MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
2148
2149 #define MMIO_RO(reg, d, f, rm, r, w) \
2150 MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
2151
2152 #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
2153 MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
2154 MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
2155 MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
2156 MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
2157 if (HAS_ENGINE(gvt->gt, VCS1)) \
2158 MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
2159 } while (0)
2160
2161 #define MMIO_RING_DFH(prefix, d, f, r, w) \
2162 MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
2163
2164 #define MMIO_RING_GM(prefix, d, r, w) \
2165 MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
2166
2167 #define MMIO_RING_GM_RDR(prefix, d, r, w) \
2168 MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
2169
2170 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
2171 MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
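
/*
 * Illustrative expansion (not compiled): a registration such as
 *
 *	MMIO_DH(SDEIMR, D_ALL, NULL, intel_vgpu_reg_imr_handler);
 *
 * boils down to
 *
 *	ret = setup_mmio_info(gvt, i915_mmio_reg_offset(SDEIMR), 4, 0, 0, 0,
 *			      D_ALL, NULL, intel_vgpu_reg_imr_handler);
 *	if (ret)
 *		return ret;
 *
 * i.e. a 4-byte register with no flags, no address/RO mask, matched on all
 * supported devices, the default read handler and an IMR write handler.
 * The MMIO_RING_* variants repeat the registration once per engine MMIO base.
 */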
2172
2173 static int init_generic_mmio_info(struct intel_gvt *gvt)
2174 {
2175 struct drm_i915_private *dev_priv = gvt->gt->i915;
2176 int ret;
2177
2178 MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
2179 intel_vgpu_reg_imr_handler);
2180
2181 MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
2182 MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
2183 MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
2184
2185 MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
2186
2187
2188 MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
2189 gamw_echo_dev_rw_ia_write);
2190
2191 MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2192 MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2193 MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2194
2195 #define RING_REG(base) _MMIO((base) + 0x28)
2196 MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2197 #undef RING_REG
2198
2199 #define RING_REG(base) _MMIO((base) + 0x134)
2200 MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2201 #undef RING_REG
2202
2203 #define RING_REG(base) _MMIO((base) + 0x6c)
2204 MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
2205 #undef RING_REG
2206 MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
2207
2208 MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
2209 MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
2210 MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
2211
2212 MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
2213 MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
2214 MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
2215 MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
2216 MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
2217
2218
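/* RING MODE */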
2219 #define RING_REG(base) _MMIO((base) + 0x29c)
2220 MMIO_RING_DFH(RING_REG, D_ALL,
2221 F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
2222 ring_mode_mmio_write);
2223 #undef RING_REG
2224
2225 MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2226 NULL, NULL);
2227 MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2228 NULL, NULL);
2229 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
2230 mmio_read_from_hw, NULL);
2231 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
2232 mmio_read_from_hw, NULL);
2233
2234 MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2235 MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2236 NULL, NULL);
2237 MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2238 MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2239 MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2240
2241 MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2242 MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2243 MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2244 MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
2245 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2246 MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2247 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
2248 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2249 NULL, NULL);
2250 MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2251 NULL, NULL);
2252 MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
2253 MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
2254 MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
2255 MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
2256 MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
2257 MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
2258 MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2259 MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2260 MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2261 MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2262
2263
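/* display */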
2264 MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
2265 MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
2266 MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
2267 MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
2268 MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
2269 MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
2270 reg50080_mmio_write);
2271 MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
2272 MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
2273 reg50080_mmio_write);
2274 MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
2275 MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
2276 reg50080_mmio_write);
2277 MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
2278 MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
2279 reg50080_mmio_write);
2280 MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
2281 MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
2282 reg50080_mmio_write);
2283 MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
2284 MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
2285 reg50080_mmio_write);
2286
2287 MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
2288 gmbus_mmio_write);
2289 MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
2290
2291 MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2292 dp_aux_ch_ctl_mmio_write);
2293 MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2294 dp_aux_ch_ctl_mmio_write);
2295 MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2296 dp_aux_ch_ctl_mmio_write);
2297
2298 MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
2299
2300 MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
2301 MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
2302
2303 MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
2304 MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
2305 MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
2306 MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2307 MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2308 MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2309 MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2310 MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2311 MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2312 MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
2313 MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
2314 MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
2315 MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
2316 MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
2317 MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
2318 MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
2319
2320 MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
2321 PORTA_HOTPLUG_STATUS_MASK
2322 | PORTB_HOTPLUG_STATUS_MASK
2323 | PORTC_HOTPLUG_STATUS_MASK
2324 | PORTD_HOTPLUG_STATUS_MASK,
2325 NULL, NULL);
2326
2327 MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
2328 MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
2329 MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
2330 MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
2331 MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
2332
2333 MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
2334 dp_aux_ch_ctl_mmio_write);
2335
2336 MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2337 MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2338 MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2339 MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2340 MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2341
2342 MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
2343 MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
2344 MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
2345 MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
2346 MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
2347
2348 MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
2349 MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
2350 MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
2351 MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
2352 MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
2353
2354 MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
2355 MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
2356 MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
2357 MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
2358
2359 MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
2360 MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2361 MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2362 MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
2363 MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
2364 MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
2365 MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
2366 MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
2367 MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
2368 MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
2369 MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
2370 MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
2371 MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
2372
2373 MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
2374 MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
2375 MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
2376
2377 MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
2378 MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
2379
2380 MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2381 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
2382
2383 MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
2384 MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2385 MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2386 MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2387 MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2388 MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2389
2390 MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2391 MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2392 MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2393 MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2394
2395 MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2396 MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2397 MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2398
2399 MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2400 MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2401 MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2402 MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2403 MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2404 MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2405 MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2406 MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2407 MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2408 MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2409 MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2410 MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2411 MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2412 MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2413 MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2414 MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2415 MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2416
2417 MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2418 MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
2419 MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2420 MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2421 MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2422 MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
2423 MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
2424 MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2425 MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2426 MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2427 MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2428
2429 MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2430 MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2431 MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
2432
2433 return 0;
2434 }
2435
2436 static int init_bdw_mmio_info(struct intel_gvt *gvt)
2437 {
2438 int ret;
2439
2440 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2441 MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2442 MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2443
2444 MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2445 MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2446 MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2447
2448 MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2449 MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2450 MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2451
2452 MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2453 MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2454 MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2455
2456 MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
2457 intel_vgpu_reg_imr_handler);
2458 MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
2459 intel_vgpu_reg_ier_handler);
2460 MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
2461 intel_vgpu_reg_iir_handler);
2462
2463 MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
2464 intel_vgpu_reg_imr_handler);
2465 MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
2466 intel_vgpu_reg_ier_handler);
2467 MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
2468 intel_vgpu_reg_iir_handler);
2469
2470 MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
2471 intel_vgpu_reg_imr_handler);
2472 MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
2473 intel_vgpu_reg_ier_handler);
2474 MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
2475 intel_vgpu_reg_iir_handler);
2476
2477 MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2478 MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2479 MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2480
2481 MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2482 MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2483 MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2484
2485 MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2486 MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2487 MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2488
2489 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
2490 intel_vgpu_reg_master_irq_handler);
2491
2492 MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
2493 mmio_read_from_hw, NULL);
2494
2495 #define RING_REG(base) _MMIO((base) + 0xd0)
2496 MMIO_RING_F(RING_REG, 4, F_RO, 0,
2497 ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
2498 ring_reset_ctl_write);
2499 #undef RING_REG
2500
2501 #define RING_REG(base) _MMIO((base) + 0x230)
2502 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
2503 #undef RING_REG
2504
2505 #define RING_REG(base) _MMIO((base) + 0x234)
2506 MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
2507 NULL, NULL);
2508 #undef RING_REG
2509
2510 #define RING_REG(base) _MMIO((base) + 0x244)
2511 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2512 #undef RING_REG
2513
2514 #define RING_REG(base) _MMIO((base) + 0x370)
2515 MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
2516 #undef RING_REG
2517
2518 #define RING_REG(base) _MMIO((base) + 0x3a0)
2519 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
2520 #undef RING_REG
2521
2522 MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
2523
2524 #define RING_REG(base) _MMIO((base) + 0x270)
2525 MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2526 #undef RING_REG
2527
2528 MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
2529
2530 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2531
2532 MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2533 NULL, NULL);
2534 MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2535 NULL, NULL);
2536 MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2537
2538 MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
2539 MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
2540 MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2541 MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
2542 MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
2543
2544 MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
2545 D_BDW_PLUS, NULL, force_nonpriv_write);
2546
2547 MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
2548
2549 MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
2550
2551 MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2552 MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2553 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2554 MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2555
2556 MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
2557
2558 MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2559 MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2560 MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2561 MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2562 MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2563 MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2564 MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2565 MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2566 MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2567 MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2568 return 0;
2569 }
2570
2571 static int init_skl_mmio_info(struct intel_gvt *gvt)
2572 {
2573 struct drm_i915_private *dev_priv = gvt->gt->i915;
2574 int ret;
2575
2576 MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2577 MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
2578 MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2579 MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
2580 MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2581 MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
2582
2583 MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2584 dp_aux_ch_ctl_mmio_write);
2585 MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2586 dp_aux_ch_ctl_mmio_write);
2587 MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2588 dp_aux_ch_ctl_mmio_write);
2589
2590 MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
2591
2592 MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
2593
2594 MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2595 MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2596 MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
2597 MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
2598 MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
2599 MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
2600
2601 MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2602 MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2603 MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2604 MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2605 MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2606 MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2607
2608 MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2609 MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2610 MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2611 MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2612 MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2613 MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2614
2615 MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2616 MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2617 MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2618 MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2619 MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2620 MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2621
2622 MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2623 MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2624 MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2625 MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2626
2627 MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2628 MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2629 MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2630 MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2631
2632 MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2633 MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2634 MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2635 MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2636
2637 MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
2638 MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
2639 MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
2640
2641 MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2642 MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2643 MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2644
2645 MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2646 MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2647 MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2648
2649 MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2650 MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2651 MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2652
2653 MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
2654 MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
2655 MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
2656
2657 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2658 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2659 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2660 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2661
2662 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2663 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2664 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2665 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2666
2667 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2668 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2669 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2670 MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2671
2672 MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
2673 MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
2674 MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
2675 MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
2676
2677 MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
2678 MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
2679 MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
2680 MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
2681
2682 MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
2683 MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
2684 MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
2685 MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
2686
2687 MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
2688 MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
2689 MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
2690 MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
2691
2692 MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
2693 MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
2694 MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
2695 MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
2696
2697 MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
2698 MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
2699 MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
2700 MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
2701
2702 MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2703
2704 MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
2705 NULL, NULL);
2706 MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
2707 NULL, NULL);
2708
2709 MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
2710 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2711 MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2712 NULL, NULL);
2713
2714
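/* TRTT */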
2715 MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2716 MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2717 MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2718 MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2719 MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2720 MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
2721 NULL, gen9_trtte_write);
2722 MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
2723 NULL, gen9_trtt_chicken_write);
2724
2725 MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2726 MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
2727
2728 #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
2729 MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2730 NULL, csfe_chicken1_mmio_write);
2731 #undef CSFE_CHICKEN1_REG
2732 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2733 NULL, NULL);
2734 MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2735 NULL, NULL);
2736
2737 MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
2738 MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2739
2740 return 0;
2741 }
2742
2743 static int init_bxt_mmio_info(struct intel_gvt *gvt)
2744 {
2745 int ret;
2746
2747 MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
2748 MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
2749 NULL, bxt_phy_ctl_family_write);
2750 MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
2751 NULL, bxt_phy_ctl_family_write);
2752 MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
2753 NULL, bxt_port_pll_enable_write);
2754 MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
2755 NULL, bxt_port_pll_enable_write);
2756 MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
2757 bxt_port_pll_enable_write);
2758
2759 MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
2760 NULL, bxt_pcs_dw12_grp_write);
2761 MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
2762 bxt_port_tx_dw3_read, NULL);
2763 MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
2764 NULL, bxt_pcs_dw12_grp_write);
2765 MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
2766 bxt_port_tx_dw3_read, NULL);
2767 MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
2768 NULL, bxt_pcs_dw12_grp_write);
2769 MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
2770 bxt_port_tx_dw3_read, NULL);
2771 MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
2772 MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
2773 MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
2774 MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
2775 MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2776 0, 0, D_BXT, NULL, NULL);
2777 MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2778 0, 0, D_BXT, NULL, NULL);
2779 MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2780 0, 0, D_BXT, NULL, NULL);
2781 MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2782 0, 0, D_BXT, NULL, NULL);
2783
2784 MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
2785
2786 MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
2787
2788 return 0;
2789 }
2790
2791 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
2792 unsigned int offset)
2793 {
2794 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2795 int num = gvt->mmio.num_mmio_block;
2796 int i;
2797
2798 for (i = 0; i < num; i++, block++) {
2799 if (offset >= i915_mmio_reg_offset(block->offset) &&
2800 offset < i915_mmio_reg_offset(block->offset) + block->size)
2801 return block;
2802 }
2803 return NULL;
2804 }
2805
2806
2807
2808
2809
2810
2811
2812
2813
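/**
 * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the
 * MMIO information table of GVT device.
 */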
2814 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
2815 {
2816 struct hlist_node *tmp;
2817 struct intel_gvt_mmio_info *e;
2818 int i;
2819
2820 hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
2821 kfree(e);
2822
2823 kfree(gvt->mmio.mmio_block);
2824 gvt->mmio.mmio_block = NULL;
2825 gvt->mmio.num_mmio_block = 0;
2826
2827 vfree(gvt->mmio.mmio_attribute);
2828 gvt->mmio.mmio_attribute = NULL;
2829 }
2830
2831 static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
2832 u32 size)
2833 {
2834 struct intel_gvt *gvt = iter->data;
2835 struct intel_gvt_mmio_info *info, *p;
2836 u32 start, end, i;
2837
2838 if (WARN_ON(!IS_ALIGNED(offset, 4)))
2839 return -EINVAL;
2840
2841 start = offset;
2842 end = offset + size;
2843
2844 for (i = start; i < end; i += 4) {
2845 p = intel_gvt_find_mmio_info(gvt, i);
2846 if (p) {
2847 WARN(1, "dup mmio definition offset %x\n",
2848 p->offset);
2849
2850
2851
2852
2853
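/*
 * Return -EEXIST here to make GVT-g load fail, so a duplicated MMIO
 * definition is found as soon as possible.
 */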
2854 return -EEXIST;
2855 }
2856
2857 info = kzalloc(sizeof(*info), GFP_KERNEL);
2858 if (!info)
2859 return -ENOMEM;
2860
2861 info->offset = i;
2862 info->read = intel_vgpu_default_mmio_read;
2863 info->write = intel_vgpu_default_mmio_write;
2864 INIT_HLIST_NODE(&info->node);
2865 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
2866 gvt->mmio.num_tracked_mmio++;
2867 }
2868 return 0;
2869 }
2870
2871 static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
2872 u32 offset, u32 size)
2873 {
2874 struct intel_gvt *gvt = iter->data;
2875 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2876 void *ret;
2877
2878 ret = krealloc(block,
2879 (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
2880 GFP_KERNEL);
2881 if (!ret)
2882 return -ENOMEM;
2883
2884 gvt->mmio.mmio_block = block = ret;
2885
2886 block += gvt->mmio.num_mmio_block;
2887
2888 memset(block, 0, sizeof(*block));
2889
2890 block->offset = _MMIO(offset);
2891 block->size = size;
2892
2893 gvt->mmio.num_mmio_block++;
2894
2895 return 0;
2896 }
2897
2898 static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
2899 u32 size)
2900 {
2901 if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
2902 return handle_mmio(iter, offset, size);
2903 else
2904 return handle_mmio_block(iter, offset, size);
2905 }
2906
2907 static int init_mmio_info(struct intel_gvt *gvt)
2908 {
2909 struct intel_gvt_mmio_table_iter iter = {
2910 .i915 = gvt->gt->i915,
2911 .data = gvt,
2912 .handle_mmio_cb = handle_mmio_cb,
2913 };
2914
2915 return intel_gvt_iterate_mmio_table(&iter);
2916 }
2917
2918 static int init_mmio_block_handlers(struct intel_gvt *gvt)
2919 {
2920 struct gvt_mmio_block *block;
2921
2922 block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
2923 if (!block) {
2924 WARN(1, "fail to assign handlers to mmio block %x\n",
2925 VGT_PVINFO_PAGE);
2926 return -ENODEV;
2927 }
2928
2929 block->read = pvinfo_mmio_read;
2930 block->write = pvinfo_mmio_write;
2931
2932 return 0;
2933 }
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
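/**
 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to setup the MMIO
 * information table for GVT device.
 *
 * Returns:
 * Zero on success, negative if failed.
 */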
2945 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
2946 {
2947 struct intel_gvt_device_info *info = &gvt->device_info;
2948 struct drm_i915_private *i915 = gvt->gt->i915;
2949 int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
2950 int ret;
2951
2952 gvt->mmio.mmio_attribute = vzalloc(size);
2953 if (!gvt->mmio.mmio_attribute)
2954 return -ENOMEM;
2955
2956 ret = init_mmio_info(gvt);
2957 if (ret)
2958 goto err;
2959
2960 ret = init_mmio_block_handlers(gvt);
2961 if (ret)
2962 goto err;
2963
2964 ret = init_generic_mmio_info(gvt);
2965 if (ret)
2966 goto err;
2967
2968 if (IS_BROADWELL(i915)) {
2969 ret = init_bdw_mmio_info(gvt);
2970 if (ret)
2971 goto err;
2972 } else if (IS_SKYLAKE(i915) ||
2973 IS_KABYLAKE(i915) ||
2974 IS_COFFEELAKE(i915) ||
2975 IS_COMETLAKE(i915)) {
2976 ret = init_bdw_mmio_info(gvt);
2977 if (ret)
2978 goto err;
2979 ret = init_skl_mmio_info(gvt);
2980 if (ret)
2981 goto err;
2982 } else if (IS_BROXTON(i915)) {
2983 ret = init_bdw_mmio_info(gvt);
2984 if (ret)
2985 goto err;
2986 ret = init_skl_mmio_info(gvt);
2987 if (ret)
2988 goto err;
2989 ret = init_bxt_mmio_info(gvt);
2990 if (ret)
2991 goto err;
2992 }
2993
2994 return 0;
2995 err:
2996 intel_gvt_clean_mmio_info(gvt);
2997 return ret;
2998 }
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
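/**
 * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
 * @gvt: a GVT device
 * @handler: the handler to call for each tracked mmio offset
 * @data: private data passed to the handler
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */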
3009 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
3010 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
3011 void *data)
3012 {
3013 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3014 struct intel_gvt_mmio_info *e;
3015 int i, j, ret;
3016
3017 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
3018 ret = handler(gvt, e->offset, data);
3019 if (ret)
3020 return ret;
3021 }
3022
3023 for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3024
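/* pvinfo data doesn't come from hw mmio */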
3025 if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
3026 continue;
3027
3028 for (j = 0; j < block->size; j += 4) {
3029 ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
3030 if (ret)
3031 return ret;
3032 }
3033 }
3034 return 0;
3035 }
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
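/**
 * intel_vgpu_default_mmio_read - default MMIO read handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */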
3047 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
3048 void *p_data, unsigned int bytes)
3049 {
3050 read_vreg(vgpu, offset, p_data, bytes);
3051 return 0;
3052 }
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
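/**
 * intel_vgpu_default_mmio_write - default MMIO write handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */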
3064 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3065 void *p_data, unsigned int bytes)
3066 {
3067 write_vreg(vgpu, offset, p_data, bytes);
3068 return 0;
3069 }
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
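/**
 * intel_vgpu_mask_mmio_write - write a mask register
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * The upper 16 bits of a mask register select which of the lower 16 bits the
 * write may change. For example (illustrative values): writing 0x00010001
 * sets bit 0, writing 0x00010000 clears bit 0, and bits whose mask bit is not
 * set keep their previous value.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */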
3081 int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3082 void *p_data, unsigned int bytes)
3083 {
3084 u32 mask, old_vreg;
3085
3086 old_vreg = vgpu_vreg(vgpu, offset);
3087 write_vreg(vgpu, offset, p_data, bytes);
3088 mask = vgpu_vreg(vgpu, offset) >> 16;
3089 vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
3090 (vgpu_vreg(vgpu, offset) & mask);
3091
3092 return 0;
3093 }
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
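/**
 * intel_gvt_in_force_nonpriv_whitelist - check if a mmio is in the
 * force-nonpriv whitelist
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the register is in the force-nonpriv whitelist, false otherwise.
 */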
3106 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3107 unsigned int offset)
3108 {
3109 return in_whitelist(offset);
3110 }
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
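/**
 * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
 * @vgpu: a vGPU
 * @offset: register offset
 * @pdata: data buffer
 * @bytes: data length
 * @is_read: read or write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */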
3123 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3124 void *pdata, unsigned int bytes, bool is_read)
3125 {
3126 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
3127 struct intel_gvt *gvt = vgpu->gvt;
3128 struct intel_gvt_mmio_info *mmio_info;
3129 struct gvt_mmio_block *mmio_block;
3130 gvt_mmio_func func;
3131 int ret;
3132
3133 if (drm_WARN_ON(&i915->drm, bytes > 8))
3134 return -EINVAL;
3135
3136
3137
3138
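/*
 * Handle special MMIO blocks (e.g. the PVINFO page) first; they have
 * their own read/write handlers.
 */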
3139 mmio_block = find_mmio_block(gvt, offset);
3140 if (mmio_block) {
3141 func = is_read ? mmio_block->read : mmio_block->write;
3142 if (func)
3143 return func(vgpu, offset, pdata, bytes);
3144 goto default_rw;
3145 }
3146
3147
3148
3149
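/*
 * Normal tracked MMIOs.
 */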
3150 mmio_info = intel_gvt_find_mmio_info(gvt, offset);
3151 if (!mmio_info) {
3152 gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
3153 goto default_rw;
3154 }
3155
3156 if (is_read)
3157 return mmio_info->read(vgpu, offset, pdata, bytes);
3158 else {
3159 u64 ro_mask = mmio_info->ro_mask;
3160 u32 old_vreg = 0;
3161 u64 data = 0;
3162
3163 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset))
3164 old_vreg = vgpu_vreg(vgpu, offset);
3166
3167 if (likely(!ro_mask))
3168 ret = mmio_info->write(vgpu, offset, pdata, bytes);
3169 else if (!~ro_mask) {
3170 gvt_vgpu_err("try to write RO reg %x\n", offset);
3171 return 0;
3172 } else {
3173
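/* keep the RO bits in the virtual register */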
3174 memcpy(&data, pdata, bytes);
3175 data &= ~ro_mask;
3176 data |= vgpu_vreg(vgpu, offset) & ro_mask;
3177 ret = mmio_info->write(vgpu, offset, &data, bytes);
3178 }
3179
3180
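/* higher 16 bits of mode ctl regs are mask bits for change */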
3181 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3182 u32 mask = vgpu_vreg(vgpu, offset) >> 16;
3183
3184 vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
3185 | (vgpu_vreg(vgpu, offset) & mask);
3186 }
3187 }
3188
3189 return ret;
3190
3191 default_rw:
3192 return is_read ?
3193 intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
3194 intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
3195 }
3196
3197 void intel_gvt_restore_fence(struct intel_gvt *gvt)
3198 {
3199 struct intel_vgpu *vgpu;
3200 int i, id;
3201
3202 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3203 mmio_hw_access_pre(gvt->gt);
3204 for (i = 0; i < vgpu_fence_sz(vgpu); i++)
3205 intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
3206 mmio_hw_access_post(gvt->gt);
3207 }
3208 }
3209
3210 static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
3211 {
3212 struct intel_vgpu *vgpu = data;
3213 struct drm_i915_private *dev_priv = gvt->gt->i915;
3214
3215 if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
3216 intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
3217
3218 return 0;
3219 }
3220
3221 void intel_gvt_restore_mmio(struct intel_gvt *gvt)
3222 {
3223 struct intel_vgpu *vgpu;
3224 int id;
3225
3226 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3227 mmio_hw_access_pre(gvt->gt);
3228 intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
3229 mmio_hw_access_post(gvt->gt);
3230 }
3231 }