/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 */
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"

#define GEN9_MOCS_SIZE		64

/* Each entry is {engine id, mmio register, save/restore mask, in-context flag}. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false},
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false},
	{RCS0, HWSTAM, 0x0, false},
	{RCS0, INSTPM, 0xffff, true},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false},
	{RCS0, CACHE_MODE_1, 0xffff, true},
	{RCS0, GEN7_GT_MODE, 0xffff, true},
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true},
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true},
	{RCS0, HDC_CHICKEN0, 0xffff, true},
	{RCS0, VF_GUARDBAND, 0xffff, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false},
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false},
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false},
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false},
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false},
	{RCS0, INVALID_MMIO_REG, 0, false} /* Terminated with INVALID_MMIO_REG */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false},
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false},
	{RCS0, HWSTAM, 0x0, false},
	{RCS0, INSTPM, 0xffff, true},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false},
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false},
	{RCS0, CACHE_MODE_1, 0xffff, true},
	{RCS0, GEN7_GT_MODE, 0xffff, true},
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true},
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true},
	{RCS0, HDC_CHICKEN0, 0xffff, true},
	{RCS0, VF_GUARDBAND, 0xffff, true},

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false},
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false},
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true},
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true},
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false},
	{RCS0, GEN8_L3SQCREG4, 0, false},
	{RCS0, GEN9_SCRATCH1, 0, false},
	{RCS0, GEN9_SCRATCH_LNCF1, 0, false},
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true},
	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true},
	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true},
	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true},
	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true},
	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true},
	{RCS0, TRVATTL3PTRDW(0), 0, true},
	{RCS0, TRVATTL3PTRDW(1), 0, true},
	{RCS0, TRNULLDETCT, 0, true},
	{RCS0, TRINVTILEDETCT, 0, true},
	{RCS0, TRVADR, 0, true},
	{RCS0, TRTTE, 0, true},
	{RCS0, _MMIO(0x4dfc), 0, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false},
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false},
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false},
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false},
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false},

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false},

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false},

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true},
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false},
	{RCS0, GEN7_UCGCTL4, 0x0, false},
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false},

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false},
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false},
	{RCS0, _MMIO(0x20D8), 0xffff, true},

	{RCS0, GEN8_GARBCNTL, 0x0, false},
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false},
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false},
	{RCS0, INVALID_MMIO_REG, 0, false} /* Terminated with INVALID_MMIO_REG */
};

/* Host MOCS register values, saved once on first use and shared globally. */
static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

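/* Per-engine base offsets of the gen9 MOCS control registers. */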
static u32 gen9_mocs_mmio_offset_list[] = {
	[RCS0] = 0xc800,
	[VCS0] = 0xc900,
	[VCS1] = 0xca00,
	[BCS0] = 0xcc00,
	[VECS0] = 0xcb00,
};

static void load_render_mocs(const struct intel_engine_cs *engine)
{
	struct intel_gvt *gvt = engine->i915->gvt;
	struct intel_uncore *uncore = engine->uncore;
	u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
	u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
	i915_reg_t offset;
	int ring_id, i;

	/* Platform doesn't have mocs mmios. */
	if (!regs)
		return;

	for (ring_id = 0; ring_id < cnt; ring_id++) {
		if (!HAS_ENGINE(engine->gt, ring_id))
			continue;

		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				intel_uncore_read_fw(uncore, offset);
			offset.reg += 4;
		}
	}

	/* The L3CC (LNCFCMOCS) registers start at 0xb020. */
	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			intel_uncore_read_fw(uncore, offset);
		offset.reg += 4;
	}
	gen9_render_mocs.initialized = true;
}

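/*
 * Emit a single MI_LOAD_REGISTER_IMM that reloads every tracked in-context
 * mmio of this engine from the vGPU's virtual register file, so an inhibit
 * context resumes with the vGPU's values instead of hardware defaults.
 */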
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != ring_id || !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

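/* Reload the render MOCS control registers from the vGPU's virtual regs. */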
static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

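/* Reload the render MOCS L3 cache control (LNCFCMOCS) registers. */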
static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/*
 * Use an LRI command to initialize the mmios that live in the context state
 * image of an inhibit context: the tracked in-context engine mmios, the
 * render MOCS control table and the render MOCS L3CC table.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* no MOCS register in context except render engine */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

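/* Per-engine offsets of the TLB invalidation registers. */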
static u32 gen8_tlb_mmio_offset_list[] = {
	[RCS0] = 0x4260,
	[VCS0] = 0x4264,
	[VCS1] = 0x4268,
	[BCS0] = 0x426c,
	[VECS0] = 0x4270,
};

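/*
 * If a TLB invalidation was requested for this engine while it was owned
 * elsewhere, perform it now: write 1 to the engine's invalidation register
 * and wait for the hardware to clear it back to 0.
 */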
static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
				     const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
	u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
	enum forcewake_domains fw;
	i915_reg_t reg;

	if (!regs)
		return;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
		return;

	if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[engine->id]);

	/*
	 * WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake while invalidating the RCS TLB,
	 * otherwise the device can enter RC6 and interrupt the
	 * invalidation process.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
		gvt_vgpu_err("timeout invalidating ring %s tlb\n",
			     engine->name);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}

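/*
 * Compare the MOCS (and, for RCS, L3CC) values of the outgoing and incoming
 * owners and rewrite only the registers that differ. A NULL vGPU on either
 * side stands for the host, whose values are cached in gen9_render_mocs.
 */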
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	u32 regs[] = {
		[RCS0] = 0xc800,
		[VCS0] = 0xc900,
		[VCS1] = 0xca00,
		[BCS0] = 0xcc00,
		[VECS0] = 0xcb00,
	};
	struct intel_uncore *uncore = engine->uncore;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	int i;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
		return;

	/* Render MOCS is switched with the context image on gen9. */
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(engine);

	offset.reg = regs[engine->id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[engine->id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[engine->id][i];

		if (old_v != new_v)
			intel_uncore_write_fw(uncore, offset, new_v);

		offset.reg += 4;
	}

	if (engine->id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				intel_uncore_write_fw(uncore, l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

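/* Index of the CTX_CONTEXT_CONTROL value dword in the LRC register state. */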
#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	if (GRAPHICS_VER(engine->i915) >= 9)
		switch_mocs(pre, next, engine);

	for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != engine->id)
			continue;
		/*
		 * No need to save or restore an mmio which is in the
		 * context state image on gen9: it is initialized by an lri
		 * command and saved/restored together with the context.
		 */
		if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
			continue;

		/* Save the outgoing owner's value. */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) =
				intel_uncore_read_fw(uncore, mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
					~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else {
			old_v = mmio->value =
				intel_uncore_read_fw(uncore, mmio->reg);
		}

		/* Restore the incoming owner's value. */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore an in-context mmio if this is
			 * not an inhibit context; the context restores it
			 * by itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow[engine->id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
					(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		intel_uncore_write_fw(uncore, mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, engine);
}

/**
 * intel_gvt_switch_mmio - switch mmio context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @engine: the engine
 *
 * A NULL @pre means the engine comes from the host; a NULL @next means we
 * are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next,
			   const struct intel_engine_cs *engine)
{
	if (WARN(!pre && !next, "switch ring %s from host to host\n",
		 engine->name))
		return;

	gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	/*
	 * We use the raw mmio access wrappers to improve performance of
	 * batched mmio reads/writes, so forcewake must be handled manually
	 * here.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	switch_mmio(pre, next, engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
		gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
		gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
	} else {
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
	}

	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
			intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
		}
	}
}