#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
0037
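/* Populate the PVINFO page, through which a paravirtualized guest driver
 * discovers GVT-g capabilities and its assigned graphics memory and fence
 * ranges.
 */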
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

static const struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;
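
	/* A vGPU with a weight of 8 is given twice the GPU time of a
	 * vGPU with a weight of 4 on a contended host. Valid weights
	 * range from 1 to VGPU_MAX_WEIGHT.
	 */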
	unsigned int weight;
	enum intel_vgpu_edid edid;
	const char *name;
} vgpu_types[] = {
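	/* Fixed vGPU type table */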
	{ MB_TO_BYTES(64),  MB_TO_BYTES(384),  4, VGPU_WEIGHT(8), GVT_EDID_1024_768,  "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512),  4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
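/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available host resources.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */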
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;

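	/* vGPU type names are defined as GVTg_Vx_y: x encodes the physical
	 * GPU generation (V4 for gen8, V5 for gen9, matching the
	 * GRAPHICS_VER() checks below) and y the instance capacity.
	 *
	 * Depending on the physical SKU, this may produce types such as
	 * GVTg_V4_8, GVTg_V4_4 or GVTg_V4_2. Different vGPU types can
	 * coexist on the same physical GPU; each type carries an
	 * "avail_instance" count of how many instances can still be
	 * created from the remaining resources.
	 */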
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT) {
			kfree(gvt->types);
			return -EINVAL;
		}

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (GRAPHICS_VER(gvt->gt->i915) == 8)
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (GRAPHICS_VER(gvt->gt->i915) == 9)
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

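	/* Compute what remains after subtracting the static host
	 * reservation and the resources already claimed by existing
	 * vGPU instances.
	 */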
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		       gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
			gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		      gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
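/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to activate a virtual GPU.
 */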
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->vgpu_lock);
}
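/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 */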
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}
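/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all runtime information will be
 * destroyed.
 */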
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	vgpu->d3_entered = false;
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}
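/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy a virtual GPU.
 */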
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;

	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
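	/*
	 * Remove the vGPU from the IDR first, so later cleanup can
	 * tell whether any active vGPUs remain.
	 */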
	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	mutex_unlock(&gvt->lock);

	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_vgpu_detach_regions(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0

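/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */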
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
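/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy an idle virtual GPU.
 */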
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
		     param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init_base(&vgpu->object_idr, 1);
	intel_vgpu_init_cfg_space(vgpu, param->primary);
	vgpu->d3_entered = false;

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
		ret = intel_gvt_set_edid(vgpu, PORT_B);
	else
		ret = intel_gvt_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
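/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */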
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX current param based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu)) {
		/* calculate the remaining instance counts for all types */
		intel_gvt_update_vgpu_types(gvt);
		intel_gvt_update_reg_whitelist(vgpu);
	}
	mutex_unlock(&gvt->lock);

	return vgpu;
}
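/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: true for vGPU Device Model Level Reset (DMLR), false for GT reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the vgpu lock.
 *
 * A DMLR simulates a PCI-level reset and returns the whole vGPU to the
 * default state it had when created; this is required before a vGPU
 * instance can be reused by another virtual machine.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for the GPU
 * engines (Render, Blitter, Video, Video Enhancement). Unlike DMLR, a GT
 * reset only resets the resources of the engines named in the reset
 * request. A guest driver can issue a GT reset by programming the virtual
 * GDRST register to reset specific engines or all engines.
 */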
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);

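	/*
	 * current_vgpu is set to NULL after the scheduler stops when the
	 * reset is triggered by the currently scheduled vGPU; in that
	 * case, drop the lock and wait for its workloads to drain.
	 */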
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);

	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		if (engine_mask == ALL_ENGINES)
			intel_vgpu_invalidate_ppgtt(vgpu);
		/* fences are not reset during a virtual (engine-level) reset */
		if (dmlr) {
			if (!vgpu->d3_entered) {
				intel_vgpu_invalidate_ppgtt(vgpu);
				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
			}
			intel_vgpu_reset_ggtt(vgpu, true);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);

		if (dmlr) {
			intel_vgpu_reset_display(vgpu);
			intel_vgpu_reset_cfg_space(vgpu);

			vgpu->failsafe = false;
			/*
			 * PCI_D0 is set before dmlr, so reset d3_entered.
			 */
			if (vgpu->d3_entered)
				vgpu->d3_entered = false;
			else
				vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
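/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.
 */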
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}