Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice (including the next
0012  * paragraph) shall be included in all copies or substantial portions of the
0013  * Software.
0014  *
0015  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0016  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0017  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0018  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0019  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
0020  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0021  * SOFTWARE.
0022  *
0023  * Authors:
0024  *    Eddie Dong <eddie.dong@intel.com>
0025  *    Kevin Tian <kevin.tian@intel.com>
0026  *
0027  * Contributors:
0028  *    Ping Gao <ping.a.gao@intel.com>
0029  *    Zhi Wang <zhi.a.wang@intel.com>
0030  *    Bing Niu <bing.niu@intel.com>
0031  *
0032  */
0033 
0034 #include "i915_drv.h"
0035 #include "gvt.h"
0036 #include "i915_pvinfo.h"
0037 
/*
 * populate_pvinfo_page - fill a vGPU's PVINFO MMIO page
 * @vgpu: virtual GPU whose paravirtual info registers are (re)initialized
 *
 * Writes the magic/version handshake, capability bits and the ballooned
 * graphics-memory layout into the vGPU's virtual register space so the
 * guest i915 driver can discover the resources assigned to this vGPU.
 * Called at vGPU creation and again from the device-model-level reset path.
 */
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
    struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
    /* setup the ballooning information */
    vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
    vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
    vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
    /* display path is brought up separately; start as not-ready */
    vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

    /* advertise capabilities: full PPGTT, HWSP emulation, huge GTT */
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

    /* mappable (aperture) and non-mappable (hidden) GM ranges */
    vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
        vgpu_aperture_gmadr_base(vgpu);
    vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
        vgpu_aperture_sz(vgpu);
    vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
        vgpu_hidden_gmadr_base(vgpu);
    vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
        vgpu_hidden_sz(vgpu);

    vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

    /* UINT_MAX marks the cursor hotspot as not yet provided
     * (NOTE(review): confirm guest-side interpretation of this sentinel)
     */
    vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
    vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

    gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
    gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
        vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
    gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
        vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
    gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

    /* sanity: the struct layout must exactly fill the PVINFO page */
    drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
0075 
/* upper bound of legal scheduling weights (weights range from 1 to 16) */
#define VGPU_MAX_WEIGHT 16
/* weight given to a type sized so that vgpu_num instances fit on one GPU */
#define VGPU_WEIGHT(vgpu_num)   \
    (VGPU_MAX_WEIGHT / (vgpu_num))

static const struct {
    unsigned int low_mm;   /* low (mappable) graphics memory, in bytes */
    unsigned int high_mm;  /* high (hidden) graphics memory, in bytes */
    unsigned int fence;    /* number of fence registers */

    /* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
     * with a weight of 4 on a contended host, different vGPU type has
     * different weight set. Legal weights range from 1 to 16.
     */
    unsigned int weight;
    enum intel_vgpu_edid edid; /* default virtual display resolution */
    const char *name;          /* suffix appended to "GVTg_Vx_" */
} vgpu_types[] = {
/* Fixed vGPU type table */
    { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
    { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
    { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
    { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
0099 
0100 /**
0101  * intel_gvt_init_vgpu_types - initialize vGPU type list
0102  * @gvt : GVT device
0103  *
0104  * Initialize vGPU type list based on available resource.
0105  *
0106  */
0107 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
0108 {
0109     unsigned int num_types;
0110     unsigned int i, low_avail, high_avail;
0111     unsigned int min_low;
0112 
0113     /* vGPU type name is defined as GVTg_Vx_y which contains
0114      * physical GPU generation type (e.g V4 as BDW server, V5 as
0115      * SKL server).
0116      *
0117      * Depend on physical SKU resource, might see vGPU types like
0118      * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
0119      * different types of vGPU on same physical GPU depending on
0120      * available resource. Each vGPU type will have "avail_instance"
0121      * to indicate how many vGPU instance can be created for this
0122      * type.
0123      *
0124      */
0125     low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
0126     high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
0127     num_types = ARRAY_SIZE(vgpu_types);
0128 
0129     gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
0130                  GFP_KERNEL);
0131     if (!gvt->types)
0132         return -ENOMEM;
0133 
0134     min_low = MB_TO_BYTES(32);
0135     for (i = 0; i < num_types; ++i) {
0136         if (low_avail / vgpu_types[i].low_mm == 0)
0137             break;
0138 
0139         gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
0140         gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
0141         gvt->types[i].fence = vgpu_types[i].fence;
0142 
0143         if (vgpu_types[i].weight < 1 ||
0144                     vgpu_types[i].weight > VGPU_MAX_WEIGHT)
0145             return -EINVAL;
0146 
0147         gvt->types[i].weight = vgpu_types[i].weight;
0148         gvt->types[i].resolution = vgpu_types[i].edid;
0149         gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
0150                            high_avail / vgpu_types[i].high_mm);
0151 
0152         if (GRAPHICS_VER(gvt->gt->i915) == 8)
0153             sprintf(gvt->types[i].name, "GVTg_V4_%s",
0154                 vgpu_types[i].name);
0155         else if (GRAPHICS_VER(gvt->gt->i915) == 9)
0156             sprintf(gvt->types[i].name, "GVTg_V5_%s",
0157                 vgpu_types[i].name);
0158 
0159         gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
0160                  i, gvt->types[i].name,
0161                  gvt->types[i].avail_instance,
0162                  gvt->types[i].low_gm_size,
0163                  gvt->types[i].high_gm_size, gvt->types[i].fence,
0164                  gvt->types[i].weight,
0165                  vgpu_edid_str(gvt->types[i].resolution));
0166     }
0167 
0168     gvt->num_types = i;
0169     return 0;
0170 }
0171 
/**
 * intel_gvt_clean_vgpu_types - free the vGPU type list
 * @gvt: GVT device
 *
 * Releases the type table allocated by intel_gvt_init_vgpu_types().
 */
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
    kfree(gvt->types);
}
0176 
/*
 * intel_gvt_update_vgpu_types - recompute avail_instance for every type
 * @gvt: GVT device
 *
 * Recalculates, from the currently unallocated low/high graphics memory
 * and fence registers, how many additional instances of each vGPU type
 * can still be created. Called after a vGPU is created or destroyed.
 * Caller holds gvt->lock (see intel_gvt_create_vgpu/destroy_vgpu).
 */
static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
    int i;
    unsigned int low_gm_avail, high_gm_avail, fence_avail;
    unsigned int low_gm_min, high_gm_min, fence_min;

    /* Need to depend on maximum hw resource size but keep on
     * static config for now.
     */
    low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
        gvt->gm.vgpu_allocated_low_gm_size;
    high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
        gvt->gm.vgpu_allocated_high_gm_size;
    fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
        gvt->fence.vgpu_allocated_fence_num;

    for (i = 0; i < gvt->num_types; i++) {
        /* availability is bounded by the scarcest of the three resources */
        low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
        high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
        fence_min = fence_avail / gvt->types[i].fence;
        gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
                           fence_min);

        gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
               i, gvt->types[i].name,
               gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
               gvt->types[i].high_gm_size, gvt->types[i].fence);
    }
}
0206 
/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
    /* flip the flag under the per-vGPU lock */
    mutex_lock(&vgpu->vgpu_lock);
    vgpu->active = true;
    mutex_unlock(&vgpu->vgpu_lock);
}
0220 
/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
    mutex_lock(&vgpu->vgpu_lock);

    vgpu->active = false;

    /* drop the lock while waiting so in-flight workloads can complete */
    if (atomic_read(&vgpu->submission.running_workload_num)) {
        mutex_unlock(&vgpu->vgpu_lock);
        intel_gvt_wait_vgpu_idle(vgpu);
        mutex_lock(&vgpu->vgpu_lock);
    }

    intel_vgpu_stop_schedule(vgpu);

    mutex_unlock(&vgpu->vgpu_lock);
}
0245 
/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all runtime information will be
 * destroyed.
 *
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
    /* stop scheduling and drain in-flight workloads first */
    intel_gvt_deactivate_vgpu(vgpu);

    mutex_lock(&vgpu->vgpu_lock);
    vgpu->d3_entered = false;
    intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
    intel_vgpu_dmabuf_cleanup(vgpu);
    mutex_unlock(&vgpu->vgpu_lock);
}
0265 
/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy a virtual GPU.
 * The caller must have deactivated the vGPU first; the vgpu memory
 * itself is freed at the end (vfree pairs with the vzalloc at creation).
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct drm_i915_private *i915 = gvt->gt->i915;

    drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");

    /*
     * remove idr first so later clean can judge if need to stop
     * service if no active vgpu.
     */
    mutex_lock(&gvt->lock);
    idr_remove(&gvt->vgpu_idr, vgpu->id);
    mutex_unlock(&gvt->lock);

    /* tear down per-vGPU state roughly in reverse order of creation */
    mutex_lock(&vgpu->vgpu_lock);
    intel_gvt_debugfs_remove_vgpu(vgpu);
    intel_vgpu_clean_sched_policy(vgpu);
    intel_vgpu_clean_submission(vgpu);
    intel_vgpu_clean_display(vgpu);
    intel_vgpu_clean_opregion(vgpu);
    intel_vgpu_reset_ggtt(vgpu, true);
    intel_vgpu_clean_gtt(vgpu);
    intel_vgpu_detach_regions(vgpu);
    intel_vgpu_free_resource(vgpu);
    intel_vgpu_clean_mmio(vgpu);
    intel_vgpu_dmabuf_cleanup(vgpu);
    mutex_unlock(&vgpu->vgpu_lock);

    /* freed resources are available again: refresh avail_instance counts */
    mutex_lock(&gvt->lock);
    intel_gvt_update_vgpu_types(gvt);
    mutex_unlock(&gvt->lock);

    vfree(vgpu);
}
0308 
/* reserved idr slot for the idle (placeholder) vGPU; real vGPUs start at 1 */
#define IDLE_VGPU_IDR 0

/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
    struct intel_vgpu *vgpu;
    enum intel_engine_id i;
    int ret;

    vgpu = vzalloc(sizeof(*vgpu));
    if (!vgpu)
        return ERR_PTR(-ENOMEM);

    /* the idle vGPU always uses the reserved id; it is never idr-allocated */
    vgpu->id = IDLE_VGPU_IDR;
    vgpu->gvt = gvt;
    mutex_init(&vgpu->vgpu_lock);

    for (i = 0; i < I915_NUM_ENGINES; i++)
        INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

    ret = intel_vgpu_init_sched_policy(vgpu);
    if (ret)
        goto out_free_vgpu;

    vgpu->active = false;

    return vgpu;

out_free_vgpu:
    vfree(vgpu);
    return ERR_PTR(ret);
}
0349 
/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
    mutex_lock(&vgpu->vgpu_lock);
    intel_vgpu_clean_sched_policy(vgpu);
    mutex_unlock(&vgpu->vgpu_lock);

    /* the idle vGPU was never inserted into the idr, so just free it */
    vfree(vgpu);
}
0365 
/*
 * __intel_gvt_create_vgpu - allocate and initialize one vGPU instance
 * @gvt: GVT device
 * @param: creation parameters (GM sizes in MB, fence count, weight,
 *         resolution, primary flag)
 *
 * Builds all per-vGPU state: idr id, config space, MMIO, GM/fence
 * resources, GTT, opregion, display, submission and scheduling policy.
 * On any failure the goto chain below unwinds everything initialized so
 * far, in reverse order of acquisition. Caller holds gvt->lock.
 *
 * Returns: pointer to the new vGPU, or ERR_PTR on failure.
 */
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        struct intel_vgpu_creation_params *param)
{
    struct drm_i915_private *dev_priv = gvt->gt->i915;
    struct intel_vgpu *vgpu;
    int ret;

    gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
            param->low_gm_sz, param->high_gm_sz,
            param->fence_sz);

    vgpu = vzalloc(sizeof(*vgpu));
    if (!vgpu)
        return ERR_PTR(-ENOMEM);

    /* ids start after the reserved IDLE_VGPU_IDR slot */
    ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
        GFP_KERNEL);
    if (ret < 0)
        goto out_free_vgpu;

    vgpu->id = ret;
    vgpu->gvt = gvt;
    vgpu->sched_ctl.weight = param->weight;
    mutex_init(&vgpu->vgpu_lock);
    mutex_init(&vgpu->dmabuf_lock);
    INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
    INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
    idr_init_base(&vgpu->object_idr, 1);
    intel_vgpu_init_cfg_space(vgpu, param->primary);
    vgpu->d3_entered = false;

    ret = intel_vgpu_init_mmio(vgpu);
    if (ret)
        goto out_clean_idr;

    ret = intel_vgpu_alloc_resource(vgpu, param);
    if (ret)
        goto out_clean_vgpu_mmio;

    /* PVINFO depends on the GM layout allocated just above */
    populate_pvinfo_page(vgpu);

    ret = intel_vgpu_init_gtt(vgpu);
    if (ret)
        goto out_clean_vgpu_resource;

    ret = intel_vgpu_init_opregion(vgpu);
    if (ret)
        goto out_clean_gtt;

    ret = intel_vgpu_init_display(vgpu, param->resolution);
    if (ret)
        goto out_clean_opregion;

    ret = intel_vgpu_setup_submission(vgpu);
    if (ret)
        goto out_clean_display;

    ret = intel_vgpu_init_sched_policy(vgpu);
    if (ret)
        goto out_clean_submission;

    intel_gvt_debugfs_add_vgpu(vgpu);

    ret = intel_gvt_set_opregion(vgpu);
    if (ret)
        goto out_clean_sched_policy;

    /* virtual EDID lives on PORT_B for BDW/BXT, PORT_D otherwise */
    if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
        ret = intel_gvt_set_edid(vgpu, PORT_B);
    else
        ret = intel_gvt_set_edid(vgpu, PORT_D);
    if (ret)
        goto out_clean_sched_policy;

    return vgpu;

/* error unwind: release resources in reverse order of acquisition */
out_clean_sched_policy:
    intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
    intel_vgpu_clean_submission(vgpu);
out_clean_display:
    intel_vgpu_clean_display(vgpu);
out_clean_opregion:
    intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
    intel_vgpu_clean_gtt(vgpu);
out_clean_vgpu_resource:
    intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
    intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
    idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
    vfree(vgpu);
    return ERR_PTR(ret);
}
0462 
0463 /**
0464  * intel_gvt_create_vgpu - create a virtual GPU
0465  * @gvt: GVT device
0466  * @type: type of the vGPU to create
0467  *
0468  * This function is called when user wants to create a virtual GPU.
0469  *
0470  * Returns:
0471  * pointer to intel_vgpu, error pointer if failed.
0472  */
0473 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
0474                 struct intel_vgpu_type *type)
0475 {
0476     struct intel_vgpu_creation_params param;
0477     struct intel_vgpu *vgpu;
0478 
0479     param.primary = 1;
0480     param.low_gm_sz = type->low_gm_size;
0481     param.high_gm_sz = type->high_gm_size;
0482     param.fence_sz = type->fence;
0483     param.weight = type->weight;
0484     param.resolution = type->resolution;
0485 
0486     /* XXX current param based on MB */
0487     param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
0488     param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
0489 
0490     mutex_lock(&gvt->lock);
0491     vgpu = __intel_gvt_create_vgpu(gvt, &param);
0492     if (!IS_ERR(vgpu)) {
0493         /* calculate left instance change for types */
0494         intel_gvt_update_vgpu_types(gvt);
0495         intel_gvt_update_reg_whitelist(vgpu);
0496     }
0497     mutex_unlock(&gvt->lock);
0498 
0499     return vgpu;
0500 }
0501 
0502 /**
0503  * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
0504  * @vgpu: virtual GPU
0505  * @dmlr: vGPU Device Model Level Reset or GT Reset
0506  * @engine_mask: engines to reset for GT reset
0507  *
0508  * This function is called when user wants to reset a virtual GPU through
0509  * device model reset or GT reset. The caller should hold the vgpu lock.
0510  *
0511  * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
0512  * the whole vGPU to default state as when it is created. This vGPU function
0513  * is required both for functionary and security concerns.The ultimate goal
0514  * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
0515  * assign a vGPU to a virtual machine we must isse such reset first.
0516  *
0517  * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
0518  * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
0519  * Unlike the FLR, GT reset only reset particular resource of a vGPU per
0520  * the reset request. Guest driver can issue a GT reset by programming the
0521  * virtual GDRST register to reset specific virtual GPU engine or all
0522  * engines.
0523  *
0524  * The parameter dev_level is to identify if we will do DMLR or GT reset.
0525  * The parameter engine_mask is to specific the engines that need to be
0526  * resetted. If value ALL_ENGINES is given for engine_mask, it means
0527  * the caller requests a full GT reset that we will reset all virtual
0528  * GPU engines. For FLR, engine_mask is ignored.
0529  */
0530 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
0531                  intel_engine_mask_t engine_mask)
0532 {
0533     struct intel_gvt *gvt = vgpu->gvt;
0534     struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
0535     intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
0536 
0537     gvt_dbg_core("------------------------------------------\n");
0538     gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
0539              vgpu->id, dmlr, engine_mask);
0540 
0541     vgpu->resetting_eng = resetting_eng;
0542 
0543     intel_vgpu_stop_schedule(vgpu);
0544     /*
0545      * The current_vgpu will set to NULL after stopping the
0546      * scheduler when the reset is triggered by current vgpu.
0547      */
0548     if (scheduler->current_vgpu == NULL) {
0549         mutex_unlock(&vgpu->vgpu_lock);
0550         intel_gvt_wait_vgpu_idle(vgpu);
0551         mutex_lock(&vgpu->vgpu_lock);
0552     }
0553 
0554     intel_vgpu_reset_submission(vgpu, resetting_eng);
0555     /* full GPU reset or device model level reset */
0556     if (engine_mask == ALL_ENGINES || dmlr) {
0557         intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
0558         if (engine_mask == ALL_ENGINES)
0559             intel_vgpu_invalidate_ppgtt(vgpu);
0560         /*fence will not be reset during virtual reset */
0561         if (dmlr) {
0562             if(!vgpu->d3_entered) {
0563                 intel_vgpu_invalidate_ppgtt(vgpu);
0564                 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
0565             }
0566             intel_vgpu_reset_ggtt(vgpu, true);
0567             intel_vgpu_reset_resource(vgpu);
0568         }
0569 
0570         intel_vgpu_reset_mmio(vgpu, dmlr);
0571         populate_pvinfo_page(vgpu);
0572 
0573         if (dmlr) {
0574             intel_vgpu_reset_display(vgpu);
0575             intel_vgpu_reset_cfg_space(vgpu);
0576             /* only reset the failsafe mode when dmlr reset */
0577             vgpu->failsafe = false;
0578             /*
0579              * PCI_D0 is set before dmlr, so reset d3_entered here
0580              * after done using.
0581              */
0582             if(vgpu->d3_entered)
0583                 vgpu->d3_entered = false;
0584             else
0585                 vgpu->pv_notified = false;
0586         }
0587     }
0588 
0589     vgpu->resetting_eng = 0;
0590     gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
0591     gvt_dbg_core("------------------------------------------\n");
0592 }
0593 
/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.
 * Takes the vgpu lock and performs a device-model-level reset (dmlr=true,
 * so the engine mask argument is ignored by the locked helper).
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
    mutex_lock(&vgpu->vgpu_lock);
    intel_gvt_reset_vgpu_locked(vgpu, true, 0);
    mutex_unlock(&vgpu->vgpu_lock);
}