Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice (including the next
0012  * paragraph) shall be included in all copies or substantial portions of the
0013  * Software.
0014  *
0015  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0016  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0017  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0018  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0019  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
0020  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0021  * SOFTWARE.
0022  */
0023 
0024 #include "i915_drv.h"
0025 #include "i915_vgpu.h"
0026 #include "intel_gvt.h"
0027 #include "gem/i915_gem_dmabuf.h"
0028 #include "gt/intel_context.h"
0029 #include "gt/intel_ring.h"
0030 #include "gt/shmem_utils.h"
0031 
0032 /**
0033  * DOC: Intel GVT-g host support
0034  *
0035  * Intel GVT-g is a graphics virtualization technology which shares the
0036  * GPU among multiple virtual machines on a time-sharing basis. Each
0037  * virtual machine is presented a virtual GPU (vGPU), which has equivalent
0038  * features as the underlying physical GPU (pGPU), so i915 driver can run
0039  * seamlessly in a virtual machine.
0040  *
0041  * To virtualize GPU resources GVT-g driver depends on hypervisor technology
0042  * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
0043  * and be virtualized within GVT-g device module. More architectural design
0044  * doc is available on https://01.org/group/2230/documentation-list.
0045  */
0046 
0047 static LIST_HEAD(intel_gvt_devices);
0048 static const struct intel_vgpu_ops *intel_gvt_ops;
0049 static DEFINE_MUTEX(intel_gvt_mutex);
0050 
0051 static bool is_supported_device(struct drm_i915_private *dev_priv)
0052 {
0053     if (IS_BROADWELL(dev_priv))
0054         return true;
0055     if (IS_SKYLAKE(dev_priv))
0056         return true;
0057     if (IS_KABYLAKE(dev_priv))
0058         return true;
0059     if (IS_BROXTON(dev_priv))
0060         return true;
0061     if (IS_COFFEELAKE(dev_priv))
0062         return true;
0063     if (IS_COMETLAKE(dev_priv))
0064         return true;
0065 
0066     return false;
0067 }
0068 
0069 static void free_initial_hw_state(struct drm_i915_private *dev_priv)
0070 {
0071     struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
0072 
0073     vfree(vgpu->initial_mmio);
0074     vgpu->initial_mmio = NULL;
0075 
0076     kfree(vgpu->initial_cfg_space);
0077     vgpu->initial_cfg_space = NULL;
0078 }
0079 
0080 static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
0081               u32 size)
0082 {
0083     struct drm_i915_private *dev_priv = iter->i915;
0084     u32 *mmio, i;
0085 
0086     for (i = offset; i < offset + size; i += 4) {
0087         mmio = iter->data + i;
0088         *mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore,
0089                           _MMIO(i));
0090     }
0091 }
0092 
/*
 * intel_gvt_iterate_mmio_table() callback: snapshot one MMIO range.
 * The snapshot buffer is written in 32-bit units, so the range offset
 * must be dword-aligned; anything else indicates a broken table entry.
 */
static int handle_mmio(struct intel_gvt_mmio_table_iter *iter,
		       u32 offset, u32 size)
{
	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	save_mmio(iter, offset, size);
	return 0;
}
0102 
0103 static int save_initial_hw_state(struct drm_i915_private *dev_priv)
0104 {
0105     struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
0106     struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
0107     struct intel_gvt_mmio_table_iter iter;
0108     void *mem;
0109     int i, ret;
0110 
0111     mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL);
0112     if (!mem)
0113         return -ENOMEM;
0114 
0115     vgpu->initial_cfg_space = mem;
0116 
0117     for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4)
0118         pci_read_config_dword(pdev, i, mem + i);
0119 
0120     mem = vzalloc(2 * SZ_1M);
0121     if (!mem) {
0122         ret = -ENOMEM;
0123         goto err_mmio;
0124     }
0125 
0126     vgpu->initial_mmio = mem;
0127 
0128     iter.i915 = dev_priv;
0129     iter.data = vgpu->initial_mmio;
0130     iter.handle_mmio_cb = handle_mmio;
0131 
0132     ret = intel_gvt_iterate_mmio_table(&iter);
0133     if (ret)
0134         goto err_iterate;
0135 
0136     return 0;
0137 
0138 err_iterate:
0139     vfree(vgpu->initial_mmio);
0140     vgpu->initial_mmio = NULL;
0141 err_mmio:
0142     kfree(vgpu->initial_cfg_space);
0143     vgpu->initial_cfg_space = NULL;
0144 
0145     return ret;
0146 }
0147 
/*
 * Create the GVT device for @dev_priv, if all preconditions hold.
 * Called with intel_gvt_mutex held, either when the GVT module registers
 * its ops or when a new i915 device probes after registration.
 *
 * Bails out silently (debug/info log only) when GVT-g is disabled by
 * module parameter, we are ourselves running as a guest, the platform is
 * unsupported, or GuC submission is in use (incompatible with GVT-g).
 */
static void intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->params.enable_gvt) {
		drm_dbg(&dev_priv->drm,
			"GVT-g is disabled by kernel params\n");
		return;
	}

	if (intel_vgpu_active(dev_priv)) {
		drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
		return;
	}

	if (!is_supported_device(dev_priv)) {
		drm_info(&dev_priv->drm,
			 "Unsupported device. GVT-g is disabled\n");
		return;
	}

	if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
		drm_err(&dev_priv->drm,
			"Graphics virtualization is not yet supported with GuC submission\n");
		return;
	}

	/* The GVT module hands the saved state to vGPUs as reset state. */
	if (save_initial_hw_state(dev_priv)) {
		drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n");
		return;
	}

	if (intel_gvt_ops->init_device(dev_priv))
		drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
}
0181 
/*
 * Tear down the GVT device for @dev_priv (if one was created) and free
 * the saved initial HW state. Called with intel_gvt_mutex held.
 */
static void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
	if (dev_priv->gvt)
		intel_gvt_ops->clean_device(dev_priv);
	free_initial_hw_state(dev_priv);
}
0188 
0189 int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
0190 {
0191     struct drm_i915_private *dev_priv;
0192 
0193     mutex_lock(&intel_gvt_mutex);
0194     if (intel_gvt_ops) {
0195         mutex_unlock(&intel_gvt_mutex);
0196         return -EINVAL;
0197     }
0198     intel_gvt_ops = ops;
0199 
0200     list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
0201         intel_gvt_init_device(dev_priv);
0202     mutex_unlock(&intel_gvt_mutex);
0203 
0204     return 0;
0205 }
0206 EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT);
0207 
0208 void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
0209 {
0210     struct drm_i915_private *dev_priv;
0211 
0212     mutex_lock(&intel_gvt_mutex);
0213     if (intel_gvt_ops != ops) {
0214         mutex_unlock(&intel_gvt_mutex);
0215         return;
0216     }
0217 
0218     list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
0219         intel_gvt_clean_device(dev_priv);
0220 
0221     intel_gvt_ops = NULL;
0222     mutex_unlock(&intel_gvt_mutex);
0223 }
0224 EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT);
0225 
0226 /**
0227  * intel_gvt_init - initialize GVT components
0228  * @dev_priv: drm i915 private data
0229  *
0230  * This function is called at the initialization stage to create a GVT device.
0231  *
0232  * Returns:
0233  * Zero on success, negative error code if failed.
0234  *
0235  */
0236 int intel_gvt_init(struct drm_i915_private *dev_priv)
0237 {
0238     if (i915_inject_probe_failure(dev_priv))
0239         return -ENODEV;
0240 
0241     mutex_lock(&intel_gvt_mutex);
0242     list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices);
0243     if (intel_gvt_ops)
0244         intel_gvt_init_device(dev_priv);
0245     mutex_unlock(&intel_gvt_mutex);
0246 
0247     return 0;
0248 }
0249 
0250 /**
0251  * intel_gvt_driver_remove - cleanup GVT components when i915 driver is
0252  *               unbinding
0253  * @dev_priv: drm i915 private *
0254  *
0255  * This function is called at the i915 driver unloading stage, to shutdown
0256  * GVT components and release the related resources.
0257  */
0258 void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
0259 {
0260     mutex_lock(&intel_gvt_mutex);
0261     intel_gvt_clean_device(dev_priv);
0262     list_del(&dev_priv->vgpu.entry);
0263     mutex_unlock(&intel_gvt_mutex);
0264 }
0265 
0266 /**
0267  * intel_gvt_resume - GVT resume routine wapper
0268  *
0269  * @dev_priv: drm i915 private *
0270  *
0271  * This function is called at the i915 driver resume stage to restore required
0272  * HW status for GVT so that vGPU can continue running after resumed.
0273  */
0274 void intel_gvt_resume(struct drm_i915_private *dev_priv)
0275 {
0276     mutex_lock(&intel_gvt_mutex);
0277     if (dev_priv->gvt)
0278         intel_gvt_ops->pm_resume(dev_priv);
0279     mutex_unlock(&intel_gvt_mutex);
0280 }
0281 
/*
 * Exported here so that the exports only get created when GVT support is
 * actually enabled.
 */
/* GEM object creation, pinning and mapping. */
EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT);
/* ww-mutex context helpers for multi-object locking. */
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT);
/* Request submission and fencing. */
EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT);
EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT);
/* GT context and ring access. */
EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT);
/* Runtime PM and register access. */
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * intel_runtime_pm_put() is an out-of-line function only in this
 * config; otherwise it is a static inline and cannot be exported.
 */
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT);
#endif
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT);
EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT);