0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #include <linux/dma-mapping.h>
0029 #include <linux/module.h>
0030 #include <linux/pci.h>
0031 #include <linux/cc_platform.h>
0032
0033 #include <drm/drm_aperture.h>
0034 #include <drm/drm_drv.h>
0035 #include <drm/drm_gem_ttm_helper.h>
0036 #include <drm/drm_ioctl.h>
0037 #include <drm/drm_module.h>
0038 #include <drm/drm_sysfs.h>
0039 #include <drm/ttm/ttm_bo_driver.h>
0040 #include <drm/ttm/ttm_range_manager.h>
0041 #include <drm/ttm/ttm_placement.h>
0042 #include <generated/utsrelease.h>
0043
0044 #include "ttm_object.h"
0045 #include "vmwgfx_binding.h"
0046 #include "vmwgfx_devcaps.h"
0047 #include "vmwgfx_drv.h"
0048 #include "vmwgfx_mksstat.h"
0049
/* Human-readable driver description reported to DRM core. */
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

/* Smallest initial display size we will report/fall back to. */
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
0054
0055
0056
0057
0058
/*
 * Ioctl definitions.
 *
 * Encode direction and argument size for each vmwgfx device ioctl.
 * The DRM_VMW_* command numbers and the argument structs/unions come
 * from the UAPI header (include/uapi/drm/vmwgfx_drm.h).
 */
#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

/* Legacy overlay (video stream) ioctls. */
#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

/* 3D context, surface, fence and command submission ioctls. */
#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
		 union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG						\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
		 struct drm_vmw_msg_arg)
#define DRM_IOCTL_VMW_MKSSTAT_RESET				\
	DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
#define DRM_IOCTL_VMW_MKSSTAT_ADD				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD,	\
		 struct drm_vmw_mksstat_add_arg)
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE,	\
		struct drm_vmw_mksstat_remove_arg)
0159
0160
0161
0162
0163
/*
 * Ioctl dispatch table. DRM_IOCTL_DEF_DRV() places each entry at the
 * slot derived from its DRM_VMW_* command number, and the flags field
 * restricts who may call it (DRM_RENDER_ALLOW vs. DRM_MASTER).
 */
static const struct drm_ioctl_desc vmw_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS,
			  vmw_kms_cursor_bypass_ioctl,
			  DRM_MASTER),

	/* Legacy overlay streams touch display state: master only. */
	DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
			  DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
			  DRM_MASTER),

	DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED,
			  vmw_fence_obj_signaled_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
			  DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers: master only. */
	DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl,
			  DRM_MASTER | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK,
			  vmw_present_readback_ioctl,
			  DRM_MASTER | DRM_AUTH),

	/*
	 * NOTE(review): VMW_UPDATE_LAYOUT changes display layout yet is
	 * flagged DRM_RENDER_ALLOW here; its permissions are presumably
	 * checked elsewhere (generic ioctl path) — confirm before relying
	 * on the flag alone.
	 */
	DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT,
			  vmw_kms_update_layout_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER,
			  vmw_shader_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER,
			  vmw_shader_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE,
			  vmw_gb_surface_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF,
			  vmw_gb_surface_reference_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_SYNCCPU,
			  vmw_user_bo_synccpu_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT,
			  vmw_extended_context_define_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT,
			  vmw_gb_surface_define_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT,
			  vmw_gb_surface_reference_ext_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MSG,
			  vmw_msg_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET,
			  vmw_mksstat_reset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD,
			  vmw_mksstat_add_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE,
			  vmw_mksstat_remove_ioctl,
			  DRM_RENDER_ALLOW),
};
0257
/* PCI IDs this driver binds to: the SVGA2 and SVGA3 virtual adapters. */
static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
0264
/* Module parameters; all writable at runtime via sysfs (mode 0600). */
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); /* fbdev emulation on/off */
static int vmw_restrict_iommu;		/* bind-time DMA mappings only */
static int vmw_force_coherent;		/* force coherent TTM pages */
static int vmw_restrict_dma_mask;	/* cap DMA mask at 44 bits */
static int vmw_assume_16bpp;		/* assume 16 bpp when filtering modes */

/* Forward declarations for functions referenced before their definition. */
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
0285
0286
/*
 * Maps a single capability bit to a human-readable label for logging.
 * 'uint32' is the VMware-device-header typedef, not a kernel type.
 */
struct bitmap_name {
	uint32 value;		/* single SVGA_CAP*/SVGA_CAP2* bit */
	const char *name;	/* label printed by vmw_print_bitmap() */
};
0291
/* Labels for the SVGA_REG_CAPABILITIES bits (first capability register). */
static const struct bitmap_name cap1_names[] = {
	{ SVGA_CAP_RECT_COPY, "rect copy" },
	{ SVGA_CAP_CURSOR, "cursor" },
	{ SVGA_CAP_CURSOR_BYPASS, "cursor bypass" },
	{ SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" },
	{ SVGA_CAP_8BIT_EMULATION, "8bit emulation" },
	{ SVGA_CAP_ALPHA_CURSOR, "alpha cursor" },
	{ SVGA_CAP_3D, "3D" },
	{ SVGA_CAP_EXTENDED_FIFO, "extended fifo" },
	{ SVGA_CAP_MULTIMON, "multimon" },
	{ SVGA_CAP_PITCHLOCK, "pitchlock" },
	{ SVGA_CAP_IRQMASK, "irq mask" },
	{ SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" },
	{ SVGA_CAP_GMR, "gmr" },
	{ SVGA_CAP_TRACES, "traces" },
	{ SVGA_CAP_GMR2, "gmr2" },
	{ SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" },
	{ SVGA_CAP_COMMAND_BUFFERS, "command buffers" },
	{ SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" },
	{ SVGA_CAP_GBOBJECTS, "gbobject" },
	{ SVGA_CAP_DX, "dx" },
	{ SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" },
	{ SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" },
	{ SVGA_CAP_CAP2_REGISTER, "cap2 register" },
};
0317
0318
/* Labels for the SVGA_REG_CAP2 bits (second capability register). */
static const struct bitmap_name cap2_names[] = {
	{ SVGA_CAP2_GROW_OTABLE, "grow otable" },
	{ SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" },
	{ SVGA_CAP2_DX2, "dx2" },
	{ SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" },
	{ SVGA_CAP2_SCREENDMA_REG, "screendma reg" },
	{ SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" },
	{ SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" },
	{ SVGA_CAP2_CURSOR_MOB, "cursor mob" },
	{ SVGA_CAP2_MSHINT, "mshint" },
	{ SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" },
	{ SVGA_CAP2_DX3, "dx3" },
	{ SVGA_CAP2_FRAME_TYPE, "frame type" },
	{ SVGA_CAP2_COTABLE_COPY, "cotable copy" },
	{ SVGA_CAP2_TRACE_FULL_FB, "trace full fb" },
	{ SVGA_CAP2_EXTRA_REGS, "extra regs" },
	{ SVGA_CAP2_LO_STAGING, "lo staging" },
};
0337
0338 static void vmw_print_bitmap(struct drm_device *drm,
0339 const char *prefix, uint32_t bitmap,
0340 const struct bitmap_name *bnames,
0341 uint32_t num_names)
0342 {
0343 char buf[512];
0344 uint32_t i;
0345 uint32_t offset = 0;
0346 for (i = 0; i < num_names; ++i) {
0347 if ((bitmap & bnames[i].value) != 0) {
0348 offset += snprintf(buf + offset,
0349 ARRAY_SIZE(buf) - offset,
0350 "%s, ", bnames[i].name);
0351 bitmap &= ~bnames[i].value;
0352 }
0353 }
0354
0355 drm_info(drm, "%s: %s\n", prefix, buf);
0356 if (bitmap != 0)
0357 drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap);
0358 }
0359
0360
0361 static void vmw_print_sm_type(struct vmw_private *dev_priv)
0362 {
0363 static const char *names[] = {
0364 [VMW_SM_LEGACY] = "Legacy",
0365 [VMW_SM_4] = "SM4",
0366 [VMW_SM_4_1] = "SM4_1",
0367 [VMW_SM_5] = "SM_5",
0368 [VMW_SM_5_1X] = "SM_5_1X",
0369 [VMW_SM_MAX] = "Invalid"
0370 };
0371 BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
0372 drm_info(&dev_priv->drm, "Available shader model: %s.\n",
0373 names[dev_priv->sm_type]);
0374 }
0375
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
/**
 * vmw_dummy_query_bo_create - create a buffer object holding a dummy
 * query result.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Creates a small pinned buffer object, maps its first page and
 * initializes an SVGA3dQueryResult structure in the PENDING state,
 * then unmaps it again.  On success the bo is stored in
 * dev_priv->dummy_query_bo; on map failure the bo is released and the
 * error returned.  No interruptible waits are done in this function.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/* A single page in system placement is all the query result needs. */
	ret = vmw_bo_create(dev_priv, PAGE_SIZE,
			    &vmw_sys_placement, false, true,
			    &vmw_bo_bo_free, &vbo);
	if (unlikely(ret != 0))
		return ret;

	/* Nobody else can see the bo yet, so the reserve cannot fail. */
	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		/* Pre-fill a pending query result for the device to consume. */
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
0431
/*
 * vmw_device_init - enable the SVGA device and bring up the fifo.
 *
 * Saves the current ENABLE/CONFIG_DONE/TRACES register state (restored
 * later by vmw_device_fini()), enables the device hidden, selects fb
 * traces when command submission is unavailable, and creates the fifo.
 * Returns 0 on success or a negative error from fifo creation.
 */
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	/* Remember the pre-driver register state for restoration at fini. */
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	/* Use framebuffer traces only when we cannot submit commands. */
	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
		(dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);
		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		/* No fifo: signal config done directly via the register. */
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}
0460
/*
 * vmw_device_fini - quiesce the device and restore its saved state.
 *
 * Issues a legacy sync and busy-waits for the device to go idle, then
 * restores the CONFIG_DONE/ENABLE/TRACES register values saved by
 * vmw_device_init() and destroys the fifo.
 */
static void vmw_device_fini(struct vmw_private *vmw)
{
	/* Legacy sync, then poll until the device reports idle. */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	/* Restore the register state captured in vmw_device_init(). */
	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
/**
 * vmw_request_device_late - Perform late device setup.
 *
 * @dev_priv: Pointer to device private.
 *
 * Sets up otables (when MOBs are available) and enables large command
 * buffer submission.  Split out from vmw_request_device() so it can be
 * re-run as part of a suspend/resume style sequence; it is the inverse
 * of vmw_release_device_early().
 *
 * Note: a command-buffer pool-size failure is not fatal — the command
 * buffer manager is torn down and the driver falls back, returning 0.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			/* Clear the pointer before destroying the manager. */
			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
0517
/*
 * vmw_request_device - bring up device, fences, command buffers and the
 * dummy query bo.
 *
 * On failure the partially-initialized state is unwound in reverse
 * order via the labels below.  A command-buffer manager creation
 * failure is non-fatal: the driver drops to VMW_SM_LEGACY instead.
 */
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		/* No command buffers: fall back to the legacy shader model. */
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	/* Undo vmw_request_device_late(): pool, otables, then the manager. */
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}
0561
0562
0563
0564
0565
0566
0567
0568
0569
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * First part of command submission takedown, to be called before
 * buffer management is taken down.  Releases the dummy query bo,
 * removes the command-buffer pool and evicts/tears down MOB state.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */
	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}
0591
0592
0593
0594
0595
0596
0597
0598
0599
/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Last part of command submission takedown, to be called when command
 * submission is no longer needed: brings down the fence fifo, destroys
 * the command-buffer manager and finalizes the device.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618 static void vmw_get_initial_size(struct vmw_private *dev_priv)
0619 {
0620 uint32_t width;
0621 uint32_t height;
0622
0623 width = vmw_read(dev_priv, SVGA_REG_WIDTH);
0624 height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
0625
0626 width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
0627 height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
0628
0629 if (width > dev_priv->fb_max_width ||
0630 height > dev_priv->fb_max_height) {
0631
0632
0633
0634
0635
0636 width = VMW_MIN_INITIAL_WIDTH;
0637 height = VMW_MIN_INITIAL_HEIGHT;
0638 }
0639
0640 dev_priv->initial_width = width;
0641 dev_priv->initial_height = height;
0642 }
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
0657 {
0658 static const char *names[vmw_dma_map_max] = {
0659 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
0660 [vmw_dma_map_populate] = "Caching DMA mappings.",
0661 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
0662
0663
0664 if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
0665 return -EINVAL;
0666
0667 if (vmw_force_coherent)
0668 dev_priv->map_mode = vmw_dma_alloc_coherent;
0669 else if (vmw_restrict_iommu)
0670 dev_priv->map_mode = vmw_dma_map_bind;
0671 else
0672 dev_priv->map_mode = vmw_dma_map_populate;
0673
0674 drm_info(&dev_priv->drm,
0675 "DMA map mode: %s\n", names[dev_priv->map_mode]);
0676 return 0;
0677 }
0678
0679
0680
0681
0682
0683
0684
0685
0686
/**
 * vmw_dma_masks - set required page- and dma masks.
 *
 * @dev_priv: Pointer to struct drm-device.
 *
 * Tries a 64-bit DMA mask first.  On 32-bit kernels, or when the
 * restrict_dma_mask module parameter is set, the mask is narrowed to
 * 44 bits instead.  Returns 0 on success or a negative errno from
 * dma_set_mask_and_coherent().
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
		drm_info(&dev_priv->drm,
			 "Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}
0701
0702 static int vmw_vram_manager_init(struct vmw_private *dev_priv)
0703 {
0704 int ret;
0705 ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
0706 dev_priv->vram_size >> PAGE_SHIFT);
0707 ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
0708 return ret;
0709 }
0710
/* Tear down the TTM range manager created by vmw_vram_manager_init(). */
static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
}
0715
/*
 * vmw_setup_pci_resources - claim and map the device's PCI BARs.
 *
 * @dev: Device private.
 * @pci_id: PCI device id; selects the BAR layout.
 *
 * SVGA3 devices expose register MMIO in BAR 0 and VRAM in BAR 2;
 * SVGA2 devices expose an I/O port base in BAR 0, VRAM in BAR 1 and
 * the FIFO in BAR 2.  Mappings use devm_* so they are released
 * automatically; PCI regions are released explicitly on error.
 * Returns 0 on success or a negative errno.
 */
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   u32 pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		/* SVGA3: registers in BAR 0, VRAM in BAR 2. */
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			"Register MMIO at 0x%pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			drm_err(&dev->drm,
				"Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		/* SVGA2: I/O ports in BAR 0, VRAM in BAR 1, FIFO in BAR 2. */
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		drm_info(&dev->drm,
			 "FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);

		if (IS_ERR(dev->fifo_mem)) {
			drm_err(&dev->drm,
				  "Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * The BAR length is only an upper bound; the usable VRAM size is
	 * refined later from SVGA_REG_VRAM_SIZE in vmw_driver_load().
	 */
	drm_info(&dev->drm,
		 "VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}
0789
/*
 * vmw_detect_version - negotiate the SVGA device version.
 *
 * Writes the highest ID we support for this device flavor to
 * SVGA_REG_ID and reads back what the host accepted.  Returns 0 when
 * the host answers with SVGA_ID_2 or SVGA_ID_3, -ENOSYS otherwise.
 */
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	/* Propose SVGA_ID_3 on v3 hardware, SVGA_ID_2 otherwise. */
	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
			  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		drm_err(&dev->drm,
			"Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			svga_id, dev->pci_id);
		return -ENOSYS;
	}
	/* A v3 device must never negotiate down to SVGA_ID_2. */
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	drm_info(&dev->drm,
		 "Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}
0808
/*
 * vmw_driver_load - main device initialization sequence.
 *
 * @dev_priv: Freshly allocated device private.
 * @pci_id: PCI device id, forwarded to vmw_setup_pci_resources().
 *
 * Sets up locks, PCI resources, capability discovery, DMA mode and
 * masks, TTM, memory managers (VRAM/GMR/MOB), shader-model detection,
 * KMS, and finally the device itself.  On failure, everything set up
 * so far is unwound in reverse order via the labels at the bottom.
 */
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->drm.dev_private = dev_priv;

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->binding_mutex);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;

	/* One idr and LRU list per resource type; ids start at 1. */
	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init_base(&dev_priv->res_idr[i], 1);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	/* Discover and log device capabilities. */
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	vmw_print_bitmap(&dev_priv->drm, "Capabilities",
			 dev_priv->capabilities,
			 cap1_names, ARRAY_SIZE(cap1_names));
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
		vmw_print_bitmap(&dev_priv->drm, "Capabilities2",
				 dev_priv->capabilities2,
				 cap2_names, ARRAY_SIZE(cap2_names));
	}

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		/* Continue without DMA; GMR/MOB features are disabled below. */
		drm_info(&dev_priv->drm,
			 "Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			drm_info(&dev_priv->drm,
				 "Disabling 3D acceleration.\n");
	}

	/* Sizes reported by the device registers. */
	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * No GMR2 capability: fall back to an arbitrary 512 MiB
		 * surface memory limit.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Triple the reported size on 2D-only configurations —
		 * presumably a workaround for low-memory 2D VMs; confirm
		 * against host documentation.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->max_primary_mem =
			vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		/* Query texture limits through the DEV_CAP register pair. */
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		/* Non-GB hardware: fixed texture limits, VRAM as primary. */
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->max_primary_mem = dev_priv->vram_size;
	}
	drm_info(&dev_priv->drm,
		 "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
		 (u64)dev_priv->vram_size / 1024,
		 (u64)dev_priv->fifo_mem_size / 1024,
		 dev_priv->memory_size / 1024);

	drm_info(&dev_priv->drm,
		 "MOB limits: max mob size = %u kB, max mob pages = %u\n",
		 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		drm_info(&dev_priv->drm,
			 "Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		drm_info(&dev_priv->drm,
			 "Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
	}
	drm_info(&dev_priv->drm,
		 "Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->max_primary_mem / 1024);

	/* Need at least one way to communicate the framebuffer pitch. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		drm_err(&dev_priv->drm,
			"Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev_priv);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      dev_priv->drm.vma_offset_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Create the VRAM manager disabled; it is enabled only once SVGA
	 * is switched on.
	 */
	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	ret = vmw_devcaps_create(dev_priv);
	if (unlikely(ret != 0)) {
		drm_err(&dev_priv->drm,
			"Failed initializing device caps.\n");
		goto out_no_vram;
	}

	/*
	 * GMR support needs both the capability bit and working DMA;
	 * a setup failure just degrades functionality, it is not fatal.
	 */
	dev_priv->has_gmr = true;

	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		drm_info(&dev_priv->drm,
			  "No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			drm_info(&dev_priv->drm,
				 "No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
		if (vmw_sys_man_init(dev_priv) != 0) {
			drm_info(&dev_priv->drm,
				 "No MOB page table memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	/* Probe the shader model, stepping up from SM4 as caps allow. */
	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
			dev_priv->sm_type = VMW_SM_4;
	}

	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
			dev_priv->sm_type = VMW_SM_4_1;
		if (has_sm4_1_context(dev_priv) &&
		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
				dev_priv->sm_type = VMW_SM_5;
				if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
					dev_priv->sm_type = VMW_SM_5_1X;
			}
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	vmw_print_sm_type(dev_priv);
	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

	/* Error unwind: strictly the reverse of the setup order above. */
out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob) {
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
		vmw_sys_man_fini(dev_priv);
	}
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}
1137
/*
 * vmw_driver_unload - Tear down everything set up by vmw_driver_load.
 *
 * NOTE(review): the order below largely mirrors the error-unwind path at
 * the end of vmw_driver_load(); keep the two in sync when changing either.
 */
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	/* Free validation-context state (resource hash table, bounce buffer). */
	if (dev_priv->ctx.res_ht_initialized)
		vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		/* Undo the fb setup done at the end of vmw_driver_load(). */
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	/* Early device release happens before the MOB managers go away. */
	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob) {
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
		vmw_sys_man_fini(dev_priv);
	}
	vmw_devcaps_destroy(dev_priv);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	vmw_mksstat_remove_all(dev_priv);

	pci_release_regions(pdev);
}
1186
1187 static void vmw_postclose(struct drm_device *dev,
1188 struct drm_file *file_priv)
1189 {
1190 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1191
1192 ttm_object_file_release(&vmw_fp->tfile);
1193 kfree(vmw_fp);
1194 }
1195
1196 static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1197 {
1198 struct vmw_private *dev_priv = vmw_priv(dev);
1199 struct vmw_fpriv *vmw_fp;
1200 int ret = -ENOMEM;
1201
1202 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
1203 if (unlikely(!vmw_fp))
1204 return ret;
1205
1206 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
1207 if (unlikely(vmw_fp->tfile == NULL))
1208 goto out_no_tfile;
1209
1210 file_priv->driver_priv = vmw_fp;
1211
1212 return 0;
1213
1214 out_no_tfile:
1215 kfree(vmw_fp);
1216 return ret;
1217 }
1218
/*
 * vmw_generic_ioctl - Common ioctl dispatcher for the native and compat
 * entry points.
 *
 * Performs extra validation on driver-private ioctls before forwarding the
 * call to @ioctl_func (drm_ioctl or drm_compat_ioctl).
 */
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver-private ioctls.
	 */
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			/* EXECBUF dispatches directly, bypassing the
			 * ioctl->cmd encoding check below. */
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			/* Only the current master or CAP_SYS_ADMIN may
			 * update the display layout. */
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		/* Reject calls whose encoded size/direction don't match
		 * the table entry. */
		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}
1261
1262 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
1263 unsigned long arg)
1264 {
1265 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
1266 }
1267
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl entry point: same dispatcher, compat translation. */
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, drm_compat_ioctl);
}
#endif
1275
1276 static void vmw_master_set(struct drm_device *dev,
1277 struct drm_file *file_priv,
1278 bool from_open)
1279 {
1280
1281
1282
1283
1284 if (!from_open)
1285 drm_sysfs_hotplug_event(dev);
1286 }
1287
1288 static void vmw_master_drop(struct drm_device *dev,
1289 struct drm_file *file_priv)
1290 {
1291 struct vmw_private *dev_priv = vmw_priv(dev);
1292
1293 vmw_kms_legacy_hotspot_clear(dev_priv);
1294 if (!dev_priv->enable_fb)
1295 vmw_svga_disable(dev_priv);
1296 }
1297
1298
1299
1300
1301
1302
1303
1304 static void __vmw_svga_enable(struct vmw_private *dev_priv)
1305 {
1306 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
1307
1308 if (!ttm_resource_manager_used(man)) {
1309 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
1310 ttm_resource_manager_set_used(man, true);
1311 }
1312 }
1313
1314
1315
1316
1317
1318
/*
 * vmw_svga_enable - Public wrapper around __vmw_svga_enable().
 * @dev_priv: Pointer to the device private structure.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}
1323
1324
1325
1326
1327
1328
1329
1330
1331 static void __vmw_svga_disable(struct vmw_private *dev_priv)
1332 {
1333 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
1334
1335 if (ttm_resource_manager_used(man)) {
1336 ttm_resource_manager_set_used(man, false);
1337 vmw_write(dev_priv, SVGA_REG_ENABLE,
1338 SVGA_REG_ENABLE_HIDE |
1339 SVGA_REG_ENABLE_ENABLE);
1340 }
1341 }
1342
1343
1344
1345
1346
1347
1348
1349
/*
 * vmw_svga_disable - Disable SVGA mode, evicting VRAM contents first.
 *
 * Unlike __vmw_svga_disable(), this variant notifies KMS that the device
 * is going away and evicts all buffers from the VRAM manager before
 * hiding/disabling the SVGA device.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	/*
	 * NOTE(review): an explanatory comment appears to have been lost
	 * here. The sequence below (KMS lost-device notification, VRAM
	 * eviction, manager shutdown, register write) looks order-sensitive;
	 * confirm against upstream history before reordering.
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}
1375
/* PCI remove callback: unregister the DRM device, then unload the driver. */
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}
1383
1384 static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
1385 {
1386 struct drm_minor *minor = vmw->drm.primary;
1387 struct dentry *root = minor->debugfs_root;
1388
1389 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM),
1390 root, "system_ttm");
1391 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
1392 root, "vram_ttm");
1393 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
1394 root, "gmr_ttm");
1395 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
1396 root, "mob_ttm");
1397 ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
1398 root, "system_mob_ttm");
1399 }
1400
1401 static unsigned long
1402 vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
1403 unsigned long len, unsigned long pgoff,
1404 unsigned long flags)
1405 {
1406 struct drm_file *file_priv = file->private_data;
1407 struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
1408
1409 return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
1410 dev_priv->drm.vma_offset_manager);
1411 }
1412
/*
 * PM notifier: tracks a hibernation sequence via dev_priv->suspend_locked.
 */
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Mark a hibernation sequence as in progress.
		 * vmw_pm_freeze() clears this flag on failure and
		 * vmw_pm_restore() clears it on resume; the cases below
		 * only mop up if neither ran.
		 */
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/* Clear the flag if freeze/restore left it set. */
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
		}
		break;
	default:
		break;
	}
	return 0;
}
1442
/*
 * Legacy PCI suspend: save config space and drop to D3hot.
 * Refuses with -EBUSY while refuse_hibernation is set (presumably set
 * elsewhere when the device cannot be suspended safely — verify).
 */
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
1456
/* Legacy PCI resume: restore power state and config, re-enable the device. */
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
1463
1464 static int vmw_pm_suspend(struct device *kdev)
1465 {
1466 struct pci_dev *pdev = to_pci_dev(kdev);
1467 struct pm_message dummy;
1468
1469 dummy.event = 0;
1470
1471 return vmw_pci_suspend(pdev, dummy);
1472 }
1473
/* dev_pm_ops .resume: maps directly onto the PCI resume hook. */
static int vmw_pm_resume(struct device *kdev)
{
	return vmw_pci_resume(to_pci_dev(kdev));
}
1480
/*
 * vmw_pm_freeze - Hibernation freeze callback.
 *
 * Suspends modesetting, evicts resources and releases the device.
 * Fails with -EBUSY — rolling back everything done so far — if 3D
 * resources are still active.
 */
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/* Quiesce modesetting and (if enabled) the fbdev emulation first. */
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	/* Swap out TTM buffers until nothing more can be swapped. */
	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		/* Roll back the steps above and refuse to freeze. */
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		/* End the sequence started in vmwgfx_pm_notifier(). */
		dev_priv->suspend_locked = false;
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}
1528
/*
 * vmw_pm_restore - Hibernation thaw/restore callback.
 *
 * Re-detects the device version, re-acquires the resources released by
 * vmw_pm_freeze(), then resumes fbdev/KMS state.
 */
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_detect_version(dev_priv);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	/* End the hibernation sequence started in vmwgfx_pm_notifier(). */
	dev_priv->suspend_locked = false;
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}
1558
/*
 * Device PM ops: freeze/thaw/restore drive the hibernation path, while
 * suspend/resume map onto the legacy PCI suspend/resume hooks.
 */
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
1566
/* File operations: DRM defaults plus vmwgfx-specific ioctl/mmap wrappers. */
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl, /* extra checks, then drm_ioctl */
	.mmap = vmw_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
	.get_unmapped_area = vmw_get_unmapped_area,
};
1581
/* DRM driver description: modesetting, render nodes, atomic KMS, GEM. */
static const struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	/* Dumb-buffer support; offset mapping via the GEM/TTM helper. */
	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,

	/* PRIME buffer sharing. */
	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
1606
/* PCI driver glue; matched devices come from vmw_pci_id_list. */
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};
1616
/*
 * vmw_probe - PCI probe callback.
 *
 * Claims the device, allocates the DRM/vmw_private structure, loads the
 * driver and registers the DRM device. On a vmw_driver_load() failure no
 * unload is needed — load unwinds its own partial setup.
 */
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

	/* Kick out any firmware framebuffer driver using this device. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (ret)
		goto out_error;

	/* Managed enable: no explicit disable needed on error/remove. */
	ret = pcim_enable_device(pdev);
	if (ret)
		goto out_error;

	/* drm_device is embedded in vmw_private; lifetime is devres-managed. */
	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw)) {
		ret = PTR_ERR(vmw);
		goto out_error;
	}

	pci_set_drvdata(pdev, &vmw->drm);

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		goto out_error;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret)
		goto out_unload;

	vmw_debugfs_gem_init(vmw);
	vmw_debugfs_resource_managers_init(vmw);

	return 0;
out_unload:
	vmw_driver_unload(&vmw->drm);
out_error:
	return ret;
}
1656
/* Module init/exit entry points generated for the PCI driver. */
drm_module_pci_driver(vmw_pci_driver);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");