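/* KMS-level setup and teardown for the virtio-gpu DRM driver. */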
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>

#include "virtgpu_drv.h"

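/*
 * Worker for config-space event notifications.  When the host flags
 * VIRTIO_GPU_EVENT_DISPLAY we re-query EDID data (if supported) and the
 * display info, kick the device, and signal a hotplug event to DRM
 * before acknowledging the event via events_clear.
 */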
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

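/*
 * Shared bookkeeping setup for the control and cursor queues: a lock
 * protecting the virtqueue, a wait queue for free descriptors, and the
 * dequeue worker that processes completed buffers.
 */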
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

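/*
 * Query every capability set the device advertises.  Each probe is
 * synchronous: we wait (up to 5s) for the response that fills in
 * capsets[i] before validating the returned id.  On timeout or an
 * out-of-range id the whole capsets array is dropped.
 */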
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;
	bool invalid_capset_id = false;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}

	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);

		/*
		 * Capability ids are defined by the virtio-gpu spec and
		 * must lie in the range 1..MAX_CAPSET_ID.
		 */
		if (!vgdev->capsets[i].id ||
		    vgdev->capsets[i].id > MAX_CAPSET_ID)
			invalid_capset_id = true;

		if (ret == 0)
			DRM_ERROR("timed out waiting for cap set %d\n", i);
		else if (invalid_capset_id)
			DRM_ERROR("invalid capset id %u\n",
				  vgdev->capsets[i].id);

		if (ret == 0 || invalid_capset_id) {
			spin_lock(&vgdev->display_info_lock);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}

		/* use a 64-bit shift: ids may exceed the width of int */
		vgdev->capset_id_mask |= 1ULL << vgdev->capsets[i].id;
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}

	vgdev->num_capsets = num_capsets;
}

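/*
 * Device bring-up, called from the driver's probe path.  Allocates the
 * virtio_gpu_device, negotiates feature bits, sets up the control and
 * cursor virtqueues, reads the scanout/capset counts from the config
 * space and initializes modesetting before marking the device ready.
 */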
int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };
	struct virtio_gpu_device *vgdev;
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

	/* 3D (virgl) support is only wired up on little-endian builds */
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
		vgdev->has_edid = true;
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
		vgdev->has_indirect = true;
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
		vgdev->has_resource_assign_uuid = true;
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
		vgdev->has_resource_blob = true;
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			ret = -EBUSY;
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
		vgdev->has_context_init = true;

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');
	DRM_INFO("features: %ccontext_init\n",
		 vgdev->has_context_init ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	/* get caps */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	virtio_gpu_notify(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	dev->dev_private = NULL;
	kfree(vgdev);
	return ret;
}

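/* Drop all cached capset query results. */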
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

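/*
 * Tear down the virtio side of the device: flush all pending work
 * items, reset the device so the host stops using the rings, then
 * delete the virtqueues.
 */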
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	virtio_reset_device(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

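/*
 * Final release, once the last reference to the drm_device is gone:
 * free modesetting state, vbufs, the capset cache and the device
 * structure itself.
 */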
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);

	kfree(vgdev->capsets);
	kfree(vgdev);
}

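/*
 * Per-open-file setup.  With a 3D-capable device each drm_file gets a
 * private structure and a context id; the host-side context itself is
 * created lazily on first use.
 */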
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	/* the ida is 0-based, context ids are 1-based */
	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

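/*
 * Per-open-file teardown: destroy the host context if one was created,
 * return the context id to the ida and free the private data.
 */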
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}