/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
                    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
                    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

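/*
 * Queue a DRM event that fires when @fence signals.  Events are only
 * created for rings that userspace opted into polling via
 * VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK; for other rings this is a no-op.
 */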
static int virtio_gpu_fence_event_create(struct drm_device *dev,
                     struct drm_file *file,
                     struct virtio_gpu_fence *fence,
                     uint32_t ring_idx)
{
    struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
    struct virtio_gpu_fence_event *e = NULL;
    int ret;

    if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
        return 0;

    e = kzalloc(sizeof(*e), GFP_KERNEL);
    if (!e)
        return -ENOMEM;

    e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
    e->event.length = sizeof(e->event);

    ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
    if (ret)
        goto free;

    fence->e = e;
    return 0;
free:
    kfree(e);
    return ret;
}

/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
                         struct virtio_gpu_fpriv *vfpriv)
{
    char dbgname[TASK_COMM_LEN];

    get_task_comm(dbgname, current);
    virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
                      vfpriv->context_init, strlen(dbgname),
                      dbgname);

    vfpriv->context_created = true;
}

void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

    mutex_lock(&vfpriv->context_lock);
    if (vfpriv->context_created)
        goto out_unlock;

    virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
    mutex_unlock(&vfpriv->context_lock);
}

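/*
 * A minimal userspace sketch of the MAP ioctl below (assuming the standard
 * virtgpu_drm.h UAPI and libdrm's drmIoctl; not taken from this file): the
 * ioctl returns the fake mmap offset for a BO, which is then handed to
 * mmap() on the DRM fd.
 *
 *   struct drm_virtgpu_map map = { .handle = bo_handle };
 *   if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map) == 0)
 *       ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED, fd, map.offset);
 */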
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                struct drm_file *file)
{
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct drm_virtgpu_map *virtio_gpu_map = data;

    return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
                     virtio_gpu_map->handle,
                     &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
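/*
 * A minimal userspace sketch of an execbuffer call (assuming the standard
 * virtgpu_drm.h UAPI; not taken from this file): submit a command buffer
 * against a set of BO handles and get back a sync_file fd that signals on
 * host completion.
 *
 *   struct drm_virtgpu_execbuffer eb = {
 *       .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *       .size = cmd_size,
 *       .command = (uint64_t)(uintptr_t)cmd_buf,
 *       .bo_handles = (uint64_t)(uintptr_t)handles,
 *       .num_bo_handles = num_handles,
 *   };
 *   if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) == 0)
 *       fence_fd = eb.fence_fd;
 */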
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                 struct drm_file *file)
{
    struct drm_virtgpu_execbuffer *exbuf = data;
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
    struct virtio_gpu_fence *out_fence;
    int ret;
    uint32_t *bo_handles = NULL;
    void __user *user_bo_handles = NULL;
    struct virtio_gpu_object_array *buflist = NULL;
    struct sync_file *sync_file;
    int in_fence_fd = exbuf->fence_fd;
    int out_fence_fd = -1;
    void *buf;
    uint64_t fence_ctx;
    uint32_t ring_idx;

    fence_ctx = vgdev->fence_drv.context;
    ring_idx = 0;

    if (!vgdev->has_virgl_3d)
        return -ENOSYS;

    if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
        return -EINVAL;

    if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
        if (exbuf->ring_idx >= vfpriv->num_rings)
            return -EINVAL;

        if (!vfpriv->base_fence_ctx)
            return -EINVAL;

        fence_ctx = vfpriv->base_fence_ctx;
        ring_idx = exbuf->ring_idx;
    }

    exbuf->fence_fd = -1;

    virtio_gpu_create_context(dev, file);
    if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
        struct dma_fence *in_fence;

        in_fence = sync_file_get_fence(in_fence_fd);

        if (!in_fence)
            return -EINVAL;

        /*
         * Wait if the fence is from a foreign context, or if the fence
         * array contains any fence from a foreign context.
         */
        ret = 0;
        if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
            ret = dma_fence_wait(in_fence, true);

        dma_fence_put(in_fence);
        if (ret)
            return ret;
    }

    if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
        out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
        if (out_fence_fd < 0)
            return out_fence_fd;
    }

    if (exbuf->num_bo_handles) {
        bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                        sizeof(uint32_t), GFP_KERNEL);
        if (!bo_handles) {
            ret = -ENOMEM;
            goto out_unused_fd;
        }

        user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
        if (copy_from_user(bo_handles, user_bo_handles,
                   exbuf->num_bo_handles * sizeof(uint32_t))) {
            ret = -EFAULT;
            goto out_unused_fd;
        }

        buflist = virtio_gpu_array_from_handles(file, bo_handles,
                            exbuf->num_bo_handles);
        if (!buflist) {
            ret = -ENOENT;
            goto out_unused_fd;
        }
        kvfree(bo_handles);
        bo_handles = NULL;
    }

    buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
    if (IS_ERR(buf)) {
        ret = PTR_ERR(buf);
        goto out_unused_fd;
    }

    if (buflist) {
        ret = virtio_gpu_array_lock_resv(buflist);
        if (ret)
            goto out_memdup;
    }

    out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
    if (!out_fence) {
        ret = -ENOMEM;
        goto out_unresv;
    }

    ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
    if (ret)
        goto out_unresv;

    if (out_fence_fd >= 0) {
        sync_file = sync_file_create(&out_fence->f);
        if (!sync_file) {
            dma_fence_put(&out_fence->f);
            ret = -ENOMEM;
            goto out_unresv;
        }

        exbuf->fence_fd = out_fence_fd;
        fd_install(out_fence_fd, sync_file->file);
    }

    virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                  vfpriv->ctx_id, buflist, out_fence);
    dma_fence_put(&out_fence->f);
    virtio_gpu_notify(vgdev);
    return 0;

out_unresv:
    if (buflist)
        virtio_gpu_array_unlock_resv(buflist);
out_memdup:
    kvfree(buf);
out_unused_fd:
    kvfree(bo_handles);
    if (buflist)
        virtio_gpu_array_put_free(buflist);

    if (out_fence_fd >= 0)
        put_unused_fd(out_fence_fd);

    return ret;
}

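/*
 * A minimal userspace sketch of the GETPARAM ioctl below (assuming the
 * standard virtgpu_drm.h UAPI): param->value carries a user pointer, and
 * the handler copies back sizeof(int) bytes.
 *
 *   int has_blob = 0;
 *   struct drm_virtgpu_getparam gp = {
 *       .param = VIRTGPU_PARAM_RESOURCE_BLOB,
 *       .value = (uint64_t)(uintptr_t)&has_blob,
 *   };
 *   drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
 */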
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct drm_virtgpu_getparam *param = data;
    int value;

    switch (param->param) {
    case VIRTGPU_PARAM_3D_FEATURES:
        value = vgdev->has_virgl_3d ? 1 : 0;
        break;
    case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
        value = 1;
        break;
    case VIRTGPU_PARAM_RESOURCE_BLOB:
        value = vgdev->has_resource_blob ? 1 : 0;
        break;
    case VIRTGPU_PARAM_HOST_VISIBLE:
        value = vgdev->has_host_visible ? 1 : 0;
        break;
    case VIRTGPU_PARAM_CROSS_DEVICE:
        value = vgdev->has_resource_assign_uuid ? 1 : 0;
        break;
    case VIRTGPU_PARAM_CONTEXT_INIT:
        value = vgdev->has_context_init ? 1 : 0;
        break;
    case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
        value = vgdev->capset_id_mask;
        break;
    default:
        return -EINVAL;
    }
    if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
        return -EFAULT;

    return 0;
}

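/*
 * Create a host resource plus a GEM handle for it.  With virgl 3d the 3D
 * texture parameters are passed through from userspace; without it, only a
 * plain 2D resource (target 2, no mips, arrays or multisampling) is
 * accepted.
 */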
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file)
{
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct drm_virtgpu_resource_create *rc = data;
    struct virtio_gpu_fence *fence;
    int ret;
    struct virtio_gpu_object *qobj;
    struct drm_gem_object *obj;
    uint32_t handle = 0;
    struct virtio_gpu_object_params params = { 0 };

    if (vgdev->has_virgl_3d) {
        virtio_gpu_create_context(dev, file);
        params.virgl = true;
        params.target = rc->target;
        params.bind = rc->bind;
        params.depth = rc->depth;
        params.array_size = rc->array_size;
        params.last_level = rc->last_level;
        params.nr_samples = rc->nr_samples;
        params.flags = rc->flags;
    } else {
        if (rc->depth > 1)
            return -EINVAL;
        if (rc->nr_samples > 1)
            return -EINVAL;
        if (rc->last_level > 1)
            return -EINVAL;
        if (rc->target != 2)
            return -EINVAL;
        if (rc->array_size > 1)
            return -EINVAL;
    }

    params.format = rc->format;
    params.width = rc->width;
    params.height = rc->height;
    params.size = rc->size;
    /* allocate a single page size object */
    if (params.size == 0)
        params.size = PAGE_SIZE;

    fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
    if (!fence)
        return -ENOMEM;
    ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
    dma_fence_put(&fence->f);
    if (ret < 0)
        return ret;
    obj = &qobj->base.base;

    ret = drm_gem_handle_create(file, obj, &handle);
    if (ret) {
        drm_gem_object_release(obj);
        return ret;
    }
    drm_gem_object_put(obj);

    rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
    rc->bo_handle = handle;
    return 0;
}

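/*
 * Report a BO's size, host resource id and (for blob resources) blob_mem
 * type back to userspace.  bo_handle is the per-file GEM handle, while
 * res_handle is the host-visible virtio resource id.
 */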
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
    struct drm_virtgpu_resource_info *ri = data;
    struct drm_gem_object *gobj = NULL;
    struct virtio_gpu_object *qobj = NULL;

    gobj = drm_gem_object_lookup(file, ri->bo_handle);
    if (gobj == NULL)
        return -ENOENT;

    qobj = gem_to_virtio_gpu_obj(gobj);

    ri->size = qobj->base.base.size;
    ri->res_handle = qobj->hw_res_handle;
    if (qobj->host3d_blob || qobj->guest_blob)
        ri->blob_mem = qobj->blob_mem;

    drm_gem_object_put(gobj);
    return 0;
}

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                           void *data,
                           struct drm_file *file)
{
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
    struct drm_virtgpu_3d_transfer_from_host *args = data;
    struct virtio_gpu_object *bo;
    struct virtio_gpu_object_array *objs;
    struct virtio_gpu_fence *fence;
    int ret;
    u32 offset = args->offset;

    if (!vgdev->has_virgl_3d)
        return -ENOSYS;

    virtio_gpu_create_context(dev, file);
    objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
    if (objs == NULL)
        return -ENOENT;

    bo = gem_to_virtio_gpu_obj(objs->objs[0]);
    if (bo->guest_blob && !bo->host3d_blob) {
        ret = -EINVAL;
        goto err_put_free;
    }

    if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
        ret = -EINVAL;
        goto err_put_free;
    }

    ret = virtio_gpu_array_lock_resv(objs);
    if (ret != 0)
        goto err_put_free;

    fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
    if (!fence) {
        ret = -ENOMEM;
        goto err_unlock;
    }

    virtio_gpu_cmd_transfer_from_host_3d
        (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
         args->layer_stride, &args->box, objs, fence);
    dma_fence_put(&fence->f);
    virtio_gpu_notify(vgdev);
    return 0;

err_unlock:
    virtio_gpu_array_unlock_resv(objs);
err_put_free:
    virtio_gpu_array_put_free(objs);
    return ret;
}

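/*
 * TRANSFER_TO_HOST has two paths: without virgl 3d it is a plain 2D box
 * copy; with 3d support it goes through the per-file context and is
 * fenced.  Guest-only blobs are rejected, and explicit strides are only
 * meaningful for host3d blob resources.
 */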
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
    struct drm_virtgpu_3d_transfer_to_host *args = data;
    struct virtio_gpu_object *bo;
    struct virtio_gpu_object_array *objs;
    struct virtio_gpu_fence *fence;
    int ret;
    u32 offset = args->offset;

    objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
    if (objs == NULL)
        return -ENOENT;

    bo = gem_to_virtio_gpu_obj(objs->objs[0]);
    if (bo->guest_blob && !bo->host3d_blob) {
        ret = -EINVAL;
        goto err_put_free;
    }

    if (!vgdev->has_virgl_3d) {
        virtio_gpu_cmd_transfer_to_host_2d
            (vgdev, offset,
             args->box.w, args->box.h, args->box.x, args->box.y,
             objs, NULL);
    } else {
        virtio_gpu_create_context(dev, file);

        if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
            ret = -EINVAL;
            goto err_put_free;
        }

        ret = virtio_gpu_array_lock_resv(objs);
        if (ret != 0)
            goto err_put_free;

        ret = -ENOMEM;
        fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
                           0);
        if (!fence)
            goto err_unlock;

        virtio_gpu_cmd_transfer_to_host_3d
            (vgdev,
             vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
             args->stride, args->layer_stride, &args->box, objs,
             fence);
        dma_fence_put(&fence->f);
    }
    virtio_gpu_notify(vgdev);
    return 0;

err_unlock:
    virtio_gpu_array_unlock_resv(objs);
err_put_free:
    virtio_gpu_array_put_free(objs);
    return ret;
}

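/*
 * Wait (or, with VIRTGPU_WAIT_NOWAIT, poll) for the fences attached to a
 * BO's reservation object.  Returns -EBUSY while the BO is still busy and
 * 0 once it is idle.
 */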
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                 struct drm_file *file)
{
    struct drm_virtgpu_3d_wait *args = data;
    struct drm_gem_object *obj;
    long timeout = 15 * HZ;
    int ret;

    obj = drm_gem_object_lookup(file, args->handle);
    if (obj == NULL)
        return -ENOENT;

    if (args->flags & VIRTGPU_WAIT_NOWAIT) {
        ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
    } else {
        ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
                        true, timeout);
    }
    if (ret == 0)
        ret = -EBUSY;
    else if (ret > 0)
        ret = 0;

    drm_gem_object_put(obj);
    return ret;
}

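/*
 * A minimal userspace sketch of the GET_CAPS ioctl below (assuming the
 * standard virtgpu_drm.h UAPI; CAPS_BUF_SIZE is a hypothetical caller
 * constant): at most min(args->size, host capset size) bytes are copied
 * to args->addr.
 *
 *   uint8_t caps[CAPS_BUF_SIZE];
 *   struct drm_virtgpu_get_caps gc = {
 *       .cap_set_id = 1, .cap_set_ver = 1,
 *       .addr = (uint64_t)(uintptr_t)caps, .size = sizeof(caps),
 *   };
 *   drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc);
 */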
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                void *data, struct drm_file *file)
{
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct drm_virtgpu_get_caps *args = data;
    unsigned size, host_caps_size;
    int i;
    int found_valid = -1;
    int ret;
    struct virtio_gpu_drv_cap_cache *cache_ent;
    void *ptr;

    if (vgdev->num_capsets == 0)
        return -ENOSYS;

    /* don't allow userspace to pass 0 */
    if (args->size == 0)
        return -EINVAL;

    spin_lock(&vgdev->display_info_lock);
    for (i = 0; i < vgdev->num_capsets; i++) {
        if (vgdev->capsets[i].id == args->cap_set_id) {
            if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                found_valid = i;
                break;
            }
        }
    }

    if (found_valid == -1) {
        spin_unlock(&vgdev->display_info_lock);
        return -EINVAL;
    }

    host_caps_size = vgdev->capsets[found_valid].max_size;
    /* only copy to user the minimum of the host caps size or the guest caps size */
    size = min(args->size, host_caps_size);

    list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
        if (cache_ent->id == args->cap_set_id &&
            cache_ent->version == args->cap_set_ver) {
            spin_unlock(&vgdev->display_info_lock);
            goto copy_exit;
        }
    }
    spin_unlock(&vgdev->display_info_lock);

    /* not in cache - need to talk to hw */
    ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                    &cache_ent);
    if (ret)
        return ret;
    virtio_gpu_notify(vgdev);

copy_exit:
    ret = wait_event_timeout(vgdev->resp_wq,
                 atomic_read(&cache_ent->is_valid), 5 * HZ);
    if (!ret)
        return -EBUSY;

    /* is_valid check must precede the copy of the cache entry. */
    smp_rmb();

    ptr = cache_ent->caps_cache;

    if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
        return -EFAULT;

    return 0;
}

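/*
 * Validate a RESOURCE_CREATE_BLOB request and fill in @params.  Note the
 * fallthrough: VIRTGPU_BLOB_MEM_HOST3D_GUEST resources are both guest-
 * and host3d-backed, so they set both output flags.
 */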
static int verify_blob(struct virtio_gpu_device *vgdev,
               struct virtio_gpu_fpriv *vfpriv,
               struct virtio_gpu_object_params *params,
               struct drm_virtgpu_resource_create_blob *rc_blob,
               bool *guest_blob, bool *host3d_blob)
{
    if (!vgdev->has_resource_blob)
        return -EINVAL;

    if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
        return -EINVAL;

    if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
        if (!vgdev->has_resource_assign_uuid)
            return -EINVAL;
    }

    switch (rc_blob->blob_mem) {
    case VIRTGPU_BLOB_MEM_GUEST:
        *guest_blob = true;
        break;
    case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
        *guest_blob = true;
        fallthrough;
    case VIRTGPU_BLOB_MEM_HOST3D:
        *host3d_blob = true;
        break;
    default:
        return -EINVAL;
    }

    if (*host3d_blob) {
        if (!vgdev->has_virgl_3d)
            return -EINVAL;

        /* Must be dword aligned. */
        if (rc_blob->cmd_size % 4 != 0)
            return -EINVAL;

        params->ctx_id = vfpriv->ctx_id;
        params->blob_id = rc_blob->blob_id;
    } else {
        if (rc_blob->blob_id != 0)
            return -EINVAL;

        if (rc_blob->cmd_size != 0)
            return -EINVAL;
    }

    params->blob_mem = rc_blob->blob_mem;
    params->size = rc_blob->size;
    params->blob = true;
    params->blob_flags = rc_blob->blob_flags;
    return 0;
}

static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
                         void *data,
                         struct drm_file *file)
{
    int ret = 0;
    uint32_t handle = 0;
    bool guest_blob = false;
    bool host3d_blob = false;
    struct drm_gem_object *obj;
    struct virtio_gpu_object *bo;
    struct virtio_gpu_object_params params = { 0 };
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
    struct drm_virtgpu_resource_create_blob *rc_blob = data;

    if (verify_blob(vgdev, vfpriv, &params, rc_blob,
            &guest_blob, &host3d_blob))
        return -EINVAL;

    if (vgdev->has_virgl_3d)
        virtio_gpu_create_context(dev, file);

    if (rc_blob->cmd_size) {
        void *buf;

        buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
                  rc_blob->cmd_size);

        if (IS_ERR(buf))
            return PTR_ERR(buf);

        virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
                      vfpriv->ctx_id, NULL, NULL);
    }

    if (guest_blob)
        ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
    else if (!guest_blob && host3d_blob)
        ret = virtio_gpu_vram_create(vgdev, &params, &bo);
    else
        return -EINVAL;

    if (ret < 0)
        return ret;

    bo->guest_blob = guest_blob;
    bo->host3d_blob = host3d_blob;
    bo->blob_mem = rc_blob->blob_mem;
    bo->blob_flags = rc_blob->blob_flags;

    obj = &bo->base.base;
    if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
        ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
        if (ret) {
            drm_gem_object_release(obj);
            return ret;
        }
    }

    ret = drm_gem_handle_create(file, obj, &handle);
    if (ret) {
        drm_gem_object_release(obj);
        return ret;
    }
    drm_gem_object_put(obj);

    rc_blob->res_handle = bo->hw_res_handle;
    rc_blob->bo_handle = handle;

    return 0;
}

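/*
 * A minimal userspace sketch of the CONTEXT_INIT ioctl below (assuming the
 * standard virtgpu_drm.h UAPI): it must run before the context has been
 * created by any other ioctl, and each parameter can be set at most once.
 *
 *   struct drm_virtgpu_context_set_param sp[] = {
 *       { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, capset_id },
 *       { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 2 },
 *   };
 *   struct drm_virtgpu_context_init ci = {
 *       .num_params = 2,
 *       .ctx_set_params = (uint64_t)(uintptr_t)sp,
 *   };
 *   drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &ci);
 */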
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
                     void *data, struct drm_file *file)
{
    int ret = 0;
    uint32_t num_params, i, param, value;
    uint64_t valid_ring_mask;
    size_t len;
    struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
    struct drm_virtgpu_context_init *args = data;

    num_params = args->num_params;
    len = num_params * sizeof(struct drm_virtgpu_context_set_param);

    if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
        return -EINVAL;

    /* Number of unique parameters supported at this time. */
    if (num_params > 3)
        return -EINVAL;

    ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
                     len);

    if (IS_ERR(ctx_set_params))
        return PTR_ERR(ctx_set_params);

    mutex_lock(&vfpriv->context_lock);
    if (vfpriv->context_created) {
        ret = -EEXIST;
        goto out_unlock;
    }

    for (i = 0; i < num_params; i++) {
        param = ctx_set_params[i].param;
        value = ctx_set_params[i].value;

        switch (param) {
        case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
            if (value > MAX_CAPSET_ID) {
                ret = -EINVAL;
                goto out_unlock;
            }

            if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
                ret = -EINVAL;
                goto out_unlock;
            }

            /* Context capset ID already set */
            if (vfpriv->context_init &
                VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
                ret = -EINVAL;
                goto out_unlock;
            }

            vfpriv->context_init |= value;
            break;
        case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
            if (vfpriv->base_fence_ctx) {
                ret = -EINVAL;
                goto out_unlock;
            }

            if (value > MAX_RINGS) {
                ret = -EINVAL;
                goto out_unlock;
            }

            vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
            vfpriv->num_rings = value;
            break;
        case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
            if (vfpriv->ring_idx_mask) {
                ret = -EINVAL;
                goto out_unlock;
            }

            vfpriv->ring_idx_mask = value;
            break;
        default:
            ret = -EINVAL;
            goto out_unlock;
        }
    }

    if (vfpriv->ring_idx_mask) {
        valid_ring_mask = 0;
        for (i = 0; i < vfpriv->num_rings; i++)
            valid_ring_mask |= 1ULL << i;

        if (~valid_ring_mask & vfpriv->ring_idx_mask) {
            ret = -EINVAL;
            goto out_unlock;
        }
    }

    virtio_gpu_create_context_locked(vgdev, vfpriv);
    virtio_gpu_notify(vgdev);

out_unlock:
    mutex_unlock(&vfpriv->context_lock);
    kfree(ctx_set_params);
    return ret;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
    DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
              virtio_gpu_resource_create_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
              DRM_RENDER_ALLOW),

    /* make transfer async to the main ring? - not sure, can we
     * thread these in the underlying GL
     */
    DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
              virtio_gpu_transfer_from_host_ioctl,
              DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
              virtio_gpu_transfer_to_host_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
              virtio_gpu_resource_create_blob_ioctl,
              DRM_RENDER_ALLOW),

    DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
              DRM_RENDER_ALLOW),
};