#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

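/*
 * Create a virtio-gpu object for @params and a userspace handle for it.
 * On success the handle holds the only reference to the object; the
 * allocation reference is dropped before returning.
 */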
static int virtio_gpu_gem_create(struct drm_file *file,
				 struct drm_device *dev,
				 struct virtio_gpu_object_params *params,
				 struct drm_gem_object **obj_p,
				 uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&obj->base.base);

	*handle_p = handle;
	return 0;
}
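/*
 * DRM_IOCTL_MODE_CREATE_DUMB implementation.  Only 32 bpp is supported;
 * the buffer is created as a shareable guest-memory blob when the host
 * offers blob resources but no virgl 3D support.
 */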
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;

	if (vgdev->has_resource_blob && !vgdev->has_virgl_3d) {
		params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
		params.blob = true;
	}

	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}
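/*
 * Look up the object behind @handle and return the fake mmap offset of
 * its vma node, which userspace then passes to mmap() on the DRM fd.
 */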
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put(gobj);
	return 0;
}
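/*
 * GEM open hook, called whenever a handle to @obj is created for @file.
 * With virgl 3D the resource is attached to the file's host context so
 * that host-side commands may reference it.
 */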
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		goto out_notify;

	/* the context might still be missing when the first ioctl is
	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
	 */
	virtio_gpu_create_context(obj->dev, file);

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
out_notify:
	virtio_gpu_notify(vgdev);
	return 0;
}
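/* GEM close hook: detach the resource from the file's host context. */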
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	virtio_gpu_notify(vgdev);
}
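/*
 * Object arrays keep references to the GEM objects a virtio command
 * touches until the host is done with them.  A rough usage sketch:
 *
 *	objs = virtio_gpu_array_from_handles(file, handles, nents);
 *	virtio_gpu_array_lock_resv(objs);
 *	... queue the command and its fence ...
 *	virtio_gpu_array_unlock_resv(objs);
 *	... once the host completes the command ...
 *	virtio_gpu_array_put_free(objs);
 */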
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;

	objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}
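/*
 * Build an object array from userspace handles; returns NULL (after
 * dropping any references already taken) if a lookup fails.
 */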
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}
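/*
 * Lock the reservation objects of all buffers in the array and reserve
 * a fence slot on each.  A single object is locked directly; multiple
 * objects go through the ww-mutex acquire ticket so concurrent lockers
 * cannot deadlock against each other.
 */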
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	unsigned int i;
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	if (ret)
		return ret;

	for (i = 0; i < objs->nents; ++i) {
		ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
		if (ret) {
			/* don't leak the reservations we already hold */
			virtio_gpu_array_unlock_resv(objs);
			return ret;
		}
	}
	return ret;
}
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}
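/* Attach @fence to every buffer in the array as a write fence. */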
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_fence(objs->objs[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
}
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	if (!objs)
		return;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put(objs->objs[i]);
	virtio_gpu_array_free(objs);
}
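/*
 * Queue the array for release by obj_free_work, so that the final
 * drm_gem_object_put() calls run in process context instead of in the
 * caller's (possibly non-sleepable) context.
 */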
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}
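/* Worker releasing arrays queued by virtio_gpu_array_put_free_delayed(). */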
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		/* drop the lock: releasing the objects may sleep */
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}