#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

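/*
 * virtio_gpu_resource_id_get - allocate a host resource ID
 *
 * Resource IDs handed to the host are 1-based.  With the virglrenderer
 * workaround enabled, IDs come from a monotonic counter and are never
 * reused; otherwise they are managed by the per-device IDA.
 */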
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs: old versions of
		 * virglrenderer cannot cope with a resource ID being
		 * recycled, so hand out IDs from a monotonically
		 * increasing counter and never release them.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

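/*
 * virtio_gpu_resource_id_put - release a host resource ID
 *
 * IDs allocated while the virglrenderer workaround is active are never
 * recycled, so only IDA-managed IDs are returned to the pool.
 */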
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}

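/*
 * virtio_gpu_cleanup_object - free the guest-side state of a resource
 *
 * Releases the resource ID and, depending on the object type, unmaps and
 * frees the shmem scatter/gather table or removes the host-visible VRAM
 * node, then releases the GEM object itself.
 */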
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base);
		}

		drm_gem_shmem_free(&bo->base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);

		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}

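/*
 * virtio_gpu_free_object - GEM .free callback
 *
 * If the resource was created on the host, queue an unref command and
 * defer the guest-side cleanup until the host has dropped its reference;
 * otherwise the object can be cleaned up immediately.
 */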
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);

		return;
	}
	virtio_gpu_cleanup_object(bo);
}

static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

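/*
 * virtio_gpu_create_object - allocate the GEM object wrapper
 *
 * Only allocates the shmem-backed GEM object and hooks up the virtio-gpu
 * object funcs; the host resource itself is created later through
 * virtio_gpu_object_create().
 */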
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return ERR_PTR(-ENOMEM);

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

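/*
 * virtio_gpu_object_shmem_init - pin pages and build the mem entry list
 *
 * Pins the shmem pages, builds a scatter/gather table for them, maps it
 * for the device when the transport uses the DMA API, and converts the
 * result into the virtio_gpu_mem_entry array that is handed to the host.
 */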
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * Build a scatter/gather table for the pinned pages; it is only
	 * mapped for the device below when the virtio transport actually
	 * uses the DMA API.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
	if (IS_ERR(shmem->pages)) {
		drm_gem_shmem_unpin(&bo->base);
		return PTR_ERR(shmem->pages);
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

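/*
 * virtio_gpu_object_create - create a resource on guest and host side
 *
 * Allocates the shmem GEM object and a resource ID, then issues the
 * matching resource-create command (dumb, 3D or blob) and attaches the
 * backing pages.  When a fence is supplied, the object is added to an
 * object array and its reservation is locked across the submission.
 */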
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_array_put_free(objs);
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free(shmem_obj);
	return ret;
}