// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

#include <linux/dma-mapping.h>
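
/*
 * Free callback for VRAM blob objects.  If the resource exists on the
 * host, tear down any host-visible mapping and ask the host to unref it;
 * final object cleanup is deferred until the host acknowledges the unref.
 */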
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
    struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
    struct virtio_gpu_device *vgdev = obj->dev->dev_private;
    struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
    bool unmap;

    if (bo->created) {
        spin_lock(&vgdev->host_visible_lock);
        unmap = drm_mm_node_allocated(&vram->vram_node);
        spin_unlock(&vgdev->host_visible_lock);

        if (unmap)
            virtio_gpu_cmd_unmap(vgdev, bo);

        virtio_gpu_cmd_unref_resource(vgdev, bo);
        virtio_gpu_notify(vgdev);
        return;
    }
}
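
/*
 * No fault handler is needed: virtio_gpu_vram_mmap() populates the whole
 * range up front with io_remap_pfn_range().  open/close only keep the GEM
 * object refcount in sync with VMA duplication and teardown.
 */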
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
    .open = drm_gem_vm_open,
    .close = drm_gem_vm_close,
};
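
/*
 * Map a VRAM blob into userspace.  Only blobs created with
 * VIRTGPU_BLOB_FLAG_USE_MAPPABLE are mappable, and only after the host
 * has acknowledged the map request issued by virtio_gpu_vram_map().
 */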
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
    int ret;
    struct virtio_gpu_device *vgdev = obj->dev->dev_private;
    struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
    struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
    unsigned long vm_size = vma->vm_end - vma->vm_start;

    if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
        return -EINVAL;

    /* Wait for the host to answer the map request sent at creation time. */
    wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
    if (vram->map_state != STATE_OK)
        return -EINVAL;

    vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
    vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
    vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
    vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
    vma->vm_ops = &virtio_gpu_vram_vm_ops;

    /* Honour the caching attributes reported by the host. */
    if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    /* Partial mappings of GEM buffers don't happen much in practice. */
    if (vm_size != vram->vram_node.size)
        return -EINVAL;

    ret = io_remap_pfn_range(vma, vma->vm_start,
                 vram->vram_node.start >> PAGE_SHIFT,
                 vm_size, vma->vm_page_prot);
    return ret;
}
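
/*
 * dma-buf map: describe the blob's slice of the host-visible region with a
 * single-entry sg_table.  Non-mappable blobs can only be shared with other
 * virtio devices, which reach the resource through its UUID, so they get
 * an empty stub table.
 */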
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
                         struct device *dev,
                         enum dma_data_direction dir)
{
    struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
    struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
    struct sg_table *sgt;
    dma_addr_t addr;
    int ret;

    sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
    if (!sgt)
        return ERR_PTR(-ENOMEM);

    if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
        /*
         * Virtio devices can access the dma-buf via its UUID. Return a
         * stub sg_table so the dma-buf API still works.
         */
        if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
            ret = -EIO;
            goto out;
        }
        return sgt;
    }

    ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
    if (ret)
        goto out;

    addr = dma_map_resource(dev, vram->vram_node.start,
                vram->vram_node.size, dir,
                DMA_ATTR_SKIP_CPU_SYNC);
    ret = dma_mapping_error(dev, addr);
    if (ret)
        goto out;

    sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
    sg_dma_address(sgt->sgl) = addr;
    sg_dma_len(sgt->sgl) = vram->vram_node.size;

    return sgt;
out:
    sg_free_table(sgt);
    kfree(sgt);
    return ERR_PTR(ret);
}
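
/*
 * Undo virtio_gpu_vram_map_dma_buf().  The stub tables handed out for
 * non-mappable blobs have no entries, so there is nothing to unmap for
 * them.
 */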
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
                   struct sg_table *sgt,
                   enum dma_data_direction dir)
{
    if (sgt->nents) {
        dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
                   sg_dma_len(sgt->sgl), dir,
                   DMA_ATTR_SKIP_CPU_SYNC);
    }
    sg_free_table(sgt);
    kfree(sgt);
}
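
/* GEM object vtable shared by all VRAM blob objects. */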
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
    .open = virtio_gpu_gem_object_open,
    .close = virtio_gpu_gem_object_close,
    .free = virtio_gpu_vram_free,
    .mmap = virtio_gpu_vram_mmap,
    .export = virtgpu_gem_prime_export,
};
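
/* VRAM objects are identified by their use of the vtable above. */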
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
    return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}
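
/*
 * Reserve a slice of the host-visible region for @bo and ask the host to
 * map the blob into it.  The offset sent to the host is relative to the
 * start of the host-visible region.
 */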
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
    int ret;
    uint64_t offset;
    struct virtio_gpu_object_array *objs;
    struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
    struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

    if (!vgdev->has_host_visible)
        return -EINVAL;

    spin_lock(&vgdev->host_visible_lock);
    ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
                 bo->base.base.size);
    spin_unlock(&vgdev->host_visible_lock);

    if (ret)
        return ret;

    objs = virtio_gpu_array_alloc(1);
    if (!objs) {
        ret = -ENOMEM;
        goto err_remove_node;
    }

    virtio_gpu_array_add_obj(objs, &bo->base.base);
    /* TODO: Add an error checking helper function in drm_mm.h */
    offset = vram->vram_node.start - vgdev->host_visible_region.addr;

    ret = virtio_gpu_cmd_map(vgdev, objs, offset);
    if (ret) {
        virtio_gpu_array_put_free(objs);
        goto err_remove_node;
    }

    return 0;

err_remove_node:
    spin_lock(&vgdev->host_visible_lock);
    drm_mm_remove_node(&vram->vram_node);
    spin_unlock(&vgdev->host_visible_lock);
    return ret;
}
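
/*
 * Create a VRAM blob object: set up a private GEM object (there is no
 * guest shmem backing), reserve a resource id and a fake mmap offset,
 * issue the blob creation command, and map mappable blobs into the
 * host-visible region right away.
 */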
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
               struct virtio_gpu_object_params *params,
               struct virtio_gpu_object **bo_ptr)
{
    struct drm_gem_object *obj;
    struct virtio_gpu_object_vram *vram;
    int ret;

    vram = kzalloc(sizeof(*vram), GFP_KERNEL);
    if (!vram)
        return -ENOMEM;

    obj = &vram->base.base.base;
    obj->funcs = &virtio_gpu_vram_funcs;

    params->size = PAGE_ALIGN(params->size);
    drm_gem_private_object_init(vgdev->ddev, obj, params->size);

    /* Create fake offset */
    ret = drm_gem_create_mmap_offset(obj);
    if (ret) {
        kfree(vram);
        return ret;
    }

    ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
    if (ret) {
        kfree(vram);
        return ret;
    }

    virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
                        0);
    if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
        ret = virtio_gpu_vram_map(&vram->base);
        if (ret) {
            virtio_gpu_vram_free(obj);
            return ret;
        }
    }

    *bo_ptr = &vram->base;
    return 0;
}