// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

MODULE_IMPORT_NS(DMA_BUF);

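/*
 * Fault handler for mmap'ed objects: translate the faulting user
 * address into a page frame within the object's physical backing and
 * insert it into the process page tables.
 */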
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
    struct drm_gem_object *gobj = vmf->vma->vm_private_data;
    struct armada_gem_object *obj = drm_to_armada_gem(gobj);
    unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

    pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
    return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
    .fault  = armada_gem_vm_fault,
    .open   = drm_gem_vm_open,
    .close  = drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
    return roundup(size, PAGE_SIZE);
}

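/*
 * Free a GEM object: release its mmap offset, then its backing store
 * (page allocation, linear region or imported dma-buf), and finally
 * the object itself.
 */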
void armada_gem_free_object(struct drm_gem_object *obj)
{
    struct armada_gem_object *dobj = drm_to_armada_gem(obj);
    struct armada_private *priv = drm_to_armada_dev(obj->dev);

    DRM_DEBUG_DRIVER("release obj %p\n", dobj);

    drm_gem_free_mmap_offset(&dobj->obj);

    might_lock(&priv->linear_lock);

    if (dobj->page) {
        /* page backed memory */
        unsigned int order = get_order(dobj->obj.size);
        __free_pages(dobj->page, order);
    } else if (dobj->linear) {
        /* linear backed memory */
        mutex_lock(&priv->linear_lock);
        drm_mm_remove_node(dobj->linear);
        mutex_unlock(&priv->linear_lock);
        kfree(dobj->linear);
        if (dobj->addr)
            iounmap(dobj->addr);
    }

    if (dobj->obj.import_attach) {
        /* We only ever display imported data */
        if (dobj->sgt)
            dma_buf_unmap_attachment(dobj->obj.import_attach,
                         dobj->sgt, DMA_TO_DEVICE);
        drm_prime_gem_destroy(&dobj->obj, NULL);
    }

    drm_gem_object_release(&dobj->obj);

    kfree(dobj);
}

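/*
 * Provide physically contiguous backing for an object: small objects
 * (such as cursors) come from the page allocator, everything else
 * from the driver's linear memory pool.
 */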
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
    struct armada_private *priv = drm_to_armada_dev(dev);
    size_t size = obj->obj.size;

    if (obj->page || obj->linear)
        return 0;

    /*
     * If it is a small allocation (typically cursor, which will
     * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
     * Framebuffers will never be this small (our minimum size for
     * framebuffers is larger than this anyway.)  Such objects are
     * only accessed by the CPU so we don't need any special handling
     * here.
     */
    if (size <= 8192) {
        unsigned int order = get_order(size);
        struct page *p = alloc_pages(GFP_KERNEL, order);

        if (p) {
            obj->addr = page_address(p);
            obj->phys_addr = page_to_phys(p);
            obj->page = p;

            memset(obj->addr, 0, PAGE_ALIGN(size));
        }
    }

    /*
     * We could grab something from CMA if it's enabled, but that
     * would build in a problem:
     *
     * CMA's interface uses dma_alloc_coherent(), which provides us
     * with a CPU virtual address and a device address.
     *
     * The CPU virtual address may be either an address in the kernel
     * direct mapped region (for example, as it would be on x86) or
     * it may be remapped into another part of kernel memory space
     * (eg, as it would be on ARM.)  This means that, depending on the
     * architecture, virt_to_phys() on the returned virtual address is
     * not valid.
     *
     * The device address may also not be a physical address; there
     * may be some kind of remapping between the device and system
     * RAM, which makes the device address unsafe to re-use as a
     * physical address as well.
     *
     * This makes DRM usage of dma_alloc_coherent() in a generic way
     * at best very questionable and unsafe.
     */

    /* Otherwise, grab it from our linear allocation */
    if (!obj->page) {
        struct drm_mm_node *node;
        unsigned align = min_t(unsigned, size, SZ_2M);
        void __iomem *ptr;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
            return -ENOSPC;

        mutex_lock(&priv->linear_lock);
        ret = drm_mm_insert_node_generic(&priv->linear, node,
                         size, align, 0, 0);
        mutex_unlock(&priv->linear_lock);
        if (ret) {
            kfree(node);
            return ret;
        }

        obj->linear = node;

        /* Ensure that the memory we're returning is cleared. */
        ptr = ioremap_wc(obj->linear->start, size);
        if (!ptr) {
            mutex_lock(&priv->linear_lock);
            drm_mm_remove_node(obj->linear);
            mutex_unlock(&priv->linear_lock);
            kfree(obj->linear);
            obj->linear = NULL;
            return -ENOMEM;
        }

        memset_io(ptr, 0, size);
        iounmap(ptr);

        obj->phys_addr = obj->linear->start;
        obj->dev_addr = obj->linear->start;
        obj->mapped = true;
    }

    DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
             (unsigned long long)obj->phys_addr,
             (unsigned long long)obj->dev_addr);

    return 0;
}

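/* Return a kernel mapping of the object, ioremapping linear objects on demand. */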
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
    /* only linear objects need to be ioremap'd */
    if (!dobj->addr && dobj->linear)
        dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
    return dobj->addr;
}

static const struct drm_gem_object_funcs armada_gem_object_funcs = {
    .free = armada_gem_free_object,
    .export = armada_gem_prime_export,
    .vm_ops = &armada_gem_vm_ops,
};

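/*
 * Allocate a GEM object without shmem backing; backing store is
 * provided later, either from linear memory or an imported dma-buf.
 */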
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
    struct armada_gem_object *obj;

    size = roundup_gem_size(size);

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
        return NULL;

    obj->obj.funcs = &armada_gem_object_funcs;

    drm_gem_private_object_init(dev, &obj->obj, size);

    DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

    return obj;
}

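/* Allocate a shmem-backed GEM object, rounded up to a whole number of pages. */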
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
    size_t size)
{
    struct armada_gem_object *obj;
    struct address_space *mapping;

    size = roundup_gem_size(size);

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
        return NULL;

    obj->obj.funcs = &armada_gem_object_funcs;

    if (drm_gem_object_init(dev, &obj->obj, size)) {
        kfree(obj);
        return NULL;
    }

    mapping = obj->obj.filp->f_mapping;
    mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

    DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

    return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
    struct drm_mode_create_dumb *args)
{
    struct armada_gem_object *dobj;
    u32 handle;
    size_t size;
    int ret;

    args->pitch = armada_pitch(args->width, args->bpp);
    args->size = size = args->pitch * args->height;

    dobj = armada_gem_alloc_private_object(dev, size);
    if (dobj == NULL)
        return -ENOMEM;

    ret = armada_gem_linear_back(dev, dobj);
    if (ret)
        goto err;

    ret = drm_gem_handle_create(file, &dobj->obj, &handle);
    if (ret)
        goto err;

    args->handle = handle;

    /* drop reference from allocate - handle holds it now */
    DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
    drm_gem_object_put(&dobj->obj);
    return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file)
{
    struct drm_armada_gem_create *args = data;
    struct armada_gem_object *dobj;
    size_t size;
    u32 handle;
    int ret;

    if (args->size == 0)
        return -ENOMEM;

    size = args->size;

    dobj = armada_gem_alloc_object(dev, size);
    if (dobj == NULL)
        return -ENOMEM;

    ret = drm_gem_handle_create(file, &dobj->obj, &handle);
    if (ret)
        goto err;

    args->handle = handle;

    /* drop reference from allocate - handle holds it now */
    DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
    drm_gem_object_put(&dobj->obj);
    return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file)
{
    struct drm_armada_gem_mmap *args = data;
    struct armada_gem_object *dobj;
    unsigned long addr;

    dobj = armada_gem_object_lookup(file, args->handle);
    if (dobj == NULL)
        return -ENOENT;

    if (!dobj->obj.filp) {
        drm_gem_object_put(&dobj->obj);
        return -EINVAL;
    }

    addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
               MAP_SHARED, args->offset);
    drm_gem_object_put(&dobj->obj);
    if (IS_ERR_VALUE(addr))
        return addr;

    args->addr = addr;

    return 0;
}

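/* Copy data from userspace into a kernel-mapped object. */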
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file)
{
    struct drm_armada_gem_pwrite *args = data;
    struct armada_gem_object *dobj;
    char __user *ptr;
    int ret = 0;

    DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
        args->handle, args->offset, args->size, args->ptr);

    if (args->size == 0)
        return 0;

    ptr = (char __user *)(uintptr_t)args->ptr;

    if (!access_ok(ptr, args->size))
        return -EFAULT;

    if (fault_in_readable(ptr, args->size))
        return -EFAULT;

    dobj = armada_gem_object_lookup(file, args->handle);
    if (dobj == NULL)
        return -ENOENT;

    /* Must be a kernel-mapped object */
    if (!dobj->addr)
        return -EINVAL;

    if (args->offset > dobj->obj.size ||
        args->size > dobj->obj.size - args->offset) {
        DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
        ret = -EINVAL;
        goto unref;
    }

    if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
        ret = -EFAULT;
    } else if (dobj->update) {
        dobj->update(dobj->update_data);
        ret = 0;
    }

 unref:
    drm_gem_object_put(&dobj->obj);
    return ret;
}

/* Prime support */
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
    enum dma_data_direction dir)
{
    struct drm_gem_object *obj = attach->dmabuf->priv;
    struct armada_gem_object *dobj = drm_to_armada_gem(obj);
    struct scatterlist *sg;
    struct sg_table *sgt;
    int i;

    sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
    if (!sgt)
        return NULL;

    if (dobj->obj.filp) {
        struct address_space *mapping;
        int count;

        count = dobj->obj.size / PAGE_SIZE;
        if (sg_alloc_table(sgt, count, GFP_KERNEL))
            goto free_sgt;

        mapping = dobj->obj.filp->f_mapping;

        for_each_sgtable_sg(sgt, sg, i) {
            struct page *page;

            page = shmem_read_mapping_page(mapping, i);
            if (IS_ERR(page))
                goto release;

            sg_set_page(sg, page, PAGE_SIZE, 0);
        }

        if (dma_map_sgtable(attach->dev, sgt, dir, 0))
            goto release;
    } else if (dobj->page) {
        /* Single contiguous page */
        if (sg_alloc_table(sgt, 1, GFP_KERNEL))
            goto free_sgt;

        sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

        if (dma_map_sgtable(attach->dev, sgt, dir, 0))
            goto free_table;
    } else if (dobj->linear) {
        /* Single contiguous physical region - no struct page */
        if (sg_alloc_table(sgt, 1, GFP_KERNEL))
            goto free_sgt;
        sg_dma_address(sgt->sgl) = dobj->dev_addr;
        sg_dma_len(sgt->sgl) = dobj->obj.size;
    } else {
        goto free_sgt;
    }
    return sgt;

 release:
    for_each_sgtable_sg(sgt, sg, i)
        if (sg_page(sg))
            put_page(sg_page(sg));
 free_table:
    sg_free_table(sgt);
 free_sgt:
    kfree(sgt);
    return NULL;
}

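/*
 * Undo armada_gem_prime_map_dma_buf(): unmap the DMA mapping and, for
 * shmem-backed objects, drop the page references taken at map time.
 */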
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
    struct sg_table *sgt, enum dma_data_direction dir)
{
    struct drm_gem_object *obj = attach->dmabuf->priv;
    struct armada_gem_object *dobj = drm_to_armada_gem(obj);
    int i;

    if (!dobj->linear)
        dma_unmap_sgtable(attach->dev, sgt, dir, 0);

    if (dobj->obj.filp) {
        struct scatterlist *sg;

        for_each_sgtable_sg(sgt, sg, i)
            put_page(sg_page(sg));
    }

    sg_free_table(sgt);
    kfree(sgt);
}

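/* mmap of the exported dma-buf by importers is not supported. */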
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
    return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
    .map_dma_buf    = armada_gem_prime_map_dma_buf,
    .unmap_dma_buf  = armada_gem_prime_unmap_dma_buf,
    .release    = drm_gem_dmabuf_release,
    .mmap       = armada_gem_dmabuf_mmap,
};

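/* Export a GEM object as a dma-buf using the driver's own dma_buf_ops. */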
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

    exp_info.ops = &armada_gem_prime_dmabuf_ops;
    exp_info.size = obj->size;
    exp_info.flags = O_RDWR;
    exp_info.priv = obj;

    return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

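/*
 * Import a dma-buf: if the buffer is one of our own exports, just take
 * another reference on the underlying GEM object; otherwise attach to
 * the buffer without mapping it yet.
 */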
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
    struct dma_buf_attachment *attach;
    struct armada_gem_object *dobj;

    if (buf->ops == &armada_gem_prime_dmabuf_ops) {
        struct drm_gem_object *obj = buf->priv;
        if (obj->dev == dev) {
            /*
             * Importing our own dmabuf(s) increases the
             * refcount on the gem object itself.
             */
            drm_gem_object_get(obj);
            return obj;
        }
    }

    attach = dma_buf_attach(buf, dev->dev);
    if (IS_ERR(attach))
        return ERR_CAST(attach);

    dobj = armada_gem_alloc_private_object(dev, buf->size);
    if (!dobj) {
        dma_buf_detach(buf, attach);
        return ERR_PTR(-ENOMEM);
    }

    dobj->obj.import_attach = attach;
    get_dma_buf(buf);

    /*
     * Don't call dma_buf_map_attachment() here - it maps the
     * scatterlist immediately for DMA, and this is not always
     * an appropriate thing to do.
     */
    return &dobj->obj;
}

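/*
 * Map an imported dma-buf for DMA.  The object must map to a single
 * contiguous region large enough for the whole object; scattered or
 * undersized mappings are rejected.
 */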
int armada_gem_map_import(struct armada_gem_object *dobj)
{
    int ret;

    dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
                       DMA_TO_DEVICE);
    if (IS_ERR(dobj->sgt)) {
        ret = PTR_ERR(dobj->sgt);
        dobj->sgt = NULL;
        DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
        return ret;
    }
    if (dobj->sgt->nents > 1) {
        DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
        return -EINVAL;
    }
    if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
        DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
        return -EINVAL;
    }
    dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
    dobj->mapped = true;
    return 0;
}