0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright 2018 Noralf Trønnes
0004  */
0005 
0006 #include <linux/dma-buf.h>
0007 #include <linux/export.h>
0008 #include <linux/module.h>
0009 #include <linux/mutex.h>
0010 #include <linux/shmem_fs.h>
0011 #include <linux/slab.h>
0012 #include <linux/vmalloc.h>
0014 
0015 #ifdef CONFIG_X86
0016 #include <asm/set_memory.h>
0017 #endif
0018 
0019 #include <drm/drm.h>
0020 #include <drm/drm_device.h>
0021 #include <drm/drm_drv.h>
0022 #include <drm/drm_gem_shmem_helper.h>
0023 #include <drm/drm_prime.h>
0024 #include <drm/drm_print.h>
0025 
0026 MODULE_IMPORT_NS(DMA_BUF);
0027 
0028 /**
0029  * DOC: overview
0030  *
0031  * This library provides helpers for GEM objects backed by shmem buffers
0032  * allocated using anonymous pageable memory.
0033  *
0034  * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
0035  * For GEM callback helpers in struct &drm_gem_object functions, see likewise
0036  * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
0037  * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
0038  */
0039 
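/*
 * Example (editorial sketch, not part of the upstream file): how a driver
 * typically wires these helpers up. DEFINE_DRM_GEM_FOPS() comes from
 * <drm/drm_gem.h> and DRM_GEM_SHMEM_DRIVER_OPS from
 * <drm/drm_gem_shmem_helper.h>; the "example_" names are hypothetical.
 */
DEFINE_DRM_GEM_FOPS(example_fops);

static const struct drm_driver example_driver = {
    .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
    .fops            = &example_fops,
    /* dumb_create and PRIME import hooks backed by this library */
    DRM_GEM_SHMEM_DRIVER_OPS,
    .name            = "example",
    .desc            = "example shmem-backed driver",
};
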
0040 static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
0041     .free = drm_gem_shmem_object_free,
0042     .print_info = drm_gem_shmem_object_print_info,
0043     .pin = drm_gem_shmem_object_pin,
0044     .unpin = drm_gem_shmem_object_unpin,
0045     .get_sg_table = drm_gem_shmem_object_get_sg_table,
0046     .vmap = drm_gem_shmem_object_vmap,
0047     .vunmap = drm_gem_shmem_object_vunmap,
0048     .mmap = drm_gem_shmem_object_mmap,
0049     .vm_ops = &drm_gem_shmem_vm_ops,
0050 };
0051 
0052 static struct drm_gem_shmem_object *
0053 __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
0054 {
0055     struct drm_gem_shmem_object *shmem;
0056     struct drm_gem_object *obj;
0057     int ret = 0;
0058 
0059     size = PAGE_ALIGN(size);
0060 
0061     if (dev->driver->gem_create_object) {
0062         obj = dev->driver->gem_create_object(dev, size);
0063         if (IS_ERR(obj))
0064             return ERR_CAST(obj);
0065         shmem = to_drm_gem_shmem_obj(obj);
0066     } else {
0067         shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
0068         if (!shmem)
0069             return ERR_PTR(-ENOMEM);
0070         obj = &shmem->base;
0071     }
0072 
0073     if (!obj->funcs)
0074         obj->funcs = &drm_gem_shmem_funcs;
0075 
0076     if (private) {
0077         drm_gem_private_object_init(dev, obj, size);
0078         shmem->map_wc = false; /* dma-buf mappings always use writecombine */
0079     } else {
0080         ret = drm_gem_object_init(dev, obj, size);
0081     }
0082     if (ret)
0083         goto err_free;
0084 
0085     ret = drm_gem_create_mmap_offset(obj);
0086     if (ret)
0087         goto err_release;
0088 
0089     mutex_init(&shmem->pages_lock);
0090     mutex_init(&shmem->vmap_lock);
0091     INIT_LIST_HEAD(&shmem->madv_list);
0092 
0093     if (!private) {
0094         /*
0095          * Our buffers are kept pinned, so allocating them
0096          * from the MOVABLE zone is a really bad idea, and
0097          * conflicts with CMA. See the comments above new_inode()
0098          * for why this is required _and_ expected if you're
0099          * going to pin these pages.
0100          */
0101         mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
0102                      __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
0103     }
0104 
0105     return shmem;
0106 
0107 err_release:
0108     drm_gem_object_release(obj);
0109 err_free:
0110     kfree(obj);
0111 
0112     return ERR_PTR(ret);
0113 }
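
/*
 * Example (editorial sketch, not part of the upstream file): a driver that
 * wants to embed the shmem object in its own buffer structure provides
 * &drm_driver.gem_create_object, which __drm_gem_shmem_create() above calls
 * instead of kzalloc(). The "example_" names are hypothetical.
 */
struct example_bo {
    struct drm_gem_shmem_object base;
    u32 flags; /* driver-private state */
};

static struct drm_gem_object *example_gem_create_object(struct drm_device *dev,
                                                        size_t size)
{
    struct example_bo *bo;

    bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    if (!bo)
        return ERR_PTR(-ENOMEM);

    /* Leaving obj->funcs NULL selects the default drm_gem_shmem_funcs. */
    return &bo->base.base;
}
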
0114 /**
0115  * drm_gem_shmem_create - Allocate an object with the given size
0116  * @dev: DRM device
0117  * @size: Size of the object to allocate
0118  *
0119  * This function creates a shmem GEM object.
0120  *
0121  * Returns:
0122  * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
0123  * error code on failure.
0124  */
0125 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
0126 {
0127     return __drm_gem_shmem_create(dev, size, false);
0128 }
0129 EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
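
/*
 * Example (editorial sketch, not part of the upstream file): allocating a
 * 1 MiB shmem-backed object. The caller owns a reference and drops it with
 * drm_gem_object_put() when done; the "example_" name is hypothetical.
 */
static struct drm_gem_shmem_object *example_alloc(struct drm_device *dev)
{
    struct drm_gem_shmem_object *shmem;

    shmem = drm_gem_shmem_create(dev, SZ_1M); /* size is PAGE_ALIGN()ed */
    if (IS_ERR(shmem))
        return shmem;

    /* ... use shmem->base like any other GEM object ... */
    return shmem;
}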
0130 
0131 /**
0132  * drm_gem_shmem_free - Free resources associated with a shmem GEM object
0133  * @shmem: shmem GEM object to free
0134  *
0135  * This function cleans up the GEM object state and frees the memory used to
0136  * store the object itself.
0137  */
0138 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
0139 {
0140     struct drm_gem_object *obj = &shmem->base;
0141 
0142     WARN_ON(shmem->vmap_use_count);
0143 
0144     if (obj->import_attach) {
0145         drm_prime_gem_destroy(obj, shmem->sgt);
0146     } else {
0147         if (shmem->sgt) {
0148             dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
0149                       DMA_BIDIRECTIONAL, 0);
0150             sg_free_table(shmem->sgt);
0151             kfree(shmem->sgt);
0152         }
0153         if (shmem->pages)
0154             drm_gem_shmem_put_pages(shmem);
0155     }
0156 
0157     WARN_ON(shmem->pages_use_count);
0158 
0159     drm_gem_object_release(obj);
0160     mutex_destroy(&shmem->pages_lock);
0161     mutex_destroy(&shmem->vmap_lock);
0162     kfree(shmem);
0163 }
0164 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
0165 
0166 static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
0167 {
0168     struct drm_gem_object *obj = &shmem->base;
0169     struct page **pages;
0170 
0171     if (shmem->pages_use_count++ > 0)
0172         return 0;
0173 
0174     pages = drm_gem_get_pages(obj);
0175     if (IS_ERR(pages)) {
0176         DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
0177         shmem->pages_use_count = 0;
0178         return PTR_ERR(pages);
0179     }
0180 
0181     /*
0182      * TODO: Allocating WC pages which are correctly flushed is only
0183      * supported on x86. Ideal solution would be a GFP_WC flag, which also
0184      * ttm_pool.c could use.
0185      */
0186 #ifdef CONFIG_X86
0187     if (shmem->map_wc)
0188         set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
0189 #endif
0190 
0191     shmem->pages = pages;
0192 
0193     return 0;
0194 }
0195 
0196 /*
0197  * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
0198  * @shmem: shmem GEM object
0199  *
0200  * This function makes sure that backing pages exist for the shmem GEM object
0201  * and increases the use count.
0202  *
0203  * Returns:
0204  * 0 on success or a negative error code on failure.
0205  */
0206 int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
0207 {
0208     int ret;
0209 
0210     WARN_ON(shmem->base.import_attach);
0211 
0212     ret = mutex_lock_interruptible(&shmem->pages_lock);
0213     if (ret)
0214         return ret;
0215     ret = drm_gem_shmem_get_pages_locked(shmem);
0216     mutex_unlock(&shmem->pages_lock);
0217 
0218     return ret;
0219 }
0220 EXPORT_SYMBOL(drm_gem_shmem_get_pages);
0221 
0222 static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
0223 {
0224     struct drm_gem_object *obj = &shmem->base;
0225 
0226     if (WARN_ON_ONCE(!shmem->pages_use_count))
0227         return;
0228 
0229     if (--shmem->pages_use_count > 0)
0230         return;
0231 
0232 #ifdef CONFIG_X86
0233     if (shmem->map_wc)
0234         set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
0235 #endif
0236 
0237     drm_gem_put_pages(obj, shmem->pages,
0238               shmem->pages_mark_dirty_on_put,
0239               shmem->pages_mark_accessed_on_put);
0240     shmem->pages = NULL;
0241 }
0242 
0243 /*
0244  * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
0245  * @shmem: shmem GEM object
0246  *
0247  * This function decreases the use count and puts the backing pages when use drops to zero.
0248  */
0249 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
0250 {
0251     mutex_lock(&shmem->pages_lock);
0252     drm_gem_shmem_put_pages_locked(shmem);
0253     mutex_unlock(&shmem->pages_lock);
0254 }
0255 EXPORT_SYMBOL(drm_gem_shmem_put_pages);
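
/*
 * Example (editorial sketch, not part of the upstream file): the page array
 * is reference counted, so each successful drm_gem_shmem_get_pages() must be
 * balanced by drm_gem_shmem_put_pages(). clear_highpage() would need
 * <linux/highmem.h>; the "example_" name is hypothetical.
 */
static int example_clear_first_page(struct drm_gem_shmem_object *shmem)
{
    int ret;

    ret = drm_gem_shmem_get_pages(shmem);
    if (ret)
        return ret;

    /* shmem->pages[] stays valid until the matching put below. */
    clear_highpage(shmem->pages[0]);

    drm_gem_shmem_put_pages(shmem);
    return 0;
}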
0256 
0257 /**
0258  * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
0259  * @shmem: shmem GEM object
0260  *
0261  * This function makes sure the backing pages are pinned in memory while the
0262  * buffer is exported.
0263  *
0264  * Returns:
0265  * 0 on success or a negative error code on failure.
0266  */
0267 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
0268 {
0269     WARN_ON(shmem->base.import_attach);
0270 
0271     return drm_gem_shmem_get_pages(shmem);
0272 }
0273 EXPORT_SYMBOL(drm_gem_shmem_pin);
0274 
0275 /**
0276  * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
0277  * @shmem: shmem GEM object
0278  *
0279  * This function removes the requirement that the backing pages are pinned in
0280  * memory.
0281  */
0282 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
0283 {
0284     WARN_ON(shmem->base.import_attach);
0285 
0286     drm_gem_shmem_put_pages(shmem);
0287 }
0288 EXPORT_SYMBOL(drm_gem_shmem_unpin);
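
/*
 * Example (editorial sketch, not part of the upstream file): pin/unpin are
 * counted wrappers around the page helpers above, so keeping a buffer
 * resident is just a matter of pairing the calls. Hypothetical helper name.
 */
static int example_make_resident(struct drm_gem_shmem_object *shmem)
{
    int ret;

    ret = drm_gem_shmem_pin(shmem);
    if (ret)
        return ret;

    /* ... the backing pages cannot go away until the unpin below ... */

    drm_gem_shmem_unpin(shmem);
    return 0;
}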
0289 
0290 static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
0291                      struct iosys_map *map)
0292 {
0293     struct drm_gem_object *obj = &shmem->base;
0294     int ret = 0;
0295 
0296     if (shmem->vmap_use_count++ > 0) {
0297         iosys_map_set_vaddr(map, shmem->vaddr);
0298         return 0;
0299     }
0300 
0301     if (obj->import_attach) {
0302         ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
0303         if (!ret) {
0304             if (WARN_ON(map->is_iomem)) {
0305                 dma_buf_vunmap(obj->import_attach->dmabuf, map);
0306                 ret = -EIO;
0307                 goto err_put_pages;
0308             }
0309             shmem->vaddr = map->vaddr;
0310         }
0311     } else {
0312         pgprot_t prot = PAGE_KERNEL;
0313 
0314         ret = drm_gem_shmem_get_pages(shmem);
0315         if (ret)
0316             goto err_zero_use;
0317 
0318         if (shmem->map_wc)
0319             prot = pgprot_writecombine(prot);
0320         shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
0321                     VM_MAP, prot);
0322         if (!shmem->vaddr)
0323             ret = -ENOMEM;
0324         else
0325             iosys_map_set_vaddr(map, shmem->vaddr);
0326     }
0327 
0328     if (ret) {
0329         DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
0330         goto err_put_pages;
0331     }
0332 
0333     return 0;
0334 
0335 err_put_pages:
0336     if (!obj->import_attach)
0337         drm_gem_shmem_put_pages(shmem);
0338 err_zero_use:
0339     shmem->vmap_use_count = 0;
0340 
0341     return ret;
0342 }
0343 
0344 /*
0345  * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
0346  * @shmem: shmem GEM object
0347  * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
0348  *       store.
0349  *
0350  * This function makes sure that a contiguous kernel virtual address mapping
0351  * exists for the buffer backing the shmem GEM object. It hides the differences
0352  * between dma-buf imported and natively allocated objects.
0353  *
0354  * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
0355  *
0356  * Returns:
0357  * 0 on success or a negative error code on failure.
0358  */
0359 int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
0360                struct iosys_map *map)
0361 {
0362     int ret;
0363 
0364     ret = mutex_lock_interruptible(&shmem->vmap_lock);
0365     if (ret)
0366         return ret;
0367     ret = drm_gem_shmem_vmap_locked(shmem, map);
0368     mutex_unlock(&shmem->vmap_lock);
0369 
0370     return ret;
0371 }
0372 EXPORT_SYMBOL(drm_gem_shmem_vmap);
0373 
0374 static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
0375                     struct iosys_map *map)
0376 {
0377     struct drm_gem_object *obj = &shmem->base;
0378 
0379     if (WARN_ON_ONCE(!shmem->vmap_use_count))
0380         return;
0381 
0382     if (--shmem->vmap_use_count > 0)
0383         return;
0384 
0385     if (obj->import_attach) {
0386         dma_buf_vunmap(obj->import_attach->dmabuf, map);
0387     } else {
0388         vunmap(shmem->vaddr);
0389         drm_gem_shmem_put_pages(shmem);
0390     }
0391 
0392     shmem->vaddr = NULL;
0393 }
0394 
0395 /*
0396  * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
0397  * @shmem: shmem GEM object
0398  * @map: Kernel virtual address where the SHMEM GEM object was mapped
0399  *
0400  * This function cleans up a kernel virtual address mapping acquired by
0401  * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
0402  * zero.
0403  *
0404  * This function hides the differences between dma-buf imported and natively
0405  * allocated objects.
0406  */
0407 void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
0408               struct iosys_map *map)
0409 {
0410     mutex_lock(&shmem->vmap_lock);
0411     drm_gem_shmem_vunmap_locked(shmem, map);
0412     mutex_unlock(&shmem->vmap_lock);
0413 }
0414 EXPORT_SYMBOL(drm_gem_shmem_vunmap);
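
/*
 * Example (editorial sketch, not part of the upstream file): filling a
 * buffer through a temporary kernel mapping. struct iosys_map comes from
 * <linux/iosys-map.h>; the "example_" name is hypothetical.
 */
static int example_fill(struct drm_gem_shmem_object *shmem, u8 value)
{
    struct iosys_map map;
    int ret;

    ret = drm_gem_shmem_vmap(shmem, &map);
    if (ret)
        return ret;

    /* vmap above rejects I/O memory, so plain CPU access is fine here. */
    memset(map.vaddr, value, shmem->base.size);

    drm_gem_shmem_vunmap(shmem, &map);
    return 0;
}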
0415 
0416 static struct drm_gem_shmem_object *
0417 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
0418                  struct drm_device *dev, size_t size,
0419                  uint32_t *handle)
0420 {
0421     struct drm_gem_shmem_object *shmem;
0422     int ret;
0423 
0424     shmem = drm_gem_shmem_create(dev, size);
0425     if (IS_ERR(shmem))
0426         return shmem;
0427 
0428     /*
0429      * Allocate an id of idr table where the obj is registered
0430      * and handle has the id what user can see.
0431      */
0432     ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
0433     /* drop reference from allocate - handle holds it now. */
0434     drm_gem_object_put(&shmem->base);
0435     if (ret)
0436         return ERR_PTR(ret);
0437 
0438     return shmem;
0439 }
0440 
0441 /* Update the madvise status; returns true if the object has not been
0442  * purged, false otherwise.
0443  */
0444 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
0445 {
0446     mutex_lock(&shmem->pages_lock);
0447 
0448     if (shmem->madv >= 0)
0449         shmem->madv = madv;
0450 
0451     madv = shmem->madv;
0452 
0453     mutex_unlock(&shmem->pages_lock);
0454 
0455     return (madv >= 0);
0456 }
0457 EXPORT_SYMBOL(drm_gem_shmem_madvise);
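
/*
 * Example (editorial sketch, not part of the upstream file): marking an
 * object purgeable from a driver ioctl, in the spirit of what panfrost does.
 * The return value says whether the backing store is still present; a real
 * driver would also put the object on its shrinker list so that
 * drm_gem_shmem_purge() can reclaim it under memory pressure.
 */
static bool example_mark_purgeable(struct drm_gem_shmem_object *shmem)
{
    return drm_gem_shmem_madvise(shmem, 1);
}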
0458 
0459 void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
0460 {
0461     struct drm_gem_object *obj = &shmem->base;
0462     struct drm_device *dev = obj->dev;
0463 
0464     WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
0465 
0466     dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
0467     sg_free_table(shmem->sgt);
0468     kfree(shmem->sgt);
0469     shmem->sgt = NULL;
0470 
0471     drm_gem_shmem_put_pages_locked(shmem);
0472 
0473     shmem->madv = -1;
0474 
0475     drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
0476     drm_gem_free_mmap_offset(obj);
0477 
0478     /* Our goal here is to return as much of the memory as
0479      * is possible back to the system as we are called from OOM.
0480      * To do this we must instruct the shmfs to drop all of its
0481      * backing pages, *now*.
0482      */
0483     shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
0484 
0485     invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
0486 }
0487 EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
0488 
0489 bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
0490 {
0491     if (!mutex_trylock(&shmem->pages_lock))
0492         return false;
0493     drm_gem_shmem_purge_locked(shmem);
0494     mutex_unlock(&shmem->pages_lock);
0495 
0496     return true;
0497 }
0498 EXPORT_SYMBOL(drm_gem_shmem_purge);
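
/*
 * Example (editorial sketch, not part of the upstream file): the core of a
 * hypothetical shrinker scan callback. drm_gem_shmem_is_purgeable() is a
 * static inline from <drm/drm_gem_shmem_helper.h>; walking and locking the
 * driver's own object list is elided.
 */
static unsigned long example_try_purge(struct drm_gem_shmem_object *shmem)
{
    if (!drm_gem_shmem_is_purgeable(shmem))
        return 0;

    /* Uses a trylock internally, so this can fail under contention. */
    if (!drm_gem_shmem_purge(shmem))
        return 0;

    return shmem->base.size >> PAGE_SHIFT; /* pages freed */
}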
0499 
0500 /**
0501  * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
0502  * @file: DRM file structure to create the dumb buffer for
0503  * @dev: DRM device
0504  * @args: IOCTL data
0505  *
0506  * This function computes the pitch of the dumb buffer and rounds it up to an
0507  * integer number of bytes per pixel. Drivers for hardware that doesn't have
0508  * any additional restrictions on the pitch can directly use this function as
0509  * their &drm_driver.dumb_create callback.
0510  *
0511  * For hardware with additional restrictions, drivers can adjust the fields
0512  * set up by userspace before calling into this function.
0513  *
0514  * Returns:
0515  * 0 on success or a negative error code on failure.
0516  */
0517 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
0518                   struct drm_mode_create_dumb *args)
0519 {
0520     u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
0521     struct drm_gem_shmem_object *shmem;
0522 
0523     if (!args->pitch || !args->size) {
0524         args->pitch = min_pitch;
0525         args->size = PAGE_ALIGN(args->pitch * args->height);
0526     } else {
0527         /* ensure sane minimum values */
0528         if (args->pitch < min_pitch)
0529             args->pitch = min_pitch;
0530         if (args->size < args->pitch * args->height)
0531             args->size = PAGE_ALIGN(args->pitch * args->height);
0532     }
0533 
0534     shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
0535 
0536     return PTR_ERR_OR_ZERO(shmem);
0537 }
0538 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
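
/*
 * Worked example (editorial, not part of the upstream file): for a 1024x768
 * dumb buffer at 32 bpp with pitch and size left at zero, min_pitch is
 * DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes and size becomes
 * PAGE_ALIGN(4096 * 768) = 3145728 bytes, i.e. 768 pages with 4 KiB pages.
 */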
0539 
0540 static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
0541 {
0542     struct vm_area_struct *vma = vmf->vma;
0543     struct drm_gem_object *obj = vma->vm_private_data;
0544     struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
0545     loff_t num_pages = obj->size >> PAGE_SHIFT;
0546     vm_fault_t ret;
0547     struct page *page;
0548     pgoff_t page_offset;
0549 
0550     /* We don't use vmf->pgoff since that has the fake offset */
0551     page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
0552 
0553     mutex_lock(&shmem->pages_lock);
0554 
0555     if (page_offset >= num_pages ||
0556         WARN_ON_ONCE(!shmem->pages) ||
0557         shmem->madv < 0) {
0558         ret = VM_FAULT_SIGBUS;
0559     } else {
0560         page = shmem->pages[page_offset];
0561 
0562         ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
0563     }
0564 
0565     mutex_unlock(&shmem->pages_lock);
0566 
0567     return ret;
0568 }
0569 
0570 static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
0571 {
0572     struct drm_gem_object *obj = vma->vm_private_data;
0573     struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
0574     int ret;
0575 
0576     WARN_ON(shmem->base.import_attach);
0577 
0578     ret = drm_gem_shmem_get_pages(shmem);
0579     WARN_ON_ONCE(ret != 0);
0580 
0581     drm_gem_vm_open(vma);
0582 }
0583 
0584 static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
0585 {
0586     struct drm_gem_object *obj = vma->vm_private_data;
0587     struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
0588 
0589     drm_gem_shmem_put_pages(shmem);
0590     drm_gem_vm_close(vma);
0591 }
0592 
0593 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
0594     .fault = drm_gem_shmem_fault,
0595     .open = drm_gem_shmem_vm_open,
0596     .close = drm_gem_shmem_vm_close,
0597 };
0598 EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
0599 
0600 /**
0601  * drm_gem_shmem_mmap - Memory-map a shmem GEM object
0602  * @shmem: shmem GEM object
0603  * @vma: VMA for the area to be mapped
0604  *
0605  * This function implements an augmented version of the GEM DRM file mmap
0606  * operation for shmem objects.
0607  *
0608  * Returns:
0609  * 0 on success or a negative error code on failure.
0610  */
0611 int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
0612 {
0613     struct drm_gem_object *obj = &shmem->base;
0614     int ret;
0615 
0616     if (obj->import_attach) {
0617         /* Drop the reference drm_gem_mmap_obj() acquired. */
0618         drm_gem_object_put(obj);
0619         vma->vm_private_data = NULL;
0620 
0621         return dma_buf_mmap(obj->dma_buf, vma, 0);
0622     }
0623 
0624     ret = drm_gem_shmem_get_pages(shmem);
0625     if (ret) {
0626         drm_gem_vm_close(vma);
0627         return ret;
0628     }
0629 
0630     vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
0631     vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
0632     if (shmem->map_wc)
0633         vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
0634 
0635     return 0;
0636 }
0637 EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
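
/*
 * Example (editorial sketch, not part of the upstream file): how this helper
 * is normally reached. With fops set up via DEFINE_DRM_GEM_FOPS(), as in the
 * driver sketch near the top of this file, the flow is:
 *
 *   userspace:  offset = DRM_IOCTL_MODE_MAP_DUMB(handle)
 *               ptr    = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                             MAP_SHARED, drm_fd, offset);
 *   kernel:     drm_gem_mmap() resolves the fake offset to the GEM object
 *               and calls the .mmap object callback, i.e.
 *               drm_gem_shmem_object_mmap() -> drm_gem_shmem_mmap().
 */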
0638 
0639 /**
0640  * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
0641  * @shmem: shmem GEM object
0642  * @p: DRM printer
0643  * @indent: Tab indentation level
0644  */
0645 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
0646                   struct drm_printer *p, unsigned int indent)
0647 {
0648     drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
0649     drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
0650     drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
0651 }
0652 EXPORT_SYMBOL(drm_gem_shmem_print_info);
0653 
0654 /**
0655  * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
0656  *                              pages for a shmem GEM object
0657  * @shmem: shmem GEM object
0658  *
0659  * This function exports a scatter/gather table suitable for PRIME usage by
0660  * calling the standard DMA mapping API.
0661  *
0662  * Drivers that need to acquire a scatter/gather table for an object should call
0663  * drm_gem_shmem_get_pages_sgt() instead.
0664  *
0665  * Returns:
0666  * A pointer to the scatter/gather table of pinned pages or NULL on failure.
0667  */
0668 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
0669 {
0670     struct drm_gem_object *obj = &shmem->base;
0671 
0672     WARN_ON(shmem->base.import_attach);
0673 
0674     return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
0675 }
0676 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
0677 
0678 /**
0679  * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
0680  *               scatter/gather table for a shmem GEM object.
0681  * @shmem: shmem GEM object
0682  *
0683  * This function returns a scatter/gather table suitable for driver usage. If
0684  * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
0685  * table created.
0686  *
0687  * This is the main function for drivers to get at backing storage, and it hides
0688  * any difference between dma-buf imported and natively allocated objects.
0689  * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
0690  *
0691  * Returns:
0692  * A pointer to the scatter/gather table of pinned pages or errno on failure.
0693  */
0694 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
0695 {
0696     struct drm_gem_object *obj = &shmem->base;
0697     int ret;
0698     struct sg_table *sgt;
0699 
0700     if (shmem->sgt)
0701         return shmem->sgt;
0702 
0703     WARN_ON(obj->import_attach);
0704 
0705     ret = drm_gem_shmem_get_pages(shmem);
0706     if (ret)
0707         return ERR_PTR(ret);
0708 
0709     sgt = drm_gem_shmem_get_sg_table(shmem);
0710     if (IS_ERR(sgt)) {
0711         ret = PTR_ERR(sgt);
0712         goto err_put_pages;
0713     }
0714     /* Map the pages for use by the h/w. */
0715     ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
0716     if (ret)
0717         goto err_free_sgt;
0718 
0719     shmem->sgt = sgt;
0720 
0721     return sgt;
0722 
0723 err_free_sgt:
0724     sg_free_table(sgt);
0725     kfree(sgt);
0726 err_put_pages:
0727     drm_gem_shmem_put_pages(shmem);
0728     return ERR_PTR(ret);
0729 }
0730 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
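
/*
 * Example (editorial sketch, not part of the upstream file): walking the
 * DMA-mapped entries to program hardware. for_each_sgtable_dma_sg() and
 * sg_dma_address()/sg_dma_len() are the standard scatterlist accessors; the
 * "example_" name is hypothetical.
 */
static int example_program_iommu(struct drm_gem_shmem_object *shmem)
{
    struct sg_table *sgt;
    struct scatterlist *sg;
    unsigned int i;

    sgt = drm_gem_shmem_get_pages_sgt(shmem);
    if (IS_ERR(sgt))
        return PTR_ERR(sgt);

    for_each_sgtable_dma_sg(sgt, sg, i) {
        /* A real driver would write sg_dma_address(sg) and
         * sg_dma_len(sg) into its page tables or DMA engine here.
         */
    }

    return 0;
}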
0731 
0732 /**
0733  * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
0734  *                 another driver's scatter/gather table of pinned pages
0735  * @dev: Device to import into
0736  * @attach: DMA-BUF attachment
0737  * @sgt: Scatter/gather table of pinned pages
0738  *
0739  * This function imports a scatter/gather table exported via DMA-BUF by
0740  * another driver. Drivers that use the shmem helpers should set this as their
0741  * &drm_driver.gem_prime_import_sg_table callback.
0742  *
0743  * Returns:
0744  * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
0745  * error code on failure.
0746  */
0747 struct drm_gem_object *
0748 drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
0749                     struct dma_buf_attachment *attach,
0750                     struct sg_table *sgt)
0751 {
0752     size_t size = PAGE_ALIGN(attach->dmabuf->size);
0753     struct drm_gem_shmem_object *shmem;
0754 
0755     shmem = __drm_gem_shmem_create(dev, size, true);
0756     if (IS_ERR(shmem))
0757         return ERR_CAST(shmem);
0758 
0759     shmem->sgt = sgt;
0760 
0761     DRM_DEBUG_PRIME("size = %zu\n", size);
0762 
0763     return &shmem->base;
0764 }
0765 EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
0766 
0767 MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
0769 MODULE_LICENSE("GPL v2");