/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
    drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
    struct drm_vma_offset_manager *vma_offset_manager;

    mutex_init(&dev->object_name_lock);
    idr_init_base(&dev->object_name_idr, 1);

    vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                      GFP_KERNEL);
    if (!vma_offset_manager) {
        DRM_ERROR("out of memory\n");
        return -ENOMEM;
    }

    dev->vma_offset_manager = vma_offset_manager;
    drm_vma_offset_manager_init(vma_offset_manager,
                    DRM_FILE_PAGE_OFFSET_START,
                    DRM_FILE_PAGE_OFFSET_SIZE);

    return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
            struct drm_gem_object *obj, size_t size)
{
    struct file *filp;

    drm_gem_private_object_init(dev, obj, size);

    filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
    if (IS_ERR(filp))
        return PTR_ERR(filp);

    obj->filp = filp;

    return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
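
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * drivers typically embed struct drm_gem_object in their own buffer type
 * and call drm_gem_object_init() from an allocation helper. struct foo_bo
 * and foo_bo_create() are hypothetical names.
 */
struct foo_bo {
    struct drm_gem_object base;
    /* driver-private state (page array, mappings, ...) would follow */
};

static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
{
    struct foo_bo *bo;
    int ret;

    bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    if (!bo)
        return ERR_PTR(-ENOMEM);

    /* bo->base.funcs would be set to the driver's funcs table here */

    /* drm_gem_private_object_init() BUG()s on non-page-aligned sizes */
    ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
    if (ret) {
        kfree(bo);
        return ERR_PTR(ret);
    }

    return bo;
}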

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                 struct drm_gem_object *obj, size_t size)
{
    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj->dev = dev;
    obj->filp = NULL;

    kref_init(&obj->refcount);
    obj->handle_count = 0;
    obj->size = size;
    dma_resv_init(&obj->_resv);
    if (!obj->resv)
        obj->resv = &obj->_resv;

    drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;

    /* Remove any name for this object */
    if (obj->name) {
        idr_remove(&dev->object_name_idr, obj->name);
        obj->name = 0;
    }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
    /* Unbreak the reference cycle if we have an exported dma_buf. */
    if (obj->dma_buf) {
        dma_buf_put(obj->dma_buf);
        obj->dma_buf = NULL;
    }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    bool final = false;

    if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
        return;

    /*
     * Must bump handle count first as this may be the last
     * ref, in which case the object would disappear before we
     * checked for a name.
     */

    mutex_lock(&dev->object_name_lock);
    if (--obj->handle_count == 0) {
        drm_gem_object_handle_free(obj);
        drm_gem_object_exported_dma_buf_free(obj);
        final = true;
    }
    mutex_unlock(&dev->object_name_lock);

    if (final)
        drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
    struct drm_file *file_priv = data;
    struct drm_gem_object *obj = ptr;

    if (obj->funcs->close)
        obj->funcs->close(obj, file_priv);

    drm_prime_remove_buf_handle(&file_priv->prime, id);
    drm_vma_node_revoke(&obj->vma_node, file_priv);

    drm_gem_object_handle_put_unlocked(obj);

    return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
    struct drm_gem_object *obj;

    spin_lock(&filp->table_lock);

    /* Check if we currently have a reference on the object */
    obj = idr_replace(&filp->object_idr, NULL, handle);
    spin_unlock(&filp->table_lock);
    if (IS_ERR_OR_NULL(obj))
        return -EINVAL;

    /* Release driver's reference and decrement refcount. */
    drm_gem_object_release_handle(handle, obj, filp);

    /* And finally make the handle available for future allocations. */
    spin_lock(&filp->table_lock);
    idr_remove(&filp->object_idr, handle);
    spin_unlock(&filp->table_lock);

    return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                u32 handle, u64 *offset)
{
    struct drm_gem_object *obj;
    int ret;

    obj = drm_gem_object_lookup(file, handle);
    if (!obj)
        return -ENOENT;

    /* Don't allow imported objects to be mapped */
    if (obj->import_attach) {
        ret = -EINVAL;
        goto out;
    }

    ret = drm_gem_create_mmap_offset(obj);
    if (ret)
        goto out;

    *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
    drm_gem_object_put(obj);

    return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
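
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * drivers that manage their dumb buffers with GEM can plug this helper
 * directly into their &drm_driver. foo_dumb_create is a hypothetical
 * driver callback (one is sketched after drm_gem_handle_create() below).
 */
static const struct drm_driver foo_drm_driver = {
    .driver_features = DRIVER_GEM | DRIVER_MODESET,
    .dumb_create     = foo_dumb_create,
    .dumb_map_offset = drm_gem_dumb_map_offset,
    /* ... */
};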

int drm_gem_dumb_destroy(struct drm_file *file,
             struct drm_device *dev,
             u32 handle)
{
    return drm_gem_handle_delete(file, handle);
}

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
               struct drm_gem_object *obj,
               u32 *handlep)
{
    struct drm_device *dev = obj->dev;
    u32 handle;
    int ret;

    WARN_ON(!mutex_is_locked(&dev->object_name_lock));
    if (obj->handle_count++ == 0)
        drm_gem_object_get(obj);

    /*
     * Get the user-visible handle using idr.  Preload and perform
     * allocation under our spinlock.
     */
    idr_preload(GFP_KERNEL);
    spin_lock(&file_priv->table_lock);

    ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

    spin_unlock(&file_priv->table_lock);
    idr_preload_end();

    mutex_unlock(&dev->object_name_lock);
    if (ret < 0)
        goto err_unref;

    handle = ret;

    ret = drm_vma_node_allow(&obj->vma_node, file_priv);
    if (ret)
        goto err_remove;

    if (obj->funcs->open) {
        ret = obj->funcs->open(obj, file_priv);
        if (ret)
            goto err_revoke;
    }

    *handlep = handle;
    return 0;

err_revoke:
    drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
    spin_lock(&file_priv->table_lock);
    idr_remove(&file_priv->object_idr, handle);
    spin_unlock(&file_priv->table_lock);
err_unref:
    drm_gem_object_handle_put_unlocked(obj);
    return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to drop
 * their reference to the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
              struct drm_gem_object *obj,
              u32 *handlep)
{
    mutex_lock(&obj->dev->object_name_lock);

    return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
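
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the tail of a typical buffer-creation callback. The handle is created
 * last, once the object is fully set up, and the local reference is then
 * dropped so the handle owns the only reference. foo_bo_create() is the
 * hypothetical allocator sketched after drm_gem_object_init() above.
 */
static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
               struct drm_mode_create_dumb *args)
{
    struct foo_bo *bo;
    int ret;

    args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
    args->size = PAGE_ALIGN(args->pitch * args->height);

    bo = foo_bo_create(dev, args->size);
    if (IS_ERR(bo))
        return PTR_ERR(bo);

    /* Publishing the handle must come last. */
    ret = drm_gem_handle_create(file, &bo->base, &args->handle);

    /* Drop the local reference; on success the handle holds one. */
    drm_gem_object_put(&bo->base);

    return ret;
}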

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;

    drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
    struct drm_device *dev = obj->dev;

    return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
    return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
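
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * from userspace, the fake offset is consumed by passing it to mmap(2)
 * on the DRM fd, e.g. for a dumb buffer (error handling elided):
 *
 *     struct drm_mode_map_dumb map = { .handle = handle };
 *
 *     drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *     ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                fd, map.offset);
 */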

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
    check_move_unevictable_pages(pvec);
    __pagevec_release(pvec);
    cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid for objects initialized with
 * drm_gem_object_init(); it cannot be used on objects initialized only with
 * drm_gem_private_object_init().
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
    struct address_space *mapping;
    struct page *p, **pages;
    struct pagevec pvec;
    int i, npages;

    if (WARN_ON(!obj->filp))
        return ERR_PTR(-EINVAL);

    /* This is the shared memory object that backs the GEM resource */
    mapping = obj->filp->f_mapping;

    /* We already BUG_ON() for non-page-aligned sizes in
     * drm_gem_object_init(), so we should never hit this unless
     * driver author is doing something really wrong:
     */
    WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

    npages = obj->size >> PAGE_SHIFT;

    pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL)
        return ERR_PTR(-ENOMEM);

    mapping_set_unevictable(mapping);

    for (i = 0; i < npages; i++) {
        p = shmem_read_mapping_page(mapping, i);
        if (IS_ERR(p))
            goto fail;
        pages[i] = p;

        /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
         * correct region during swapin. Note that this requires
         * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
         * so shmem can relocate pages during swapin if required.
         */
        BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                (page_to_pfn(p) >= 0x00100000UL));
    }

    return pages;

fail:
    mapping_clear_unevictable(mapping);
    pagevec_init(&pvec);
    while (i--) {
        if (!pagevec_add(&pvec, pages[i]))
            drm_gem_check_release_pagevec(&pvec);
    }
    if (pagevec_count(&pvec))
        drm_gem_check_release_pagevec(&pvec);

    kvfree(pages);
    return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
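
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a minimal pin/unpin pair for a shmem-backed object, pairing
 * drm_gem_get_pages() with drm_gem_put_pages(). foo_bo and its pages
 * member are hypothetical.
 */
static int foo_bo_get_pages(struct foo_bo *bo)
{
    struct page **pages;

    pages = drm_gem_get_pages(&bo->base);
    if (IS_ERR(pages))
        return PTR_ERR(pages);

    bo->pages = pages;
    return 0;
}

static void foo_bo_put_pages(struct foo_bo *bo)
{
    /* dirty/accessed: assume the device may have written to the pages */
    drm_gem_put_pages(&bo->base, bo->pages, true, true);
    bo->pages = NULL;
}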

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
        bool dirty, bool accessed)
{
    int i, npages;
    struct address_space *mapping;
    struct pagevec pvec;

    mapping = file_inode(obj->filp)->i_mapping;
    mapping_clear_unevictable(mapping);

    /* We already BUG_ON() for non-page-aligned sizes in
     * drm_gem_object_init(), so we should never hit this unless
     * driver author is doing something really wrong:
     */
    WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

    npages = obj->size >> PAGE_SHIFT;

    pagevec_init(&pvec);
    for (i = 0; i < npages; i++) {
        if (!pages[i])
            continue;

        if (dirty)
            set_page_dirty(pages[i]);

        if (accessed)
            mark_page_accessed(pages[i]);

        /* Undo the reference we took when populating the table */
        if (!pagevec_add(&pvec, pages[i]))
            drm_gem_check_release_pagevec(&pvec);
    }
    if (pagevec_count(&pvec))
        drm_gem_check_release_pagevec(&pvec);

    kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
              struct drm_gem_object **objs)
{
    int i, ret = 0;
    struct drm_gem_object *obj;

    spin_lock(&filp->table_lock);

    for (i = 0; i < count; i++) {
        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle[i]);
        if (!obj) {
            ret = -ENOENT;
            break;
        }
        drm_gem_object_get(obj);
        objs[i] = obj;
    }
    spin_unlock(&filp->table_lock);

    return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
               int count, struct drm_gem_object ***objs_out)
{
    int ret;
    u32 *handles;
    struct drm_gem_object **objs;

    if (!count)
        return 0;

    objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                 GFP_KERNEL | __GFP_ZERO);
    if (!objs)
        return -ENOMEM;

    *objs_out = objs;

    handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
    if (!handles) {
        ret = -ENOMEM;
        goto out;
    }

    if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
        ret = -EFAULT;
        DRM_DEBUG("Failed to copy in GEM handles\n");
        goto out;
    }

    ret = objects_lookup(filp, handles, count, objs);
out:
    kvfree(handles);
    return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
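
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * resolving the handle array of a hypothetical submit ioctl; bo_handles
 * is a u64 user pointer taken from the ioctl args. Every returned object
 * must later be released with drm_gem_object_put() and the array freed
 * with kvfree().
 */
static int foo_lookup_submit_bos(struct drm_file *file, u64 bo_handles,
                 int count, struct drm_gem_object ***bos)
{
    return drm_gem_objects_lookup(file, u64_to_user_ptr(bo_handles),
                      count, bos);
}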

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
    struct drm_gem_object *obj = NULL;

    objects_lookup(filp, &handle, 1, &obj);
    return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * 0 on success, -ETIME if the wait timed out, -EINVAL if the handle lookup
 * failed, or another negative error code (e.g. -ERESTARTSYS if interrupted).
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                    bool wait_all, unsigned long timeout)
{
    long ret;
    struct drm_gem_object *obj;

    obj = drm_gem_object_lookup(filep, handle);
    if (!obj) {
        DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
        return -EINVAL;
    }

    ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
                    true, timeout);
    if (ret == 0)
        ret = -ETIME;
    else if (ret > 0)
        ret = 0;

    drm_gem_object_put(obj);

    return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
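
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a hypothetical driver wait ioctl that converts a nanosecond timeout
 * from userspace into jiffies for this helper. struct foo_wait_args and
 * the ioctl wiring are assumptions, not part of the DRM core.
 */
struct foo_wait_args {
    __u32 handle;
    __u32 flags;
    __u64 timeout_ns;
};

static int foo_wait_ioctl(struct drm_device *dev, void *data,
              struct drm_file *file)
{
    struct foo_wait_args *args = data;
    unsigned long timeout = nsecs_to_jiffies(args->timeout_ns);

    /* true: wait on all fences (readers and writers), not just writers */
    return drm_gem_dma_resv_wait(file, args->handle, true, timeout);
}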

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
            struct drm_file *file_priv)
{
    struct drm_gem_close *args = data;
    int ret;

    if (!drm_core_check_feature(dev, DRIVER_GEM))
        return -EOPNOTSUPP;

    ret = drm_gem_handle_delete(file_priv, args->handle);

    return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
            struct drm_file *file_priv)
{
    struct drm_gem_flink *args = data;
    struct drm_gem_object *obj;
    int ret;

    if (!drm_core_check_feature(dev, DRIVER_GEM))
        return -EOPNOTSUPP;

    obj = drm_gem_object_lookup(file_priv, args->handle);
    if (obj == NULL)
        return -ENOENT;

    mutex_lock(&dev->object_name_lock);
    /* prevent races with concurrent gem_close. */
    if (obj->handle_count == 0) {
        ret = -ENOENT;
        goto err;
    }

    if (!obj->name) {
        ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
        if (ret < 0)
            goto err;

        obj->name = ret;
    }

    args->name = (uint64_t) obj->name;
    ret = 0;

err:
    mutex_unlock(&dev->object_name_lock);
    drm_gem_object_put(obj);
    return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
           struct drm_file *file_priv)
{
    struct drm_gem_open *args = data;
    struct drm_gem_object *obj;
    int ret;
    u32 handle;

    if (!drm_core_check_feature(dev, DRIVER_GEM))
        return -EOPNOTSUPP;

    mutex_lock(&dev->object_name_lock);
    obj = idr_find(&dev->object_name_idr, (int) args->name);
    if (obj) {
        drm_gem_object_get(obj);
    } else {
        mutex_unlock(&dev->object_name_lock);
        return -ENOENT;
    }

    /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
    ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
    if (ret)
        goto err;

    args->handle = handle;
    args->size = obj->size;

err:
    drm_gem_object_put(obj);
    return ret;
}
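
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * how two userspace processes share a buffer via flink names. Userspace
 * code, shown here as a comment:
 *
 *     struct drm_gem_flink flink = { .handle = handle };
 *     drmIoctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *     // pass flink.name to the other process somehow ...
 *
 *     struct drm_gem_open open = { .name = name_from_a };
 *     drmIoctl(fd_b, DRM_IOCTL_GEM_OPEN, &open);
 *     // open.handle and open.size are now valid on fd_b
 *
 * Note that flink names are global and guessable, which is one reason
 * modern userspace prefers dma-buf (PRIME) fds for sharing.
 */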

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
    idr_init_base(&file_private->object_idr, 1);
    spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
    idr_for_each(&file_private->object_idr,
             &drm_gem_object_release_handle, file_private);
    idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
    WARN_ON(obj->dma_buf);

    if (obj->filp)
        fput(obj->filp);

    dma_resv_fini(&obj->_resv);
    drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
    struct drm_gem_object *obj =
        container_of(kref, struct drm_gem_object, refcount);

    if (WARN_ON(!obj->funcs->free))
        return;

    obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
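
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a minimal &drm_gem_object_funcs.free implementation for a driver object
 * embedding struct drm_gem_object. foo_bo is the hypothetical type
 * sketched earlier.
 */
static void foo_gem_free(struct drm_gem_object *obj)
{
    struct foo_bo *bo = container_of(obj, struct foo_bo, base);

    /* Inverse of drm_gem_object_init(): drops the shmem file, resv, ... */
    drm_gem_object_release(obj);
    kfree(bo);
}

static const struct drm_gem_object_funcs foo_gem_funcs = {
    .free = foo_gem_free,
    /* ... */
};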

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the &vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
    struct drm_gem_object *obj = vma->vm_private_data;

    drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the &vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
    struct drm_gem_object *obj = vma->vm_private_data;

    drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
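
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver's vm_operations_struct built on these helpers. foo_gem_fault
 * is a hypothetical fault handler that would populate the mapping.
 */
static vm_fault_t foo_gem_fault(struct vm_fault *vmf)
{
    struct drm_gem_object *obj = vmf->vma->vm_private_data;

    if (!obj)
        return VM_FAULT_SIGBUS;

    /*
     * A real handler would resolve vmf->address to a backing page of
     * obj and insert it, e.g. with vmf_insert_page().
     */
    return VM_FAULT_SIGBUS; /* placeholder for the sketch */
}

static const struct vm_operations_struct foo_gem_vm_ops = {
    .fault = foo_gem_fault,
    .open  = drm_gem_vm_open,
    .close = drm_gem_vm_close,
};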

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
             struct vm_area_struct *vma)
{
    int ret;

    /* Check for valid size. */
    if (obj_size < vma->vm_end - vma->vm_start)
        return -EINVAL;

    /* Take a ref for this mapping of the object, so that the fault
     * handler can dereference the mmap offset's pointer to the object.
     * This reference is cleaned up by the corresponding vm_close
     * (which should happen whether the vma was created by this call, or
     * by a vm_open due to mremap or partial unmap or whatever).
     */
    drm_gem_object_get(obj);

    vma->vm_private_data = obj;
    vma->vm_ops = obj->funcs->vm_ops;

    if (obj->funcs->mmap) {
        ret = obj->funcs->mmap(obj, vma);
        if (ret)
            goto err_drm_gem_object_put;
        WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
    } else {
        if (!vma->vm_ops) {
            ret = -EINVAL;
            goto err_drm_gem_object_put;
        }

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
    }

    return 0;

err_drm_gem_object_put:
    drm_gem_object_put(obj);
    return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *priv = filp->private_data;
    struct drm_device *dev = priv->minor->dev;
    struct drm_gem_object *obj = NULL;
    struct drm_vma_offset_node *node;
    int ret;

    if (drm_dev_is_unplugged(dev))
        return -ENODEV;

    drm_vma_offset_lock_lookup(dev->vma_offset_manager);
    node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                          vma->vm_pgoff,
                          vma_pages(vma));
    if (likely(node)) {
        obj = container_of(node, struct drm_gem_object, vma_node);
        /*
         * When the object is being freed, after it hits 0-refcnt it
         * proceeds to tear down the object. In the process it will
         * attempt to remove the VMA offset and so acquire this
         * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
         * that matches our range, we know it is in the process of being
         * destroyed and will be freed as soon as we release the lock -
         * so we have to check for the 0-refcnted object and treat it as
         * invalid.
         */
        if (!kref_get_unless_zero(&obj->refcount))
            obj = NULL;
    }
    drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

    if (!obj)
        return -EINVAL;

    if (!drm_vma_node_is_allowed(node, priv)) {
        drm_gem_object_put(obj);
        return -EACCES;
    }

    ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                   vma);

    drm_gem_object_put(obj);

    return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
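
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * GEM-based drivers usually reach drm_gem_mmap() through the standard
 * fops helper macro and hook it up in their &drm_driver:
 */
DEFINE_DRM_GEM_FOPS(foo_fops);

static const struct drm_driver foo_driver = {
    .driver_features = DRIVER_GEM | DRIVER_MODESET,
    .fops = &foo_fops,
    /* ... */
};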

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
            const struct drm_gem_object *obj)
{
    drm_printf_indent(p, indent, "name=%d\n", obj->name);
    drm_printf_indent(p, indent, "refcount=%u\n",
              kref_read(&obj->refcount));
    drm_printf_indent(p, indent, "start=%08lx\n",
              drm_vma_node_start(&obj->vma_node));
    drm_printf_indent(p, indent, "size=%zu\n", obj->size);
    drm_printf_indent(p, indent, "imported=%s\n",
              str_yes_no(obj->import_attach));

    if (obj->funcs->print_info)
        obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
    if (obj->funcs->pin)
        return obj->funcs->pin(obj);
    else
        return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
    if (obj->funcs->unpin)
        obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
    int ret;

    if (!obj->funcs->vmap)
        return -EOPNOTSUPP;

    ret = obj->funcs->vmap(obj, map);
    if (ret)
        return ret;
    else if (iosys_map_is_null(map))
        return -ENOMEM;

    return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
    if (iosys_map_is_null(map))
        return;

    if (obj->funcs->vunmap)
        obj->funcs->vunmap(obj, map);

    /* Always set the mapping to NULL. Callers may rely on this. */
    iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);
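
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * copying data into an object through a temporary kernel mapping. The
 * iosys_map abstraction hides whether the backing memory is system RAM
 * or I/O memory. foo_upload() is a hypothetical helper.
 */
static int foo_upload(struct drm_gem_object *obj, const void *src, size_t len)
{
    struct iosys_map map;
    int ret;

    ret = drm_gem_vmap(obj, &map);
    if (ret)
        return ret;

    iosys_map_memcpy_to(&map, 0, src, len);

    drm_gem_vunmap(obj, &map);
    return 0;
}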

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
              struct ww_acquire_ctx *acquire_ctx)
{
    int contended = -1;
    int i, ret;

    ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
    if (contended != -1) {
        struct drm_gem_object *obj = objs[contended];

        ret = dma_resv_lock_slow_interruptible(obj->resv,
                                 acquire_ctx);
        if (ret) {
            ww_acquire_fini(acquire_ctx);
            return ret;
        }
    }

    for (i = 0; i < count; i++) {
        if (i == contended)
            continue;

        ret = dma_resv_lock_interruptible(objs[i]->resv,
                                acquire_ctx);
        if (ret) {
            int j;

            for (j = 0; j < i; j++)
                dma_resv_unlock(objs[j]->resv);

            if (contended != -1 && contended >= i)
                dma_resv_unlock(objs[contended]->resv);

            if (ret == -EDEADLK) {
                contended = i;
                goto retry;
            }

            ww_acquire_fini(acquire_ctx);
            return ret;
        }
    }

    ww_acquire_done(acquire_ctx);

    return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                struct ww_acquire_ctx *acquire_ctx)
{
    int i;

    for (i = 0; i < count; i++)
        dma_resv_unlock(objs[i]->resv);

    ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
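
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a typical submission path locks all reservations, attaches the job's
 * fence to every object, and unlocks again. Note that
 * dma_resv_reserve_fences() must succeed before dma_resv_add_fence()
 * may be called; the helper name is hypothetical.
 */
static int foo_attach_fence(struct drm_gem_object **objs, int count,
                struct dma_fence *fence)
{
    struct ww_acquire_ctx ctx;
    int i, ret;

    ret = drm_gem_lock_reservations(objs, count, &ctx);
    if (ret)
        return ret;

    for (i = 0; i < count; i++) {
        ret = dma_resv_reserve_fences(objs[i]->resv, 1);
        if (ret)
            break;
        dma_resv_add_fence(objs[i]->resv, fence, DMA_RESV_USAGE_WRITE);
    }

    drm_gem_unlock_reservations(objs, count, &ctx);
    return ret;
}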