/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
 * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interface is provided through the &drm_gem_object_funcs.export
 * and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export side the &dma_buf holds a reference to the exported buffer
 * object, usually a &drm_gem_object. It takes this reference in the
 * PRIME_HANDLE_TO_FD IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called.  For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * are the lookup caches for import and export. These are required to guarantee
 * that any given object will always have only one unique userspace handle. This
 * is required to allow userspace to detect duplicated imports, since some GEM
 * drivers do fail command submissions if a given buffer object is listed more
 * than once. These import and export caches in &drm_prime_file_private only
 * retain a weak reference, which is cleaned up when the corresponding object is
 * released.
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */

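/*
 * A minimal sketch of how a GEM driver wires up the entry points described
 * above (illustrative only, not part of this file; "my_driver" is a made-up
 * name):
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM,
 *		// PRIME export/import, backed by the helpers in this file
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_import = drm_gem_prime_import,
 *	};
 */
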
struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
				 uint32_t handle)
{
	struct rb_node *rb;

	mutex_lock(&prime_fpriv->lock);

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(member->dma_buf);
			kfree(member);
			break;
		} else if (member->handle < handle) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	mutex_unlock(&prime_fpriv->lock);
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

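/*
 * A hedged sketch of a driver-specific &drm_gem_object_funcs.export hook
 * built on drm_gem_dmabuf_export() (illustrative, not part of this file;
 * "my_dmabuf_ops" is a made-up &dma_buf_ops whose .release must be
 * drm_gem_dmabuf_release() for the reference counting above to work):
 *
 *	static struct dma_buf *my_gem_export(struct drm_gem_object *obj, int flags)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dmabuf_ops;
 *		exp_info.size = obj->size;
 *		exp_info.flags = flags;
 *		exp_info.priv = obj;
 *		exp_info.resv = obj->resv;
 *
 *		return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *	}
 */
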
/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm device to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

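/*
 * From userspace the import path above is reached through
 * DRM_IOCTL_PRIME_FD_TO_HANDLE. A minimal sketch (illustrative; assumes
 * "drm_fd" is an open DRM device fd and "dmabuf_fd" a dma-buf fd received
 * over a UNIX domain socket):
 *
 *	struct drm_prime_handle args = {
 *		.fd = dmabuf_fd,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) == 0)
 *		use_gem_handle(args.handle);	// driver-local GEM handle
 */
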
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

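/*
 * The matching userspace export direction goes through
 * DRM_IOCTL_PRIME_HANDLE_TO_FD. A minimal sketch (illustrative; "handle"
 * is an existing GEM handle on "drm_fd"):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == 0)
 *		send_fd_over_socket(args.fd);	// hypothetical helper
 */
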
/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to pin
 * it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage. Which is ok for scanout, but is not the best
 * option for sharing lots of buffers for rendering.
 */

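/*
 * Because the individual helpers are exported, a driver can reuse most of
 * the generic &dma_buf_ops below while substituting a single callback. A
 * hedged sketch (illustrative; "my_dmabuf_mmap" is a made-up driver
 * function):
 *
 *	static const struct dma_buf_ops my_dmabuf_ops = {
 *		.cache_sgt_mapping = true,
 *		.attach = drm_gem_map_attach,
 *		.detach = drm_gem_map_detach,
 *		.map_dma_buf = drm_gem_map_dma_buf,
 *		.unmap_dma_buf = drm_gem_unmap_dma_buf,
 *		.release = drm_gem_dmabuf_release,
 *		.mmap = my_dmabuf_mmap,		// driver-specific override
 *		.vmap = drm_gem_dmabuf_vmap,
 *		.vunmap = drm_gem_dmabuf_vunmap,
 *	};
 */
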
/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: the &sg_table containing the scatterlist to be returned, or an
 * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!obj->funcs->get_sg_table))
		return ERR_PTR(-ENOSYS);

	sgt = obj->funcs->get_sg_table(obj);
	if (IS_ERR(sgt))
		return sgt;

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @map: the virtual address of the buffer
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 * The kernel virtual address is returned in map.
 *
 * Returns 0 on success or a negative errno code otherwise.
 */
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_vmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @map: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		drm_gem_object_get(obj);
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		vma->vm_private_data = obj;
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * FIXME: There's really no point to this wrapper, drivers which need anything
 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	size_t max_segment = 0;
	int err;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0)
		max_segment = UINT_MAX;
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
		kfree(sg);
		sg = ERR_PTR(err);
	}
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

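/*
 * A hedged sketch of drm_prime_pages_to_sg() in a driver's
 * &drm_gem_object_funcs.get_sg_table hook (illustrative; "struct my_bo" and
 * "to_my_bo()" are made-up names for a driver object that keeps a page
 * array):
 *
 *	static struct sg_table *my_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct my_bo *bo = to_my_bo(obj);
 *
 *		return drm_prime_pages_to_sg(obj->dev, bo->pages,
 *					     obj->size >> PAGE_SHIFT);
 *	}
 */
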
/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);

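/*
 * Drivers whose hardware needs physically contiguous buffers typically use
 * drm_prime_get_contiguous_size() in &drm_driver.gem_prime_import_sg_table
 * to reject fragmented imports. A minimal sketch (illustrative):
 *
 *	static struct drm_gem_object *
 *	my_gem_prime_import_sg_table(struct drm_device *dev,
 *				     struct dma_buf_attachment *attach,
 *				     struct sg_table *sgt)
 *	{
 *		if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 *			return ERR_PTR(-EINVAL);
 *		// ... wrap sgt in a driver-specific GEM object ...
 *	}
 */
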
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev for
 * attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM object
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

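/*
 * A hedged sketch of when drm_gem_prime_import_dev() is useful: a device
 * where DMA must be attached through a different struct device than
 * &drm_device.dev (illustrative; "struct my_device", "to_my_device()" and
 * the "dma_parent" field are all made-up names):
 *
 *	static struct drm_gem_object *
 *	my_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct my_device *mydev = to_my_device(dev);
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf,
 *						mydev->dma_parent);
 *	}
 */
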
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_array - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of pages.
 *
 * This function is deprecated and its use is strongly discouraged. The page
 * array is only useful for page faults, and those can corrupt fields in the
 * struct page if they are not handled by the exporting driver.
 */
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
					    struct page **pages,
					    int max_entries)
{
	struct sg_page_iter page_iter;
	struct page **p = pages;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		if (WARN_ON(p - pages >= max_entries))
			return -1;
		*p++ = sg_page_iter_page(&page_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);

/**
 * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
 * @sgt: scatter-gather table to convert
 * @addrs: array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of addresses.
 *
 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
				   int max_entries)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t *a = addrs;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		if (WARN_ON(a - addrs >= max_entries))
			return -1;
		*a++ = sg_page_iter_dma_address(&dma_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);

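/*
 * Illustrative use of drm_prime_sg_to_dma_addr_array() inside an import
 * hook (a sketch; "npages" and the "dma_addrs" array are assumed driver
 * state, sized to one entry per page of the imported buffer):
 *
 *	dma_addr_t *dma_addrs = kvmalloc_array(npages, sizeof(*dma_addrs),
 *					       GFP_KERNEL);
 *
 *	if (!dma_addrs)
 *		return ERR_PTR(-ENOMEM);
 *	if (drm_prime_sg_to_dma_addr_array(sgt, dma_addrs, npages)) {
 *		kvfree(dma_addrs);
 *		return ERR_PTR(-EINVAL);
 *	}
 */
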
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
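/*
 * A hedged sketch of the matching &drm_gem_object_funcs.free hook for a
 * driver that imports via the helpers above (illustrative; "struct my_bo"
 * keeps the sg_table that was returned at import time):
 *
 *	static void my_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct my_bo *bo = to_my_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */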