0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Framework for buffer objects that can be shared across devices/subsystems.
0004  *
0005  * Copyright(C) 2011 Linaro Limited. All rights reserved.
0006  * Author: Sumit Semwal <sumit.semwal@ti.com>
0007  *
0008  * Many thanks to the linaro-mm-sig list, and especially
0009  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
0010  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
0011  * refining of this idea.
0012  */
0013 
0014 #include <linux/fs.h>
0015 #include <linux/slab.h>
0016 #include <linux/dma-buf.h>
0017 #include <linux/dma-fence.h>
0018 #include <linux/anon_inodes.h>
0019 #include <linux/export.h>
0020 #include <linux/debugfs.h>
0021 #include <linux/module.h>
0022 #include <linux/seq_file.h>
0023 #include <linux/sync_file.h>
0024 #include <linux/poll.h>
0025 #include <linux/dma-resv.h>
0026 #include <linux/mm.h>
0027 #include <linux/mount.h>
0028 #include <linux/pseudo_fs.h>
0029 
0030 #include <uapi/linux/dma-buf.h>
0031 #include <uapi/linux/magic.h>
0032 
0033 #include "dma-buf-sysfs-stats.h"
0034 
0035 static inline int is_dma_buf_file(struct file *);
0036 
0037 struct dma_buf_list {
0038     struct list_head head;
0039     struct mutex lock;
0040 };
0041 
0042 static struct dma_buf_list db_list;
0043 
0044 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
0045 {
0046     struct dma_buf *dmabuf;
0047     char name[DMA_BUF_NAME_LEN];
0048     size_t ret = 0;
0049 
0050     dmabuf = dentry->d_fsdata;
0051     spin_lock(&dmabuf->name_lock);
0052     if (dmabuf->name)
0053         ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
0054     spin_unlock(&dmabuf->name_lock);
0055 
0056     return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
0057                  dentry->d_name.name, ret > 0 ? name : "");
0058 }
0059 
0060 static void dma_buf_release(struct dentry *dentry)
0061 {
0062     struct dma_buf *dmabuf;
0063 
0064     dmabuf = dentry->d_fsdata;
0065     if (unlikely(!dmabuf))
0066         return;
0067 
0068     BUG_ON(dmabuf->vmapping_counter);
0069 
0070     /*
0071      * If you hit this BUG() it could mean:
0072      * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
0073      * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
0074      */
0075     BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
0076 
0077     dma_buf_stats_teardown(dmabuf);
0078     dmabuf->ops->release(dmabuf);
0079 
0080     if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
0081         dma_resv_fini(dmabuf->resv);
0082 
0083     WARN_ON(!list_empty(&dmabuf->attachments));
0084     module_put(dmabuf->owner);
0085     kfree(dmabuf->name);
0086     kfree(dmabuf);
0087 }
0088 
0089 static int dma_buf_file_release(struct inode *inode, struct file *file)
0090 {
0091     struct dma_buf *dmabuf;
0092 
0093     if (!is_dma_buf_file(file))
0094         return -EINVAL;
0095 
0096     dmabuf = file->private_data;
0097 
0098     mutex_lock(&db_list.lock);
0099     list_del(&dmabuf->list_node);
0100     mutex_unlock(&db_list.lock);
0101 
0102     return 0;
0103 }
0104 
0105 static const struct dentry_operations dma_buf_dentry_ops = {
0106     .d_dname = dmabuffs_dname,
0107     .d_release = dma_buf_release,
0108 };
0109 
0110 static struct vfsmount *dma_buf_mnt;
0111 
0112 static int dma_buf_fs_init_context(struct fs_context *fc)
0113 {
0114     struct pseudo_fs_context *ctx;
0115 
0116     ctx = init_pseudo(fc, DMA_BUF_MAGIC);
0117     if (!ctx)
0118         return -ENOMEM;
0119     ctx->dops = &dma_buf_dentry_ops;
0120     return 0;
0121 }
0122 
0123 static struct file_system_type dma_buf_fs_type = {
0124     .name = "dmabuf",
0125     .init_fs_context = dma_buf_fs_init_context,
0126     .kill_sb = kill_anon_super,
0127 };
0128 
0129 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
0130 {
0131     struct dma_buf *dmabuf;
0132 
0133     if (!is_dma_buf_file(file))
0134         return -EINVAL;
0135 
0136     dmabuf = file->private_data;
0137 
0138     /* check if buffer supports mmap */
0139     if (!dmabuf->ops->mmap)
0140         return -EINVAL;
0141 
0142     /* check for overflowing the buffer's size */
0143     if (vma->vm_pgoff + vma_pages(vma) >
0144         dmabuf->size >> PAGE_SHIFT)
0145         return -EINVAL;
0146 
0147     return dmabuf->ops->mmap(dmabuf, vma);
0148 }
0149 
0150 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
0151 {
0152     struct dma_buf *dmabuf;
0153     loff_t base;
0154 
0155     if (!is_dma_buf_file(file))
0156         return -EBADF;
0157 
0158     dmabuf = file->private_data;
0159 
0160     /* Only support discovering the end of the buffer, but also
0161      * allow SEEK_SET to maintain the idiomatic SEEK_END(0),
0162      * SEEK_CUR(0) pattern. */
0163     if (whence == SEEK_END)
0164         base = dmabuf->size;
0165     else if (whence == SEEK_SET)
0166         base = 0;
0167     else
0168         return -EINVAL;
0169 
0170     if (offset != 0)
0171         return -EINVAL;
0172 
0173     return base + offset;
0174 }
0175 
0176 /**
0177  * DOC: implicit fence polling
0178  *
0179  * To support cross-device and cross-driver synchronization of buffer access
0180  * implicit fences (represented internally in the kernel with &struct dma_fence)
0181  * can be attached to a &dma_buf. The glue for that and a few related things are
0182  * provided in the &dma_resv structure.
0183  *
0184  * Userspace can query the state of these implicitly tracked fences using poll()
0185  * and related system calls:
0186  *
0187  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
0188  *   most recent write or exclusive fence.
0189  *
0190  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
0191  *   all attached fences, shared and exclusive ones.
0192  *
0193  * Note that this only signals the completion of the respective fences, i.e. the
0194  * DMA transfers are complete. Cache flushing and any other necessary
0195  * preparations before CPU access can begin still need to happen.
0196  *
0197  * As an alternative to poll(), the set of fences on a DMA buffer can be
0198  * exported as a &sync_file using the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl.
0199  */
0200 
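/*
 * A minimal userspace sketch of the poll() interface described above. It
 * assumes a dma-buf fd "dmabuf_fd" obtained elsewhere (e.g. handed out by a
 * driver via dma_buf_fd()); the helper name is purely illustrative::
 *
 *      #include <poll.h>
 *
 *      // Block until the most recent write/exclusive fence has signalled,
 *      // i.e. until read access no longer races with pending writes.
 *      static int wait_for_read_access(int dmabuf_fd)
 *      {
 *              struct pollfd pfd = {
 *                      .fd = dmabuf_fd,
 *                      .events = POLLIN,
 *              };
 *
 *              return poll(&pfd, 1, -1);       // -1: wait without timeout
 *      }
 *
 * Polling for POLLOUT instead waits for all attached fences, shared and
 * exclusive, matching the EPOLLOUT semantics documented above.
 */
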
0201 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
0202 {
0203     struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
0204     struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
0205     unsigned long flags;
0206 
0207     spin_lock_irqsave(&dcb->poll->lock, flags);
0208     wake_up_locked_poll(dcb->poll, dcb->active);
0209     dcb->active = 0;
0210     spin_unlock_irqrestore(&dcb->poll->lock, flags);
0211     dma_fence_put(fence);
0212     /* Paired with get_file in dma_buf_poll */
0213     fput(dmabuf->file);
0214 }
0215 
0216 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
0217                 struct dma_buf_poll_cb_t *dcb)
0218 {
0219     struct dma_resv_iter cursor;
0220     struct dma_fence *fence;
0221     int r;
0222 
0223     dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
0224                 fence) {
0225         dma_fence_get(fence);
0226         r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
0227         if (!r)
0228             return true;
0229         dma_fence_put(fence);
0230     }
0231 
0232     return false;
0233 }
0234 
0235 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
0236 {
0237     struct dma_buf *dmabuf;
0238     struct dma_resv *resv;
0239     __poll_t events;
0240 
0241     dmabuf = file->private_data;
0242     if (!dmabuf || !dmabuf->resv)
0243         return EPOLLERR;
0244 
0245     resv = dmabuf->resv;
0246 
0247     poll_wait(file, &dmabuf->poll, poll);
0248 
0249     events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
0250     if (!events)
0251         return 0;
0252 
0253     dma_resv_lock(resv, NULL);
0254 
0255     if (events & EPOLLOUT) {
0256         struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
0257 
0258         /* Check that callback isn't busy */
0259         spin_lock_irq(&dmabuf->poll.lock);
0260         if (dcb->active)
0261             events &= ~EPOLLOUT;
0262         else
0263             dcb->active = EPOLLOUT;
0264         spin_unlock_irq(&dmabuf->poll.lock);
0265 
0266         if (events & EPOLLOUT) {
0267             /* Paired with fput in dma_buf_poll_cb */
0268             get_file(dmabuf->file);
0269 
0270             if (!dma_buf_poll_add_cb(resv, true, dcb))
0271                 /* No callback queued, wake up any other waiters */
0272                 dma_buf_poll_cb(NULL, &dcb->cb);
0273             else
0274                 events &= ~EPOLLOUT;
0275         }
0276     }
0277 
0278     if (events & EPOLLIN) {
0279         struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
0280 
0281         /* Check that callback isn't busy */
0282         spin_lock_irq(&dmabuf->poll.lock);
0283         if (dcb->active)
0284             events &= ~EPOLLIN;
0285         else
0286             dcb->active = EPOLLIN;
0287         spin_unlock_irq(&dmabuf->poll.lock);
0288 
0289         if (events & EPOLLIN) {
0290             /* Paired with fput in dma_buf_poll_cb */
0291             get_file(dmabuf->file);
0292 
0293             if (!dma_buf_poll_add_cb(resv, false, dcb))
0294                 /* No callback queued, wake up any other waiters */
0295                 dma_buf_poll_cb(NULL, &dcb->cb);
0296             else
0297                 events &= ~EPOLLIN;
0298         }
0299     }
0300 
0301     dma_resv_unlock(resv);
0302     return events;
0303 }
0304 
0305 /**
0306  * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
0307  * The name of a dma-buf can be changed if the same piece of memory is
0308  * used for multiple purposes by different devices.
0309  *
0310  * @dmabuf: [in]     dmabuf buffer that will be renamed.
0311  * @buf:    [in]     A piece of userspace memory that contains the name of
0312  *                   the dma-buf.
0313  *
0314  * Returns 0 on success, or a negative error code if copying the name
0315  * from userspace fails.
0316  *
0317  */
0318 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
0319 {
0320     char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
0321 
0322     if (IS_ERR(name))
0323         return PTR_ERR(name);
0324 
0325     spin_lock(&dmabuf->name_lock);
0326     kfree(dmabuf->name);
0327     dmabuf->name = name;
0328     spin_unlock(&dmabuf->name_lock);
0329 
0330     return 0;
0331 }
0332 
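/*
 * Userspace sketch of renaming a buffer through the ioctl that ends up in
 * dma_buf_set_name(); it assumes the DMA_BUF_SET_NAME request define from
 * <linux/dma-buf.h> and a dma-buf fd obtained elsewhere::
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/dma-buf.h>
 *
 *      // Attach a human-readable name (truncated to DMA_BUF_NAME_LEN) to
 *      // the buffer; it shows up in fdinfo, debugfs and the sysfs stats.
 *      static int name_buffer(int dmabuf_fd, const char *name)
 *      {
 *              return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
 *      }
 */
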
0333 #if IS_ENABLED(CONFIG_SYNC_FILE)
0334 static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
0335                      void __user *user_data)
0336 {
0337     struct dma_buf_export_sync_file arg;
0338     enum dma_resv_usage usage;
0339     struct dma_fence *fence = NULL;
0340     struct sync_file *sync_file;
0341     int fd, ret;
0342 
0343     if (copy_from_user(&arg, user_data, sizeof(arg)))
0344         return -EFAULT;
0345 
0346     if (arg.flags & ~DMA_BUF_SYNC_RW)
0347         return -EINVAL;
0348 
0349     if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
0350         return -EINVAL;
0351 
0352     fd = get_unused_fd_flags(O_CLOEXEC);
0353     if (fd < 0)
0354         return fd;
0355 
0356     usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
0357     ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
0358     if (ret)
0359         goto err_put_fd;
0360 
0361     if (!fence)
0362         fence = dma_fence_get_stub();
0363 
0364     sync_file = sync_file_create(fence);
0365 
0366     dma_fence_put(fence);
0367 
0368     if (!sync_file) {
0369         ret = -ENOMEM;
0370         goto err_put_fd;
0371     }
0372 
0373     arg.fd = fd;
0374     if (copy_to_user(user_data, &arg, sizeof(arg))) {
0375         ret = -EFAULT;
0376         goto err_put_file;
0377     }
0378 
0379     fd_install(fd, sync_file->file);
0380 
0381     return 0;
0382 
0383 err_put_file:
0384     fput(sync_file->file);
0385 err_put_fd:
0386     put_unused_fd(fd);
0387     return ret;
0388 }
0389 
0390 static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
0391                      const void __user *user_data)
0392 {
0393     struct dma_buf_import_sync_file arg;
0394     struct dma_fence *fence;
0395     enum dma_resv_usage usage;
0396     int ret = 0;
0397 
0398     if (copy_from_user(&arg, user_data, sizeof(arg)))
0399         return -EFAULT;
0400 
0401     if (arg.flags & ~DMA_BUF_SYNC_RW)
0402         return -EINVAL;
0403 
0404     if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
0405         return -EINVAL;
0406 
0407     fence = sync_file_get_fence(arg.fd);
0408     if (!fence)
0409         return -EINVAL;
0410 
0411     usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
0412                            DMA_RESV_USAGE_READ;
0413 
0414     dma_resv_lock(dmabuf->resv, NULL);
0415 
0416     ret = dma_resv_reserve_fences(dmabuf->resv, 1);
0417     if (!ret)
0418         dma_resv_add_fence(dmabuf->resv, fence, usage);
0419 
0420     dma_resv_unlock(dmabuf->resv);
0421 
0422     dma_fence_put(fence);
0423 
0424     return ret;
0425 }
0426 #endif
0427 
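/*
 * Userspace-side sketch of the two sync_file ioctls above, assuming the
 * struct dma_buf_export_sync_file / struct dma_buf_import_sync_file layouts
 * from <linux/dma-buf.h> (a flags word plus a sync_file fd)::
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/dma-buf.h>
 *
 *      // Get a sync_file fd holding the fences a new read of the buffer
 *      // would have to wait for. Returns the fd, or -1 on error.
 *      static int export_read_fences(int dmabuf_fd)
 *      {
 *              struct dma_buf_export_sync_file arg = {
 *                      .flags = DMA_BUF_SYNC_READ,
 *              };
 *
 *              if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
 *                      return -1;
 *              return arg.fd;          // close() when no longer needed
 *      }
 *
 *      // Add the fence(s) of a sync_file to the dma-buf as a write fence.
 *      static int import_write_fence(int dmabuf_fd, int sync_file_fd)
 *      {
 *              struct dma_buf_import_sync_file arg = {
 *                      .flags = DMA_BUF_SYNC_WRITE,
 *                      .fd = sync_file_fd,
 *              };
 *
 *              return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg);
 *      }
 */
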
0428 static long dma_buf_ioctl(struct file *file,
0429               unsigned int cmd, unsigned long arg)
0430 {
0431     struct dma_buf *dmabuf;
0432     struct dma_buf_sync sync;
0433     enum dma_data_direction direction;
0434     int ret;
0435 
0436     dmabuf = file->private_data;
0437 
0438     switch (cmd) {
0439     case DMA_BUF_IOCTL_SYNC:
0440         if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
0441             return -EFAULT;
0442 
0443         if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
0444             return -EINVAL;
0445 
0446         switch (sync.flags & DMA_BUF_SYNC_RW) {
0447         case DMA_BUF_SYNC_READ:
0448             direction = DMA_FROM_DEVICE;
0449             break;
0450         case DMA_BUF_SYNC_WRITE:
0451             direction = DMA_TO_DEVICE;
0452             break;
0453         case DMA_BUF_SYNC_RW:
0454             direction = DMA_BIDIRECTIONAL;
0455             break;
0456         default:
0457             return -EINVAL;
0458         }
0459 
0460         if (sync.flags & DMA_BUF_SYNC_END)
0461             ret = dma_buf_end_cpu_access(dmabuf, direction);
0462         else
0463             ret = dma_buf_begin_cpu_access(dmabuf, direction);
0464 
0465         return ret;
0466 
0467     case DMA_BUF_SET_NAME_A:
0468     case DMA_BUF_SET_NAME_B:
0469         return dma_buf_set_name(dmabuf, (const char __user *)arg);
0470 
0471 #if IS_ENABLED(CONFIG_SYNC_FILE)
0472     case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
0473         return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
0474     case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
0475         return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
0476 #endif
0477 
0478     default:
0479         return -ENOTTY;
0480     }
0481 }
0482 
0483 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
0484 {
0485     struct dma_buf *dmabuf = file->private_data;
0486 
0487     seq_printf(m, "size:\t%zu\n", dmabuf->size);
0488     /* Don't count the temporary reference taken inside procfs seq_show */
0489     seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
0490     seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
0491     spin_lock(&dmabuf->name_lock);
0492     if (dmabuf->name)
0493         seq_printf(m, "name:\t%s\n", dmabuf->name);
0494     spin_unlock(&dmabuf->name_lock);
0495 }
0496 
0497 static const struct file_operations dma_buf_fops = {
0498     .release    = dma_buf_file_release,
0499     .mmap       = dma_buf_mmap_internal,
0500     .llseek     = dma_buf_llseek,
0501     .poll       = dma_buf_poll,
0502     .unlocked_ioctl = dma_buf_ioctl,
0503     .compat_ioctl   = compat_ptr_ioctl,
0504     .show_fdinfo    = dma_buf_show_fdinfo,
0505 };
0506 
0507 /*
0508  * is_dma_buf_file - Check if struct file* is associated with dma_buf
0509  */
0510 static inline int is_dma_buf_file(struct file *file)
0511 {
0512     return file->f_op == &dma_buf_fops;
0513 }
0514 
0515 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
0516 {
0517     static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
0518     struct file *file;
0519     struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
0520 
0521     if (IS_ERR(inode))
0522         return ERR_CAST(inode);
0523 
0524     inode->i_size = dmabuf->size;
0525     inode_set_bytes(inode, dmabuf->size);
0526 
0527     /*
0528      * The ->i_ino acquired from get_next_ino() is not unique and thus
0529      * not suitable for use as the dentry name by dmabuf stats.
0530      * Override ->i_ino with a unique, dmabuffs-specific
0531      * value.
0532      */
0533     inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
0534     file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
0535                  flags, &dma_buf_fops);
0536     if (IS_ERR(file))
0537         goto err_alloc_file;
0538     file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
0539     file->private_data = dmabuf;
0540     file->f_path.dentry->d_fsdata = dmabuf;
0541 
0542     return file;
0543 
0544 err_alloc_file:
0545     iput(inode);
0546     return file;
0547 }
0548 
0549 /**
0550  * DOC: dma buf device access
0551  *
0552  * For device DMA access to a shared DMA buffer the usual sequence of operations
0553  * is fairly simple:
0554  *
0555  * 1. The exporter defines its exporter instance using
0556  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
0557  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
0558  *    as a file descriptor by calling dma_buf_fd().
0559  *
0560  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
0561  *    to be shared with: First the file descriptor is converted to a &dma_buf using
0562  *    dma_buf_get(). Then the buffer is attached to the device using
0563  *    dma_buf_attach().
0564  *
0565  *    Up to this stage the exporter is still free to migrate or reallocate the
0566  *    backing storage.
0567  *
0568  * 3. Once the buffer is attached to all devices userspace can initiate DMA
0569  *    access to the shared buffer. In the kernel this is done by calling
0570  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
0571  *
0572  * 4. Once a driver is done with a shared buffer it needs to call
0573  *    dma_buf_detach() (after cleaning up any mappings) and then release the
0574  *    reference acquired with dma_buf_get() by calling dma_buf_put().
0575  *
0576  * For the detailed semantics exporters are expected to implement see
0577  * &dma_buf_ops.
0578  */
0579 
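/*
 * Condensed kernel-side sketch of steps 2-4 above for a hypothetical
 * importing driver that uses the static attachment API; all names except the
 * dma-buf calls are illustrative::
 *
 *      struct sg_table *my_import_buffer(struct device *dev, int fd,
 *                                        struct dma_buf **dmabuf_ret,
 *                                        struct dma_buf_attachment **attach_ret)
 *      {
 *              struct dma_buf *dmabuf;
 *              struct dma_buf_attachment *attach;
 *              struct sg_table *sgt;
 *
 *              dmabuf = dma_buf_get(fd);               // fd -> dma_buf (step 2)
 *              if (IS_ERR(dmabuf))
 *                      return ERR_CAST(dmabuf);
 *
 *              attach = dma_buf_attach(dmabuf, dev);   // attach device (step 2)
 *              if (IS_ERR(attach)) {
 *                      dma_buf_put(dmabuf);
 *                      return ERR_CAST(attach);
 *              }
 *
 *              sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); // step 3
 *              if (IS_ERR(sgt)) {
 *                      dma_buf_detach(dmabuf, attach);
 *                      dma_buf_put(dmabuf);
 *                      return sgt;
 *              }
 *
 *              *dmabuf_ret = dmabuf;
 *              *attach_ret = attach;
 *              return sgt;
 *      }
 *
 * Teardown mirrors this: dma_buf_unmap_attachment(), dma_buf_detach() and
 * finally dma_buf_put() (step 4).
 */
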
0580 /**
0581  * dma_buf_export - Creates a new dma_buf, and associates an anon file
0582  * with this buffer, so it can be exported.
0583  * Also connects the allocator-specific data and ops to the buffer.
0584  * Additionally, provides a name string for the exporter; useful in debugging.
0585  *
0586  * @exp_info:   [in]    holds all the export related information provided
0587  *          by the exporter. see &struct dma_buf_export_info
0588  *          for further details.
0589  *
0590  * Returns, on success, a newly created struct dma_buf object, which wraps the
0591  * supplied private data and operations for &struct dma_buf_ops. On missing
0592  * ops or an error allocating the struct dma_buf, an ERR_PTR() encoded
0593  * negative error is returned.
0594  *
0595  * For most cases the easiest way to create @exp_info is through the
0596  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
0597  */
0598 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
0599 {
0600     struct dma_buf *dmabuf;
0601     struct dma_resv *resv = exp_info->resv;
0602     struct file *file;
0603     size_t alloc_size = sizeof(struct dma_buf);
0604     int ret;
0605 
0606     if (!exp_info->resv)
0607         alloc_size += sizeof(struct dma_resv);
0608     else
0609         /* prevent &dma_buf[1] == dma_buf->resv */
0610         alloc_size += 1;
0611 
0612     if (WARN_ON(!exp_info->priv
0613               || !exp_info->ops
0614               || !exp_info->ops->map_dma_buf
0615               || !exp_info->ops->unmap_dma_buf
0616               || !exp_info->ops->release)) {
0617         return ERR_PTR(-EINVAL);
0618     }
0619 
0620     if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
0621             (exp_info->ops->pin || exp_info->ops->unpin)))
0622         return ERR_PTR(-EINVAL);
0623 
0624     if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
0625         return ERR_PTR(-EINVAL);
0626 
0627     if (!try_module_get(exp_info->owner))
0628         return ERR_PTR(-ENOENT);
0629 
0630     dmabuf = kzalloc(alloc_size, GFP_KERNEL);
0631     if (!dmabuf) {
0632         ret = -ENOMEM;
0633         goto err_module;
0634     }
0635 
0636     dmabuf->priv = exp_info->priv;
0637     dmabuf->ops = exp_info->ops;
0638     dmabuf->size = exp_info->size;
0639     dmabuf->exp_name = exp_info->exp_name;
0640     dmabuf->owner = exp_info->owner;
0641     spin_lock_init(&dmabuf->name_lock);
0642     init_waitqueue_head(&dmabuf->poll);
0643     dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
0644     dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
0645 
0646     if (!resv) {
0647         resv = (struct dma_resv *)&dmabuf[1];
0648         dma_resv_init(resv);
0649     }
0650     dmabuf->resv = resv;
0651 
0652     file = dma_buf_getfile(dmabuf, exp_info->flags);
0653     if (IS_ERR(file)) {
0654         ret = PTR_ERR(file);
0655         goto err_dmabuf;
0656     }
0657 
0658     dmabuf->file = file;
0659 
0660     mutex_init(&dmabuf->lock);
0661     INIT_LIST_HEAD(&dmabuf->attachments);
0662 
0663     mutex_lock(&db_list.lock);
0664     list_add(&dmabuf->list_node, &db_list.head);
0665     mutex_unlock(&db_list.lock);
0666 
0667     ret = dma_buf_stats_setup(dmabuf);
0668     if (ret)
0669         goto err_sysfs;
0670 
0671     return dmabuf;
0672 
0673 err_sysfs:
0674     /*
0675      * Set file->f_path.dentry->d_fsdata to NULL so that when
0676      * dma_buf_release() gets invoked by dentry_ops, it exits
0677      * early before calling the release() dma_buf op.
0678      */
0679     file->f_path.dentry->d_fsdata = NULL;
0680     fput(file);
0681 err_dmabuf:
0682     kfree(dmabuf);
0683 err_module:
0684     module_put(exp_info->owner);
0685     return ERR_PTR(ret);
0686 }
0687 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
0688 
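/*
 * Exporter-side sketch of the documented flow, assuming a hypothetical
 * "struct my_buffer" carrying the size, a "my_dma_buf_ops" implementing at
 * least map_dma_buf, unmap_dma_buf and release, and THIS_MODULE as owner
 * (filled in by DEFINE_DMA_BUF_EXPORT_INFO())::
 *
 *      int my_export_to_fd(struct my_buffer *buf, int fd_flags)
 *      {
 *              DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *              struct dma_buf *dmabuf;
 *              int fd;
 *
 *              exp_info.ops = &my_dma_buf_ops;
 *              exp_info.size = buf->size;
 *              exp_info.flags = O_RDWR;        // file flags for the anon file
 *              exp_info.priv = buf;
 *
 *              dmabuf = dma_buf_export(&exp_info);
 *              if (IS_ERR(dmabuf))
 *                      return PTR_ERR(dmabuf);
 *
 *              fd = dma_buf_fd(dmabuf, fd_flags);      // e.g. O_CLOEXEC
 *              if (fd < 0)
 *                      dma_buf_put(dmabuf);    // drop the export reference
 *
 *              return fd;
 *      }
 */
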
0689 /**
0690  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
0691  * @dmabuf: [in]    pointer to dma_buf for which fd is required.
0692  * @flags:      [in]    flags to give to fd
0693  *
0694  * On success, returns the installed fd. Otherwise, returns a negative error code.
0695  */
0696 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
0697 {
0698     int fd;
0699 
0700     if (!dmabuf || !dmabuf->file)
0701         return -EINVAL;
0702 
0703     fd = get_unused_fd_flags(flags);
0704     if (fd < 0)
0705         return fd;
0706 
0707     fd_install(fd, dmabuf->file);
0708 
0709     return fd;
0710 }
0711 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
0712 
0713 /**
0714  * dma_buf_get - returns the struct dma_buf related to an fd
0715  * @fd: [in]    fd associated with the struct dma_buf to be returned
0716  *
0717  * On success, returns the struct dma_buf associated with the fd; the
0718  * file's refcount is increased via fget(). Returns an ERR_PTR()
0719  * otherwise.
0720  */
0721 struct dma_buf *dma_buf_get(int fd)
0722 {
0723     struct file *file;
0724 
0725     file = fget(fd);
0726 
0727     if (!file)
0728         return ERR_PTR(-EBADF);
0729 
0730     if (!is_dma_buf_file(file)) {
0731         fput(file);
0732         return ERR_PTR(-EINVAL);
0733     }
0734 
0735     return file->private_data;
0736 }
0737 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
0738 
0739 /**
0740  * dma_buf_put - decreases refcount of the buffer
0741  * @dmabuf: [in]    buffer to reduce refcount of
0742  *
0743  * Uses file's refcounting done implicitly by fput().
0744  *
0745  * If, as a result of this call, the refcount becomes 0, the 'release' file
0746  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
0747  * in turn, and frees the memory allocated for dmabuf when exported.
0748  */
0749 void dma_buf_put(struct dma_buf *dmabuf)
0750 {
0751     if (WARN_ON(!dmabuf || !dmabuf->file))
0752         return;
0753 
0754     fput(dmabuf->file);
0755 }
0756 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
0757 
0758 static void mangle_sg_table(struct sg_table *sg_table)
0759 {
0760 #ifdef CONFIG_DMABUF_DEBUG
0761     int i;
0762     struct scatterlist *sg;
0763 
0764     /* To catch abuse of the underlying struct page by importers mix
0765      * up the bits, but take care to preserve the low SG_ bits to
0766      * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
0767      * before passing the sgt back to the exporter. */
0768     for_each_sgtable_sg(sg_table, sg, i)
0769         sg->page_link ^= ~0xffUL;
0770 #endif
0771 
0772 }
0773 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
0774                        enum dma_data_direction direction)
0775 {
0776     struct sg_table *sg_table;
0777     signed long ret;
0778 
0779     sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
0780     if (IS_ERR_OR_NULL(sg_table))
0781         return sg_table;
0782 
0783     if (!dma_buf_attachment_is_dynamic(attach)) {
0784         ret = dma_resv_wait_timeout(attach->dmabuf->resv,
0785                         DMA_RESV_USAGE_KERNEL, true,
0786                         MAX_SCHEDULE_TIMEOUT);
0787         if (ret < 0) {
0788             attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
0789                                direction);
0790             return ERR_PTR(ret);
0791         }
0792     }
0793 
0794     mangle_sg_table(sg_table);
0795     return sg_table;
0796 }
0797 
0798 /**
0799  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
0800  * @dmabuf:     [in]    buffer to attach device to.
0801  * @dev:        [in]    device to be attached.
0802  * @importer_ops:   [in]    importer operations for the attachment
0803  * @importer_priv:  [in]    importer private pointer for the attachment
0804  *
0805  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
0806  * must be cleaned up by calling dma_buf_detach().
0807  *
0808  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
0809  * functionality.
0810  *
0811  * Returns:
0812  *
0813  * A pointer to newly created &dma_buf_attachment on success, or a negative
0814  * error code wrapped into a pointer on failure.
0815  *
0816  * Note that this can fail if the backing storage of @dmabuf is in a place not
0817  * accessible to @dev, and cannot be moved to a more suitable place. This is
0818  * indicated with the error code -EBUSY.
0819  */
0820 struct dma_buf_attachment *
0821 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
0822                const struct dma_buf_attach_ops *importer_ops,
0823                void *importer_priv)
0824 {
0825     struct dma_buf_attachment *attach;
0826     int ret;
0827 
0828     if (WARN_ON(!dmabuf || !dev))
0829         return ERR_PTR(-EINVAL);
0830 
0831     if (WARN_ON(importer_ops && !importer_ops->move_notify))
0832         return ERR_PTR(-EINVAL);
0833 
0834     attach = kzalloc(sizeof(*attach), GFP_KERNEL);
0835     if (!attach)
0836         return ERR_PTR(-ENOMEM);
0837 
0838     attach->dev = dev;
0839     attach->dmabuf = dmabuf;
0840     if (importer_ops)
0841         attach->peer2peer = importer_ops->allow_peer2peer;
0842     attach->importer_ops = importer_ops;
0843     attach->importer_priv = importer_priv;
0844 
0845     if (dmabuf->ops->attach) {
0846         ret = dmabuf->ops->attach(dmabuf, attach);
0847         if (ret)
0848             goto err_attach;
0849     }
0850     dma_resv_lock(dmabuf->resv, NULL);
0851     list_add(&attach->node, &dmabuf->attachments);
0852     dma_resv_unlock(dmabuf->resv);
0853 
0854     /* When either the importer or the exporter can't handle dynamic
0855      * mappings we cache the mapping here to avoid issues with the
0856      * reservation object lock.
0857      */
0858     if (dma_buf_attachment_is_dynamic(attach) !=
0859         dma_buf_is_dynamic(dmabuf)) {
0860         struct sg_table *sgt;
0861 
0862         if (dma_buf_is_dynamic(attach->dmabuf)) {
0863             dma_resv_lock(attach->dmabuf->resv, NULL);
0864             ret = dmabuf->ops->pin(attach);
0865             if (ret)
0866                 goto err_unlock;
0867         }
0868 
0869         sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
0870         if (!sgt)
0871             sgt = ERR_PTR(-ENOMEM);
0872         if (IS_ERR(sgt)) {
0873             ret = PTR_ERR(sgt);
0874             goto err_unpin;
0875         }
0876         if (dma_buf_is_dynamic(attach->dmabuf))
0877             dma_resv_unlock(attach->dmabuf->resv);
0878         attach->sgt = sgt;
0879         attach->dir = DMA_BIDIRECTIONAL;
0880     }
0881 
0882     return attach;
0883 
0884 err_attach:
0885     kfree(attach);
0886     return ERR_PTR(ret);
0887 
0888 err_unpin:
0889     if (dma_buf_is_dynamic(attach->dmabuf))
0890         dmabuf->ops->unpin(attach);
0891 
0892 err_unlock:
0893     if (dma_buf_is_dynamic(attach->dmabuf))
0894         dma_resv_unlock(attach->dmabuf->resv);
0895 
0896     dma_buf_detach(dmabuf, attach);
0897     return ERR_PTR(ret);
0898 }
0899 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
0900 
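/*
 * Sketch of a dynamic importer attaching with importer ops; the move_notify
 * callback is mandatory here (see the WARN_ON in dma_buf_dynamic_attach()).
 * "struct my_importer" and my_invalidate_mappings() are illustrative only::
 *
 *      static void my_move_notify(struct dma_buf_attachment *attach)
 *      {
 *              // Called with the dma_resv lock held: drop cached mappings,
 *              // the next access has to re-map the attachment.
 *              struct my_importer *imp = attach->importer_priv;
 *
 *              my_invalidate_mappings(imp);
 *      }
 *
 *      static const struct dma_buf_attach_ops my_attach_ops = {
 *              .allow_peer2peer = true,
 *              .move_notify = my_move_notify,
 *      };
 *
 *      attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
 *
 * A dynamic importer must hold the reservation lock around
 * dma_buf_map_attachment() and either pin the buffer with dma_buf_pin() or
 * be prepared to re-map after a move_notify.
 */
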
0901 /**
0902  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
0903  * @dmabuf: [in]    buffer to attach device to.
0904  * @dev:    [in]    device to be attached.
0905  *
0906  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
0907  * mapping.
0908  */
0909 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
0910                       struct device *dev)
0911 {
0912     return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
0913 }
0914 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
0915 
0916 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
0917                 struct sg_table *sg_table,
0918                 enum dma_data_direction direction)
0919 {
0920     /* uses XOR, hence this unmangles */
0921     mangle_sg_table(sg_table);
0922 
0923     attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
0924 }
0925 
0926 /**
0927  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
0928  * @dmabuf: [in]    buffer to detach from.
0929  * @attach: [in]    attachment to be detached; it is freed after this call.
0930  *
0931  * Clean up a device attachment obtained by calling dma_buf_attach().
0932  *
0933  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
0934  */
0935 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
0936 {
0937     if (WARN_ON(!dmabuf || !attach))
0938         return;
0939 
0940     if (attach->sgt) {
0941         if (dma_buf_is_dynamic(attach->dmabuf))
0942             dma_resv_lock(attach->dmabuf->resv, NULL);
0943 
0944         __unmap_dma_buf(attach, attach->sgt, attach->dir);
0945 
0946         if (dma_buf_is_dynamic(attach->dmabuf)) {
0947             dmabuf->ops->unpin(attach);
0948             dma_resv_unlock(attach->dmabuf->resv);
0949         }
0950     }
0951 
0952     dma_resv_lock(dmabuf->resv, NULL);
0953     list_del(&attach->node);
0954     dma_resv_unlock(dmabuf->resv);
0955     if (dmabuf->ops->detach)
0956         dmabuf->ops->detach(dmabuf, attach);
0957 
0958     kfree(attach);
0959 }
0960 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
0961 
0962 /**
0963  * dma_buf_pin - Lock down the DMA-buf
0964  * @attach: [in]    attachment which should be pinned
0965  *
0966  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
0967  * call this, and only for limited use cases like scanout and not for temporary
0968  * pin operations. It is not permitted to allow userspace to pin arbitrary
0969  * amounts of buffers through this interface.
0970  *
0971  * Buffers must be unpinned by calling dma_buf_unpin().
0972  *
0973  * Returns:
0974  * 0 on success, negative error code on failure.
0975  */
0976 int dma_buf_pin(struct dma_buf_attachment *attach)
0977 {
0978     struct dma_buf *dmabuf = attach->dmabuf;
0979     int ret = 0;
0980 
0981     WARN_ON(!dma_buf_attachment_is_dynamic(attach));
0982 
0983     dma_resv_assert_held(dmabuf->resv);
0984 
0985     if (dmabuf->ops->pin)
0986         ret = dmabuf->ops->pin(attach);
0987 
0988     return ret;
0989 }
0990 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
0991 
0992 /**
0993  * dma_buf_unpin - Unpin a DMA-buf
0994  * @attach: [in]    attachment which should be unpinned
0995  *
0996  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
0997  * any mapping of @attach again and inform the importer through
0998  * &dma_buf_attach_ops.move_notify.
0999  */
1000 void dma_buf_unpin(struct dma_buf_attachment *attach)
1001 {
1002     struct dma_buf *dmabuf = attach->dmabuf;
1003 
1004     WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1005 
1006     dma_resv_assert_held(dmabuf->resv);
1007 
1008     if (dmabuf->ops->unpin)
1009         dmabuf->ops->unpin(attach);
1010 }
1011 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1012 
1013 /**
1014  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1015  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1016  * dma_buf_ops.
1017  * @attach: [in]    attachment whose scatterlist is to be returned
1018  * @direction:  [in]    direction of DMA transfer
1019  *
1020  * Returns an sg_table containing the scatterlist for the attachment; returns ERR_PTR
1021  * on error. May return -EINTR if it is interrupted by a signal.
1022  *
1023  * On success, the DMA addresses and lengths in the returned scatterlist are
1024  * PAGE_SIZE aligned.
1025  *
1026  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1027  * the underlying backing storage is pinned for as long as a mapping exists,
1028  * therefore users/importers should not hold onto a mapping for undue amounts of
1029  * time.
1030  *
1031  * Important: Dynamic importers must wait for the exclusive fence of the struct
1032  * dma_resv attached to the DMA-BUF first.
1033  */
1034 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1035                     enum dma_data_direction direction)
1036 {
1037     struct sg_table *sg_table;
1038     int r;
1039 
1040     might_sleep();
1041 
1042     if (WARN_ON(!attach || !attach->dmabuf))
1043         return ERR_PTR(-EINVAL);
1044 
1045     if (dma_buf_attachment_is_dynamic(attach))
1046         dma_resv_assert_held(attach->dmabuf->resv);
1047 
1048     if (attach->sgt) {
1049         /*
1050          * Two mappings with different directions for the same
1051          * attachment are not allowed.
1052          */
1053         if (attach->dir != direction &&
1054             attach->dir != DMA_BIDIRECTIONAL)
1055             return ERR_PTR(-EBUSY);
1056 
1057         return attach->sgt;
1058     }
1059 
1060     if (dma_buf_is_dynamic(attach->dmabuf)) {
1061         dma_resv_assert_held(attach->dmabuf->resv);
1062         if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1063             r = attach->dmabuf->ops->pin(attach);
1064             if (r)
1065                 return ERR_PTR(r);
1066         }
1067     }
1068 
1069     sg_table = __map_dma_buf(attach, direction);
1070     if (!sg_table)
1071         sg_table = ERR_PTR(-ENOMEM);
1072 
1073     if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1074          !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1075         attach->dmabuf->ops->unpin(attach);
1076 
1077     if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1078         attach->sgt = sg_table;
1079         attach->dir = direction;
1080     }
1081 
1082 #ifdef CONFIG_DMA_API_DEBUG
1083     if (!IS_ERR(sg_table)) {
1084         struct scatterlist *sg;
1085         u64 addr;
1086         int len;
1087         int i;
1088 
1089         for_each_sgtable_dma_sg(sg_table, sg, i) {
1090             addr = sg_dma_address(sg);
1091             len = sg_dma_len(sg);
1092             if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1093                 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1094                      __func__, addr, len);
1095             }
1096         }
1097     }
1098 #endif /* CONFIG_DMA_API_DEBUG */
1099     return sg_table;
1100 }
1101 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1102 
1103 /**
1104  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1105  * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1106  * dma_buf_ops.
1107  * @attach: [in]    attachment to unmap buffer from
1108  * @sg_table:   [in]    scatterlist info of the buffer to unmap
1109  * @direction:  [in]    direction of DMA transfer
1110  *
1111  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1112  */
1113 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1114                 struct sg_table *sg_table,
1115                 enum dma_data_direction direction)
1116 {
1117     might_sleep();
1118 
1119     if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1120         return;
1121 
1122     if (dma_buf_attachment_is_dynamic(attach))
1123         dma_resv_assert_held(attach->dmabuf->resv);
1124 
1125     if (attach->sgt == sg_table)
1126         return;
1127 
1128     if (dma_buf_is_dynamic(attach->dmabuf))
1129         dma_resv_assert_held(attach->dmabuf->resv);
1130 
1131     __unmap_dma_buf(attach, sg_table, direction);
1132 
1133     if (dma_buf_is_dynamic(attach->dmabuf) &&
1134         !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1135         dma_buf_unpin(attach);
1136 }
1137 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1138 
1139 /**
1140  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1141  *
1142  * @dmabuf: [in]    buffer which is moving
1143  *
1144  * Informs all attachments that they need to destroy and recreate all their
1145  * mappings.
1146  */
1147 void dma_buf_move_notify(struct dma_buf *dmabuf)
1148 {
1149     struct dma_buf_attachment *attach;
1150 
1151     dma_resv_assert_held(dmabuf->resv);
1152 
1153     list_for_each_entry(attach, &dmabuf->attachments, node)
1154         if (attach->importer_ops)
1155             attach->importer_ops->move_notify(attach);
1156 }
1157 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1158 
1159 /**
1160  * DOC: cpu access
1161  *
1162  * There are multiple reasons for supporting CPU access to a dma buffer object:
1163  *
1164  * - Fallback operations in the kernel, for example when a device is connected
1165  *   over USB and the kernel needs to shuffle the data around first before
1166  *   sending it away. Cache coherency is handled by bracketing any transactions
1167  *   with calls to dma_buf_begin_cpu_access() and
1168  *   dma_buf_end_cpu_access().
1169  *
1170  *   Since most kernel internal dma-buf accesses need the entire buffer, a
1171  *   vmap interface is introduced. Note that on very old 32-bit architectures
1172  *   vmalloc space might be limited and result in vmap calls failing.
1173  *
1174  *   Interfaces::
1175  *
1176  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1177  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1178  *
1179  *   The vmap call can fail if there is no vmap support in the exporter, or if
1180  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1181  *   count for all vmap access and calls down into the exporter's vmap function
1182  *   only when no vmapping exists, and only unmaps it once. Protection against
1183  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1184  *
1185  * - For full compatibility on the importer side with existing userspace
1186  *   interfaces, which might already support mmap'ing buffers. This is needed in
1187  *   many processing pipelines (e.g. feeding a software rendered image into a
1188  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1189  *   framework already supported this, and mmap support was needed for DMA
1190  *   buffer file descriptors to replace ION buffers.
1191  *
1192  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1193  *   fd. But as with kernel CPU access, there's a need to bracket the actual access,
1194  *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1195  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1196  *   be restarted.
1197  *
1198  *   Some systems might need some sort of cache coherency management e.g. when
1199  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1200  *   To circumvent this problem there are begin/end coherency markers, that
1201  *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1202  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1203  *   sequence would be used like the following:
1204  *
1205  *     - mmap dma-buf fd
1206  *     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
1207  *       to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as you
1208  *       want (with the new data being consumed by, say, the GPU or the scanout
1209  *       device)
1210  *     - munmap once you don't need the buffer any more
1211  *
1212  *    For correctness and optimal performance, it is always required to use
1213  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1214  *    mapped address. Userspace cannot rely on coherent access, even when there
1215  *    are systems where it just works without calling these ioctls.
1216  *
1217  * - And as a CPU fallback in userspace processing pipelines.
1218  *
1219  *   Similar to the motivation for kernel cpu access it is again important that
1220  *   the userspace code of a given importing subsystem can use the same
1221  *   interfaces with an imported dma-buf buffer object as with a native buffer
1222  *   object. This is especially important for drm where the userspace part of
1223  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1224  *   use a different way to mmap a buffer would be rather invasive.
1225  *
1226  *   The assumption in the current dma-buf interfaces is that redirecting the
1227  *   initial mmap is all that's needed. A survey of some of the existing
1228  *   subsystems shows that no driver seems to do any nefarious thing like
1229  *   syncing up with outstanding asynchronous processing on the device or
1230  *   allocating special resources at fault time. So hopefully this is good
1231  *   enough, since adding interfaces to intercept pagefaults and allow pte
1232  *   shootdowns would increase the complexity quite a bit.
1233  *
1234  *   Interface::
1235  *
1236  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1237  *             unsigned long);
1238  *
1239  *   If the importing subsystem simply provides a special-purpose mmap call to
1240  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1241  *   equally achieve that for a dma-buf object.
1242  */
1243 
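/*
 * Userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC sequence listed above;
 * "size" and "data" are assumed to be known to the caller::
 *
 *      #include <string.h>
 *      #include <sys/mman.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/dma-buf.h>
 *
 *      void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       dmabuf_fd, 0);
 *
 *      struct dma_buf_sync sync = {
 *              .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
 *      };
 *      ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);    // 1. SYNC_START
 *
 *      memcpy(map, data, size);                        // 2. CPU access
 *
 *      sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *      ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);    // 3. SYNC_END
 *
 *      munmap(map, size);
 *
 * Both ioctls can fail with -EAGAIN or -EINTR and must then be restarted, as
 * noted above.
 */
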
1244 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1245                       enum dma_data_direction direction)
1246 {
1247     bool write = (direction == DMA_BIDIRECTIONAL ||
1248               direction == DMA_TO_DEVICE);
1249     struct dma_resv *resv = dmabuf->resv;
1250     long ret;
1251 
1252     /* Wait on any implicit rendering fences */
1253     ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1254                     true, MAX_SCHEDULE_TIMEOUT);
1255     if (ret < 0)
1256         return ret;
1257 
1258     return 0;
1259 }
1260 
1261 /**
1262  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1263  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1264  * preparations. Coherency is only guaranteed for the specified access
1265  * direction.
1266  * @dmabuf: [in]    buffer to prepare cpu access for.
1267  * @direction:  [in]    direction of the cpu access.
1268  *
1269  * After the cpu access is complete the caller should call
1270  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1271  * it guaranteed to be coherent with other DMA access.
1272  *
1273  * This function will also wait for any DMA transactions tracked through
1274  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1275  * synchronization this function will only ensure cache coherency, callers must
1276  * ensure synchronization with such DMA transactions on their own.
1277  *
1278  * Can return negative error values, returns 0 on success.
1279  */
1280 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1281                  enum dma_data_direction direction)
1282 {
1283     int ret = 0;
1284 
1285     if (WARN_ON(!dmabuf))
1286         return -EINVAL;
1287 
1288     might_lock(&dmabuf->resv->lock.base);
1289 
1290     if (dmabuf->ops->begin_cpu_access)
1291         ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1292 
1293     /* Ensure that all fences are waited upon - but we first allow
1294      * the native handler the chance to do so more efficiently if it
1295      * chooses. A double invocation here will be reasonably cheap no-op.
1296      */
1297     if (ret == 0)
1298         ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1299 
1300     return ret;
1301 }
1302 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1303 
1304 /**
1305  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1306  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1307  * actions. Coherency is only guaranteed for the specified access
1308  * direction.
1309  * @dmabuf: [in]    buffer to complete cpu access for.
1310  * @direction:  [in]    direction of the cpu access.
1311  *
1312  * This terminates CPU access started with dma_buf_begin_cpu_access().
1313  *
1314  * Can return negative error values, returns 0 on success.
1315  */
1316 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1317                enum dma_data_direction direction)
1318 {
1319     int ret = 0;
1320 
1321     WARN_ON(!dmabuf);
1322 
1323     might_lock(&dmabuf->resv->lock.base);
1324 
1325     if (dmabuf->ops->end_cpu_access)
1326         ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1327 
1328     return ret;
1329 }
1330 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1331 
1332 
1333 /**
1334  * dma_buf_mmap - Set up a userspace mmap with the given vma
1335  * @dmabuf: [in]    buffer that should back the vma
1336  * @vma:    [in]    vma for the mmap
1337  * @pgoff:  [in]    offset in pages where this mmap should start within the
1338  *          dma-buf buffer.
1339  *
1340  * This function adjusts the passed in vma so that it points at the file of the
1341  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1342  * checking on the size of the vma. Then it calls the exporter's mmap function to
1343  * set up the mapping.
1344  *
1345  * Can return negative error values, returns 0 on success.
1346  */
1347 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1348          unsigned long pgoff)
1349 {
1350     if (WARN_ON(!dmabuf || !vma))
1351         return -EINVAL;
1352 
1353     /* check if buffer supports mmap */
1354     if (!dmabuf->ops->mmap)
1355         return -EINVAL;
1356 
1357     /* check for offset overflow */
1358     if (pgoff + vma_pages(vma) < pgoff)
1359         return -EOVERFLOW;
1360 
1361     /* check for overflowing the buffer's size */
1362     if (pgoff + vma_pages(vma) >
1363         dmabuf->size >> PAGE_SHIFT)
1364         return -EINVAL;
1365 
1366     /* readjust the vma */
1367     vma_set_file(vma, dmabuf->file);
1368     vma->vm_pgoff = pgoff;
1369 
1370     return dmabuf->ops->mmap(dmabuf, vma);
1371 }
1372 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
1373 
1374 /**
1375  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1376  * address space. Same restrictions as for vmap and friends apply.
1377  * @dmabuf: [in]    buffer to vmap
1378  * @map:    [out]   returns the vmap pointer
1379  *
1380  * This call may fail due to lack of virtual mapping address space.
1381  * These calls are optional in drivers. The intended use for them
1382  * is to map frequently used objects linearly into kernel address space.
1383  *
1384  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1385  * dma_buf_end_cpu_access() around any cpu access performed through this
1386  * mapping.
1387  *
1388  * Returns 0 on success, or a negative errno code otherwise.
1389  */
1390 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1391 {
1392     struct iosys_map ptr;
1393     int ret = 0;
1394 
1395     iosys_map_clear(map);
1396 
1397     if (WARN_ON(!dmabuf))
1398         return -EINVAL;
1399 
1400     if (!dmabuf->ops->vmap)
1401         return -EINVAL;
1402 
1403     mutex_lock(&dmabuf->lock);
1404     if (dmabuf->vmapping_counter) {
1405         dmabuf->vmapping_counter++;
1406         BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1407         *map = dmabuf->vmap_ptr;
1408         goto out_unlock;
1409     }
1410 
1411     BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1412 
1413     ret = dmabuf->ops->vmap(dmabuf, &ptr);
1414     if (WARN_ON_ONCE(ret))
1415         goto out_unlock;
1416 
1417     dmabuf->vmap_ptr = ptr;
1418     dmabuf->vmapping_counter = 1;
1419 
1420     *map = dmabuf->vmap_ptr;
1421 
1422 out_unlock:
1423     mutex_unlock(&dmabuf->lock);
1424     return ret;
1425 }
1426 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1427 
1428 /**
1429  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1430  * @dmabuf: [in]    buffer to vunmap
1431  * @map:    [in]    vmap pointer to vunmap
1432  */
1433 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1434 {
1435     if (WARN_ON(!dmabuf))
1436         return;
1437 
1438     BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1439     BUG_ON(dmabuf->vmapping_counter == 0);
1440     BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1441 
1442     mutex_lock(&dmabuf->lock);
1443     if (--dmabuf->vmapping_counter == 0) {
1444         if (dmabuf->ops->vunmap)
1445             dmabuf->ops->vunmap(dmabuf, map);
1446         iosys_map_clear(&dmabuf->vmap_ptr);
1447     }
1448     mutex_unlock(&dmabuf->lock);
1449 }
1450 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1451 
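/*
 * Kernel-side sketch bracketing a vmap'ed CPU read with the begin/end CPU
 * access calls, as required by the kernel-doc above (illustrative, not tied
 * to a specific driver)::
 *
 *      struct iosys_map map;
 *      int ret;
 *
 *      ret = dma_buf_vmap(dmabuf, &map);
 *      if (ret)
 *              return ret;
 *
 *      ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *      if (ret) {
 *              dma_buf_vunmap(dmabuf, &map);
 *              return ret;
 *      }
 *
 *      // CPU reads go through map.vaddr (or map.vaddr_iomem for I/O memory).
 *
 *      dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *      dma_buf_vunmap(dmabuf, &map);
 */
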
1452 #ifdef CONFIG_DEBUG_FS
1453 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1454 {
1455     struct dma_buf *buf_obj;
1456     struct dma_buf_attachment *attach_obj;
1457     int count = 0, attach_count;
1458     size_t size = 0;
1459     int ret;
1460 
1461     ret = mutex_lock_interruptible(&db_list.lock);
1462 
1463     if (ret)
1464         return ret;
1465 
1466     seq_puts(s, "\nDma-buf Objects:\n");
1467     seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1468            "size", "flags", "mode", "count", "ino");
1469 
1470     list_for_each_entry(buf_obj, &db_list.head, list_node) {
1471 
1472         ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1473         if (ret)
1474             goto error_unlock;
1475 
1476 
1477         spin_lock(&buf_obj->name_lock);
1478         seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1479                 buf_obj->size,
1480                 buf_obj->file->f_flags, buf_obj->file->f_mode,
1481                 file_count(buf_obj->file),
1482                 buf_obj->exp_name,
1483                 file_inode(buf_obj->file)->i_ino,
1484                 buf_obj->name ?: "<none>");
1485         spin_unlock(&buf_obj->name_lock);
1486 
1487         dma_resv_describe(buf_obj->resv, s);
1488 
1489         seq_puts(s, "\tAttached Devices:\n");
1490         attach_count = 0;
1491 
1492         list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1493             seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1494             attach_count++;
1495         }
1496         dma_resv_unlock(buf_obj->resv);
1497 
1498         seq_printf(s, "Total %d devices attached\n\n",
1499                 attach_count);
1500 
1501         count++;
1502         size += buf_obj->size;
1503     }
1504 
1505     seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1506 
1507     mutex_unlock(&db_list.lock);
1508     return 0;
1509 
1510 error_unlock:
1511     mutex_unlock(&db_list.lock);
1512     return ret;
1513 }
1514 
1515 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1516 
1517 static struct dentry *dma_buf_debugfs_dir;
1518 
1519 static int dma_buf_init_debugfs(void)
1520 {
1521     struct dentry *d;
1522     int err = 0;
1523 
1524     d = debugfs_create_dir("dma_buf", NULL);
1525     if (IS_ERR(d))
1526         return PTR_ERR(d);
1527 
1528     dma_buf_debugfs_dir = d;
1529 
1530     d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1531                 NULL, &dma_buf_debug_fops);
1532     if (IS_ERR(d)) {
1533         pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1534         debugfs_remove_recursive(dma_buf_debugfs_dir);
1535         dma_buf_debugfs_dir = NULL;
1536         err = PTR_ERR(d);
1537     }
1538 
1539     return err;
1540 }
1541 
1542 static void dma_buf_uninit_debugfs(void)
1543 {
1544     debugfs_remove_recursive(dma_buf_debugfs_dir);
1545 }
1546 #else
1547 static inline int dma_buf_init_debugfs(void)
1548 {
1549     return 0;
1550 }
1551 static inline void dma_buf_uninit_debugfs(void)
1552 {
1553 }
1554 #endif
1555 
1556 static int __init dma_buf_init(void)
1557 {
1558     int ret;
1559 
1560     ret = dma_buf_init_sysfs_statistics();
1561     if (ret)
1562         return ret;
1563 
1564     dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1565     if (IS_ERR(dma_buf_mnt))
1566         return PTR_ERR(dma_buf_mnt);
1567 
1568     mutex_init(&db_list.lock);
1569     INIT_LIST_HEAD(&db_list.head);
1570     dma_buf_init_debugfs();
1571     return 0;
1572 }
1573 subsys_initcall(dma_buf_init);
1574 
1575 static void __exit dma_buf_deinit(void)
1576 {
1577     dma_buf_uninit_debugfs();
1578     kern_unmount(dma_buf_mnt);
1579     dma_buf_uninit_sysfs_statistics();
1580 }
1581 __exitcall(dma_buf_deinit);