// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Refer to Documentation/driver-api/dma-buf.rst for more details.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll() /
	 *   dma_buf_poll_cb() or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence
	 *   callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl.
 */
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}
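
/*
 * Illustrative sketch (not part of the original file): a userspace program
 * can wait for the implicit fences on a dma-buf fd with plain poll(), per the
 * "implicit fence polling" section above. POLLIN waits for pending writers;
 * POLLOUT waits for all access to finish. "dmabuf_fd" is assumed to be a
 * valid dma-buf file descriptor obtained elsewhere.
 *
 *	#include <poll.h>
 *
 *	int wait_for_readable(int dmabuf_fd)
 *	{
 *		struct pollfd pfd = {
 *			.fd = dmabuf_fd,
 *			.events = POLLIN,	// wait for outstanding writes
 *		};
 *
 *		// Blocks until the most recent write fence has signaled.
 *		return poll(&pfd, 1, -1);
 *	}
 */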

/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * It could support changing the name of the dma-buf if the same
 * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success, or a negative errno if copying the name from
 * userspace fails.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence;
	enum dma_resv_usage usage;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	dma_resv_lock(dmabuf->resv, NULL);

	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
	if (!ret)
		dma_resv_add_fence(dmabuf->resv, fence, usage);

	dma_resv_unlock(dmabuf->resv);

	dma_fence_put(fence);

	return ret;
}
#endif

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}
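
/*
 * Illustrative sketch (not part of the original file): userspace brackets CPU
 * access to an mmap()ed dma-buf with DMA_BUF_IOCTL_SYNC, which ends up in the
 * begin/end_cpu_access paths handled above. "dmabuf_fd" and "mapped" are
 * assumed to come from an earlier open and mmap of the buffer.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	int cpu_read_buffer(int dmabuf_fd, const void *mapped, size_t size)
 *	{
 *		struct dma_buf_sync sync = {
 *			.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
 *		};
 *
 *		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
 *			return -1;
 *
 *		// ... read the buffer contents through "mapped" here ...
 *
 *		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *		return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	}
 */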

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release = dma_buf_file_release,
	.mmap = dma_buf_mmap_internal,
	.llseek = dma_buf_llseek,
	.poll = dma_buf_poll,
	.unlocked_ioctl = dma_buf_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.show_fdinfo = dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique thus
	 * not suitable for using it as dentry name by dmabuf stats.
	 * Override ->i_ino with the unique and dmabuffs specific
	 * one.
	 */
	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of
 * operations is fairly simple:
 *
 * 1. The exporter defines his exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a
 *    private buffer object into a &dma_buf. It then exports that &dma_buf to
 *    userspace as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file-descriptor to all drivers it wants this
 *    buffer to share with: First the file descriptor is converted to a
 *    &dma_buf using dma_buf_get(). Then the buffer is attached to the device
 *    using dma_buf_attach().
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops, or error in allocating struct dma_buf, will return negative
 * error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	ret = dma_buf_stats_setup(dmabuf);
	if (ret)
		goto err_sysfs;

	return dmabuf;

err_sysfs:
	/*
	 * Set file->f_path.dentry->d_fsdata to NULL so that when
	 * dma_buf_release() gets invoked by dentry_ops, it exits
	 * early before calling the release() dma_buf op.
	 */
	file->f_path.dentry->d_fsdata = NULL;
	fput(file);
err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
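
/*
 * Illustrative sketch (not part of the original file): a minimal exporter
 * fills in a &dma_buf_export_info and wraps its private buffer object.
 * "my_ops" and "my_buffer" below are hypothetical names; my_ops must at
 * least implement map_dma_buf, unmap_dma_buf and release, per the WARN_ON
 * checks in dma_buf_export() above.
 *
 *	static int my_export(struct my_buffer *my_buffer)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *
 *		exp_info.ops = &my_ops;
 *		exp_info.size = my_buffer->size;
 *		exp_info.flags = O_RDWR | O_CLOEXEC;
 *		exp_info.priv = my_buffer;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		// Hand the buffer to userspace as a file descriptor.
 *		return dma_buf_fd(dmabuf, O_CLOEXEC);
 *	}
 */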

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
				      enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (IS_ERR_OR_NULL(sg_table))
		return sg_table;

	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
		}
	}

	mangle_sg_table(sg_table);
	return sg_table;
}

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/*
	 * When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
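
/*
 * Illustrative sketch (not part of the original file): a dynamic importer
 * supplies move_notify via a &dma_buf_attach_ops when calling
 * dma_buf_dynamic_attach(). The "my_*" names below are hypothetical.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// The exporter is about to move the backing storage:
 *		// invalidate any cached mapping and re-map on next use.
 *	}
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
 *					my_importer_private_data);
 */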

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a
 * static mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence we can use the same unmangle function */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dmabuf->ops->unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach())
 * may call this, and only for limited use cases like scanout and not for
 * temporary pin operations. It is not permitted to allow userspace to pin
 * arbitrary amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to
 * move any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts
 * of time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
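
/*
 * Illustrative sketch (not part of the original file): the static-attachment
 * importer lifecycle, matching the "dma buf device access" sequence above.
 * "fd" is a dma-buf file descriptor received from userspace and "dev" the
 * importing device; error handling is abbreviated.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);	// static mapping, no resv lock
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device with the DMA addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */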

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced: dma_buf_vmap() and dma_buf_vunmap(). Note
 *   that on very old 32-bit architectures vmalloc space might be limited and
 *   result in vmap calls failing.
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a
 *   reference count for all vmap access and calls down into the exporter's
 *   vmap function only when no vmapping exists, and only unmaps it once.
 *   Protection against concurrent vmap/vunmap calls is provided by taking the
 *   &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed
 *   in many processing pipelines (e.g. feeding a software rendered image into
 *   a hardware pipeline, thumbnail creation, snapshots, ...).
 *
 *   There is no special interface, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the DMA_BUF_IOCTL_SYNC ioctl. Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 */
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed in the
 * specified range for the specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls
 * is it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/*
	 * Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
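
/*
 * Illustrative sketch (not part of the original file): kernel-side CPU access
 * must be bracketed by the two calls above so caches are flushed and implicit
 * fences are waited upon; "dmabuf" is assumed to be a valid &dma_buf.
 *
 *	int cpu_touch_buffer(struct dma_buf *dmabuf)
 *	{
 *		int ret;
 *
 *		ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		if (ret)
 *			return ret;
 *
 *		// ... CPU reads through a vmap or mmap of the buffer ...
 *
 *		return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	}
 */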

/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporters mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret = 0;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	struct iosys_map containing mapping obtained by vmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
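
/*
 * Illustrative sketch (not part of the original file): mapping a whole buffer
 * into kernel address space with the vmap interface above. The mapping is
 * refcounted, so nested dma_buf_vmap() calls are cheap; "dmabuf" is assumed
 * to be a valid &dma_buf whose exporter implements &dma_buf_ops.vmap.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	// map.vaddr (or map.vaddr_iomem, see map.is_iomem) now points at
 *	// the buffer; bracket the actual access with
 *	// dma_buf_begin_cpu_access()/dma_buf_end_cpu_access().
 *
 *	dma_buf_vunmap(dmabuf, &map);
 */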

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);