/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
 * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interfaces are provided through the
 * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ----------------------------------
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export and stores the
 * exporting GEM object in the &dma_buf.priv field. This reference needs to be
 * released when the final reference to the &dma_buf itself is dropped and its
 * &dma_buf_ops.release function is called. For GEM-based drivers, the
 * &dma_buf should be exported using drm_gem_dmabuf_export() and then released
 * by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * are the lookup caches for import and export. These are required to guarantee
 * that any given object will always have only one unique userspace handle.
 * This is required to allow userspace to detect duplicated imports, since some
 * GEM drivers do fail command submissions if a given buffer object is listed
 * more than once. These import and export caches in &drm_prime_file_private
 * only retain a weak reference, which is cleaned up when the corresponding
 * object is released.
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink then
 * it will get a small disadvantage in that it will not be able to detect
 * duplicated imports, since two dma-buf file descriptors are different even if
 * they represent the same underlying object.
 */
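
/*
 * Example (illustrative sketch only, using a hypothetical "foo" driver): a
 * GEM based driver typically wires up PRIME by pointing the two ioctl hooks
 * at the helpers in this file and relying on the default import path:
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM,
 *		// Mandatory PRIME entry points for GEM drivers:
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		// Optional; drm_gem_prime_import() is the default:
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_mmap = drm_gem_prime_mmap,
 *		...
 *	};
 */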

/*
 * Per-file cache entry pairing an imported/exported dma-buf with its GEM
 * handle. Each member is indexed twice: by dma-buf pointer in
 * &drm_prime_file_private.dmabufs and by handle in
 * &drm_prime_file_private.handles.
 */
struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	/* insert into the tree indexed by dma-buf pointer */
	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	/* insert into the tree indexed by handle */
	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
				 uint32_t handle)
{
	struct rb_node *rb;

	mutex_lock(&prime_fpriv->lock);

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(member->dma_buf);
			kfree(member);
			break;
		} else if (member->handle < handle) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	mutex_unlock(&prime_fpriv->lock);
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
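
/*
 * Example (illustrative sketch, hypothetical "foo" driver): a driver that
 * needs a custom &drm_gem_object_funcs.export hook, e.g. to supply its own
 * &dma_buf_ops, would still build a &dma_buf_export_info and go through
 * drm_gem_dmabuf_export() so the device and object references are managed
 * consistently:
 *
 *	static struct dma_buf *foo_gem_export(struct drm_gem_object *obj,
 *					      int flags)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &foo_dmabuf_ops;	// hypothetical custom ops
 *		exp_info.size = obj->size;
 *		exp_info.flags = flags;
 *		exp_info.priv = obj;
 *		exp_info.resv = obj->resv;
 *
 *		return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *	}
 */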

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual importing of a GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/*
	 * hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
					       args->fd, &args->handle);
}
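
/*
 * Example (illustrative sketch): from userspace, the ioctl wrapped above is
 * reached through DRM_IOCTL_PRIME_FD_TO_HANDLE. A client that received a
 * dma-buf fd over a UNIX domain socket could import it like this (error
 * handling elided):
 *
 *	struct drm_prime_handle args = {
 *		.fd = received_fd,	// dma-buf fd, e.g. from SCM_RIGHTS
 *		.flags = 0,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	// args.handle now names the imported buffer object
 */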

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
					       args->handle, args->flags,
					       &args->fd);
}
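
/*
 * Example (illustrative sketch): the export direction from userspace uses
 * DRM_IOCTL_PRIME_HANDLE_TO_FD; only DRM_CLOEXEC and DRM_RDWR are accepted
 * in flags, as checked above:
 *
 *	struct drm_prime_handle args = {
 *		.handle = bo_handle,	// an existing GEM handle
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd can now be passed to another process or device
 */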

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to
 * pin it indefinitely.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage. Which is ok for scanout, but is not the best
 * option for sharing lots of buffers for rendering.
 */

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!obj->funcs->get_sg_table))
		return ERR_PTR(-ENOSYS);

	sgt = obj->funcs->get_sg_table(obj);
	if (IS_ERR(sgt))
		return sgt;

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @map: the virtual address of the buffer
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 * The kernel virtual address is returned in map.
 *
 * Returns 0 on success or a negative errno code otherwise.
 */
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_vmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @map: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
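
/*
 * Example (illustrative sketch): an importer that needs CPU access to a
 * buffer exported through these ops uses the core dma-buf vmap API, which
 * ends up in drm_gem_dmabuf_vmap()/drm_gem_dmabuf_vunmap() above:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *	// access the buffer through map.vaddr (or map.vaddr_iomem)
 *	dma_buf_vunmap(dmabuf, &map);
 */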

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		drm_gem_object_get(obj);
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		vma->vm_private_data = obj;
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to look up the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);
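
/*
 * Example (illustrative sketch, hypothetical "foo" driver): drivers opt into
 * this mmap path via the &drm_driver.gem_prime_mmap hook, which
 * drm_gem_dmabuf_mmap() below forwards to:
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		...
 *		.gem_prime_mmap = drm_gem_prime_mmap,
 *		...
 *	};
 */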

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages;
 * the driver is responsible for mapping the pages into the
 * importer's address space for use with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	size_t max_segment = 0;
	int err;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0)
		max_segment = UINT_MAX;
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
		kfree(sg);
		sg = ERR_PTR(err);
	}
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
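
/*
 * Example (illustrative sketch, hypothetical "foo" driver): a page-array
 * backed driver can implement &drm_gem_object_funcs.get_sg_table, which
 * drm_gem_map_dma_buf() relies on, directly in terms of this helper:
 *
 *	static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem(obj);	// hypothetical
 *
 *		return drm_prime_pages_to_sg(obj->dev, bo->pages,
 *					     obj->size >> PAGE_SHIFT);
 *	}
 */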

/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);
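
/*
 * Example (illustrative sketch, hypothetical "foo" driver): a driver whose
 * hardware needs contiguous buffers can use this helper in its
 * &drm_driver.gem_prime_import_sg_table hook to reject unsuitable imports:
 *
 *	static struct drm_gem_object *
 *	foo_gem_import_sg_table(struct drm_device *dev,
 *				struct dma_buf_attachment *attach,
 *				struct sg_table *sgt)
 *	{
 *		if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 *			return ERR_PTR(-EINVAL);
 *		...
 *	}
 */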

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export callback for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME,
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
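
/*
 * Example (illustrative sketch, hypothetical "foo" driver): a virtual DRM
 * device that does DMA through its parent device can attach with that device
 * instead of &drm_device.dev:
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct foo_device *foo = to_foo_device(dev);	// hypothetical
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *	}
 */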

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_array - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of pages.
 *
 * This function is deprecated and strongly discouraged to be used.
 * The page array is only useful for page faults and those can corrupt fields
 * in the struct page if they are not handled by the exporting driver.
 */
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
					    struct page **pages,
					    int max_entries)
{
	struct sg_page_iter page_iter;
	struct page **p = pages;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		if (WARN_ON(p - pages >= max_entries))
			return -1;
		*p++ = sg_page_iter_page(&page_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);

/**
 * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
 * @sgt: scatter-gather table to convert
 * @addrs: array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of addresses.
 *
 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
				   int max_entries)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t *a = addrs;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		if (WARN_ON(a - addrs >= max_entries))
			return -1;
		*a++ = sg_page_iter_dma_address(&dma_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
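
/*
 * Example (illustrative sketch, hypothetical "foo" driver): filling a
 * per-object DMA address array from an imported sg table inside a
 * gem_prime_import_sg_table hook (bo and its fields are hypothetical):
 *
 *	unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *
 *	bo->dma_addrs = kmalloc_array(npages, sizeof(*bo->dma_addrs),
 *				      GFP_KERNEL);
 *	if (!bo->dma_addrs)
 *		return ERR_PTR(-ENOMEM);
 *	if (drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs, npages))
 *		return ERR_PTR(-EINVAL);
 */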

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
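
/*
 * Example (illustrative sketch, hypothetical "foo" driver): a driver using
 * the import helpers above calls this from its &drm_gem_object_funcs.free
 * hook for imported objects:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem(obj);	// hypothetical
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		else
 *			foo_free_backing_pages(bo);	// hypothetical
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */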