#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of cache
 * coherency and address translation, the details of the backing memory
 * are left to the driver. GEM itself tracks buffer objects, their
 * per-file handles and global (flink) names, and provides helpers for
 * fake mmap offsets, shmem backing storage and reservation locking.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
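
/*
 * Example (illustrative sketch, not part of this file): a driver would
 * typically embed &struct drm_gem_object and initialize it as below. The
 * foo_* names and &foo_gem_funcs are hypothetical.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		bo->base.funcs = &foo_gem_funcs;
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */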

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before
	 * we checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
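
/*
 * Example (sketch): drivers that use these helpers can typically plug the
 * function above straight into their &drm_driver; the foo_* names are
 * hypothetical.
 *
 *	static const struct drm_driver foo_driver = {
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		...
 *	};
 */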

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for
 * drivers which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either an flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is
 * done when userspace closes @file_priv for all attached handles, or through
 * the GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the
 * object, which includes a regular reference count. Callers will likely want
 * to dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point, drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	/* drm_gem_handle_create_tail() unlocks dev->object_name_lock. */
	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
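
/*
 * Example (sketch): a hypothetical buffer-create ioctl. The handle keeps
 * its own reference on the object, so the function drops the creation
 * reference before returning; foo_* and struct drm_foo_create are
 * illustrative names only.
 *
 *	static int foo_bo_create_ioctl(struct drm_device *dev, void *data,
 *				       struct drm_file *file)
 *	{
 *		struct drm_foo_create *args = data;
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file, &bo->base, &args->handle);
 *		drm_gem_object_put(&bo->base);
 *		return ret;
 *	}
 */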

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when
 * freeing the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. However, no cleanup is done to remove it again.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to the appropriate lru and release the pagevec, decrementing
 * the ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see
 * mapping_set_gfp_mask()). If you require other GFP-masks, you have to
 * set those yourself before calling this function.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not on those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
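
/*
 * Example (sketch): pinning the backing pages around device access and
 * releasing them afterwards, marking them dirty when the device wrote to
 * the buffer. foo_device_dma() is a hypothetical driver helper.
 *
 *	struct page **pages;
 *	int ret;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	ret = foo_device_dma(obj, pages, obj->size >> PAGE_SHIFT);
 *
 *	drm_gem_put_pages(obj, pages, true, false);
 */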
0632
0633 static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
0634 struct drm_gem_object **objs)
0635 {
0636 int i, ret = 0;
0637 struct drm_gem_object *obj;
0638
0639 spin_lock(&filp->table_lock);
0640
0641 for (i = 0; i < count; i++) {
0642
0643 obj = idr_find(&filp->object_idr, handle[i]);
0644 if (!obj) {
0645 ret = -ENOENT;
0646 break;
0647 }
0648 drm_gem_object_get(obj);
0649 objs[i] = obj;
0650 }
0651 spin_unlock(&filp->table_lock);
0652
0653 return ret;
0654 }

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private date
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
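
/*
 * Example (sketch, error handling simplified): an execbuf-style ioctl
 * resolving a user-supplied handle array; args->bo_handles and
 * args->bo_count are hypothetical ioctl fields.
 *
 *	struct drm_gem_object **objs;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *
 *	(use the objects, then drop the references)
 *
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */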

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private date
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp,
 * NULL otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private date
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, or another negative errno
 * on failure.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
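
/*
 * Example (sketch): a hypothetical bo-wait ioctl built on the helper
 * above; struct drm_foo_wait and its timeout_ns field are illustrative.
 *
 *	static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
 *				     struct drm_file *file)
 *	{
 *		struct drm_foo_wait *args = data;
 *		unsigned long timeout =
 *			drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file, args->handle, true,
 *					     timeout);
 *	}
 */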

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail() unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object by calling &drm_gem_object_funcs.free.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either provide a
 * fault handler in their vm_ops (in which case any accesses to the object
 * will be trapped, to perform migration, GTT binding, surface register
 * allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the object
	 * doesn't get freed while the mapping is still alive. The
	 * drm_gem_object_put() in drm_gem_vm_close() releases it again
	 * when the mapping goes away.
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will
 * fail with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a
		 * 0-refcnt that matches our range, we know it is in the
		 * process of being destroyed and will be freed as soon as
		 * we release the lock - so we have to check for the 0-refcnt
		 * and, if so, pretend we didn't find it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
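
/*
 * Example (sketch): GEM-based drivers normally reach drm_gem_mmap()
 * through their &file_operations.mmap hook, typically via the
 * DEFINE_DRM_GEM_FOPS() helper; foo_fops and foo_driver are hypothetical.
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.fops = &foo_fops,
 *		...
 *	};
 */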
1124
1125 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1126 const struct drm_gem_object *obj)
1127 {
1128 drm_printf_indent(p, indent, "name=%d\n", obj->name);
1129 drm_printf_indent(p, indent, "refcount=%u\n",
1130 kref_read(&obj->refcount));
1131 drm_printf_indent(p, indent, "start=%08lx\n",
1132 drm_vma_node_start(&obj->vma_node));
1133 drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1134 drm_printf_indent(p, indent, "imported=%s\n",
1135 str_yes_no(obj->import_attach));
1136
1137 if (obj->funcs->print_info)
1138 obj->funcs->print_info(p, indent, obj);
1139 }

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
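
/*
 * Example (sketch): taking all reservation locks for a hypothetical job,
 * installing a fence on each object, then unlocking; job->bos and
 * job->done_fence are illustrative names.
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
 *		if (ret)
 *			break;
 *		dma_resv_add_fence(job->bos[i]->resv, job->done_fence,
 *				   DMA_RESV_USAGE_WRITE);
 *	}
 *
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 */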