0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186 #include <linux/anon_inodes.h>
0187 #include <linux/dma-fence-unwrap.h>
0188 #include <linux/file.h>
0189 #include <linux/fs.h>
0190 #include <linux/sched/signal.h>
0191 #include <linux/sync_file.h>
0192 #include <linux/uaccess.h>
0193
0194 #include <drm/drm.h>
0195 #include <drm/drm_drv.h>
0196 #include <drm/drm_file.h>
0197 #include <drm/drm_gem.h>
0198 #include <drm/drm_print.h>
0199 #include <drm/drm_syncobj.h>
0200 #include <drm/drm_utils.h>
0201
0202 #include "drm_internal.h"
0203
/* Book-keeping for a single waiter on a syncobj's fence. */
struct syncobj_wait_entry {
	struct list_head node;		/* link in drm_syncobj.cb_list while pending */
	struct task_struct *task;	/* task to wake once a fence materializes */
	struct dma_fence *fence;	/* resolved fence; NULL until available */
	struct dma_fence_cb fence_cb;	/* callback installed on the fence itself */
	u64    point;			/* timeline point being waited for */
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);
0214
0215
0216
0217
0218
0219
0220
0221
0222
/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by @handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);
0240
/* Try to resolve @wait against the syncobj's current fence; if the wanted
 * point has not been submitted yet, queue the entry on syncobj->cb_list so
 * it is retried whenever the syncobj gains a new fence.
 */
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* Already resolved on an earlier attempt. */
	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* Note that dma_fence_chain_find_seqno() may set @fence to NULL on
	 * success when the requested point is already signaled, hence the
	 * second !fence test below.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* No fence yet, or the point was never submitted: wait. */
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/* Point already signaled: hand back a signaled stub. */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}
0265
0266 static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
0267 struct syncobj_wait_entry *wait)
0268 {
0269 if (!wait->node.next)
0270 return;
0271
0272 spin_lock(&syncobj->lock);
0273 list_del_init(&wait->node);
0274 spin_unlock(&syncobj->lock);
0275 }
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Points on a timeline are expected to be monotonically increasing. */
	if (prev && prev->seqno >= point)
		DRM_DEBUG("You are adding an unorder point to timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	/* Retry every queued waiter against the new chain head. */
	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection of old fences. */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
0314
0315
0316
0317
0318
0319
0320
0321
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync file, may be NULL to reset the syncobj
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		/* Give queued waiters a chance to resolve against the new fence. */
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	/* Drop the syncobj's reference on the previous fence (NULL-safe). */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
0347
0348
0349
0350
0351
0352
0353
/* Mark @syncobj as signaled by installing an already-signaled stub fence.
 * Returns 0 on success or the error from the stub allocation.
 */
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *stub;

	stub = dma_fence_allocate_private_stub();
	if (IS_ERR(stub))
		return PTR_ERR(stub);

	drm_syncobj_replace_fence(syncobj, stub);
	/* The syncobj holds its own reference now; drop ours. */
	dma_fence_put(stub);

	return 0;
}
0365
0366
0367 #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().  With WAIT_FOR_SUBMIT set it can sleep up to
 * DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT waiting for the fence to appear.
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	/* Waiting for userspace with locks held is illegal cause that can
	 * deadlock the kernel in a lot of situations, so only allow the
	 * sleeping WAIT_FOR_SUBMIT path while not holding any locks.
	 */
	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		might_sleep();
		lockdep_assert_none_held_once();
	}

	*fence = drm_syncobj_fence_get(syncobj);

	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret) {
			/* If the requested seqno is already signaled
			 * dma_fence_chain_find_seqno() returns a NULL
			 * fence; report a signaled stub instead.
			 */
			if (!*fence)
				*fence = dma_fence_get_stub();

			goto out;
		}
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		goto out;

	/* Queue ourselves on the syncobj and sleep until a fence shows up. */
	memset(&wait, 0, sizeof(wait));
	wait.task = current;
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	/* A non-NULL .next means we are still on the cb_list; unlink. */
	if (wait.node.next)
		drm_syncobj_remove_wait(syncobj, &wait);

out:
	drm_syncobj_put(syncobj);

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
0463
0464
0465
0466
0467
0468
0469
0470 void drm_syncobj_free(struct kref *kref)
0471 {
0472 struct drm_syncobj *syncobj = container_of(kref,
0473 struct drm_syncobj,
0474 refcount);
0475 drm_syncobj_replace_fence(syncobj, NULL);
0476 kfree(syncobj);
0477 }
0478 EXPORT_SYMBOL(drm_syncobj_free);
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
0493 struct dma_fence *fence)
0494 {
0495 int ret;
0496 struct drm_syncobj *syncobj;
0497
0498 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
0499 if (!syncobj)
0500 return -ENOMEM;
0501
0502 kref_init(&syncobj->refcount);
0503 INIT_LIST_HEAD(&syncobj->cb_list);
0504 spin_lock_init(&syncobj->lock);
0505
0506 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
0507 ret = drm_syncobj_assign_null_handle(syncobj);
0508 if (ret < 0) {
0509 drm_syncobj_put(syncobj);
0510 return ret;
0511 }
0512 }
0513
0514 if (fence)
0515 drm_syncobj_replace_fence(syncobj, fence);
0516
0517 *out_syncobj = syncobj;
0518 return 0;
0519 }
0520 EXPORT_SYMBOL(drm_syncobj_create);
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
0532
/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	/* Preload so the allocation under the spinlock can't sleep. */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);
0557
0558 static int drm_syncobj_create_as_handle(struct drm_file *file_private,
0559 u32 *handle, uint32_t flags)
0560 {
0561 int ret;
0562 struct drm_syncobj *syncobj;
0563
0564 ret = drm_syncobj_create(&syncobj, flags, NULL);
0565 if (ret)
0566 return ret;
0567
0568 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
0569 drm_syncobj_put(syncobj);
0570 return ret;
0571 }
0572
0573 static int drm_syncobj_destroy(struct drm_file *file_private,
0574 u32 handle)
0575 {
0576 struct drm_syncobj *syncobj;
0577
0578 spin_lock(&file_private->syncobj_table_lock);
0579 syncobj = idr_remove(&file_private->syncobj_idr, handle);
0580 spin_unlock(&file_private->syncobj_table_lock);
0581
0582 if (!syncobj)
0583 return -EINVAL;
0584
0585 drm_syncobj_put(syncobj);
0586 return 0;
0587 }
0588
/* Release callback for the anon inode backing an exported syncobj fd;
 * drops the reference taken in drm_syncobj_get_fd().
 */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file
 * descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	/* Take the file's reference before the fd becomes visible. */
	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
0634
0635 static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
0636 u32 handle, int *p_fd)
0637 {
0638 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
0639 int ret;
0640
0641 if (!syncobj)
0642 return -EINVAL;
0643
0644 ret = drm_syncobj_get_fd(syncobj, p_fd);
0645 drm_syncobj_put(syncobj);
0646 return ret;
0647 }
0648
/* Import a syncobj fd (as produced by drm_syncobj_get_fd()) into
 * @file_private's handle table.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

	/* Only accept fds created by this module. */
	if (f.file->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = f.file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr_alloc() with start 1 returns a positive handle on success. */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
}
0683
0684 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
0685 int fd, int handle)
0686 {
0687 struct dma_fence *fence = sync_file_get_fence(fd);
0688 struct drm_syncobj *syncobj;
0689
0690 if (!fence)
0691 return -EINVAL;
0692
0693 syncobj = drm_syncobj_find(file_private, handle);
0694 if (!syncobj) {
0695 dma_fence_put(fence);
0696 return -ENOENT;
0697 }
0698
0699 drm_syncobj_replace_fence(syncobj, fence);
0700 dma_fence_put(fence);
0701 drm_syncobj_put(syncobj);
0702 return 0;
0703 }
0704
/* Export the current fence of syncobj @handle (point 0, no wait flags) as a
 * sync_file file descriptor.
 */
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	/* Drop our reference; the sync_file holds its own on success. */
	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
0737
0738
0739
0740
0741
0742
0743
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* Handles start at 1; 0 is never a valid syncobj handle. */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
0750
/* idr_for_each() callback: drop the handle table's reference on one entry. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	drm_syncobj_put((struct drm_syncobj *)ptr);

	return 0;
}
0759
0760
0761
0762
0763
0764
0765
0766
0767
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
0775
/* DRM_IOCTL_SYNCOBJ_CREATE: create a syncobj and return its handle. */
int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* no valid flags yet other than CREATE_SIGNALED */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}
0792
/* DRM_IOCTL_SYNCOBJ_DESTROY: drop the handle table's reference. */
int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}
0807
0808 int
0809 drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
0810 struct drm_file *file_private)
0811 {
0812 struct drm_syncobj_handle *args = data;
0813
0814 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
0815 return -EOPNOTSUPP;
0816
0817 if (args->pad)
0818 return -EINVAL;
0819
0820 if (args->flags != 0 &&
0821 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
0822 return -EINVAL;
0823
0824 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
0825 return drm_syncobj_export_sync_file(file_private, args->handle,
0826 &args->fd);
0827
0828 return drm_syncobj_handle_to_fd(file_private, args->handle,
0829 &args->fd);
0830 }
0831
0832 int
0833 drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
0834 struct drm_file *file_private)
0835 {
0836 struct drm_syncobj_handle *args = data;
0837
0838 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
0839 return -EOPNOTSUPP;
0840
0841 if (args->pad)
0842 return -EINVAL;
0843
0844 if (args->flags != 0 &&
0845 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
0846 return -EINVAL;
0847
0848 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
0849 return drm_syncobj_import_sync_file_fence(file_private,
0850 args->fd,
0851 args->handle);
0852
0853 return drm_syncobj_fd_to_handle(file_private, args->fd,
0854 &args->handle);
0855 }
0856
/* Transfer the fence at @src_handle/@src_point onto point @dst_point of the
 * timeline syncobj @dst_handle.
 */
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence, *tmp;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &tmp);
	if (ret)
		goto err_put_timeline;

	/* Flatten any fence containers into a single merged fence. */
	fence = dma_fence_unwrap_merge(tmp);
	dma_fence_put(tmp);
	if (!fence) {
		ret = -ENOMEM;
		goto err_put_timeline;
	}

	chain = dma_fence_chain_alloc();
	if (!chain) {
		ret = -ENOMEM;
		goto err_free_fence;
	}

	/* ret is still 0 here; success falls through the cleanup labels. */
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err_free_fence:
	dma_fence_put(fence);
err_put_timeline:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}
0896
/* Transfer the fence at @src_handle/@src_point onto the binary syncobj
 * @dst_handle, replacing whatever fence it currently holds.
 */
static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
			       struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *binary_syncobj = NULL;
	struct dma_fence *fence;
	int ret;

	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!binary_syncobj)
		return -ENOENT;
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags, &fence);
	if (ret)
		goto err;
	drm_syncobj_replace_fence(binary_syncobj, fence);
	dma_fence_put(fence);
err:
	drm_syncobj_put(binary_syncobj);

	return ret;
}
0919 int
0920 drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
0921 struct drm_file *file_private)
0922 {
0923 struct drm_syncobj_transfer *args = data;
0924 int ret;
0925
0926 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
0927 return -EOPNOTSUPP;
0928
0929 if (args->pad)
0930 return -EINVAL;
0931
0932 if (args->dst_point)
0933 ret = drm_syncobj_transfer_to_timeline(file_private, args);
0934 else
0935 ret = drm_syncobj_transfer_to_binary(file_private, args);
0936
0937 return ret;
0938 }
0939
/* dma_fence callback: a fence being waited on signaled, wake the waiter. */
static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}
0948
/* Called when the syncobj gains a new fence: try to resolve a queued waiter
 * against it, waking and dequeuing the waiter on success.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* Wanted point still not submitted; keep the entry queued. */
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		/* dma_fence_chain_find_seqno() clears the fence when the
		 * point is already signaled; hand back a signaled stub.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

	wake_up_process(wait->task);
	list_del_init(&wait->node);
}
0970
/* Wait on an array of syncobjs (optionally at per-object timeline points).
 * Returns the remaining timeout in jiffies on success or a negative error
 * code; @idx receives the index of the first signaled object.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	/* This path may sleep waiting for userspace; holding locks here
	 * could deadlock.
	 */
	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
		lockdep_assert_none_held_once();

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	if (!user_points) {
		/* Binary waits: treat every object as point 0. */
		memset(points, 0, count * sizeof(uint64_t));

	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence (and WAIT_FOR_SUBMIT was not set)
	 * before we commit to sleeping.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence;

		entries[i].task = current;
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				/* Fence not submitted yet; wait for it below. */
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		/* dma_fence_chain_find_seqno() may return a NULL fence when
		 * the point is already signaled; substitute a stub.
		 */
		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Already done if everything signaled, or anything signaled and we
	 * only need one (no WAIT_ALL).
	 */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* With WAIT_FOR_SUBMIT, queue ourselves on every syncobj so that
	 * entries with no fence yet get one as soon as it is submitted.
	 * Fence callbacks themselves are installed lazily inside the loop
	 * below, only for fences that are not already signaled.
	 */
	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* A failing dma_fence_add_callback() also means the
			 * fence signaled meanwhile; .func doubles as the
			 * "callback already installed" marker.
			 */
			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}
1116
1117
1118
1119
1120
1121
1122
1123
1124 signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
1125 {
1126 ktime_t abs_timeout, now;
1127 u64 timeout_ns, timeout_jiffies64;
1128
1129
1130 if (timeout_nsec == 0)
1131 return 0;
1132
1133 abs_timeout = ns_to_ktime(timeout_nsec);
1134 now = ktime_get();
1135
1136 if (!ktime_after(abs_timeout, now))
1137 return 0;
1138
1139 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
1140
1141 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
1142
1143 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
1144 return MAX_SCHEDULE_TIMEOUT - 1;
1145
1146 return timeout_jiffies64 + 1;
1147 }
1148 EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
1149
1150 static int drm_syncobj_array_wait(struct drm_device *dev,
1151 struct drm_file *file_private,
1152 struct drm_syncobj_wait *wait,
1153 struct drm_syncobj_timeline_wait *timeline_wait,
1154 struct drm_syncobj **syncobjs, bool timeline)
1155 {
1156 signed long timeout = 0;
1157 uint32_t first = ~0;
1158
1159 if (!timeline) {
1160 timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
1161 timeout = drm_syncobj_array_wait_timeout(syncobjs,
1162 NULL,
1163 wait->count_handles,
1164 wait->flags,
1165 timeout, &first);
1166 if (timeout < 0)
1167 return timeout;
1168 wait->first_signaled = first;
1169 } else {
1170 timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
1171 timeout = drm_syncobj_array_wait_timeout(syncobjs,
1172 u64_to_user_ptr(timeline_wait->points),
1173 timeline_wait->count_handles,
1174 timeline_wait->flags,
1175 timeout, &first);
1176 if (timeout < 0)
1177 return timeout;
1178 timeline_wait->first_signaled = first;
1179 }
1180 return 0;
1181 }
1182
/* Translate a userspace array of handles into an array of referenced syncobj
 * pointers.  On success the caller owns one reference per entry and must
 * release them via drm_syncobj_array_free().
 */
static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	/* Unwind only the references taken so far (entries [0, i)). */
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}
1229
1230 static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
1231 uint32_t count)
1232 {
1233 uint32_t i;
1234
1235 for (i = 0; i < count; i++)
1236 drm_syncobj_put(syncobjs[i]);
1237 kfree(syncobjs);
1238 }
1239
1240 int
1241 drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
1242 struct drm_file *file_private)
1243 {
1244 struct drm_syncobj_wait *args = data;
1245 struct drm_syncobj **syncobjs;
1246 int ret = 0;
1247
1248 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1249 return -EOPNOTSUPP;
1250
1251 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1252 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
1253 return -EINVAL;
1254
1255 if (args->count_handles == 0)
1256 return -EINVAL;
1257
1258 ret = drm_syncobj_array_find(file_private,
1259 u64_to_user_ptr(args->handles),
1260 args->count_handles,
1261 &syncobjs);
1262 if (ret < 0)
1263 return ret;
1264
1265 ret = drm_syncobj_array_wait(dev, file_private,
1266 args, NULL, syncobjs, false);
1267
1268 drm_syncobj_array_free(syncobjs, args->count_handles);
1269
1270 return ret;
1271 }
1272
1273 int
1274 drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
1275 struct drm_file *file_private)
1276 {
1277 struct drm_syncobj_timeline_wait *args = data;
1278 struct drm_syncobj **syncobjs;
1279 int ret = 0;
1280
1281 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1282 return -EOPNOTSUPP;
1283
1284 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1285 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1286 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
1287 return -EINVAL;
1288
1289 if (args->count_handles == 0)
1290 return -EINVAL;
1291
1292 ret = drm_syncobj_array_find(file_private,
1293 u64_to_user_ptr(args->handles),
1294 args->count_handles,
1295 &syncobjs);
1296 if (ret < 0)
1297 return ret;
1298
1299 ret = drm_syncobj_array_wait(dev, file_private,
1300 NULL, args, syncobjs, true);
1301
1302 drm_syncobj_array_free(syncobjs, args->count_handles);
1303
1304 return ret;
1305 }
1306
1307
1308 int
1309 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1310 struct drm_file *file_private)
1311 {
1312 struct drm_syncobj_array *args = data;
1313 struct drm_syncobj **syncobjs;
1314 uint32_t i;
1315 int ret;
1316
1317 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1318 return -EOPNOTSUPP;
1319
1320 if (args->pad != 0)
1321 return -EINVAL;
1322
1323 if (args->count_handles == 0)
1324 return -EINVAL;
1325
1326 ret = drm_syncobj_array_find(file_private,
1327 u64_to_user_ptr(args->handles),
1328 args->count_handles,
1329 &syncobjs);
1330 if (ret < 0)
1331 return ret;
1332
1333 for (i = 0; i < args->count_handles; i++)
1334 drm_syncobj_replace_fence(syncobjs[i], NULL);
1335
1336 drm_syncobj_array_free(syncobjs, args->count_handles);
1337
1338 return 0;
1339 }
1340
1341 int
1342 drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1343 struct drm_file *file_private)
1344 {
1345 struct drm_syncobj_array *args = data;
1346 struct drm_syncobj **syncobjs;
1347 uint32_t i;
1348 int ret;
1349
1350 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1351 return -EOPNOTSUPP;
1352
1353 if (args->pad != 0)
1354 return -EINVAL;
1355
1356 if (args->count_handles == 0)
1357 return -EINVAL;
1358
1359 ret = drm_syncobj_array_find(file_private,
1360 u64_to_user_ptr(args->handles),
1361 args->count_handles,
1362 &syncobjs);
1363 if (ret < 0)
1364 return ret;
1365
1366 for (i = 0; i < args->count_handles; i++) {
1367 ret = drm_syncobj_assign_null_handle(syncobjs[i]);
1368 if (ret < 0)
1369 break;
1370 }
1371
1372 drm_syncobj_array_free(syncobjs, args->count_handles);
1373
1374 return ret;
1375 }
1376
/* DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL: signal one timeline point (with a
 * signaled stub fence) on each of the given syncobjs.
 */
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	/* A NULL points pointer means "signal point 0 everywhere". */
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	/* Pre-allocate all chain nodes so signaling below cannot fail. */
	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = dma_fence_chain_alloc();
		if (!chains[i]) {
			/* Free only the chains allocated so far. */
			for (j = 0; j < i; j++)
				dma_fence_chain_free(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		/* drm_syncobj_add_point() consumes the chain node. */
		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
1449
/* DRM_IOCTL_SYNCOBJ_QUERY: report, for each syncobj, either the last
 * signaled timeline point or (with LAST_SUBMITTED) the last submitted one.
 */
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				/* The chain head carries the newest point. */
				point = fence->seqno;
			} else {
				/* Walk towards older links, tracking the
				 * newest link still on this timeline.
				 */
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* It is most likely that timeline has
						 * unorder points. */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			/* No chain installed: nothing signaled yet. */
			point = 0;
		}
		dma_fence_put(fence);
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}