#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>
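
/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. Each fence carries a usage
 * flag (see &enum dma_resv_usage) describing how the operation it
 * represents uses the resource.
 *
 * The object is protected by a ww_mutex so that multiple reservation
 * objects can be locked together in a deadlock-free manner; readers can
 * additionally walk the fence table locklessly under RCU.
 *
 * A minimal usage sketch for adding a fence (error handling elided, the
 * fence itself is assumed to come from the driver):
 *
 *	dma_resv_lock(obj, NULL);
 *	if (!dma_resv_reserve_fences(obj, 1))
 *		dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(obj);
 */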
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
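
/*
 * Mask for the lower two bits of each table entry, which hold the
 * dma_resv_usage of the fence.
 */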
#define DMA_RESV_LIST_MASK	0x3
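
/**
 * struct dma_resv_list - an array of fences
 * @rcu: for delayed freeing of the table
 * @num_fences: number of valid entries in @table
 * @max_fences: capacity of @table
 * @table: fence pointers, with the usage flags encoded in the low bits
 */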
struct dma_resv_list {
	struct rcu_head rcu;
	u32 num_fences, max_fences;
	struct dma_fence __rcu *table[];
};
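
/* Extract the fence and usage flags from an RCU protected entry in the list. */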
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
				struct dma_resv *resv, struct dma_fence **fence,
				enum dma_resv_usage *usage)
{
	long tmp;

	tmp = (long)rcu_dereference_check(list->table[index],
					  resv ? dma_resv_held(resv) : true);
	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	if (usage)
		*usage = tmp & DMA_RESV_LIST_MASK;
}
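
/* Set the fence and usage flags at the specific index in the list. */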
static void dma_resv_list_set(struct dma_resv_list *list,
			      unsigned int index,
			      struct dma_fence *fence,
			      enum dma_resv_usage usage)
{
	long tmp = ((long)fence) | usage;

	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}
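
/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences: kmalloc() may round the allocation up, so the usable
 * capacity is derived from ksize() rather than from the request.
 */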
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
	if (!list)
		return NULL;

	list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
		sizeof(*list->table);

	return list;
}
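
/* Free a dma_resv_list and make sure to drop all references. */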
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->num_fences; ++i) {
		struct dma_fence *fence;

		dma_resv_list_entry(list, i, NULL, &fence, NULL);
		dma_fence_put(fence);
	}
	kfree_rcu(list, rcu);
}
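
/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */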
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
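
/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */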
void dma_resv_fini(struct dma_resv *obj)
{
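	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */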
	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
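
/*
 * Dereference the fence table; safe under the RCU read side lock or with
 * the dma_resv lock held.
 */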
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}
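
/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence(). Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */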
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_fences_list(obj);
	if (old && old->max_fences) {
		if ((old->num_fences + num_fences) <= old->max_fences)
			return 0;
		max = max(old->num_fences + num_fences, old->max_fences * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;
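
	/*
	 * No need to bump the fence refcounts here, the references held by
	 * the old table are carried over to the new one. Unsignaled fences
	 * are packed at the start, already signaled ones are parked at the
	 * end of the new table so their references can be dropped below.
	 */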
	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
		enum dma_resv_usage usage;
		struct dma_fence *fence;

		dma_resv_list_entry(old, i, obj, &fence, &usage);
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->table[--k], fence);
		else
			dma_resv_list_set(new, j++, fence, usage);
	}
	new->num_fences = j;
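
	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */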
	rcu_assign_pointer(obj->fences, new);

	if (!old)
		return 0;
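
	/* Drop the references to the signaled fences */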
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->table[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);

#ifdef CONFIG_DEBUG_MUTEXES
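/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */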
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_fences_list(obj);

	dma_resv_assert_held(obj);
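
	/* Test fence slot reservation */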
	if (fences)
		fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif
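
/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot; @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called to reserve the slot.
 */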
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);
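
	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */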
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_fences_list(obj);
	count = fobj->num_fences;

	for (i = 0; i < count; ++i) {
		enum dma_resv_usage old_usage;

		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
		if ((old->context == fence->context && old_usage >= usage &&
		     dma_fence_is_later(fence, old)) ||
		    dma_fence_is_signaled(old)) {
			dma_resv_list_set(fobj, i, fence, usage);
			dma_fence_put(old);
			return;
		}
	}

	BUG_ON(fobj->num_fences >= fobj->max_fences);
	count++;

	dma_resv_list_set(fobj, i, fence, usage);
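
	/* pointer update must be visible before we extend the num_fences */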
	smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);
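
/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence has no longer access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example for using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */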
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	unsigned int i;

	dma_resv_assert_held(obj);

	list = dma_resv_fences_list(obj);
	for (i = 0; list && i < list->num_fences; ++i) {
		struct dma_fence *old;

		dma_resv_list_entry(list, i, obj, &old, NULL);
		if (old->context != context)
			continue;

		dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
		dma_fence_put(old);
	}
}
EXPORT_SYMBOL(dma_resv_replace_fences);
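
/* Restart the unlocked iteration by initializing the cursor object. */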
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->index = 0;
	cursor->num_fences = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);
	if (cursor->fences)
		cursor->num_fences = cursor->fences->num_fences;
	cursor->is_restarted = true;
}
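
/* Walk to the next not signaled fence and grab a reference to it */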
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	if (!cursor->fences)
		return;

	do {
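		/* Drop the reference from the previous round */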
		dma_fence_put(cursor->fence);

		if (cursor->index >= cursor->num_fences) {
			cursor->fence = NULL;
			break;
		}

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &cursor->fence,
				    &cursor->fence_usage);
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence) {
			dma_resv_iter_restart_unlocked(cursor);
			continue;
		}

		if (!dma_fence_is_signaled(cursor->fence) &&
		    cursor->usage >= cursor->fence_usage)
			break;
	} while (true);
}
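
/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */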
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
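
/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */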
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
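
/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next(). The caller must
 * hold the reservation lock, so the iteration can never restart.
 *
 * Returns the first fence of the dma_resv object.
 */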
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);

	fence = dma_resv_iter_next(cursor);
	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);
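
/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * The caller must hold the reservation lock. Fences whose usage is not
 * covered by the cursor's usage filter are skipped.
 *
 * Returns the next fence of the dma_resv object.
 */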
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;

	do {
		if (!cursor->fences ||
		    cursor->index >= cursor->fences->num_fences)
			return NULL;

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &fence, &cursor->fence_usage);
	} while (cursor->fence_usage > cursor->usage);

	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
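
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */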
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f;

	dma_resv_assert_held(dst);

	list = NULL;

	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, f) {
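
		/* A restart means the fence list was concurrently modified,
		 * so drop the partial copy and start over.
		 */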
		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);

			list = dma_resv_list_alloc(cursor.num_fences);
			if (!list) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			list->num_fences = 0;
		}

		dma_fence_get(f);
		dma_resv_list_set(list, list->num_fences++, f,
				  dma_resv_iter_usage(&cursor));
	}
	dma_resv_iter_end(&cursor);

	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
	dma_resv_list_free(list);
	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
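
/**
 * dma_resv_get_fences - Get an object's fences without holding the update
 * side lock
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */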
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
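
		/* Restarted means the fences were modified, start over */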
		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.num_fences + 1;
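
			/* Eventually re-allocate the array */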
			*fences = krealloc_array(*fences, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*fences) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
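
/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 * If more than one fence is contained a dma_fence_array is returned.
 *
 * Returns 0 on success and negative error values on failure.
 */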
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	unsigned int count;
	int r;

	r = dma_resv_get_fences(obj, usage, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		*fence = NULL;
		return 0;
	}

	if (count == 1) {
		*fence = fences[0];
		kfree(fences);
		return 0;
	}

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}

	*fence = &array->base;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
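
/**
 * dma_resv_wait_timeout - Wait on reservation's objects fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but maybe hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */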
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
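
/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but maybe hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * True if all fences signaled, else false.
 */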
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
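
/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */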
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);
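
/*
 * Boot-time lockdep priming: take the relevant locks once in the expected
 * order (mmap_lock -> dma_resv -> fs_reclaim / mmu_notifier) so that lockdep
 * learns the dependency chain and can report violations before they cause an
 * actual deadlock.
 */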
#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
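	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */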
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif