0001
0002
0003
0004
0005
0006
0007 #include <linux/delay.h>
0008 #include <linux/dma-fence.h>
0009 #include <linux/dma-fence-chain.h>
0010 #include <linux/kernel.h>
0011 #include <linux/kthread.h>
0012 #include <linux/mm.h>
0013 #include <linux/sched/signal.h>
0014 #include <linux/slab.h>
0015 #include <linux/spinlock.h>
0016 #include <linux/random.h>
0017
0018 #include "selftest.h"
0019
0020 #define CHAIN_SZ (4 << 10)
0021
0022 static struct kmem_cache *slab_fences;
0023
0024 static inline struct mock_fence {
0025 struct dma_fence base;
0026 spinlock_t lock;
0027 } *to_mock_fence(struct dma_fence *f) {
0028 return container_of(f, struct mock_fence, base);
0029 }
0030
/* Name callback shared by .get_driver_name and .get_timeline_name. */
static const char *mock_name(struct dma_fence *f)
{
	return "mock";
}
0035
0036 static void mock_fence_release(struct dma_fence *f)
0037 {
0038 kmem_cache_free(slab_fences, to_mock_fence(f));
0039 }
0040
/*
 * Ops table for mock fences. Only names and release are provided; the
 * default dma_fence wait/signaling paths are used for everything else.
 */
static const struct dma_fence_ops mock_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.release = mock_fence_release,
};
0046
0047 static struct dma_fence *mock_fence(void)
0048 {
0049 struct mock_fence *f;
0050
0051 f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
0052 if (!f)
0053 return NULL;
0054
0055 spin_lock_init(&f->lock);
0056 dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
0057
0058 return &f->base;
0059 }
0060
0061 static struct dma_fence *mock_chain(struct dma_fence *prev,
0062 struct dma_fence *fence,
0063 u64 seqno)
0064 {
0065 struct dma_fence_chain *f;
0066
0067 f = dma_fence_chain_alloc();
0068 if (!f)
0069 return NULL;
0070
0071 dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence),
0072 seqno);
0073
0074 return &f->base;
0075 }
0076
0077 static int sanitycheck(void *arg)
0078 {
0079 struct dma_fence *f, *chain;
0080 int err = 0;
0081
0082 f = mock_fence();
0083 if (!f)
0084 return -ENOMEM;
0085
0086 chain = mock_chain(NULL, f, 1);
0087 if (!chain)
0088 err = -ENOMEM;
0089
0090 dma_fence_signal(f);
0091 dma_fence_put(f);
0092
0093 dma_fence_put(chain);
0094
0095 return err;
0096 }
0097
/*
 * A linear dma-fence-chain built for testing: chains[i] is the link that
 * wraps payload fence fences[i], and tail is the newest link (the head
 * used for walking/lookup).
 */
struct fence_chains {
	unsigned int chain_length;	/* number of links successfully built */
	struct dma_fence **fences;	/* payload fences, one per link */
	struct dma_fence **chains;	/* chain links; chains[i] wraps fences[i] */

	struct dma_fence *tail;		/* last link appended, start of lookups */
};
0105
/* Seqno generator: link i carries seqno i + 1 (dense, gap-free). */
static uint64_t seqno_inc(unsigned int i)
{
	return i + 1;
}
0110
/*
 * Build a chain of @count links: fences[i] is a fresh mock fence and
 * chains[i] wraps it with seqno seqno_fn(i), linked after the previous
 * link. On success fc->tail points at the newest link and
 * fc->chain_length == @count. Returns 0 or -ENOMEM; on failure all
 * partially constructed state is released before returning.
 */
static int fence_chains_init(struct fence_chains *fc, unsigned int count,
			     uint64_t (*seqno_fn)(unsigned int))
{
	unsigned int i;
	int err = 0;

	/* __GFP_ZERO so the unwind path can blindly put every slot. */
	fc->chains = kvmalloc_array(count, sizeof(*fc->chains),
				    GFP_KERNEL | __GFP_ZERO);
	if (!fc->chains)
		return -ENOMEM;

	fc->fences = kvmalloc_array(count, sizeof(*fc->fences),
				    GFP_KERNEL | __GFP_ZERO);
	if (!fc->fences) {
		err = -ENOMEM;
		goto err_chains;
	}

	fc->tail = NULL;
	for (i = 0; i < count; i++) {
		fc->fences[i] = mock_fence();
		if (!fc->fences[i]) {
			err = -ENOMEM;
			goto unwind;
		}

		/* mock_chain() takes its own refs on tail and fence. */
		fc->chains[i] = mock_chain(fc->tail,
					   fc->fences[i],
					   seqno_fn(i));
		if (!fc->chains[i]) {
			err = -ENOMEM;
			goto unwind;
		}

		fc->tail = fc->chains[i];
	}

	fc->chain_length = i;
	return 0;

unwind:
	/* dma_fence_put(NULL) is a no-op, so the zeroed tail slots are safe. */
	for (i = 0; i < count; i++) {
		dma_fence_put(fc->fences[i]);
		dma_fence_put(fc->chains[i]);
	}
	kvfree(fc->fences);
err_chains:
	kvfree(fc->chains);
	return err;
}
0161
0162 static void fence_chains_fini(struct fence_chains *fc)
0163 {
0164 unsigned int i;
0165
0166 for (i = 0; i < fc->chain_length; i++) {
0167 dma_fence_signal(fc->fences[i]);
0168 dma_fence_put(fc->fences[i]);
0169 }
0170 kvfree(fc->fences);
0171
0172 for (i = 0; i < fc->chain_length; i++)
0173 dma_fence_put(fc->chains[i]);
0174 kvfree(fc->chains);
0175 }
0176
/*
 * Exercise dma_fence_chain_find_seqno() on a dense 64-link chain:
 * seqno 0 always succeeds, an exact seqno finds its own link, a future
 * seqno is rejected, and a smaller seqno walks back to an earlier link.
 */
static int find_seqno(void *arg)
{
	struct fence_chains fc;
	struct dma_fence *fence;
	int err;
	int i;

	err = fence_chains_init(&fc, 64, seqno_inc);
	if (err)
		return err;

	/* seqno 0 means "don't care" and must always succeed. */
	fence = dma_fence_get(fc.tail);
	err = dma_fence_chain_find_seqno(&fence, 0);
	dma_fence_put(fence);
	if (err) {
		pr_err("Reported %d for find_seqno(0)!\n", err);
		goto err;
	}

	for (i = 0; i < fc.chain_length; i++) {
		/* From the tail, seqno i + 1 must resolve to chains[i]. */
		fence = dma_fence_get(fc.tail);
		err = dma_fence_chain_find_seqno(&fence, i + 1);
		dma_fence_put(fence);
		if (err) {
			pr_err("Reported %d for find_seqno(%d:%d)!\n",
			       err, fc.chain_length + 1, i + 1);
			goto err;
		}
		if (fence != fc.chains[i]) {
			pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
			       fc.chain_length + 1, i + 1);
			err = -EINVAL;
			goto err;
		}

		/* Looking up a link's own seqno from itself is a no-op. */
		dma_fence_get(fence);
		err = dma_fence_chain_find_seqno(&fence, i + 1);
		dma_fence_put(fence);
		if (err) {
			pr_err("Error reported for finding self\n");
			goto err;
		}
		if (fence != fc.chains[i]) {
			pr_err("Incorrect fence reported by find self\n");
			err = -EINVAL;
			goto err;
		}

		/* A seqno newer than this link must be rejected. */
		dma_fence_get(fence);
		err = dma_fence_chain_find_seqno(&fence, i + 2);
		dma_fence_put(fence);
		if (!err) {
			pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",
			       i + 1, i + 2);
			err = -EINVAL;
			goto err;
		}

		/* An older seqno walks back to the previous link. */
		dma_fence_get(fence);
		err = dma_fence_chain_find_seqno(&fence, i);
		dma_fence_put(fence);
		if (err) {
			pr_err("Error reported for previous fence!\n");
			goto err;
		}
		if (i > 0 && fence != fc.chains[i - 1]) {
			pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
			       i + 1, i);
			err = -EINVAL;
			goto err;
		}
	}

err:
	fence_chains_fini(&fc);
	return err;
}
0254
/*
 * With a two-link chain whose first payload fence is already signaled,
 * find_seqno(1) should skip past the completed link: NULL or chains[0]
 * are both accepted as results.
 */
static int find_signaled(void *arg)
{
	struct fence_chains fc;
	struct dma_fence *fence;
	int err;

	err = fence_chains_init(&fc, 2, seqno_inc);
	if (err)
		return err;

	dma_fence_signal(fc.fences[0]);

	fence = dma_fence_get(fc.tail);
	err = dma_fence_chain_find_seqno(&fence, 1);
	dma_fence_put(fence);
	if (err) {
		pr_err("Reported %d for find_seqno()!\n", err);
		goto err;
	}

	if (fence && fence != fc.chains[0]) {
		pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n",
		       fence->seqno);

		/* Extra diagnostic: can the bogus result at least find itself? */
		dma_fence_get(fence);
		err = dma_fence_chain_find_seqno(&fence, 1);
		dma_fence_put(fence);
		if (err)
			pr_err("Reported %d for finding self!\n", err);

		err = -EINVAL;
	}

err:
	fence_chains_fini(&fc);
	return err;
}
0292
/*
 * Signal only the middle fence (seqno 2) of a 1-2-3 chain, then look up
 * seqno 2 from the tail.
 */
static int find_out_of_order(void *arg)
{
	struct fence_chains fc;
	struct dma_fence *fence;
	int err;

	err = fence_chains_init(&fc, 3, seqno_inc);
	if (err)
		return err;

	dma_fence_signal(fc.fences[1]);

	fence = dma_fence_get(fc.tail);
	err = dma_fence_chain_find_seqno(&fence, 2);
	dma_fence_put(fence);
	if (err) {
		pr_err("Reported %d for find_seqno()!\n", err);
		goto err;
	}

	/*
	 * NOTE(review): only fence 2 was signaled, so the walk is expected
	 * to collapse the completed middle link and land on the still
	 * unsignaled first link, chains[0] -- confirm against the walk
	 * semantics of dma_fence_chain_find_seqno()/dma_fence_chain_walk().
	 */
	if (fence != fc.chains[0]) {
		pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n",
		       fence ? fence->seqno : 0);

		err = -EINVAL;
	}

err:
	fence_chains_fini(&fc);
	return err;
}
0331
/* Seqno generator with gaps: link i carries the even seqno 2 * (i + 1). */
static uint64_t seqno_inc2(unsigned int i)
{
	return 2 * (i + 1);
}
0336
/*
 * Chain carrying only even seqnos (2, 4, 6, ...): an odd seqno falling in
 * a gap must resolve to the next-higher link, and that link must then
 * find itself for its own even seqno.
 */
static int find_gap(void *arg)
{
	struct fence_chains fc;
	struct dma_fence *fence;
	int err;
	int i;

	err = fence_chains_init(&fc, 64, seqno_inc2);
	if (err)
		return err;

	for (i = 0; i < fc.chain_length; i++) {
		/* Odd seqno 2i + 1 sits in the gap below chains[i] (2i + 2). */
		fence = dma_fence_get(fc.tail);
		err = dma_fence_chain_find_seqno(&fence, 2 * i + 1);
		dma_fence_put(fence);
		if (err) {
			pr_err("Reported %d for find_seqno(%d:%d)!\n",
			       err, fc.chain_length + 1, 2 * i + 1);
			goto err;
		}
		if (fence != fc.chains[i]) {
			pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n",
			       fence->seqno,
			       fc.chain_length + 1,
			       2 * i + 1);
			err = -EINVAL;
			goto err;
		}

		/* The exact (even) seqno must find the same link again. */
		dma_fence_get(fence);
		err = dma_fence_chain_find_seqno(&fence, 2 * i + 2);
		dma_fence_put(fence);
		if (err) {
			pr_err("Error reported for finding self\n");
			goto err;
		}
		if (fence != fc.chains[i]) {
			pr_err("Incorrect fence reported by find self\n");
			err = -EINVAL;
			goto err;
		}
	}

err:
	fence_chains_fini(&fc);
	return err;
}
0384
/* Shared state for the find_race stress test. */
struct find_race {
	struct fence_chains fc;
	atomic_t children;	/* worker threads still running */
};
0389
/*
 * Worker for find_race: repeatedly look up a random seqno from the tail
 * while all workers signal random payload fences underneath each other.
 * Runs until kthread_stop(); returns the first lookup error, if any.
 */
static int __find_race(void *arg)
{
	struct find_race *data = arg;
	int err = 0;

	while (!kthread_should_stop()) {
		struct dma_fence *fence = dma_fence_get(data->fc.tail);
		int seqno;

		seqno = prandom_u32_max(data->fc.chain_length) + 1;

		err = dma_fence_chain_find_seqno(&fence, seqno);
		if (err) {
			pr_err("Failed to find fence seqno:%d\n",
			       seqno);
			dma_fence_put(fence);
			break;
		}
		/* NULL means the requested seqno already completed. */
		if (!fence)
			goto signal;

		/*
		 * Only check find-self when the lookup landed exactly on the
		 * link carrying @seqno; a racing signal may have collapsed
		 * the chain past it, leaving an earlier-seqno link instead.
		 */
		if (fence->seqno == seqno) {
			err = dma_fence_chain_find_seqno(&fence, seqno);
			if (err) {
				pr_err("Reported an invalid fence for find-self:%d\n",
				       seqno);
				dma_fence_put(fence);
				break;
			}
		}

		dma_fence_put(fence);

signal:
		/* Keep the chain collapsing under the other workers. */
		seqno = prandom_u32_max(data->fc.chain_length - 1);
		dma_fence_signal(data->fc.fences[seqno]);
		cond_resched();
	}

	/* Last worker out wakes the parent waiting in find_race(). */
	if (atomic_dec_and_test(&data->children))
		wake_up_var(&data->children);
	return err;
}
0437
/*
 * Stress dma_fence_chain_find_seqno() with one worker per online CPU
 * racing lookups against random signaling, then report how many payload
 * fences completed during the run.
 */
static int find_race(void *arg)
{
	struct find_race data;
	int ncpus = num_online_cpus();
	struct task_struct **threads;
	unsigned long count;
	int err;
	int i;

	err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc);
	if (err)
		return err;

	threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
	if (!threads) {
		err = -ENOMEM;
		goto err;
	}

	atomic_set(&data.children, 0);
	for (i = 0; i < ncpus; i++) {
		threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);
		if (IS_ERR(threads[i])) {
			/* Run with however many workers were started. */
			ncpus = i;
			break;
		}
		atomic_inc(&data.children);
		get_task_struct(threads[i]);
	}

	/* Let the workers race for up to 5s, or until they all bail out. */
	wait_var_event_timeout(&data.children,
			       !atomic_read(&data.children),
			       5 * HZ);

	for (i = 0; i < ncpus; i++) {
		int ret;

		/* Collect each worker's return value; keep the first error. */
		ret = kthread_stop(threads[i]);
		if (ret && !err)
			err = ret;
		put_task_struct(threads[i]);
	}
	kfree(threads);

	count = 0;
	for (i = 0; i < data.fc.chain_length; i++)
		if (dma_fence_is_signaled(data.fc.fences[i]))
			count++;
	pr_info("Completed %lu cycles\n", count);

err:
	fence_chains_fini(&data.fc);
	return err;
}
0492
0493 static int signal_forward(void *arg)
0494 {
0495 struct fence_chains fc;
0496 int err;
0497 int i;
0498
0499 err = fence_chains_init(&fc, 64, seqno_inc);
0500 if (err)
0501 return err;
0502
0503 for (i = 0; i < fc.chain_length; i++) {
0504 dma_fence_signal(fc.fences[i]);
0505
0506 if (!dma_fence_is_signaled(fc.chains[i])) {
0507 pr_err("chain[%d] not signaled!\n", i);
0508 err = -EINVAL;
0509 goto err;
0510 }
0511
0512 if (i + 1 < fc.chain_length &&
0513 dma_fence_is_signaled(fc.chains[i + 1])) {
0514 pr_err("chain[%d] is signaled!\n", i);
0515 err = -EINVAL;
0516 goto err;
0517 }
0518 }
0519
0520 err:
0521 fence_chains_fini(&fc);
0522 return err;
0523 }
0524
/*
 * Signal payload fences newest-first: no link with an unsignaled
 * predecessor may report signaled early, and once every fence is
 * signaled every link must be.
 */
static int signal_backward(void *arg)
{
	struct fence_chains fc;
	int err;
	int i;

	err = fence_chains_init(&fc, 64, seqno_inc);
	if (err)
		return err;

	for (i = fc.chain_length; i--; ) {
		dma_fence_signal(fc.fences[i]);

		/* chains[0] has no predecessor, so only i > 0 is checked. */
		if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
			pr_err("chain[%d] is signaled!\n", i);
			err = -EINVAL;
			goto err;
		}
	}

	/* All fences are signaled now, so every link must be too. */
	for (i = 0; i < fc.chain_length; i++) {
		if (!dma_fence_is_signaled(fc.chains[i])) {
			pr_err("chain[%d] was not signaled!\n", i);
			err = -EINVAL;
			goto err;
		}
	}

err:
	fence_chains_fini(&fc);
	return err;
}
0557
0558 static int __wait_fence_chains(void *arg)
0559 {
0560 struct fence_chains *fc = arg;
0561
0562 if (dma_fence_wait(fc->tail, false))
0563 return -EIO;
0564
0565 return 0;
0566 }
0567
/*
 * Start a kthread waiting on the chain tail, then signal payload fences
 * oldest-first; the wait must complete once all fences are signaled.
 */
static int wait_forward(void *arg)
{
	struct fence_chains fc;
	struct task_struct *tsk;
	int err;
	int i;

	err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
	if (err)
		return err;

	tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
	if (IS_ERR(tsk)) {
		err = PTR_ERR(tsk);
		goto err;
	}
	get_task_struct(tsk);
	/* Give the waiter a chance to start sleeping on the tail. */
	yield_to(tsk, true);

	for (i = 0; i < fc.chain_length; i++)
		dma_fence_signal(fc.fences[i]);

	/* kthread_stop() collects __wait_fence_chains()'s return value. */
	err = kthread_stop(tsk);
	put_task_struct(tsk);

err:
	fence_chains_fini(&fc);
	return err;
}
0597
/*
 * As wait_forward, but signal payload fences newest-first; the waiter on
 * the tail must still complete once every fence has been signaled.
 */
static int wait_backward(void *arg)
{
	struct fence_chains fc;
	struct task_struct *tsk;
	int err;
	int i;

	err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
	if (err)
		return err;

	tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
	if (IS_ERR(tsk)) {
		err = PTR_ERR(tsk);
		goto err;
	}
	get_task_struct(tsk);
	/* Give the waiter a chance to start sleeping on the tail. */
	yield_to(tsk, true);

	for (i = fc.chain_length; i--; )
		dma_fence_signal(fc.fences[i]);

	/* kthread_stop() collects __wait_fence_chains()'s return value. */
	err = kthread_stop(tsk);
	put_task_struct(tsk);

err:
	fence_chains_fini(&fc);
	return err;
}
0627
/*
 * Fisher-Yates shuffle of the payload fences so that signaling order no
 * longer matches chain order. Only fc->fences is permuted; the chain
 * links keep their original seqno ordering.
 */
static void randomise_fences(struct fence_chains *fc)
{
	unsigned int count = fc->chain_length;

	while (--count) {
		unsigned int swp;

		/* Partner in [0, count]; swapping with self is skipped. */
		swp = prandom_u32_max(count + 1);
		if (swp == count)
			continue;

		swap(fc->fences[count], fc->fences[swp]);
	}
}
0643
/*
 * As wait_forward, but signal payload fences in a random order (via
 * randomise_fences); the waiter on the tail must still complete once
 * every fence has been signaled.
 */
static int wait_random(void *arg)
{
	struct fence_chains fc;
	struct task_struct *tsk;
	int err;
	int i;

	err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
	if (err)
		return err;

	randomise_fences(&fc);

	tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
	if (IS_ERR(tsk)) {
		err = PTR_ERR(tsk);
		goto err;
	}
	get_task_struct(tsk);
	/* Give the waiter a chance to start sleeping on the tail. */
	yield_to(tsk, true);

	for (i = 0; i < fc.chain_length; i++)
		dma_fence_signal(fc.fences[i]);

	/* kthread_stop() collects __wait_fence_chains()'s return value. */
	err = kthread_stop(tsk);
	put_task_struct(tsk);

err:
	fence_chains_fini(&fc);
	return err;
}
0675
/*
 * Selftest entry point: create the mock-fence slab, run every subtest,
 * and destroy the slab again. Returns the first subtest failure or 0.
 */
int dma_fence_chain(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(find_seqno),
		SUBTEST(find_signaled),
		SUBTEST(find_out_of_order),
		SUBTEST(find_gap),
		SUBTEST(find_race),
		SUBTEST(signal_forward),
		SUBTEST(signal_backward),
		SUBTEST(wait_forward),
		SUBTEST(wait_backward),
		SUBTEST(wait_random),
	};
	int ret;

	pr_info("sizeof(dma_fence_chain)=%zu\n",
		sizeof(struct dma_fence_chain));

	/*
	 * NOTE(review): SLAB_TYPESAFE_BY_RCU presumably because fence
	 * lookups may race with release under RCU -- confirm against the
	 * dma_fence documentation before changing.
	 */
	slab_fences = KMEM_CACHE(mock_fence,
				 SLAB_TYPESAFE_BY_RCU |
				 SLAB_HWCACHE_ALIGN);
	if (!slab_fences)
		return -ENOMEM;

	ret = subtests(tests, NULL);

	kmem_cache_destroy(slab_fences);
	return ret;
}