#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

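/*
 * Notification callback shared by all of the selftest fences. The tests free
 * their fences explicitly via free_fence(), so neither FENCE_COMPLETE nor
 * FENCE_FREE requires any work here beyond acknowledging the event.
 */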
static int
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		break;
	}

	return NOTIFY_DONE;
}

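/*
 * Small helpers pairing i915_sw_fence_init()/i915_sw_fence_fini() with the
 * kmalloc()/kfree() of the fence itself, so the tests only deal with
 * alloc_fence() and free_fence().
 */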
static struct i915_sw_fence *alloc_fence(void)
{
	struct i915_sw_fence *fence;

	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	i915_sw_fence_init(fence, fence_notify);
	return fence;
}

static void free_fence(struct i915_sw_fence *fence)
{
	i915_sw_fence_fini(fence);
	kfree(fence);
}

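/*
 * Sanity check a lone fence: it must not report completion until it has been
 * committed, and must remain signaled once it is done.
 */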
static int __test_self(struct i915_sw_fence *fence)
{
	if (i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_commit(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_wait(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	return 0;
}

static int test_self(void *arg)
{
	struct i915_sw_fence *fence;
	int ret;

	fence = alloc_fence();
	if (!fence)
		return -ENOMEM;

	ret = __test_self(fence);

	free_fence(fence);
	return ret;
}

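/*
 * Exercise the optional cycle detection (CONFIG_DRM_I915_SW_FENCE_CHECK_DAG):
 * self-loops, two-fence cycles and the longer A -> B -> C -> A cycle must all
 * be rejected with -EINVAL, while legitimate edges must still be accepted.
 */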
static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

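/*
 * Simple dependency: A awaits B, so committing A must not signal it until B
 * itself has been committed.
 */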
static int test_AB(void *arg)
{
	struct i915_sw_fence *A, *B;
	int ret;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_B;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_B;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_B;
	}

	i915_sw_fence_commit(B);
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B is not done\n");
		goto err_B;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A is not done\n");
		goto err_B;
	}

	ret = 0;
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

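/*
 * A chain of dependencies: A awaits B, which awaits C. Nothing may signal
 * until the tail (C) is committed, after which completion must propagate all
 * the way back to A.
 */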
static int test_ABC(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_C;
	}

	i915_sw_fence_commit(B);
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		goto err_C;
	}

	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early (after signaling B)\n");
		goto err_C;
	}

	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

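/*
 * Two independent fences (A and B) both await the same event (C). Neither may
 * signal before C is committed; both must be done afterwards.
 */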
static int test_AB_C(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await by A\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await by B\n");
		ret = -EINVAL;
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	ret = 0;
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		ret = -EINVAL;
	}

	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(C);
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

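/*
 * Fan-in: a single fence (C) awaits two independent events (A and B). C must
 * not signal until both A and B have been committed.
 */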
static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence A was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = 0;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C)) {
		pr_err("Fence C completed early\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

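/*
 * Build a long chain of fences, each awaiting its predecessor, and check that
 * nothing signals until the head of the chain is committed, after which every
 * fence in the chain must be done.
 */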
static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			nfences = i;
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				nfences = i + 1;
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}

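/*
 * Use a pair of fences as a simple interprocess signaling mechanism: a worker
 * waits on ipc.in, posts a value and then commits ipc.out. The test checks
 * that the value only becomes visible between the two fences being signaled.
 */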
struct task_ipc {
	struct work_struct work;
	struct completion started;
	struct i915_sw_fence *in, *out;
	int value;
};

static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	complete(&ipc->started);

	i915_sw_fence_wait(ipc->in);
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}

static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	int ret = 0;

	ipc.in = alloc_fence();
	if (!ipc.in)
		return -ENOMEM;
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* The completion tells us the worker has started running */
	init_completion(&ipc.started);

	ipc.value = 0;
	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	schedule_work(&ipc.work);

	wait_for_completion(&ipc.started);

	/* Give the worker a chance to run; it must not post the value yet */
	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
	return ret;
}

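/*
 * Check the timed fences: one armed with an already-expired target must be
 * signaled immediately, while fences armed with a future expiry must only
 * signal once that target time has passed.
 */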
static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

	/* Disable preemption to minimise the window between arming and checking */
	preempt_disable();
	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	preempt_enable();
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies / 2) {
		preempt_disable();
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}
		preempt_enable();

		i915_sw_fence_wait(&tf.fence);

		preempt_disable();
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}
		preempt_enable();
		timed_fence_fini(&tf);
	}

	return 0;

err:
	preempt_enable();
	timed_fence_fini(&tf);
	return -EINVAL;
}

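/*
 * A minimal mock dma_fence, just enough to exercise
 * i915_sw_fence_await_dma_fence(): it is only ever signaled explicitly via
 * dma_fence_signal() from the test itself.
 */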
static const char *mock_name(struct dma_fence *fence)
{
	return "mock";
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

static DEFINE_SPINLOCK(mock_fence_lock);

static struct dma_fence *alloc_dma_fence(void)
{
	struct dma_fence *dma;

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (dma)
		dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);

	return dma;
}

static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}

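/*
 * Wrap the mock dma_fence in two sw_fences: one with a timeout and one
 * without. The timeout fence must fire by itself once the timeout elapses,
 * while the other must only signal when the dma_fence is signaled.
 */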
static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* The timeout is rounded up, so allow for that when computing the expiry */
	end = round_jiffies_up(jiffies + delay);

	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	if (i915_sw_fence_done(not)) {
		pr_err("Fence without a timeout signaled before the dma-fence!\n");
		goto err;
	}

skip:
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences not signaled after signaling the dma-fence\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);

	return 0;

err:
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}

int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};

	return i915_subtests(tests, NULL);
}