// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
static struct workqueue_struct *wq;

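/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, the slowpath randomly injects
 * -EDEADLK to exercise callers' backoff paths.  Tests that check for
 * *real* deadlock behaviour must disable that injection, hence this
 * wrapper which pushes the countdown out of reach.
 */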
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
        ww_acquire_init((a), (b)); \
        (a)->deadlock_inject_countdown = ~0U; \
    } while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
    struct work_struct work;
    struct ww_mutex mutex;
    struct completion ready, go, done;
    unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

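/*
 * Worker half of the mutual-exclusion test: flag that we are ready,
 * wait for the go signal, then acquire the mutex (by trylock-spinning
 * or by blocking) and signal 'done' once we own it.
 */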
static void test_mutex_work(struct work_struct *work)
{
    struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

    complete(&mtx->ready);
    wait_for_completion(&mtx->go);

    if (mtx->flags & TEST_MTX_TRY) {
        while (!ww_mutex_trylock(&mtx->mutex, NULL))
            cond_resched();
    } else {
        ww_mutex_lock(&mtx->mutex, NULL);
    }
    complete(&mtx->done);
    ww_mutex_unlock(&mtx->mutex);
}

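/*
 * Hold the mutex while the worker tries to take it: the worker must
 * not manage to complete 'done' before we unlock, so seeing 'done'
 * completed within TIMEOUT means mutual exclusion was violated.
 */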
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
    struct test_mutex mtx;
    struct ww_acquire_ctx ctx;
    int ret;

    ww_mutex_init(&mtx.mutex, &ww_class);
    ww_acquire_init(&ctx, &ww_class);

    INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
    init_completion(&mtx.ready);
    init_completion(&mtx.go);
    init_completion(&mtx.done);
    mtx.flags = flags;

    schedule_work(&mtx.work);

    wait_for_completion(&mtx.ready);
    ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
    complete(&mtx.go);
    if (flags & TEST_MTX_SPIN) {
        unsigned long timeout = jiffies + TIMEOUT;

        ret = 0;
        do {
            if (completion_done(&mtx.done)) {
                ret = -EINVAL;
                break;
            }
            cond_resched();
        } while (time_before(jiffies, timeout));
    } else {
        ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
    }
    ww_mutex_unlock(&mtx.mutex);
    ww_acquire_fini(&ctx);

    if (ret) {
        pr_err("%s(flags=%x): mutual exclusion failure\n",
               __func__, flags);
        ret = -EINVAL;
    }

    flush_work(&mtx.work);
    destroy_work_on_stack(&mtx.work);
    return ret;
#undef TIMEOUT
}

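/* Run the basic test for every combination of the TEST_MTX_* flags. */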
static int test_mutex(void)
{
    int ret;
    int i;

    for (i = 0; i < __TEST_MTX_LAST; i++) {
        ret = __test_mutex(i);
        if (ret)
            return ret;
    }

    return 0;
}

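/*
 * AA deadlock: reacquiring a ww_mutex we already hold must fail -
 * trylock (with or without a context) must return false, and a
 * recursive ww_mutex_lock() with the same context must return
 * -EALREADY rather than deadlock.
 */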
static int test_aa(bool trylock)
{
    struct ww_mutex mutex;
    struct ww_acquire_ctx ctx;
    int ret;
    const char *from = trylock ? "trylock" : "lock";

    ww_mutex_init(&mutex, &ww_class);
    ww_acquire_init(&ctx, &ww_class);

    if (!trylock) {
        ret = ww_mutex_lock(&mutex, &ctx);
        if (ret) {
            pr_err("%s: initial lock failed!\n", __func__);
            goto out;
        }
    } else {
        ret = !ww_mutex_trylock(&mutex, &ctx);
        if (ret) {
            pr_err("%s: initial trylock failed!\n", __func__);
            goto out;
        }
    }

    if (ww_mutex_trylock(&mutex, NULL)) {
        pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
        ww_mutex_unlock(&mutex);
        ret = -EINVAL;
        goto out;
    }

    if (ww_mutex_trylock(&mutex, &ctx)) {
        pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
        ww_mutex_unlock(&mutex);
        ret = -EINVAL;
        goto out;
    }

    ret = ww_mutex_lock(&mutex, &ctx);
    if (ret != -EALREADY) {
        pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
               __func__, ret, from);
        if (!ret)
            ww_mutex_unlock(&mutex);
        ret = -EINVAL;
        goto out;
    }

    ww_mutex_unlock(&mutex);
    ret = 0;
out:
    ww_acquire_fini(&ctx);
    return ret;
}

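/*
 * ABBA deadlock: the worker locks B then A while the main thread locks
 * A then B.  Without 'resolve', at least one side must observe
 * -EDEADLK; with 'resolve', the losing side backs off, relocks the
 * contended mutex with ww_mutex_lock_slow() and retries, so both
 * sides must succeed.
 */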
struct test_abba {
    struct work_struct work;
    struct ww_mutex a_mutex;
    struct ww_mutex b_mutex;
    struct completion a_ready;
    struct completion b_ready;
    bool resolve, trylock;
    int result;
};

static void test_abba_work(struct work_struct *work)
{
    struct test_abba *abba = container_of(work, typeof(*abba), work);
    struct ww_acquire_ctx ctx;
    int err;

    ww_acquire_init_noinject(&ctx, &ww_class);
    if (!abba->trylock)
        ww_mutex_lock(&abba->b_mutex, &ctx);
    else
        WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

    WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

    complete(&abba->b_ready);
    wait_for_completion(&abba->a_ready);

    err = ww_mutex_lock(&abba->a_mutex, &ctx);
    if (abba->resolve && err == -EDEADLK) {
        ww_mutex_unlock(&abba->b_mutex);
        ww_mutex_lock_slow(&abba->a_mutex, &ctx);
        err = ww_mutex_lock(&abba->b_mutex, &ctx);
    }

    if (!err)
        ww_mutex_unlock(&abba->a_mutex);
    ww_mutex_unlock(&abba->b_mutex);
    ww_acquire_fini(&ctx);

    abba->result = err;
}

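/*
 * Main-thread half of the ABBA test: lock A, synchronise with the
 * worker (which by then holds B), then take B to force the cycle.
 */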
static int test_abba(bool trylock, bool resolve)
{
    struct test_abba abba;
    struct ww_acquire_ctx ctx;
    int err, ret;

    ww_mutex_init(&abba.a_mutex, &ww_class);
    ww_mutex_init(&abba.b_mutex, &ww_class);
    INIT_WORK_ONSTACK(&abba.work, test_abba_work);
    init_completion(&abba.a_ready);
    init_completion(&abba.b_ready);
    abba.trylock = trylock;
    abba.resolve = resolve;

    schedule_work(&abba.work);

    ww_acquire_init_noinject(&ctx, &ww_class);
    if (!trylock)
        ww_mutex_lock(&abba.a_mutex, &ctx);
    else
        WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

    WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

    complete(&abba.a_ready);
    wait_for_completion(&abba.b_ready);

    err = ww_mutex_lock(&abba.b_mutex, &ctx);
    if (resolve && err == -EDEADLK) {
        ww_mutex_unlock(&abba.a_mutex);
        ww_mutex_lock_slow(&abba.b_mutex, &ctx);
        err = ww_mutex_lock(&abba.a_mutex, &ctx);
    }

    if (!err)
        ww_mutex_unlock(&abba.b_mutex);
    ww_mutex_unlock(&abba.a_mutex);
    ww_acquire_fini(&ctx);

    flush_work(&abba.work);
    destroy_work_on_stack(&abba.work);

    ret = 0;
    if (resolve) {
        if (err || abba.result) {
            pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
                   __func__, err, abba.result);
            ret = -EINVAL;
        }
    } else {
        if (err != -EDEADLK && abba.result != -EDEADLK) {
            pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
                   __func__, err, abba.result);
            ret = -EINVAL;
        }
    }
    return ret;
}

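/*
 * Cyclic deadlock over N workers: worker n holds its own a_mutex and
 * then waits on worker n+1's (mod N), closing the cycle.  The
 * wait/wound algorithm must back at least one worker off so that
 * every worker eventually succeeds (result == 0).
 */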
struct test_cycle {
    struct work_struct work;
    struct ww_mutex a_mutex;
    struct ww_mutex *b_mutex;
    struct completion *a_signal;
    struct completion b_signal;
    int result;
};

static void test_cycle_work(struct work_struct *work)
{
    struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
    struct ww_acquire_ctx ctx;
    int err, erra = 0;

    ww_acquire_init_noinject(&ctx, &ww_class);
    ww_mutex_lock(&cycle->a_mutex, &ctx);

    complete(cycle->a_signal);
    wait_for_completion(&cycle->b_signal);

    err = ww_mutex_lock(cycle->b_mutex, &ctx);
    if (err == -EDEADLK) {
        err = 0;
        ww_mutex_unlock(&cycle->a_mutex);
        ww_mutex_lock_slow(cycle->b_mutex, &ctx);
        erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
    }

    if (!err)
        ww_mutex_unlock(cycle->b_mutex);
    if (!erra)
        ww_mutex_unlock(&cycle->a_mutex);
    ww_acquire_fini(&ctx);

    cycle->result = err ?: erra;
}

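/*
 * Build the ring: each worker's b_mutex points at the next worker's
 * a_mutex, and the completion signals are wired so that no worker
 * attempts its second lock until its neighbour holds the first.
 */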
static int __test_cycle(unsigned int nthreads)
{
    struct test_cycle *cycles;
    unsigned int n, last = nthreads - 1;
    int ret;

    cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
    if (!cycles)
        return -ENOMEM;

    for (n = 0; n < nthreads; n++) {
        struct test_cycle *cycle = &cycles[n];

        ww_mutex_init(&cycle->a_mutex, &ww_class);
        if (n == last)
            cycle->b_mutex = &cycles[0].a_mutex;
        else
            cycle->b_mutex = &cycles[n + 1].a_mutex;

        if (n == 0)
            cycle->a_signal = &cycles[last].b_signal;
        else
            cycle->a_signal = &cycles[n - 1].b_signal;
        init_completion(&cycle->b_signal);

        INIT_WORK(&cycle->work, test_cycle_work);
        cycle->result = 0;
    }

    for (n = 0; n < nthreads; n++)
        queue_work(wq, &cycles[n].work);

    flush_workqueue(wq);

    ret = 0;
    for (n = 0; n < nthreads; n++) {
        struct test_cycle *cycle = &cycles[n];

        if (!cycle->result)
            continue;

        pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
               n, nthreads, cycle->result);
        ret = -EINVAL;
        break;
    }

    for (n = 0; n < nthreads; n++)
        ww_mutex_destroy(&cycles[n].a_mutex);
    kfree(cycles);
    return ret;
}

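/* Try cycles of every length from 2 up to ncpus + 1. */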
static int test_cycle(unsigned int ncpus)
{
    unsigned int n;
    int ret;

    for (n = 2; n <= ncpus + 1; n++) {
        ret = __test_cycle(n);
        if (ret)
            return ret;
    }

    return 0;
}

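/*
 * Stress tests: hammer a shared array of ww_mutexes from a pool of
 * workers until 'timeout' expires, in several acquisition patterns.
 */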
struct stress {
    struct work_struct work;
    struct ww_mutex *locks;
    unsigned long timeout;
    int nlocks;
};

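/* Return a random permutation of 0..count-1 (Fisher-Yates shuffle). */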
static int *get_random_order(int count)
{
    int *order;
    int n, r, tmp;

    order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
    if (!order)
        return order;

    for (n = 0; n < count; n++)
        order[n] = n;

    for (n = count - 1; n > 0; n--) {
        r = get_random_int() % (n + 1);
        if (r != n) {
            tmp = order[n];
            order[n] = order[r];
            order[r] = tmp;
        }
    }

    return order;
}

static void dummy_load(struct stress *stress)
{
    usleep_range(1000, 2000);
}

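/*
 * Take all locks in one fixed random order; on -EDEADLK drop
 * everything, acquire the contended mutex with ww_mutex_lock_slow()
 * and retry, skipping the already-held contended lock on the next
 * pass.
 */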
static void stress_inorder_work(struct work_struct *work)
{
    struct stress *stress = container_of(work, typeof(*stress), work);
    const int nlocks = stress->nlocks;
    struct ww_mutex *locks = stress->locks;
    struct ww_acquire_ctx ctx;
    int *order;

    order = get_random_order(nlocks);
    if (!order)
        return;

    do {
        int contended = -1;
        int n, err;

        ww_acquire_init(&ctx, &ww_class);
retry:
        err = 0;
        for (n = 0; n < nlocks; n++) {
            if (n == contended)
                continue;

            err = ww_mutex_lock(&locks[order[n]], &ctx);
            if (err < 0)
                break;
        }
        if (!err)
            dummy_load(stress);

        if (contended > n)
            ww_mutex_unlock(&locks[order[contended]]);
        contended = n;
        while (n--)
            ww_mutex_unlock(&locks[order[n]]);

        if (err == -EDEADLK) {
            ww_mutex_lock_slow(&locks[order[contended]], &ctx);
            goto retry;
        }

        if (err) {
            pr_err_once("stress (%s) failed with %d\n",
                    __func__, err);
            break;
        }

        ww_acquire_fini(&ctx);
    } while (!time_after(jiffies, stress->timeout));

    kfree(order);
    kfree(stress);
}

struct reorder_lock {
    struct list_head link;
    struct ww_mutex *lock;
};

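/*
 * As above, but on contention move the contended lock to the head of
 * the list and restart the iteration, so the acquisition order keeps
 * changing - the backoff must still resolve every conflict.
 */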
static void stress_reorder_work(struct work_struct *work)
{
    struct stress *stress = container_of(work, typeof(*stress), work);
    LIST_HEAD(locks);
    struct ww_acquire_ctx ctx;
    struct reorder_lock *ll, *ln;
    int *order;
    int n, err;

    order = get_random_order(stress->nlocks);
    if (!order)
        return;

    for (n = 0; n < stress->nlocks; n++) {
        ll = kmalloc(sizeof(*ll), GFP_KERNEL);
        if (!ll)
            goto out;

        ll->lock = &stress->locks[order[n]];
        list_add(&ll->link, &locks);
    }
    kfree(order);
    order = NULL;

    do {
        ww_acquire_init(&ctx, &ww_class);

        list_for_each_entry(ll, &locks, link) {
            err = ww_mutex_lock(ll->lock, &ctx);
            if (!err)
                continue;

            ln = ll;
            list_for_each_entry_continue_reverse(ln, &locks, link)
                ww_mutex_unlock(ln->lock);

            if (err != -EDEADLK) {
                pr_err_once("stress (%s) failed with %d\n",
                        __func__, err);
                break;
            }

            ww_mutex_lock_slow(ll->lock, &ctx);
            list_move(&ll->link, &locks); /* restarts iteration */
        }

        dummy_load(stress);
        list_for_each_entry(ll, &locks, link)
            ww_mutex_unlock(ll->lock);

        ww_acquire_fini(&ctx);
    } while (!time_after(jiffies, stress->timeout));

out:
    list_for_each_entry_safe(ll, ln, &locks, link)
        kfree(ll);
    kfree(order);
    kfree(stress);
}

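/*
 * Simplest stress: repeatedly lock and unlock one randomly chosen
 * mutex without an acquire context.
 */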
static void stress_one_work(struct work_struct *work)
{
    struct stress *stress = container_of(work, typeof(*stress), work);
    const int nlocks = stress->nlocks;
    struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
    int err;

    do {
        err = ww_mutex_lock(lock, NULL);
        if (!err) {
            dummy_load(stress);
            ww_mutex_unlock(lock);
        } else {
            pr_err_once("stress (%s) failed with %d\n",
                    __func__, err);
            break;
        }
    } while (!time_after(jiffies, stress->timeout));

    kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

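/*
 * Spawn nthreads workers, cycling round-robin through the stress
 * patterns selected in 'flags', all sharing one array of nlocks
 * mutexes.  Each worker frees its own stress struct when it finishes.
 */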
static int stress(int nlocks, int nthreads, unsigned int flags)
{
    struct ww_mutex *locks;
    int n;

    locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
    if (!locks)
        return -ENOMEM;

    for (n = 0; n < nlocks; n++)
        ww_mutex_init(&locks[n], &ww_class);

    for (n = 0; nthreads; n++) {
        struct stress *stress;
        void (*fn)(struct work_struct *work);

        fn = NULL;
        switch (n & 3) {
        case 0:
            if (flags & STRESS_INORDER)
                fn = stress_inorder_work;
            break;
        case 1:
            if (flags & STRESS_REORDER)
                fn = stress_reorder_work;
            break;
        case 2:
            if (flags & STRESS_ONE)
                fn = stress_one_work;
            break;
        }

        if (!fn)
            continue;

        stress = kmalloc(sizeof(*stress), GFP_KERNEL);
        if (!stress)
            break;

        INIT_WORK(&stress->work, fn);
        stress->locks = locks;
        stress->nlocks = nlocks;
        stress->timeout = jiffies + 2*HZ;

        queue_work(wq, &stress->work);
        nthreads--;
    }

    flush_workqueue(wq);

    for (n = 0; n < nlocks; n++)
        ww_mutex_destroy(&locks[n]);
    kfree(locks);

    return 0;
}

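/* Run each selftest in turn; any failure aborts module load. */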
static int __init test_ww_mutex_init(void)
{
    int ncpus = num_online_cpus();
    int ret, i;

    printk(KERN_INFO "Beginning ww mutex selftests\n");

    wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
    if (!wq)
        return -ENOMEM;

    ret = test_mutex();
    if (ret)
        goto out;

    ret = test_aa(false);
    if (ret)
        goto out;

    ret = test_aa(true);
    if (ret)
        goto out;

    for (i = 0; i < 4; i++) {
        ret = test_abba(i & 1, i & 2);
        if (ret)
            goto out;
    }

    ret = test_cycle(ncpus);
    if (ret)
        goto out;

    ret = stress(16, 2*ncpus, STRESS_INORDER);
    if (ret)
        goto out;

    ret = stress(16, 2*ncpus, STRESS_REORDER);
    if (ret)
        goto out;

    ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
    if (ret)
        goto out;

    printk(KERN_INFO "All ww mutex selftests passed\n");
    return 0;

out:
    /* Failed module init never runs the exit handler: free wq here. */
    destroy_workqueue(wq);
    return ret;
}

static void __exit test_ww_mutex_exit(void)
{
    destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_DESCRIPTION("API test facility for ww_mutexes");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");