0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #define pr_fmt(fmt) fmt
0013
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/kthread.h>
0017 #include <linux/sched/rt.h>
0018 #include <linux/spinlock.h>
0019 #include <linux/mutex.h>
0020 #include <linux/rwsem.h>
0021 #include <linux/smp.h>
0022 #include <linux/interrupt.h>
0023 #include <linux/sched.h>
0024 #include <uapi/linux/sched/types.h>
0025 #include <linux/rtmutex.h>
0026 #include <linux/atomic.h>
0027 #include <linux/moduleparam.h>
0028 #include <linux/delay.h>
0029 #include <linux/slab.h>
0030 #include <linux/torture.h>
0031 #include <linux/reboot.h>
0032
0033 MODULE_LICENSE("GPL");
0034 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
0035
/*
 * Module parameters.  Negative thread counts select a default derived
 * from the number of online CPUs (see lock_torture_init()).
 */
torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

/* Lock flavor under test; must match a lock_torture_ops ->name below. */
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
0056
/* Kthreads spawned by this module. */
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;		/* true while any writer holds the lock */
static atomic_t lock_is_read_held;	/* count of readers currently holding it */
static unsigned long last_lock_release;	/* jiffies of most recent write release */

/* Per-kthread statistics, one entry per writer/reader task. */
struct lock_stress_stats {
	long n_lock_fail;	/* exclusion violations seen (WARN_ON_ONCE hits) */
	long n_lock_acquired;	/* total successful acquisitions */
};
0069
0070
0071 static void lock_torture_cleanup(void);
0072
0073
0074
0075
/*
 * Operations vector describing one lock flavor under test.  The
 * read-side hooks are NULL for exclusive-only primitives.
 */
struct lock_torture_ops {
	void (*init)(void);		/* optional one-time setup */
	void (*exit)(void);		/* optional teardown (run only if init ran) */
	int (*writelock)(int tid);	/* acquire the write side */
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);	/* release the write side */
	int (*readlock)(int tid);	/* acquire the read side, or NULL */
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);	/* release the read side, or NULL */

	unsigned long flags;	/* stashed IRQ flags for the *_irq variants */
	const char *name;	/* matched against the torture_type parameter */
};
0090
/* Global test context, filled in by lock_torture_init(). */
struct lock_torture_cxt {
	int nrealwriters_stress;	/* actual number of writer kthreads */
	int nrealreaders_stress;	/* actual number of reader kthreads */
	bool debug_lock;		/* lock debugging configured in? */
	bool init_called;		/* cur_ops->init() has been invoked */
	atomic_t n_lock_torture_errors;	/* failures detected at stats time */
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa;	/* writer statistics */
	struct lock_stress_stats *lrsa;	/* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
0104
0105
0106
0107
/* Deliberately broken "lock": acquires nothing, to test the test itself. */
static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;
}
0112
static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	/* Occasionally allow preemption while "holding" the busted lock. */
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();
}
0124
static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* Nothing was acquired, so there is nothing to release. */
}
0129
static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority boosting; everyone else no-ops. */
}
0134
/* Exclusive-only ops for the intentionally broken lock. */
static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};
0145
static DEFINE_SPINLOCK(torture_spinlock);

/* Write-side acquisition for the spin_lock flavor. */
static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}
0154
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	/* Occasionally allow preemption while the lock is held. */
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();
}
0172
/* Write-side release for the spin_lock flavor. */
static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}
0178
/* Exclusive-only ops for plain spin_lock. */
static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};
0189
static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	/* Stash flags for the matching unlock; the lock is held, so only one
	 * writer touches ->flags at a time. */
	cxt.cur_ops->flags = flags;
	return 0;
}
0199
static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	/* Restore the IRQ flags saved by the matching lock operation. */
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
0205
/* Exclusive-only ops for spin_lock_irqsave/irqrestore. */
static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};
0216
static DEFINE_RWLOCK(torture_rwlock);

/* Write-side acquisition for the rw_lock flavor. */
static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}
0225
0226 static void torture_rwlock_write_delay(struct torture_random_state *trsp)
0227 {
0228 const unsigned long shortdelay_us = 2;
0229 const unsigned long longdelay_ms = 100;
0230
0231
0232
0233
0234 if (!(torture_random(trsp) %
0235 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
0236 mdelay(longdelay_ms);
0237 else
0238 udelay(shortdelay_us);
0239 }
0240
/* Write-side release for the rw_lock flavor. */
static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}
0246
/* Read-side acquisition for the rw_lock flavor. */
static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}
0253
0254 static void torture_rwlock_read_delay(struct torture_random_state *trsp)
0255 {
0256 const unsigned long shortdelay_us = 10;
0257 const unsigned long longdelay_ms = 100;
0258
0259
0260
0261
0262 if (!(torture_random(trsp) %
0263 (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
0264 mdelay(longdelay_ms);
0265 else
0266 udelay(shortdelay_us);
0267 }
0268
/* Read-side release for the rw_lock flavor. */
static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}
0274
/* Reader/writer ops for plain rwlock_t. */
static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
0285
static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	/* Stash flags for the matching unlock while the lock is held. */
	cxt.cur_ops->flags = flags;
	return 0;
}
0295
static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	/* Restore the IRQ flags saved by the matching lock operation. */
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
0301
static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	/* NOTE(review): ->flags is shared among all readers and writers of
	 * this flavor; concurrent readers can overwrite each other's saved
	 * flags — presumably tolerated by this test. */
	cxt.cur_ops->flags = flags;
	return 0;
}
0311
static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	/* Restore the most recently stashed IRQ flags. */
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
0317
/* Reader/writer ops for rwlock_t with IRQ save/restore. */
static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};
0328
static DEFINE_MUTEX(torture_mutex);

/* Acquisition for the mutex_lock flavor. */
static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}
0337
0338 static void torture_mutex_delay(struct torture_random_state *trsp)
0339 {
0340 const unsigned long longdelay_ms = 100;
0341
0342
0343 if (!(torture_random(trsp) %
0344 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
0345 mdelay(longdelay_ms * 5);
0346 else
0347 mdelay(longdelay_ms / 5);
0348 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
0349 torture_preempt_schedule();
0350 }
0351
/* Release for the mutex_lock flavor. */
static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}
0357
/* Exclusive-only ops for mutex_lock. */
static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};
0368
#include <linux/ww_mutex.h>

/*
 * Three wound/wait mutexes that each write "lock" operation acquires
 * together, plus one ww_acquire_ctx per writer kthread (indexed by tid).
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;
0378
static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	/* One acquire context per writer kthread. */
	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	/*
	 * NOTE(review): on failure only a message is logged; the writers
	 * would then dereference a NULL ww_acquire_ctxs — presumably deemed
	 * acceptable for a stress-test module, but worth confirming.
	 */
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}
0391
/* Free the per-writer acquire contexts allocated by torture_ww_mutex_init(). */
static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}
0396
/*
 * Acquire all three ww_mutexes under one acquire context, handling
 * -EDEADLK by backing off and retrying with the contended lock first.
 */
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	/* Build the work list of the three mutexes to acquire. */
	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		/* Contended: release everything acquired so far. */
		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		/*
		 * -EDEADLK: sleep-wait for the contended lock, then move it
		 * to the list head so it is taken first on the next pass.
		 */
		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}
0440
/* Release all three ww_mutexes and finish this writer's acquire context. */
static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}
0453
/* Exclusive-only ops for the wound/wait mutex triple. */
static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "ww_mutex_lock"
};
0466
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

/* Acquisition for the rtmutex_lock flavor. */
static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}
0476
static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = 50000;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex it will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}
0507
static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	/* Occasionally allow preemption while the rtmutex is held. */
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();
}
0526
/* Release for the rtmutex_lock flavor. */
static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}
0532
/* Exclusive-only ops for rt_mutex, with priority-boost exercise. */
static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif
0544
static DECLARE_RWSEM(torture_rwsem);

/* Write-side acquisition for the rwsem_lock flavor. */
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}
0552
0553 static void torture_rwsem_write_delay(struct torture_random_state *trsp)
0554 {
0555 const unsigned long longdelay_ms = 100;
0556
0557
0558 if (!(torture_random(trsp) %
0559 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
0560 mdelay(longdelay_ms * 10);
0561 else
0562 mdelay(longdelay_ms / 10);
0563 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
0564 torture_preempt_schedule();
0565 }
0566
/* Write-side release for the rwsem_lock flavor. */
static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}
0572
/* Read-side acquisition for the rwsem_lock flavor. */
static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}
0579
0580 static void torture_rwsem_read_delay(struct torture_random_state *trsp)
0581 {
0582 const unsigned long longdelay_ms = 100;
0583
0584
0585 if (!(torture_random(trsp) %
0586 (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
0587 mdelay(longdelay_ms * 2);
0588 else
0589 mdelay(longdelay_ms / 2);
0590 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
0591 torture_preempt_schedule();
0592 }
0593
/* Read-side release for the rwsem_lock flavor. */
static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}
0599
/* Reader/writer ops for the rw_semaphore. */
static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
0610
#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

/* One-time setup; BUG on allocation failure since there is no error path. */
static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}
0618
/* Teardown matching torture_percpu_rwsem_init(). */
static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}
0623
/* Write-side acquisition for the percpu_rwsem flavor. */
static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}
0630
/* Write-side release for the percpu_rwsem flavor. */
static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}
0636
/* Read-side acquisition for the percpu_rwsem flavor. */
static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}
0643
/* Read-side release for the percpu_rwsem flavor. */
static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}
0649
/* Reader/writer ops for the per-CPU rw_semaphore. */
static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock       = torture_percpu_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
0662
0663
0664
0665
0666
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases the
 * lock, checking for lonely acquisitions (i.e., exclusion violations).
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	int tid = lwsp - cxt.lwsa;	/* index in lwsa doubles as thread id */
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock(tid);
		/* Another writer already "holding" the lock is a failure. */
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		WRITE_ONCE(last_lock_release, jiffies);
		cxt.cur_ops->writeunlock(tid);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
0701
0702
0703
0704
0705
/*
 * Lock torture reader kthread.  Repeatedly acquires and releases the
 * read side, checking that no writer holds the lock concurrently.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;	/* index in lrsa doubles as thread id */
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		/* A concurrent writer while read-holding is a failure. */
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++;

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}
0734
0735
0736
0737
/*
 * Create a lock-torture-statistics message in the specified buffer,
 * summing over the writer (write==true) or reader stats arrays.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	/* "???" flags a suspicious max/min skew unless CPU hotplug explains it. */
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776 static void lock_torture_stats_print(void)
0777 {
0778 int size = cxt.nrealwriters_stress * 200 + 8192;
0779 char *buf;
0780
0781 if (cxt.cur_ops->readlock)
0782 size += cxt.nrealreaders_stress * 200 + 8192;
0783
0784 buf = kmalloc(size, GFP_KERNEL);
0785 if (!buf) {
0786 pr_err("lock_torture_stats_print: Out of memory, need: %d",
0787 size);
0788 return;
0789 }
0790
0791 __torture_print_stats(buf, cxt.lwsa, true);
0792 pr_alert("%s", buf);
0793 kfree(buf);
0794
0795 if (cxt.cur_ops->readlock) {
0796 buf = kmalloc(size, GFP_KERNEL);
0797 if (!buf) {
0798 pr_err("lock_torture_stats_print: Out of memory, need: %d",
0799 size);
0800 return;
0801 }
0802
0803 __torture_print_stats(buf, cxt.lrsa, false);
0804 pr_alert("%s", buf);
0805 kfree(buf);
0806 }
0807 }
0808
0809
0810
0811
0812
0813
0814
0815
/*
 * Periodically prints torture statistics, if periodic statistics
 * printing was specified via the stat_interval module parameter.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}
0827
/*
 * Print the module parameters prefixed by @tag (start/end-of-test status).
 * Note: @cur_ops is currently unreferenced in the body.
 */
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}
0839
/* Stop all kthreads, print final statistics, and free test state. */
static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However cxt->cur_ops.init() may have been invoked, so beside
	 * perform the underlying torture-specific cleanups, cur_ops.exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	/* Stop and reap the writer kthreads. */
	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	/* Stop and reap the reader kthreads. */
	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	/* Run the flavor-specific teardown only if its init actually ran. */
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}
0899
/*
 * Module initialization: select the lock flavor, validate parameters,
 * allocate statistics and task arrays, and start the torture kthreads.
 * On any failure, unwinds via lock_torture_cleanup().
 */
static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	/* Flavor-specific setup (e.g. ww_mutex contexts, percpu rwsem). */
	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default to.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little
	 * locks).  Writers and readers are created in an interleaved
	 * fashion so neither side gets a head start.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	/* With a scheduled shutdown, power off rather than leave a hung test. */
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}
1115
/* Module entry and exit points. */
module_init(lock_torture_init);
module_exit(lock_torture_cleanup);