0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define pr_fmt(fmt) fmt
0014
0015 #include <linux/types.h>
0016 #include <linux/kernel.h>
0017 #include <linux/init.h>
0018 #include <linux/module.h>
0019 #include <linux/kthread.h>
0020 #include <linux/err.h>
0021 #include <linux/spinlock.h>
0022 #include <linux/smp.h>
0023 #include <linux/rcupdate_wait.h>
0024 #include <linux/interrupt.h>
0025 #include <linux/sched/signal.h>
0026 #include <uapi/linux/sched/types.h>
0027 #include <linux/atomic.h>
0028 #include <linux/bitops.h>
0029 #include <linux/completion.h>
0030 #include <linux/moduleparam.h>
0031 #include <linux/percpu.h>
0032 #include <linux/notifier.h>
0033 #include <linux/reboot.h>
0034 #include <linux/freezer.h>
0035 #include <linux/cpu.h>
0036 #include <linux/delay.h>
0037 #include <linux/stat.h>
0038 #include <linux/srcu.h>
0039 #include <linux/slab.h>
0040 #include <linux/trace_clock.h>
0041 #include <asm/byteorder.h>
0042 #include <linux/torture.h>
0043 #include <linux/vmalloc.h>
0044 #include <linux/sched/debug.h>
0045 #include <linux/sched/sysctl.h>
0046 #include <linux/oom.h>
0047 #include <linux/tick.h>
0048 #include <linux/rcupdate_trace.h>
0049 #include <linux/nmi.h>
0050
0051 #include "rcu.h"
0052
0053 MODULE_LICENSE("GPL");
0054 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
0055
0056
0057 #define RCUTORTURE_RDR_SHIFT_1 8
0058 #define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1)
0059 #define RCUTORTURE_RDR_SHIFT_2 9
0060 #define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2)
0061 #define RCUTORTURE_RDR_BH 0x01
0062 #define RCUTORTURE_RDR_IRQ 0x02
0063 #define RCUTORTURE_RDR_PREEMPT 0x04
0064 #define RCUTORTURE_RDR_RBH 0x08
0065 #define RCUTORTURE_RDR_SCHED 0x10
0066 #define RCUTORTURE_RDR_RCU_1 0x20
0067 #define RCUTORTURE_RDR_RCU_2 0x40
0068 #define RCUTORTURE_RDR_NBITS 7
0069 #define RCUTORTURE_MAX_EXTEND \
0070 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
0071 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
0072 #define RCUTORTURE_RDR_MAX_LOOPS 0x7
0073
0074 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
0075
0076 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
0077 "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
0078 torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
0079 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
0080 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
0081 torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
0082 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
0083 torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
0084 torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
0085 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
0086 torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
0087 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
0088 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
0089 torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
0090 torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
0091 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
0092 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
0093 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
0094 torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
0095 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
0096 torture_param(int, nreaders, -1, "Number of RCU reader threads");
0097 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
0098 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
0099 torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
0100 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
0101 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
0102 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
0103 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
0104 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
0105 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
0106 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
0107 torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
0108 torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
0109 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
0110 torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
0111 torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
0112 torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
0113 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
0114 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
0115 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
0116 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
0117 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
0118 torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
0119
0120 static char *torture_type = "rcu";
0121 module_param(torture_type, charp, 0444);
0122 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
0123
0124 static int nrealnocbers;
0125 static int nrealreaders;
0126 static struct task_struct *writer_task;
0127 static struct task_struct **fakewriter_tasks;
0128 static struct task_struct **reader_tasks;
0129 static struct task_struct **nocb_tasks;
0130 static struct task_struct *stats_task;
0131 static struct task_struct *fqs_task;
0132 static struct task_struct *boost_tasks[NR_CPUS];
0133 static struct task_struct *stall_task;
0134 static struct task_struct **fwd_prog_tasks;
0135 static struct task_struct **barrier_cbs_tasks;
0136 static struct task_struct *barrier_task;
0137 static struct task_struct *read_exit_task;
0138
0139 #define RCU_TORTURE_PIPE_LEN 10
0140
0141
0142 struct rcu_torture_reader_check {
0143 unsigned long rtc_myloops;
0144 int rtc_chkrdr;
0145 unsigned long rtc_chkloops;
0146 int rtc_ready;
0147 struct rcu_torture_reader_check *rtc_assigner;
0148 } ____cacheline_internodealigned_in_smp;
0149
0150
0151 struct rcu_torture {
0152 struct rcu_head rtort_rcu;
0153 int rtort_pipe_count;
0154 struct list_head rtort_free;
0155 int rtort_mbtest;
0156 struct rcu_torture_reader_check *rtort_chkp;
0157 };
0158
0159 static LIST_HEAD(rcu_torture_freelist);
0160 static struct rcu_torture __rcu *rcu_torture_current;
0161 static unsigned long rcu_torture_current_version;
0162 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
0163 static DEFINE_SPINLOCK(rcu_torture_lock);
0164 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
0165 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
0166 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
0167 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
0168 static atomic_t n_rcu_torture_alloc;
0169 static atomic_t n_rcu_torture_alloc_fail;
0170 static atomic_t n_rcu_torture_free;
0171 static atomic_t n_rcu_torture_mberror;
0172 static atomic_t n_rcu_torture_mbchk_fail;
0173 static atomic_t n_rcu_torture_mbchk_tries;
0174 static atomic_t n_rcu_torture_error;
0175 static long n_rcu_torture_barrier_error;
0176 static long n_rcu_torture_boost_ktrerror;
0177 static long n_rcu_torture_boost_rterror;
0178 static long n_rcu_torture_boost_failure;
0179 static long n_rcu_torture_boosts;
0180 static atomic_long_t n_rcu_torture_timers;
0181 static long n_barrier_attempts;
0182 static long n_barrier_successes;
0183 static unsigned long n_read_exits;
0184 static struct list_head rcu_torture_removed;
0185 static unsigned long shutdown_jiffies;
0186 static unsigned long start_gp_seq;
0187 static atomic_long_t n_nocb_offload;
0188 static atomic_long_t n_nocb_deoffload;
0189
0190 static int rcu_torture_writer_state;
0191 #define RTWS_FIXED_DELAY 0
0192 #define RTWS_DELAY 1
0193 #define RTWS_REPLACE 2
0194 #define RTWS_DEF_FREE 3
0195 #define RTWS_EXP_SYNC 4
0196 #define RTWS_COND_GET 5
0197 #define RTWS_COND_GET_EXP 6
0198 #define RTWS_COND_SYNC 7
0199 #define RTWS_COND_SYNC_EXP 8
0200 #define RTWS_POLL_GET 9
0201 #define RTWS_POLL_GET_EXP 10
0202 #define RTWS_POLL_WAIT 11
0203 #define RTWS_POLL_WAIT_EXP 12
0204 #define RTWS_SYNC 13
0205 #define RTWS_STUTTER 14
0206 #define RTWS_STOPPING 15
0207 static const char * const rcu_torture_writer_state_names[] = {
0208 "RTWS_FIXED_DELAY",
0209 "RTWS_DELAY",
0210 "RTWS_REPLACE",
0211 "RTWS_DEF_FREE",
0212 "RTWS_EXP_SYNC",
0213 "RTWS_COND_GET",
0214 "RTWS_COND_GET_EXP",
0215 "RTWS_COND_SYNC",
0216 "RTWS_COND_SYNC_EXP",
0217 "RTWS_POLL_GET",
0218 "RTWS_POLL_GET_EXP",
0219 "RTWS_POLL_WAIT",
0220 "RTWS_POLL_WAIT_EXP",
0221 "RTWS_SYNC",
0222 "RTWS_STUTTER",
0223 "RTWS_STOPPING",
0224 };
0225
0226
0227 struct rt_read_seg {
0228 int rt_readstate;
0229 unsigned long rt_delay_jiffies;
0230 unsigned long rt_delay_ms;
0231 unsigned long rt_delay_us;
0232 bool rt_preempted;
0233 };
0234 static int err_segs_recorded;
0235 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
0236 static int rt_read_nsegs;
0237
0238 static const char *rcu_torture_writer_state_getname(void)
0239 {
0240 unsigned int i = READ_ONCE(rcu_torture_writer_state);
0241
0242 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
0243 return "???";
0244 return rcu_torture_writer_state_names[i];
0245 }
0246
0247 #ifdef CONFIG_RCU_TRACE
0248 static u64 notrace rcu_trace_clock_local(void)
0249 {
0250 u64 ts = trace_clock_local();
0251
0252 (void)do_div(ts, NSEC_PER_USEC);
0253 return ts;
0254 }
0255 #else
0256 static u64 notrace rcu_trace_clock_local(void)
0257 {
0258 return 0ULL;
0259 }
0260 #endif
0261
0262
0263
0264
0265
0266 static bool shutdown_time_arrived(void)
0267 {
0268 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
0269 }
0270
0271 static unsigned long boost_starttime;
0272 static DEFINE_MUTEX(boost_mutex);
0273
0274 static atomic_t barrier_cbs_count;
0275 static bool barrier_phase;
0276 static atomic_t barrier_cbs_invoked;
0277 static wait_queue_head_t *barrier_cbs_wq;
0278 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
0279
0280 static atomic_t rcu_fwd_cb_nodelay;
0281
0282
0283
0284
0285 static struct rcu_torture *
0286 rcu_torture_alloc(void)
0287 {
0288 struct list_head *p;
0289
0290 spin_lock_bh(&rcu_torture_lock);
0291 if (list_empty(&rcu_torture_freelist)) {
0292 atomic_inc(&n_rcu_torture_alloc_fail);
0293 spin_unlock_bh(&rcu_torture_lock);
0294 return NULL;
0295 }
0296 atomic_inc(&n_rcu_torture_alloc);
0297 p = rcu_torture_freelist.next;
0298 list_del_init(p);
0299 spin_unlock_bh(&rcu_torture_lock);
0300 return container_of(p, struct rcu_torture, rtort_free);
0301 }
0302
0303
0304
0305
0306 static void
0307 rcu_torture_free(struct rcu_torture *p)
0308 {
0309 atomic_inc(&n_rcu_torture_free);
0310 spin_lock_bh(&rcu_torture_lock);
0311 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
0312 spin_unlock_bh(&rcu_torture_lock);
0313 }
0314
0315
0316
0317
0318
0319 struct rcu_torture_ops {
0320 int ttype;
0321 void (*init)(void);
0322 void (*cleanup)(void);
0323 int (*readlock)(void);
0324 void (*read_delay)(struct torture_random_state *rrsp,
0325 struct rt_read_seg *rtrsp);
0326 void (*readunlock)(int idx);
0327 int (*readlock_held)(void);
0328 unsigned long (*get_gp_seq)(void);
0329 unsigned long (*gp_diff)(unsigned long new, unsigned long old);
0330 void (*deferred_free)(struct rcu_torture *p);
0331 void (*sync)(void);
0332 void (*exp_sync)(void);
0333 unsigned long (*get_gp_state_exp)(void);
0334 unsigned long (*start_gp_poll_exp)(void);
0335 bool (*poll_gp_state_exp)(unsigned long oldstate);
0336 void (*cond_sync_exp)(unsigned long oldstate);
0337 unsigned long (*get_gp_state)(void);
0338 unsigned long (*get_gp_completed)(void);
0339 unsigned long (*start_gp_poll)(void);
0340 bool (*poll_gp_state)(unsigned long oldstate);
0341 void (*cond_sync)(unsigned long oldstate);
0342 call_rcu_func_t call;
0343 void (*cb_barrier)(void);
0344 void (*fqs)(void);
0345 void (*stats)(void);
0346 void (*gp_kthread_dbg)(void);
0347 bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
0348 int (*stall_dur)(void);
0349 long cbflood_max;
0350 int irq_capable;
0351 int can_boost;
0352 int extendables;
0353 int slow_gps;
0354 int no_pi_lock;
0355 const char *name;
0356 };
0357
0358 static struct rcu_torture_ops *cur_ops;
0359
0360
0361
0362
0363
0364 static int torture_readlock_not_held(void)
0365 {
0366 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
0367 }
0368
0369 static int rcu_torture_read_lock(void) __acquires(RCU)
0370 {
0371 rcu_read_lock();
0372 return 0;
0373 }
0374
0375 static void
0376 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
0377 {
0378 unsigned long started;
0379 unsigned long completed;
0380 const unsigned long shortdelay_us = 200;
0381 unsigned long longdelay_ms = 300;
0382 unsigned long long ts;
0383
0384
0385
0386
0387
0388 if (!atomic_read(&rcu_fwd_cb_nodelay) &&
0389 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
0390 started = cur_ops->get_gp_seq();
0391 ts = rcu_trace_clock_local();
0392 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
0393 longdelay_ms = 5;
0394 mdelay(longdelay_ms);
0395 rtrsp->rt_delay_ms = longdelay_ms;
0396 completed = cur_ops->get_gp_seq();
0397 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
0398 started, completed);
0399 }
0400 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
0401 udelay(shortdelay_us);
0402 rtrsp->rt_delay_us = shortdelay_us;
0403 }
0404 if (!preempt_count() &&
0405 !(torture_random(rrsp) % (nrealreaders * 500))) {
0406 torture_preempt_schedule();
0407 rtrsp->rt_preempted = true;
0408 }
0409 }
0410
0411 static void rcu_torture_read_unlock(int idx) __releases(RCU)
0412 {
0413 rcu_read_unlock();
0414 }
0415
0416
0417
0418
0419 static bool
0420 rcu_torture_pipe_update_one(struct rcu_torture *rp)
0421 {
0422 int i;
0423 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
0424
0425 if (rtrcp) {
0426 WRITE_ONCE(rp->rtort_chkp, NULL);
0427 smp_store_release(&rtrcp->rtc_ready, 1);
0428 }
0429 i = READ_ONCE(rp->rtort_pipe_count);
0430 if (i > RCU_TORTURE_PIPE_LEN)
0431 i = RCU_TORTURE_PIPE_LEN;
0432 atomic_inc(&rcu_torture_wcount[i]);
0433 WRITE_ONCE(rp->rtort_pipe_count, i + 1);
0434 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
0435 rp->rtort_mbtest = 0;
0436 return true;
0437 }
0438 return false;
0439 }
0440
0441
0442
0443
0444
0445 static void
0446 rcu_torture_pipe_update(struct rcu_torture *old_rp)
0447 {
0448 struct rcu_torture *rp;
0449 struct rcu_torture *rp1;
0450
0451 if (old_rp)
0452 list_add(&old_rp->rtort_free, &rcu_torture_removed);
0453 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
0454 if (rcu_torture_pipe_update_one(rp)) {
0455 list_del(&rp->rtort_free);
0456 rcu_torture_free(rp);
0457 }
0458 }
0459 }
0460
0461 static void
0462 rcu_torture_cb(struct rcu_head *p)
0463 {
0464 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
0465
0466 if (torture_must_stop_irq()) {
0467
0468
0469 return;
0470 }
0471 if (rcu_torture_pipe_update_one(rp))
0472 rcu_torture_free(rp);
0473 else
0474 cur_ops->deferred_free(rp);
0475 }
0476
0477 static unsigned long rcu_no_completed(void)
0478 {
0479 return 0;
0480 }
0481
0482 static void rcu_torture_deferred_free(struct rcu_torture *p)
0483 {
0484 call_rcu(&p->rtort_rcu, rcu_torture_cb);
0485 }
0486
0487 static void rcu_sync_torture_init(void)
0488 {
0489 INIT_LIST_HEAD(&rcu_torture_removed);
0490 }
0491
0492 static struct rcu_torture_ops rcu_ops = {
0493 .ttype = RCU_FLAVOR,
0494 .init = rcu_sync_torture_init,
0495 .readlock = rcu_torture_read_lock,
0496 .read_delay = rcu_read_delay,
0497 .readunlock = rcu_torture_read_unlock,
0498 .readlock_held = torture_readlock_not_held,
0499 .get_gp_seq = rcu_get_gp_seq,
0500 .gp_diff = rcu_seq_diff,
0501 .deferred_free = rcu_torture_deferred_free,
0502 .sync = synchronize_rcu,
0503 .exp_sync = synchronize_rcu_expedited,
0504 .get_gp_state = get_state_synchronize_rcu,
0505 .get_gp_completed = get_completed_synchronize_rcu,
0506 .start_gp_poll = start_poll_synchronize_rcu,
0507 .poll_gp_state = poll_state_synchronize_rcu,
0508 .cond_sync = cond_synchronize_rcu,
0509 .get_gp_state_exp = get_state_synchronize_rcu,
0510 .start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
0511 .poll_gp_state_exp = poll_state_synchronize_rcu,
0512 .cond_sync_exp = cond_synchronize_rcu_expedited,
0513 .call = call_rcu,
0514 .cb_barrier = rcu_barrier,
0515 .fqs = rcu_force_quiescent_state,
0516 .stats = NULL,
0517 .gp_kthread_dbg = show_rcu_gp_kthreads,
0518 .check_boost_failed = rcu_check_boost_fail,
0519 .stall_dur = rcu_jiffies_till_stall_check,
0520 .irq_capable = 1,
0521 .can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
0522 .extendables = RCUTORTURE_MAX_EXTEND,
0523 .name = "rcu"
0524 };
0525
0526
0527
0528
0529
0530
0531
0532
0533 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
0534 {
0535
0536 rcu_torture_cb(&p->rtort_rcu);
0537 }
0538
0539 static void synchronize_rcu_busted(void)
0540 {
0541
0542 }
0543
0544 static void
0545 call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
0546 {
0547
0548 func(head);
0549 }
0550
0551 static struct rcu_torture_ops rcu_busted_ops = {
0552 .ttype = INVALID_RCU_FLAVOR,
0553 .init = rcu_sync_torture_init,
0554 .readlock = rcu_torture_read_lock,
0555 .read_delay = rcu_read_delay,
0556 .readunlock = rcu_torture_read_unlock,
0557 .readlock_held = torture_readlock_not_held,
0558 .get_gp_seq = rcu_no_completed,
0559 .deferred_free = rcu_busted_torture_deferred_free,
0560 .sync = synchronize_rcu_busted,
0561 .exp_sync = synchronize_rcu_busted,
0562 .call = call_rcu_busted,
0563 .cb_barrier = NULL,
0564 .fqs = NULL,
0565 .stats = NULL,
0566 .irq_capable = 1,
0567 .name = "busted"
0568 };
0569
0570
0571
0572
0573
0574 DEFINE_STATIC_SRCU(srcu_ctl);
0575 static struct srcu_struct srcu_ctld;
0576 static struct srcu_struct *srcu_ctlp = &srcu_ctl;
0577
0578 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
0579 {
0580 return srcu_read_lock(srcu_ctlp);
0581 }
0582
0583 static void
0584 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
0585 {
0586 long delay;
0587 const long uspertick = 1000000 / HZ;
0588 const long longdelay = 10;
0589
0590
0591
0592 delay = torture_random(rrsp) %
0593 (nrealreaders * 2 * longdelay * uspertick);
0594 if (!delay && in_task()) {
0595 schedule_timeout_interruptible(longdelay);
0596 rtrsp->rt_delay_jiffies = longdelay;
0597 } else {
0598 rcu_read_delay(rrsp, rtrsp);
0599 }
0600 }
0601
0602 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
0603 {
0604 srcu_read_unlock(srcu_ctlp, idx);
0605 }
0606
0607 static int torture_srcu_read_lock_held(void)
0608 {
0609 return srcu_read_lock_held(srcu_ctlp);
0610 }
0611
0612 static unsigned long srcu_torture_completed(void)
0613 {
0614 return srcu_batches_completed(srcu_ctlp);
0615 }
0616
0617 static void srcu_torture_deferred_free(struct rcu_torture *rp)
0618 {
0619 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
0620 }
0621
0622 static void srcu_torture_synchronize(void)
0623 {
0624 synchronize_srcu(srcu_ctlp);
0625 }
0626
0627 static unsigned long srcu_torture_get_gp_state(void)
0628 {
0629 return get_state_synchronize_srcu(srcu_ctlp);
0630 }
0631
0632 static unsigned long srcu_torture_start_gp_poll(void)
0633 {
0634 return start_poll_synchronize_srcu(srcu_ctlp);
0635 }
0636
0637 static bool srcu_torture_poll_gp_state(unsigned long oldstate)
0638 {
0639 return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
0640 }
0641
0642 static void srcu_torture_call(struct rcu_head *head,
0643 rcu_callback_t func)
0644 {
0645 call_srcu(srcu_ctlp, head, func);
0646 }
0647
0648 static void srcu_torture_barrier(void)
0649 {
0650 srcu_barrier(srcu_ctlp);
0651 }
0652
0653 static void srcu_torture_stats(void)
0654 {
0655 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
0656 }
0657
0658 static void srcu_torture_synchronize_expedited(void)
0659 {
0660 synchronize_srcu_expedited(srcu_ctlp);
0661 }
0662
0663 static struct rcu_torture_ops srcu_ops = {
0664 .ttype = SRCU_FLAVOR,
0665 .init = rcu_sync_torture_init,
0666 .readlock = srcu_torture_read_lock,
0667 .read_delay = srcu_read_delay,
0668 .readunlock = srcu_torture_read_unlock,
0669 .readlock_held = torture_srcu_read_lock_held,
0670 .get_gp_seq = srcu_torture_completed,
0671 .deferred_free = srcu_torture_deferred_free,
0672 .sync = srcu_torture_synchronize,
0673 .exp_sync = srcu_torture_synchronize_expedited,
0674 .get_gp_state = srcu_torture_get_gp_state,
0675 .start_gp_poll = srcu_torture_start_gp_poll,
0676 .poll_gp_state = srcu_torture_poll_gp_state,
0677 .call = srcu_torture_call,
0678 .cb_barrier = srcu_torture_barrier,
0679 .stats = srcu_torture_stats,
0680 .cbflood_max = 50000,
0681 .irq_capable = 1,
0682 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
0683 .name = "srcu"
0684 };
0685
0686 static void srcu_torture_init(void)
0687 {
0688 rcu_sync_torture_init();
0689 WARN_ON(init_srcu_struct(&srcu_ctld));
0690 srcu_ctlp = &srcu_ctld;
0691 }
0692
0693 static void srcu_torture_cleanup(void)
0694 {
0695 cleanup_srcu_struct(&srcu_ctld);
0696 srcu_ctlp = &srcu_ctl;
0697 }
0698
0699
0700 static struct rcu_torture_ops srcud_ops = {
0701 .ttype = SRCU_FLAVOR,
0702 .init = srcu_torture_init,
0703 .cleanup = srcu_torture_cleanup,
0704 .readlock = srcu_torture_read_lock,
0705 .read_delay = srcu_read_delay,
0706 .readunlock = srcu_torture_read_unlock,
0707 .readlock_held = torture_srcu_read_lock_held,
0708 .get_gp_seq = srcu_torture_completed,
0709 .deferred_free = srcu_torture_deferred_free,
0710 .sync = srcu_torture_synchronize,
0711 .exp_sync = srcu_torture_synchronize_expedited,
0712 .call = srcu_torture_call,
0713 .cb_barrier = srcu_torture_barrier,
0714 .stats = srcu_torture_stats,
0715 .cbflood_max = 50000,
0716 .irq_capable = 1,
0717 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
0718 .name = "srcud"
0719 };
0720
0721
0722 static struct rcu_torture_ops busted_srcud_ops = {
0723 .ttype = SRCU_FLAVOR,
0724 .init = srcu_torture_init,
0725 .cleanup = srcu_torture_cleanup,
0726 .readlock = srcu_torture_read_lock,
0727 .read_delay = rcu_read_delay,
0728 .readunlock = srcu_torture_read_unlock,
0729 .readlock_held = torture_srcu_read_lock_held,
0730 .get_gp_seq = srcu_torture_completed,
0731 .deferred_free = srcu_torture_deferred_free,
0732 .sync = srcu_torture_synchronize,
0733 .exp_sync = srcu_torture_synchronize_expedited,
0734 .call = srcu_torture_call,
0735 .cb_barrier = srcu_torture_barrier,
0736 .stats = srcu_torture_stats,
0737 .irq_capable = 1,
0738 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
0739 .extendables = RCUTORTURE_MAX_EXTEND,
0740 .name = "busted_srcud"
0741 };
0742
0743
0744
0745
0746
0747
0748 static void synchronize_rcu_trivial(void)
0749 {
0750 int cpu;
0751
0752 for_each_online_cpu(cpu) {
0753 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
0754 WARN_ON_ONCE(raw_smp_processor_id() != cpu);
0755 }
0756 }
0757
0758 static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
0759 {
0760 preempt_disable();
0761 return 0;
0762 }
0763
0764 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
0765 {
0766 preempt_enable();
0767 }
0768
0769 static struct rcu_torture_ops trivial_ops = {
0770 .ttype = RCU_TRIVIAL_FLAVOR,
0771 .init = rcu_sync_torture_init,
0772 .readlock = rcu_torture_read_lock_trivial,
0773 .read_delay = rcu_read_delay,
0774 .readunlock = rcu_torture_read_unlock_trivial,
0775 .readlock_held = torture_readlock_not_held,
0776 .get_gp_seq = rcu_no_completed,
0777 .sync = synchronize_rcu_trivial,
0778 .exp_sync = synchronize_rcu_trivial,
0779 .fqs = NULL,
0780 .stats = NULL,
0781 .irq_capable = 1,
0782 .name = "trivial"
0783 };
0784
0785 #ifdef CONFIG_TASKS_RCU
0786
0787
0788
0789
0790
0791 static int tasks_torture_read_lock(void)
0792 {
0793 return 0;
0794 }
0795
0796 static void tasks_torture_read_unlock(int idx)
0797 {
0798 }
0799
0800 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
0801 {
0802 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
0803 }
0804
0805 static void synchronize_rcu_mult_test(void)
0806 {
0807 synchronize_rcu_mult(call_rcu_tasks, call_rcu);
0808 }
0809
0810 static struct rcu_torture_ops tasks_ops = {
0811 .ttype = RCU_TASKS_FLAVOR,
0812 .init = rcu_sync_torture_init,
0813 .readlock = tasks_torture_read_lock,
0814 .read_delay = rcu_read_delay,
0815 .readunlock = tasks_torture_read_unlock,
0816 .get_gp_seq = rcu_no_completed,
0817 .deferred_free = rcu_tasks_torture_deferred_free,
0818 .sync = synchronize_rcu_tasks,
0819 .exp_sync = synchronize_rcu_mult_test,
0820 .call = call_rcu_tasks,
0821 .cb_barrier = rcu_barrier_tasks,
0822 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
0823 .fqs = NULL,
0824 .stats = NULL,
0825 .irq_capable = 1,
0826 .slow_gps = 1,
0827 .name = "tasks"
0828 };
0829
0830 #define TASKS_OPS &tasks_ops,
0831
0832 #else
0833
0834 #define TASKS_OPS
0835
0836 #endif
0837
0838
0839 #ifdef CONFIG_TASKS_RUDE_RCU
0840
0841
0842
0843
0844
0845 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
0846 {
0847 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
0848 }
0849
0850 static struct rcu_torture_ops tasks_rude_ops = {
0851 .ttype = RCU_TASKS_RUDE_FLAVOR,
0852 .init = rcu_sync_torture_init,
0853 .readlock = rcu_torture_read_lock_trivial,
0854 .read_delay = rcu_read_delay,
0855 .readunlock = rcu_torture_read_unlock_trivial,
0856 .get_gp_seq = rcu_no_completed,
0857 .deferred_free = rcu_tasks_rude_torture_deferred_free,
0858 .sync = synchronize_rcu_tasks_rude,
0859 .exp_sync = synchronize_rcu_tasks_rude,
0860 .call = call_rcu_tasks_rude,
0861 .cb_barrier = rcu_barrier_tasks_rude,
0862 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
0863 .cbflood_max = 50000,
0864 .fqs = NULL,
0865 .stats = NULL,
0866 .irq_capable = 1,
0867 .name = "tasks-rude"
0868 };
0869
0870 #define TASKS_RUDE_OPS &tasks_rude_ops,
0871
0872 #else
0873
0874 #define TASKS_RUDE_OPS
0875
0876 #endif
0877
0878
0879 #ifdef CONFIG_TASKS_TRACE_RCU
0880
0881
0882
0883
0884
0885 static int tasks_tracing_torture_read_lock(void)
0886 {
0887 rcu_read_lock_trace();
0888 return 0;
0889 }
0890
0891 static void tasks_tracing_torture_read_unlock(int idx)
0892 {
0893 rcu_read_unlock_trace();
0894 }
0895
0896 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
0897 {
0898 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
0899 }
0900
0901 static struct rcu_torture_ops tasks_tracing_ops = {
0902 .ttype = RCU_TASKS_TRACING_FLAVOR,
0903 .init = rcu_sync_torture_init,
0904 .readlock = tasks_tracing_torture_read_lock,
0905 .read_delay = srcu_read_delay,
0906 .readunlock = tasks_tracing_torture_read_unlock,
0907 .readlock_held = rcu_read_lock_trace_held,
0908 .get_gp_seq = rcu_no_completed,
0909 .deferred_free = rcu_tasks_tracing_torture_deferred_free,
0910 .sync = synchronize_rcu_tasks_trace,
0911 .exp_sync = synchronize_rcu_tasks_trace,
0912 .call = call_rcu_tasks_trace,
0913 .cb_barrier = rcu_barrier_tasks_trace,
0914 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
0915 .cbflood_max = 50000,
0916 .fqs = NULL,
0917 .stats = NULL,
0918 .irq_capable = 1,
0919 .slow_gps = 1,
0920 .name = "tasks-tracing"
0921 };
0922
0923 #define TASKS_TRACING_OPS &tasks_tracing_ops,
0924
0925 #else
0926
0927 #define TASKS_TRACING_OPS
0928
0929 #endif
0930
0931
0932 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
0933 {
0934 if (!cur_ops->gp_diff)
0935 return new - old;
0936 return cur_ops->gp_diff(new, old);
0937 }
0938
0939
0940
0941
0942
0943
0944
0945
0946 static int old_rt_runtime = -1;
0947
0948 static void rcu_torture_disable_rt_throttle(void)
0949 {
0950
0951
0952
0953
0954
0955
0956 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
0957 return;
0958
0959 old_rt_runtime = sysctl_sched_rt_runtime;
0960 sysctl_sched_rt_runtime = -1;
0961 }
0962
0963 static void rcu_torture_enable_rt_throttle(void)
0964 {
0965 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
0966 return;
0967
0968 sysctl_sched_rt_runtime = old_rt_runtime;
0969 old_rt_runtime = -1;
0970 }
0971
0972 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
0973 {
0974 int cpu;
0975 static int dbg_done;
0976 unsigned long end = jiffies;
0977 bool gp_done;
0978 unsigned long j;
0979 static unsigned long last_persist;
0980 unsigned long lp;
0981 unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
0982
0983 if (end - *start > mininterval) {
0984
0985 smp_mb();
0986 if (cur_ops->poll_gp_state(gp_state))
0987 return false;
0988 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
0989
0990 j = jiffies;
0991 lp = READ_ONCE(last_persist);
0992 if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
0993 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
0994 return false;
0995 }
0996 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
0997 n_rcu_torture_boost_failure++;
0998 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
0999 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
1000 current->rt_priority, gp_state, end - *start);
1001 cur_ops->gp_kthread_dbg();
1002
1003 gp_done = cur_ops->poll_gp_state(gp_state);
1004 pr_info("Boost inversion: GP %lu %s.\n", gp_state,
1005 gp_done ? "ended already" : "still pending");
1006
1007 }
1008
1009 return true;
1010 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
1011 *start = jiffies;
1012 }
1013
1014 return false;
1015 }
1016
1017 static int rcu_torture_boost(void *arg)
1018 {
1019 unsigned long endtime;
1020 unsigned long gp_state;
1021 unsigned long gp_state_time;
1022 unsigned long oldstarttime;
1023
1024 VERBOSE_TOROUT_STRING("rcu_torture_boost started");
1025
1026
1027 sched_set_fifo_low(current);
1028
1029
1030 do {
1031 bool failed = false;
1032 bool gp_initiated = false;
1033
1034 if (kthread_should_stop())
1035 goto checkwait;
1036
1037
1038 oldstarttime = READ_ONCE(boost_starttime);
1039 while (time_before(jiffies, oldstarttime)) {
1040 schedule_timeout_interruptible(oldstarttime - jiffies);
1041 if (stutter_wait("rcu_torture_boost"))
1042 sched_set_fifo_low(current);
1043 if (torture_must_stop())
1044 goto checkwait;
1045 }
1046
1047
1048 endtime = oldstarttime + test_boost_duration * HZ;
1049 while (time_before(jiffies, endtime)) {
1050
1051 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1052 failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
1053
1054 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
1055 gp_state = cur_ops->start_gp_poll();
1056 gp_initiated = true;
1057 gp_state_time = jiffies;
1058 }
1059 if (stutter_wait("rcu_torture_boost")) {
1060 sched_set_fifo_low(current);
1061
1062
1063
1064 if (cur_ops->poll_gp_state(gp_state))
1065 gp_initiated = false;
1066 }
1067 if (torture_must_stop())
1068 goto checkwait;
1069 }
1070
1071
1072 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1073 rcu_torture_boost_failed(gp_state, &gp_state_time);
1074
1075
1076
1077
1078
1079
1080
1081
1082 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
1083 if (mutex_trylock(&boost_mutex)) {
1084 if (oldstarttime == boost_starttime) {
1085 WRITE_ONCE(boost_starttime,
1086 jiffies + test_boost_interval * HZ);
1087 n_rcu_torture_boosts++;
1088 }
1089 mutex_unlock(&boost_mutex);
1090 break;
1091 }
1092 schedule_timeout_uninterruptible(1);
1093 }
1094
1095
1096 checkwait: if (stutter_wait("rcu_torture_boost"))
1097 sched_set_fifo_low(current);
1098 } while (!torture_must_stop());
1099
1100
1101 while (!kthread_should_stop()) {
1102 torture_shutdown_absorb("rcu_torture_boost");
1103 schedule_timeout_uninterruptible(1);
1104 }
1105 torture_kthread_stopping("rcu_torture_boost");
1106 return 0;
1107 }
1108
1109
1110
1111
1112
1113
1114 static int
1115 rcu_torture_fqs(void *arg)
1116 {
1117 unsigned long fqs_resume_time;
1118 int fqs_burst_remaining;
1119 int oldnice = task_nice(current);
1120
1121 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1122 do {
1123 fqs_resume_time = jiffies + fqs_stutter * HZ;
1124 while (time_before(jiffies, fqs_resume_time) &&
1125 !kthread_should_stop()) {
1126 schedule_timeout_interruptible(1);
1127 }
1128 fqs_burst_remaining = fqs_duration;
1129 while (fqs_burst_remaining > 0 &&
1130 !kthread_should_stop()) {
1131 cur_ops->fqs();
1132 udelay(fqs_holdoff);
1133 fqs_burst_remaining -= fqs_holdoff;
1134 }
1135 if (stutter_wait("rcu_torture_fqs"))
1136 sched_set_normal(current, oldnice);
1137 } while (!torture_must_stop());
1138 torture_kthread_stopping("rcu_torture_fqs");
1139 return 0;
1140 }
1141
1142
1143 static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
1144 static int nsynctypes;
1145
1146
1147
1148
1149 static void rcu_torture_write_types(void)
1150 {
1151 bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_exp1 = gp_exp;
1152 bool gp_poll_exp1 = gp_poll_exp, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
1153 bool gp_sync1 = gp_sync;
1154
1155
1156 if (!gp_cond1 && !gp_cond_exp1 && !gp_exp1 && !gp_poll_exp &&
1157 !gp_normal1 && !gp_poll1 && !gp_sync1)
1158 gp_cond1 = gp_cond_exp1 = gp_exp1 = gp_poll_exp1 =
1159 gp_normal1 = gp_poll1 = gp_sync1 = true;
1160 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
1161 synctype[nsynctypes++] = RTWS_COND_GET;
1162 pr_info("%s: Testing conditional GPs.\n", __func__);
1163 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
1164 pr_alert("%s: gp_cond without primitives.\n", __func__);
1165 }
1166 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
1167 synctype[nsynctypes++] = RTWS_COND_GET_EXP;
1168 pr_info("%s: Testing conditional expedited GPs.\n", __func__);
1169 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
1170 pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
1171 }
1172 if (gp_exp1 && cur_ops->exp_sync) {
1173 synctype[nsynctypes++] = RTWS_EXP_SYNC;
1174 pr_info("%s: Testing expedited GPs.\n", __func__);
1175 } else if (gp_exp && !cur_ops->exp_sync) {
1176 pr_alert("%s: gp_exp without primitives.\n", __func__);
1177 }
1178 if (gp_normal1 && cur_ops->deferred_free) {
1179 synctype[nsynctypes++] = RTWS_DEF_FREE;
1180 pr_info("%s: Testing asynchronous GPs.\n", __func__);
1181 } else if (gp_normal && !cur_ops->deferred_free) {
1182 pr_alert("%s: gp_normal without primitives.\n", __func__);
1183 }
1184 if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
1185 synctype[nsynctypes++] = RTWS_POLL_GET;
1186 pr_info("%s: Testing polling GPs.\n", __func__);
1187 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1188 pr_alert("%s: gp_poll without primitives.\n", __func__);
1189 }
1190 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
1191 synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
1192 pr_info("%s: Testing polling expedited GPs.\n", __func__);
1193 } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
1194 pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
1195 }
1196 if (gp_sync1 && cur_ops->sync) {
1197 synctype[nsynctypes++] = RTWS_SYNC;
1198 pr_info("%s: Testing normal GPs.\n", __func__);
1199 } else if (gp_sync && !cur_ops->sync) {
1200 pr_alert("%s: gp_sync without primitives.\n", __func__);
1201 }
1202 }
1203
1204
1205
1206
1207
1208
1209 static int
1210 rcu_torture_writer(void *arg)
1211 {
1212 bool boot_ended;
1213 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1214 unsigned long cookie;
1215 int expediting = 0;
1216 unsigned long gp_snap;
1217 int i;
1218 int idx;
1219 int oldnice = task_nice(current);
1220 struct rcu_torture *rp;
1221 struct rcu_torture *old_rp;
1222 static DEFINE_TORTURE_RANDOM(rand);
1223 bool stutter_waited;
1224
1225 VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1226 if (!can_expedite)
1227 pr_alert("%s" TORTURE_FLAG
1228 " GP expediting controlled from boot/sysfs for %s.\n",
1229 torture_type, cur_ops->name);
1230 if (WARN_ONCE(nsynctypes == 0,
1231 "%s: No update-side primitives.\n", __func__)) {
1232
1233
1234
1235
1236
1237 rcu_torture_writer_state = RTWS_STOPPING;
1238 torture_kthread_stopping("rcu_torture_writer");
1239 return 0;
1240 }
1241
1242 do {
1243 rcu_torture_writer_state = RTWS_FIXED_DELAY;
1244 torture_hrtimeout_us(500, 1000, &rand);
1245 rp = rcu_torture_alloc();
1246 if (rp == NULL)
1247 continue;
1248 rp->rtort_pipe_count = 0;
1249 rcu_torture_writer_state = RTWS_DELAY;
1250 udelay(torture_random(&rand) & 0x3ff);
1251 rcu_torture_writer_state = RTWS_REPLACE;
1252 old_rp = rcu_dereference_check(rcu_torture_current,
1253 current == writer_task);
1254 rp->rtort_mbtest = 1;
1255 rcu_assign_pointer(rcu_torture_current, rp);
1256 smp_wmb();
1257 if (old_rp) {
1258 i = old_rp->rtort_pipe_count;
1259 if (i > RCU_TORTURE_PIPE_LEN)
1260 i = RCU_TORTURE_PIPE_LEN;
1261 atomic_inc(&rcu_torture_wcount[i]);
1262 WRITE_ONCE(old_rp->rtort_pipe_count,
1263 old_rp->rtort_pipe_count + 1);
1264 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1265 idx = cur_ops->readlock();
1266 cookie = cur_ops->get_gp_state();
1267 WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
1268 cur_ops->poll_gp_state(cookie),
1269 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1270 __func__,
1271 rcu_torture_writer_state_getname(),
1272 rcu_torture_writer_state,
1273 cookie, cur_ops->get_gp_state());
1274 if (cur_ops->get_gp_completed) {
1275 cookie = cur_ops->get_gp_completed();
1276 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1277 }
1278 cur_ops->readunlock(idx);
1279 }
1280 switch (synctype[torture_random(&rand) % nsynctypes]) {
1281 case RTWS_DEF_FREE:
1282 rcu_torture_writer_state = RTWS_DEF_FREE;
1283 cur_ops->deferred_free(old_rp);
1284 break;
1285 case RTWS_EXP_SYNC:
1286 rcu_torture_writer_state = RTWS_EXP_SYNC;
1287 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1288 cookie = cur_ops->get_gp_state();
1289 cur_ops->exp_sync();
1290 cur_ops->exp_sync();
1291 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1292 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1293 rcu_torture_pipe_update(old_rp);
1294 break;
1295 case RTWS_COND_GET:
1296 rcu_torture_writer_state = RTWS_COND_GET;
1297 gp_snap = cur_ops->get_gp_state();
1298 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1299 rcu_torture_writer_state = RTWS_COND_SYNC;
1300 cur_ops->cond_sync(gp_snap);
1301 rcu_torture_pipe_update(old_rp);
1302 break;
1303 case RTWS_COND_GET_EXP:
1304 rcu_torture_writer_state = RTWS_COND_GET_EXP;
1305 gp_snap = cur_ops->get_gp_state_exp();
1306 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1307 rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
1308 cur_ops->cond_sync_exp(gp_snap);
1309 rcu_torture_pipe_update(old_rp);
1310 break;
1311 case RTWS_POLL_GET:
1312 rcu_torture_writer_state = RTWS_POLL_GET;
1313 gp_snap = cur_ops->start_gp_poll();
1314 rcu_torture_writer_state = RTWS_POLL_WAIT;
1315 while (!cur_ops->poll_gp_state(gp_snap))
1316 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1317 &rand);
1318 rcu_torture_pipe_update(old_rp);
1319 break;
1320 case RTWS_POLL_GET_EXP:
1321 rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1322 gp_snap = cur_ops->start_gp_poll_exp();
1323 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1324 while (!cur_ops->poll_gp_state_exp(gp_snap))
1325 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1326 &rand);
1327 rcu_torture_pipe_update(old_rp);
1328 break;
1329 case RTWS_SYNC:
1330 rcu_torture_writer_state = RTWS_SYNC;
1331 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1332 cookie = cur_ops->get_gp_state();
1333 cur_ops->sync();
1334 cur_ops->sync();
1335 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1336 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1337 rcu_torture_pipe_update(old_rp);
1338 break;
1339 default:
1340 WARN_ON_ONCE(1);
1341 break;
1342 }
1343 }
1344 WRITE_ONCE(rcu_torture_current_version,
1345 rcu_torture_current_version + 1);
1346
1347 if (can_expedite &&
1348 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1349 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1350 if (expediting >= 0)
1351 rcu_expedite_gp();
1352 else
1353 rcu_unexpedite_gp();
1354 if (++expediting > 3)
1355 expediting = -expediting;
1356 } else if (!can_expedite) {
1357 can_expedite = !rcu_gp_is_expedited() &&
1358 !rcu_gp_is_normal();
1359 }
1360 rcu_torture_writer_state = RTWS_STUTTER;
1361 boot_ended = rcu_inkernel_boot_has_ended();
1362 stutter_waited = stutter_wait("rcu_torture_writer");
1363 if (stutter_waited &&
1364 !atomic_read(&rcu_fwd_cb_nodelay) &&
1365 !cur_ops->slow_gps &&
1366 !torture_must_stop() &&
1367 boot_ended)
1368 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1369 if (list_empty(&rcu_tortures[i].rtort_free) &&
1370 rcu_access_pointer(rcu_torture_current) !=
1371 &rcu_tortures[i]) {
1372 tracing_off();
1373 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1374 rcu_ftrace_dump(DUMP_ALL);
1375 }
1376 if (stutter_waited)
1377 sched_set_normal(current, oldnice);
1378 } while (!torture_must_stop());
1379 rcu_torture_current = NULL;
1380
1381 if (expediting > 0)
1382 expediting = -expediting;
1383 while (can_expedite && expediting++ < 0)
1384 rcu_unexpedite_gp();
1385 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1386 if (!can_expedite)
1387 pr_alert("%s" TORTURE_FLAG
1388 " Dynamic grace-period expediting was disabled.\n",
1389 torture_type);
1390 rcu_torture_writer_state = RTWS_STOPPING;
1391 torture_kthread_stopping("rcu_torture_writer");
1392 return 0;
1393 }
1394
1395
1396
1397
1398
1399 static int
1400 rcu_torture_fakewriter(void *arg)
1401 {
1402 unsigned long gp_snap;
1403 DEFINE_TORTURE_RANDOM(rand);
1404
1405 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1406 set_user_nice(current, MAX_NICE);
1407
1408 if (WARN_ONCE(nsynctypes == 0,
1409 "%s: No update-side primitives.\n", __func__)) {
1410
1411
1412
1413
1414
1415 torture_kthread_stopping("rcu_torture_fakewriter");
1416 return 0;
1417 }
1418
1419 do {
1420 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1421 if (cur_ops->cb_barrier != NULL &&
1422 torture_random(&rand) % (nfakewriters * 8) == 0) {
1423 cur_ops->cb_barrier();
1424 } else {
1425 switch (synctype[torture_random(&rand) % nsynctypes]) {
1426 case RTWS_DEF_FREE:
1427 break;
1428 case RTWS_EXP_SYNC:
1429 cur_ops->exp_sync();
1430 break;
1431 case RTWS_COND_GET:
1432 gp_snap = cur_ops->get_gp_state();
1433 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1434 cur_ops->cond_sync(gp_snap);
1435 break;
1436 case RTWS_COND_GET_EXP:
1437 gp_snap = cur_ops->get_gp_state_exp();
1438 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1439 cur_ops->cond_sync_exp(gp_snap);
1440 break;
1441 case RTWS_POLL_GET:
1442 gp_snap = cur_ops->start_gp_poll();
1443 while (!cur_ops->poll_gp_state(gp_snap)) {
1444 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1445 &rand);
1446 }
1447 break;
1448 case RTWS_POLL_GET_EXP:
1449 gp_snap = cur_ops->start_gp_poll_exp();
1450 while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1451 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1452 &rand);
1453 }
1454 break;
1455 case RTWS_SYNC:
1456 cur_ops->sync();
1457 break;
1458 default:
1459 WARN_ON_ONCE(1);
1460 break;
1461 }
1462 }
1463 stutter_wait("rcu_torture_fakewriter");
1464 } while (!torture_must_stop());
1465
1466 torture_kthread_stopping("rcu_torture_fakewriter");
1467 return 0;
1468 }
1469
1470 static void rcu_torture_timer_cb(struct rcu_head *rhp)
1471 {
1472 kfree(rhp);
1473 }
1474
1475
1476 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1477 struct torture_random_state *trsp)
1478 {
1479 unsigned long loops;
1480 int noc = torture_num_online_cpus();
1481 int rdrchked;
1482 int rdrchker;
1483 struct rcu_torture_reader_check *rtrcp;
1484 struct rcu_torture_reader_check *rtrcp_assigner;
1485 struct rcu_torture_reader_check *rtrcp_chked;
1486 struct rcu_torture_reader_check *rtrcp_chker;
1487
1488 if (myid < 0)
1489 return;
1490
1491
1492 rtrcp = &rcu_torture_reader_mbchk[myid];
1493 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1494
1495
1496 rdrchked = torture_random(trsp) % nrealreaders;
1497 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1498 rdrchker = torture_random(trsp) % nrealreaders;
1499 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1500 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1501 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 &&
1502 !READ_ONCE(rtp->rtort_chkp) &&
1503 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) {
1504 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1505 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1506 rtrcp->rtc_chkrdr = rdrchked;
1507 WARN_ON_ONCE(rtrcp->rtc_ready);
1508 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1509 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1510 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL);
1511 }
1512
1513
1514 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1515 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1516 return;
1517 rdrchked = rtrcp_assigner->rtc_chkrdr;
1518 if (WARN_ON_ONCE(rdrchked < 0))
1519 return;
1520 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1521 loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1522 atomic_inc(&n_rcu_torture_mbchk_tries);
1523 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1524 atomic_inc(&n_rcu_torture_mbchk_fail);
1525 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1526 rtrcp_assigner->rtc_ready = 0;
1527 smp_store_release(&rtrcp->rtc_assigner, NULL);
1528 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1);
1529 }
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540 static void rcutorture_one_extend(int *readstate, int newstate,
1541 struct torture_random_state *trsp,
1542 struct rt_read_seg *rtrsp)
1543 {
1544 unsigned long flags;
1545 int idxnew1 = -1;
1546 int idxnew2 = -1;
1547 int idxold1 = *readstate;
1548 int idxold2 = idxold1;
1549 int statesnew = ~*readstate & newstate;
1550 int statesold = *readstate & ~newstate;
1551
1552 WARN_ON_ONCE(idxold2 < 0);
1553 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1554 rtrsp->rt_readstate = newstate;
1555
1556
1557 if (statesnew & RCUTORTURE_RDR_BH)
1558 local_bh_disable();
1559 if (statesnew & RCUTORTURE_RDR_RBH)
1560 rcu_read_lock_bh();
1561 if (statesnew & RCUTORTURE_RDR_IRQ)
1562 local_irq_disable();
1563 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1564 preempt_disable();
1565 if (statesnew & RCUTORTURE_RDR_SCHED)
1566 rcu_read_lock_sched();
1567 if (statesnew & RCUTORTURE_RDR_RCU_1)
1568 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1569 if (statesnew & RCUTORTURE_RDR_RCU_2)
1570 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
1571
1572
1573
1574
1575
1576
1577
1578
1579 if (statesold & RCUTORTURE_RDR_IRQ)
1580 local_irq_enable();
1581 if (statesold & RCUTORTURE_RDR_PREEMPT)
1582 preempt_enable();
1583 if (statesold & RCUTORTURE_RDR_SCHED)
1584 rcu_read_unlock_sched();
1585 if (statesold & RCUTORTURE_RDR_BH)
1586 local_bh_enable();
1587 if (statesold & RCUTORTURE_RDR_RBH)
1588 rcu_read_unlock_bh();
1589 if (statesold & RCUTORTURE_RDR_RCU_2) {
1590 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1591 WARN_ON_ONCE(idxnew2 != -1);
1592 idxold2 = 0;
1593 }
1594 if (statesold & RCUTORTURE_RDR_RCU_1) {
1595 bool lockit;
1596
1597 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1598 if (lockit)
1599 raw_spin_lock_irqsave(¤t->pi_lock, flags);
1600 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1601 WARN_ON_ONCE(idxnew1 != -1);
1602 idxold1 = 0;
1603 if (lockit)
1604 raw_spin_unlock_irqrestore(¤t->pi_lock, flags);
1605 }
1606
1607
1608 if ((statesnew || statesold) && *readstate && newstate)
1609 cur_ops->read_delay(trsp, rtrsp);
1610
1611
1612 if (idxnew1 == -1)
1613 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1614 WARN_ON_ONCE(idxnew1 < 0);
1615 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1616 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1617 if (idxnew2 == -1)
1618 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1619 WARN_ON_ONCE(idxnew2 < 0);
1620 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1621 *readstate = idxnew1 | idxnew2 | newstate;
1622 WARN_ON_ONCE(*readstate < 0);
1623 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1624 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
1625 }
1626
1627
1628 static int rcutorture_extend_mask_max(void)
1629 {
1630 int mask;
1631
1632 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1633 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1634 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1635 return mask;
1636 }
1637
1638
1639 static int
1640 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1641 {
1642 int mask = rcutorture_extend_mask_max();
1643 unsigned long randmask1 = torture_random(trsp) >> 8;
1644 unsigned long randmask2 = randmask1 >> 3;
1645 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1646 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1647 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1648
1649 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
1650
1651 if (!(randmask1 & 0x7))
1652 mask = mask & randmask2;
1653 else
1654 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1655
1656
1657 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1658 if (oldmask & RCUTORTURE_RDR_RCU_1)
1659 mask &= ~RCUTORTURE_RDR_RCU_2;
1660 else
1661 mask |= RCUTORTURE_RDR_RCU_1;
1662 }
1663
1664
1665
1666
1667 if (mask & RCUTORTURE_RDR_IRQ)
1668 mask |= oldmask & bhs;
1669
1670
1671
1672
1673
1674
1675 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1676
1677 if (oldmask & preempts_irq)
1678 mask &= ~bhs;
1679 if ((oldmask | mask) & preempts_irq)
1680 mask |= oldmask & bhs;
1681 }
1682
1683 return mask ?: RCUTORTURE_RDR_RCU_1;
1684 }
1685
1686
1687
1688
1689
1690 static struct rt_read_seg *
1691 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1692 struct rt_read_seg *rtrsp)
1693 {
1694 int i;
1695 int j;
1696 int mask = rcutorture_extend_mask_max();
1697
1698 WARN_ON_ONCE(!*readstate);
1699 if (!((mask - 1) & mask))
1700 return rtrsp;
1701
1702 i = (torture_random(trsp) >> 3);
1703 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1704 for (j = 0; j < i; j++) {
1705 mask = rcutorture_extend_mask(*readstate, trsp);
1706 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1707 }
1708 return &rtrsp[j];
1709 }
1710
1711
1712
1713
1714
1715
1716 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1717 {
1718 unsigned long cookie;
1719 int i;
1720 unsigned long started;
1721 unsigned long completed;
1722 int newstate;
1723 struct rcu_torture *p;
1724 int pipe_count;
1725 int readstate = 0;
1726 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1727 struct rt_read_seg *rtrsp = &rtseg[0];
1728 struct rt_read_seg *rtrsp1;
1729 unsigned long long ts;
1730
1731 WARN_ON_ONCE(!rcu_is_watching());
1732 newstate = rcutorture_extend_mask(readstate, trsp);
1733 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1734 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1735 cookie = cur_ops->get_gp_state();
1736 started = cur_ops->get_gp_seq();
1737 ts = rcu_trace_clock_local();
1738 p = rcu_dereference_check(rcu_torture_current,
1739 !cur_ops->readlock_held || cur_ops->readlock_held());
1740 if (p == NULL) {
1741
1742 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1743 return false;
1744 }
1745 if (p->rtort_mbtest == 0)
1746 atomic_inc(&n_rcu_torture_mberror);
1747 rcu_torture_reader_do_mbchk(myid, p, trsp);
1748 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1749 preempt_disable();
1750 pipe_count = READ_ONCE(p->rtort_pipe_count);
1751 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1752
1753 pipe_count = RCU_TORTURE_PIPE_LEN;
1754 }
1755 completed = cur_ops->get_gp_seq();
1756 if (pipe_count > 1) {
1757 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1758 ts, started, completed);
1759 rcu_ftrace_dump(DUMP_ALL);
1760 }
1761 __this_cpu_inc(rcu_torture_count[pipe_count]);
1762 completed = rcutorture_seq_diff(completed, started);
1763 if (completed > RCU_TORTURE_PIPE_LEN) {
1764
1765 completed = RCU_TORTURE_PIPE_LEN;
1766 }
1767 __this_cpu_inc(rcu_torture_batch[completed]);
1768 preempt_enable();
1769 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1770 WARN_ONCE(cur_ops->poll_gp_state(cookie),
1771 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
1772 __func__,
1773 rcu_torture_writer_state_getname(),
1774 rcu_torture_writer_state,
1775 cookie, cur_ops->get_gp_state());
1776 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1777 WARN_ON_ONCE(readstate);
1778
1779
1780 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
1781
1782
1783 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1784 i = 0;
1785 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1786 err_segs[i++] = *rtrsp1;
1787 rt_read_nsegs = i;
1788 }
1789
1790 return true;
1791 }
1792
1793 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1794
1795
1796
1797
1798
1799
1800
1801 static void rcu_torture_timer(struct timer_list *unused)
1802 {
1803 atomic_long_inc(&n_rcu_torture_timers);
1804 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
1805
1806
1807 if (cur_ops->call) {
1808 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1809
1810 if (rhp)
1811 cur_ops->call(rhp, rcu_torture_timer_cb);
1812 }
1813 }
1814
1815
1816
1817
1818
1819
1820
1821 static int
1822 rcu_torture_reader(void *arg)
1823 {
1824 unsigned long lastsleep = jiffies;
1825 long myid = (long)arg;
1826 int mynumonline = myid;
1827 DEFINE_TORTURE_RANDOM(rand);
1828 struct timer_list t;
1829
1830 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1831 set_user_nice(current, MAX_NICE);
1832 if (irqreader && cur_ops->irq_capable)
1833 timer_setup_on_stack(&t, rcu_torture_timer, 0);
1834 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1835 do {
1836 if (irqreader && cur_ops->irq_capable) {
1837 if (!timer_pending(&t))
1838 mod_timer(&t, jiffies + 1);
1839 }
1840 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
1841 schedule_timeout_interruptible(HZ);
1842 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
1843 torture_hrtimeout_us(500, 1000, &rand);
1844 lastsleep = jiffies + 10;
1845 }
1846 while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
1847 schedule_timeout_interruptible(HZ / 5);
1848 stutter_wait("rcu_torture_reader");
1849 } while (!torture_must_stop());
1850 if (irqreader && cur_ops->irq_capable) {
1851 del_timer_sync(&t);
1852 destroy_timer_on_stack(&t);
1853 }
1854 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1855 torture_kthread_stopping("rcu_torture_reader");
1856 return 0;
1857 }
1858
1859
1860
1861
1862
1863 static int rcu_nocb_toggle(void *arg)
1864 {
1865 int cpu;
1866 int maxcpu = -1;
1867 int oldnice = task_nice(current);
1868 long r;
1869 DEFINE_TORTURE_RANDOM(rand);
1870 ktime_t toggle_delay;
1871 unsigned long toggle_fuzz;
1872 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
1873
1874 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
1875 while (!rcu_inkernel_boot_has_ended())
1876 schedule_timeout_interruptible(HZ / 10);
1877 for_each_online_cpu(cpu)
1878 maxcpu = cpu;
1879 WARN_ON(maxcpu < 0);
1880 if (toggle_interval > ULONG_MAX)
1881 toggle_fuzz = ULONG_MAX >> 3;
1882 else
1883 toggle_fuzz = toggle_interval >> 3;
1884 if (toggle_fuzz <= 0)
1885 toggle_fuzz = NSEC_PER_USEC;
1886 do {
1887 r = torture_random(&rand);
1888 cpu = (r >> 4) % (maxcpu + 1);
1889 if (r & 0x1) {
1890 rcu_nocb_cpu_offload(cpu);
1891 atomic_long_inc(&n_nocb_offload);
1892 } else {
1893 rcu_nocb_cpu_deoffload(cpu);
1894 atomic_long_inc(&n_nocb_deoffload);
1895 }
1896 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
1897 set_current_state(TASK_INTERRUPTIBLE);
1898 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
1899 if (stutter_wait("rcu_nocb_toggle"))
1900 sched_set_normal(current, oldnice);
1901 } while (!torture_must_stop());
1902 torture_kthread_stopping("rcu_nocb_toggle");
1903 return 0;
1904 }
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914 static void
1915 rcu_torture_stats_print(void)
1916 {
1917 int cpu;
1918 int i;
1919 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1920 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1921 struct rcu_torture *rtcp;
1922 static unsigned long rtcv_snap = ULONG_MAX;
1923 static bool splatted;
1924 struct task_struct *wtp;
1925
1926 for_each_possible_cpu(cpu) {
1927 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1928 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1929 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1930 }
1931 }
1932 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
1933 if (pipesummary[i] != 0)
1934 break;
1935 }
1936
1937 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1938 rtcp = rcu_access_pointer(rcu_torture_current);
1939 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1940 rtcp,
1941 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1942 rcu_torture_current_version,
1943 list_empty(&rcu_torture_freelist),
1944 atomic_read(&n_rcu_torture_alloc),
1945 atomic_read(&n_rcu_torture_alloc_fail),
1946 atomic_read(&n_rcu_torture_free));
1947 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
1948 atomic_read(&n_rcu_torture_mberror),
1949 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
1950 n_rcu_torture_barrier_error,
1951 n_rcu_torture_boost_ktrerror,
1952 n_rcu_torture_boost_rterror);
1953 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1954 n_rcu_torture_boost_failure,
1955 n_rcu_torture_boosts,
1956 atomic_long_read(&n_rcu_torture_timers));
1957 torture_onoff_stats();
1958 pr_cont("barrier: %ld/%ld:%ld ",
1959 data_race(n_barrier_successes),
1960 data_race(n_barrier_attempts),
1961 data_race(n_rcu_torture_barrier_error));
1962 pr_cont("read-exits: %ld ", data_race(n_read_exits));
1963 pr_cont("nocb-toggles: %ld:%ld\n",
1964 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
1965
1966 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1967 if (atomic_read(&n_rcu_torture_mberror) ||
1968 atomic_read(&n_rcu_torture_mbchk_fail) ||
1969 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1970 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1971 i > 1) {
1972 pr_cont("%s", "!!! ");
1973 atomic_inc(&n_rcu_torture_error);
1974 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1975 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
1976 WARN_ON_ONCE(n_rcu_torture_barrier_error);
1977 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror);
1978 WARN_ON_ONCE(n_rcu_torture_boost_rterror);
1979 WARN_ON_ONCE(n_rcu_torture_boost_failure);
1980 WARN_ON_ONCE(i > 1);
1981 }
1982 pr_cont("Reader Pipe: ");
1983 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1984 pr_cont(" %ld", pipesummary[i]);
1985 pr_cont("\n");
1986
1987 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1988 pr_cont("Reader Batch: ");
1989 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1990 pr_cont(" %ld", batchsummary[i]);
1991 pr_cont("\n");
1992
1993 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1994 pr_cont("Free-Block Circulation: ");
1995 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1996 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1997 }
1998 pr_cont("\n");
1999
2000 if (cur_ops->stats)
2001 cur_ops->stats();
2002 if (rtcv_snap == rcu_torture_current_version &&
2003 rcu_access_pointer(rcu_torture_current) &&
2004 !rcu_stall_is_suppressed()) {
2005 int __maybe_unused flags = 0;
2006 unsigned long __maybe_unused gp_seq = 0;
2007
2008 rcutorture_get_gp_data(cur_ops->ttype,
2009 &flags, &gp_seq);
2010 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
2011 &flags, &gp_seq);
2012 wtp = READ_ONCE(writer_task);
2013 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2014 rcu_torture_writer_state_getname(),
2015 rcu_torture_writer_state, gp_seq, flags,
2016 wtp == NULL ? ~0U : wtp->__state,
2017 wtp == NULL ? -1 : (int)task_cpu(wtp));
2018 if (!splatted && wtp) {
2019 sched_show_task(wtp);
2020 splatted = true;
2021 }
2022 if (cur_ops->gp_kthread_dbg)
2023 cur_ops->gp_kthread_dbg();
2024 rcu_ftrace_dump(DUMP_ALL);
2025 }
2026 rtcv_snap = rcu_torture_current_version;
2027 }
2028
2029
2030
2031
2032
2033 static int
2034 rcu_torture_stats(void *arg)
2035 {
2036 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2037 do {
2038 schedule_timeout_interruptible(stat_interval * HZ);
2039 rcu_torture_stats_print();
2040 torture_shutdown_absorb("rcu_torture_stats");
2041 } while (!torture_must_stop());
2042 torture_kthread_stopping("rcu_torture_stats");
2043 return 0;
2044 }
2045
2046
2047 static void rcu_torture_mem_dump_obj(void)
2048 {
2049 struct rcu_head *rhp;
2050 struct kmem_cache *kcp;
2051 static int z;
2052
2053 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2054 if (WARN_ON_ONCE(!kcp))
2055 return;
2056 rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2057 if (WARN_ON_ONCE(!rhp)) {
2058 kmem_cache_destroy(kcp);
2059 return;
2060 }
2061 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2062 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2063 mem_dump_obj(ZERO_SIZE_PTR);
2064 pr_alert("mem_dump_obj(NULL):");
2065 mem_dump_obj(NULL);
2066 pr_alert("mem_dump_obj(%px):", &rhp);
2067 mem_dump_obj(&rhp);
2068 pr_alert("mem_dump_obj(%px):", rhp);
2069 mem_dump_obj(rhp);
2070 pr_alert("mem_dump_obj(%px):", &rhp->func);
2071 mem_dump_obj(&rhp->func);
2072 pr_alert("mem_dump_obj(%px):", &z);
2073 mem_dump_obj(&z);
2074 kmem_cache_free(kcp, rhp);
2075 kmem_cache_destroy(kcp);
2076 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2077 if (WARN_ON_ONCE(!rhp))
2078 return;
2079 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2080 pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2081 mem_dump_obj(rhp);
2082 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2083 mem_dump_obj(&rhp->func);
2084 kfree(rhp);
2085 rhp = vmalloc(4096);
2086 if (WARN_ON_ONCE(!rhp))
2087 return;
2088 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2089 pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2090 mem_dump_obj(rhp);
2091 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2092 mem_dump_obj(&rhp->func);
2093 vfree(rhp);
2094 }
2095
2096 static void
2097 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2098 {
2099 pr_alert("%s" TORTURE_FLAG
2100 "--- %s: nreaders=%d nfakewriters=%d "
2101 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2102 "shuffle_interval=%d stutter=%d irqreader=%d "
2103 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2104 "test_boost=%d/%d test_boost_interval=%d "
2105 "test_boost_duration=%d shutdown_secs=%d "
2106 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2107 "stall_cpu_block=%d "
2108 "n_barrier_cbs=%d "
2109 "onoff_interval=%d onoff_holdoff=%d "
2110 "read_exit_delay=%d read_exit_burst=%d "
2111 "nocbs_nthreads=%d nocbs_toggle=%d\n",
2112 torture_type, tag, nrealreaders, nfakewriters,
2113 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2114 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2115 test_boost, cur_ops->can_boost,
2116 test_boost_interval, test_boost_duration, shutdown_secs,
2117 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2118 stall_cpu_block,
2119 n_barrier_cbs,
2120 onoff_interval, onoff_holdoff,
2121 read_exit_delay, read_exit_burst,
2122 nocbs_nthreads, nocbs_toggle);
2123 }
2124
2125 static int rcutorture_booster_cleanup(unsigned int cpu)
2126 {
2127 struct task_struct *t;
2128
2129 if (boost_tasks[cpu] == NULL)
2130 return 0;
2131 mutex_lock(&boost_mutex);
2132 t = boost_tasks[cpu];
2133 boost_tasks[cpu] = NULL;
2134 rcu_torture_enable_rt_throttle();
2135 mutex_unlock(&boost_mutex);
2136
2137
2138 torture_stop_kthread(rcu_torture_boost, t);
2139 return 0;
2140 }
2141
2142 static int rcutorture_booster_init(unsigned int cpu)
2143 {
2144 int retval;
2145
2146 if (boost_tasks[cpu] != NULL)
2147 return 0;
2148
2149
2150
2151
2152 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2153 struct sched_param sp;
2154 struct task_struct *t;
2155
2156 t = per_cpu(ksoftirqd, cpu);
2157 WARN_ON_ONCE(!t);
2158 sp.sched_priority = 2;
2159 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2160 }
2161
2162
2163 mutex_lock(&boost_mutex);
2164 rcu_torture_disable_rt_throttle();
2165 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2166 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2167 cpu, "rcu_torture_boost_%u");
2168 if (IS_ERR(boost_tasks[cpu])) {
2169 retval = PTR_ERR(boost_tasks[cpu]);
2170 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2171 n_rcu_torture_boost_ktrerror++;
2172 boost_tasks[cpu] = NULL;
2173 mutex_unlock(&boost_mutex);
2174 return retval;
2175 }
2176 mutex_unlock(&boost_mutex);
2177 return 0;
2178 }
2179
2180
2181
2182
2183
2184 static int rcu_torture_stall(void *args)
2185 {
2186 int idx;
2187 unsigned long stop_at;
2188
2189 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2190 if (stall_cpu_holdoff > 0) {
2191 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2192 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2193 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2194 }
2195 if (!kthread_should_stop() && stall_gp_kthread > 0) {
2196 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2197 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2198 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2199 if (kthread_should_stop())
2200 break;
2201 schedule_timeout_uninterruptible(HZ);
2202 }
2203 }
2204 if (!kthread_should_stop() && stall_cpu > 0) {
2205 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2206 stop_at = ktime_get_seconds() + stall_cpu;
2207
2208 idx = cur_ops->readlock();
2209 if (stall_cpu_irqsoff)
2210 local_irq_disable();
2211 else if (!stall_cpu_block)
2212 preempt_disable();
2213 pr_alert("%s start on CPU %d.\n",
2214 __func__, raw_smp_processor_id());
2215 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2216 stop_at))
2217 if (stall_cpu_block) {
2218 #ifdef CONFIG_PREEMPTION
2219 preempt_schedule();
2220 #else
2221 schedule_timeout_uninterruptible(HZ);
2222 #endif
2223 } else if (stall_no_softlockup) {
2224 touch_softlockup_watchdog();
2225 }
2226 if (stall_cpu_irqsoff)
2227 local_irq_enable();
2228 else if (!stall_cpu_block)
2229 preempt_enable();
2230 cur_ops->readunlock(idx);
2231 }
2232 pr_alert("%s end.\n", __func__);
2233 torture_shutdown_absorb("rcu_torture_stall");
2234 while (!kthread_should_stop())
2235 schedule_timeout_interruptible(10 * HZ);
2236 return 0;
2237 }
2238
2239
2240 static int __init rcu_torture_stall_init(void)
2241 {
2242 if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2243 return 0;
2244 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2245 }
2246
2247
2248 struct fwd_cb_state {
2249 struct rcu_head rh;
2250 int stop;
2251 };
2252
2253
2254
2255
2256
2257
2258 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2259 {
2260 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2261
2262 if (READ_ONCE(fcsp->stop)) {
2263 WRITE_ONCE(fcsp->stop, 2);
2264 return;
2265 }
2266 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2267 }
2268
2269
2270 struct rcu_fwd_cb {
2271 struct rcu_head rh;
2272 struct rcu_fwd_cb *rfc_next;
2273 struct rcu_fwd *rfc_rfp;
2274 int rfc_gps;
2275 };
2276
2277 #define MAX_FWD_CB_JIFFIES (8 * HZ)
2278 #define MIN_FWD_CB_LAUNDERS 3
2279 #define MIN_FWD_CBS_LAUNDERED 100
2280 #define FWD_CBS_HIST_DIV 10
2281 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
2282
2283 struct rcu_launder_hist {
2284 long n_launders;
2285 unsigned long launder_gp_seq;
2286 };
2287
2288 struct rcu_fwd {
2289 spinlock_t rcu_fwd_lock;
2290 struct rcu_fwd_cb *rcu_fwd_cb_head;
2291 struct rcu_fwd_cb **rcu_fwd_cb_tail;
2292 long n_launders_cb;
2293 unsigned long rcu_fwd_startat;
2294 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2295 unsigned long rcu_launder_gp_seq_start;
2296 int rcu_fwd_id;
2297 };
2298
2299 static DEFINE_MUTEX(rcu_fwd_mutex);
2300 static struct rcu_fwd *rcu_fwds;
2301 static unsigned long rcu_fwd_seq;
2302 static atomic_long_t rcu_fwd_max_cbs;
2303 static bool rcu_fwd_emergency_stop;
2304
2305 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2306 {
2307 unsigned long gps;
2308 unsigned long gps_old;
2309 int i;
2310 int j;
2311
2312 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2313 if (rfp->n_launders_hist[i].n_launders > 0)
2314 break;
2315 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2316 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2317 gps_old = rfp->rcu_launder_gp_seq_start;
2318 for (j = 0; j <= i; j++) {
2319 gps = rfp->n_launders_hist[j].launder_gp_seq;
2320 pr_cont(" %ds/%d: %ld:%ld",
2321 j + 1, FWD_CBS_HIST_DIV,
2322 rfp->n_launders_hist[j].n_launders,
2323 rcutorture_seq_diff(gps, gps_old));
2324 gps_old = gps;
2325 }
2326 pr_cont("\n");
2327 }
2328
2329
2330 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2331 {
2332 unsigned long flags;
2333 int i;
2334 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2335 struct rcu_fwd_cb **rfcpp;
2336 struct rcu_fwd *rfp = rfcp->rfc_rfp;
2337
2338 rfcp->rfc_next = NULL;
2339 rfcp->rfc_gps++;
2340 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2341 rfcpp = rfp->rcu_fwd_cb_tail;
2342 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2343 WRITE_ONCE(*rfcpp, rfcp);
2344 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2345 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2346 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2347 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2348 rfp->n_launders_hist[i].n_launders++;
2349 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2350 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2351 }
2352
2353
2354 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2355 {
2356 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2357
2358 if (need_resched() || (iter & 0xfff))
2359 schedule();
2360 return;
2361 }
2362
2363 cond_resched();
2364 }
2365
2366
2367
2368
2369
2370 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2371 {
2372 unsigned long flags;
2373 unsigned long freed = 0;
2374 struct rcu_fwd_cb *rfcp;
2375
2376 for (;;) {
2377 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2378 rfcp = rfp->rcu_fwd_cb_head;
2379 if (!rfcp) {
2380 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2381 break;
2382 }
2383 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2384 if (!rfp->rcu_fwd_cb_head)
2385 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2386 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2387 kfree(rfcp);
2388 freed++;
2389 rcu_torture_fwd_prog_cond_resched(freed);
2390 if (tick_nohz_full_enabled()) {
2391 local_irq_save(flags);
2392 rcu_momentary_dyntick_idle();
2393 local_irq_restore(flags);
2394 }
2395 }
2396 return freed;
2397 }
2398
2399
2400 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2401 int *tested, int *tested_tries)
2402 {
2403 unsigned long cver;
2404 unsigned long dur;
2405 struct fwd_cb_state fcs;
2406 unsigned long gps;
2407 int idx;
2408 int sd;
2409 int sd4;
2410 bool selfpropcb = false;
2411 unsigned long stopat;
2412 static DEFINE_TORTURE_RANDOM(trs);
2413
2414 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2415 if (!cur_ops->sync)
2416 return;
2417 if (cur_ops->call && cur_ops->cb_barrier) {
2418 init_rcu_head_on_stack(&fcs.rh);
2419 selfpropcb = true;
2420 }
2421
2422
2423 atomic_inc(&rcu_fwd_cb_nodelay);
2424 cur_ops->sync();
2425 if (selfpropcb) {
2426 WRITE_ONCE(fcs.stop, 0);
2427 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2428 }
2429 cver = READ_ONCE(rcu_torture_current_version);
2430 gps = cur_ops->get_gp_seq();
2431 sd = cur_ops->stall_dur() + 1;
2432 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2433 dur = sd4 + torture_random(&trs) % (sd - sd4);
2434 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2435 stopat = rfp->rcu_fwd_startat + dur;
2436 while (time_before(jiffies, stopat) &&
2437 !shutdown_time_arrived() &&
2438 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2439 idx = cur_ops->readlock();
2440 udelay(10);
2441 cur_ops->readunlock(idx);
2442 if (!fwd_progress_need_resched || need_resched())
2443 cond_resched();
2444 }
2445 (*tested_tries)++;
2446 if (!time_before(jiffies, stopat) &&
2447 !shutdown_time_arrived() &&
2448 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2449 (*tested)++;
2450 cver = READ_ONCE(rcu_torture_current_version) - cver;
2451 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2452 WARN_ON(!cver && gps < 2);
2453 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2454 rfp->rcu_fwd_id, dur, cver, gps);
2455 }
2456 if (selfpropcb) {
2457 WRITE_ONCE(fcs.stop, 1);
2458 cur_ops->sync();
2459 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2460 cur_ops->cb_barrier();
2461 }
2462
2463 if (selfpropcb) {
2464 WARN_ON(READ_ONCE(fcs.stop) != 2);
2465 destroy_rcu_head_on_stack(&fcs.rh);
2466 }
2467 schedule_timeout_uninterruptible(HZ / 10);
2468 atomic_dec(&rcu_fwd_cb_nodelay);
2469 }
2470
2471
2472 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2473 {
2474 unsigned long cver;
2475 unsigned long flags;
2476 unsigned long gps;
2477 int i;
2478 long n_launders;
2479 long n_launders_cb_snap;
2480 long n_launders_sa;
2481 long n_max_cbs;
2482 long n_max_gps;
2483 struct rcu_fwd_cb *rfcp;
2484 struct rcu_fwd_cb *rfcpn;
2485 unsigned long stopat;
2486 unsigned long stoppedat;
2487
2488 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2489 if (READ_ONCE(rcu_fwd_emergency_stop))
2490 return;
2491 if (!cur_ops->call)
2492 return;
2493
2494
2495 atomic_inc(&rcu_fwd_cb_nodelay);
2496 cur_ops->sync();
2497 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2498 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2499 n_launders = 0;
2500 rfp->n_launders_cb = 0;
2501 n_launders_sa = 0;
2502 n_max_cbs = 0;
2503 n_max_gps = 0;
2504 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2505 rfp->n_launders_hist[i].n_launders = 0;
2506 cver = READ_ONCE(rcu_torture_current_version);
2507 gps = cur_ops->get_gp_seq();
2508 rfp->rcu_launder_gp_seq_start = gps;
2509 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2510 while (time_before(jiffies, stopat) &&
2511 !shutdown_time_arrived() &&
2512 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2513 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2514 rfcpn = NULL;
2515 if (rfcp)
2516 rfcpn = READ_ONCE(rfcp->rfc_next);
2517 if (rfcpn) {
2518 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2519 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2520 break;
2521 rfp->rcu_fwd_cb_head = rfcpn;
2522 n_launders++;
2523 n_launders_sa++;
2524 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2525 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2526 if (WARN_ON_ONCE(!rfcp)) {
2527 schedule_timeout_interruptible(1);
2528 continue;
2529 }
2530 n_max_cbs++;
2531 n_launders_sa = 0;
2532 rfcp->rfc_gps = 0;
2533 rfcp->rfc_rfp = rfp;
2534 } else {
2535 rfcp = NULL;
2536 }
2537 if (rfcp)
2538 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2539 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2540 if (tick_nohz_full_enabled()) {
2541 local_irq_save(flags);
2542 rcu_momentary_dyntick_idle();
2543 local_irq_restore(flags);
2544 }
2545 }
2546 stoppedat = jiffies;
2547 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2548 cver = READ_ONCE(rcu_torture_current_version) - cver;
2549 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2550 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2551 cur_ops->cb_barrier();
2552 (void)rcu_torture_fwd_prog_cbfree(rfp);
2553
2554 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2555 !shutdown_time_arrived()) {
2556 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2557 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2558 __func__,
2559 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2560 n_launders + n_max_cbs - n_launders_cb_snap,
2561 n_launders, n_launders_sa,
2562 n_max_gps, n_max_cbs, cver, gps);
2563 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2564 mutex_lock(&rcu_fwd_mutex);
2565 rcu_torture_fwd_cb_hist(rfp);
2566 mutex_unlock(&rcu_fwd_mutex);
2567 }
2568 schedule_timeout_uninterruptible(HZ);
2569 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2570 atomic_dec(&rcu_fwd_cb_nodelay);
2571 }
2572
2573
2574
2575
2576
2577
2578 static int rcutorture_oom_notify(struct notifier_block *self,
2579 unsigned long notused, void *nfreed)
2580 {
2581 int i;
2582 long ncbs;
2583 struct rcu_fwd *rfp;
2584
2585 mutex_lock(&rcu_fwd_mutex);
2586 rfp = rcu_fwds;
2587 if (!rfp) {
2588 mutex_unlock(&rcu_fwd_mutex);
2589 return NOTIFY_OK;
2590 }
2591 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2592 __func__);
2593 for (i = 0; i < fwd_progress; i++) {
2594 rcu_torture_fwd_cb_hist(&rfp[i]);
2595 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2596 }
2597 WRITE_ONCE(rcu_fwd_emergency_stop, true);
2598 smp_mb();
2599 ncbs = 0;
2600 for (i = 0; i < fwd_progress; i++)
2601 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2602 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2603 rcu_barrier();
2604 ncbs = 0;
2605 for (i = 0; i < fwd_progress; i++)
2606 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2607 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2608 rcu_barrier();
2609 ncbs = 0;
2610 for (i = 0; i < fwd_progress; i++)
2611 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2612 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2613 smp_mb();
2614 (*(unsigned long *)nfreed)++;
2615 pr_info("%s returning after OOM processing.\n", __func__);
2616 mutex_unlock(&rcu_fwd_mutex);
2617 return NOTIFY_OK;
2618 }
2619
2620 static struct notifier_block rcutorture_oom_nb = {
2621 .notifier_call = rcutorture_oom_notify
2622 };
2623
2624
2625 static int rcu_torture_fwd_prog(void *args)
2626 {
2627 bool firsttime = true;
2628 long max_cbs;
2629 int oldnice = task_nice(current);
2630 unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
2631 struct rcu_fwd *rfp = args;
2632 int tested = 0;
2633 int tested_tries = 0;
2634
2635 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2636 rcu_bind_current_to_nocb();
2637 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2638 set_user_nice(current, MAX_NICE);
2639 do {
2640 if (!rfp->rcu_fwd_id) {
2641 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2642 WRITE_ONCE(rcu_fwd_emergency_stop, false);
2643 if (!firsttime) {
2644 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2645 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2646 }
2647 firsttime = false;
2648 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2649 } else {
2650 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
2651 schedule_timeout_interruptible(1);
2652 oldseq = READ_ONCE(rcu_fwd_seq);
2653 }
2654 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2655 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
2656 rcu_torture_fwd_prog_cr(rfp);
2657 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2658 (!IS_ENABLED(CONFIG_TINY_RCU) ||
2659 (rcu_inkernel_boot_has_ended() &&
2660 torture_num_online_cpus() > rfp->rcu_fwd_id)))
2661 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2662
2663
2664 if (stutter_wait("rcu_torture_fwd_prog"))
2665 sched_set_normal(current, oldnice);
2666 } while (!torture_must_stop());
2667
2668 if (!rfp->rcu_fwd_id) {
2669 WARN_ON(!tested && tested_tries >= 5);
2670 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2671 }
2672 torture_kthread_stopping("rcu_torture_fwd_prog");
2673 return 0;
2674 }
2675
2676
2677 static int __init rcu_torture_fwd_prog_init(void)
2678 {
2679 int i;
2680 int ret = 0;
2681 struct rcu_fwd *rfp;
2682
2683 if (!fwd_progress)
2684 return 0;
2685 if (fwd_progress >= nr_cpu_ids) {
2686 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
2687 fwd_progress = nr_cpu_ids;
2688 } else if (fwd_progress < 0) {
2689 fwd_progress = nr_cpu_ids;
2690 }
2691 if ((!cur_ops->sync && !cur_ops->call) ||
2692 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2693 cur_ops == &rcu_busted_ops) {
2694 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2695 fwd_progress = 0;
2696 return 0;
2697 }
2698 if (stall_cpu > 0) {
2699 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2700 fwd_progress = 0;
2701 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2702 return -EINVAL;
2703 WARN_ON(1);
2704 return 0;
2705 }
2706 if (fwd_progress_holdoff <= 0)
2707 fwd_progress_holdoff = 1;
2708 if (fwd_progress_div <= 0)
2709 fwd_progress_div = 4;
2710 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2711 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2712 if (!rfp || !fwd_prog_tasks) {
2713 kfree(rfp);
2714 kfree(fwd_prog_tasks);
2715 fwd_prog_tasks = NULL;
2716 fwd_progress = 0;
2717 return -ENOMEM;
2718 }
2719 for (i = 0; i < fwd_progress; i++) {
2720 spin_lock_init(&rfp[i].rcu_fwd_lock);
2721 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
2722 rfp[i].rcu_fwd_id = i;
2723 }
2724 mutex_lock(&rcu_fwd_mutex);
2725 rcu_fwds = rfp;
2726 mutex_unlock(&rcu_fwd_mutex);
2727 register_oom_notifier(&rcutorture_oom_nb);
2728 for (i = 0; i < fwd_progress; i++) {
2729 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
2730 if (ret) {
2731 fwd_progress = i;
2732 return ret;
2733 }
2734 }
2735 return 0;
2736 }
2737
2738 static void rcu_torture_fwd_prog_cleanup(void)
2739 {
2740 int i;
2741 struct rcu_fwd *rfp;
2742
2743 if (!rcu_fwds || !fwd_prog_tasks)
2744 return;
2745 for (i = 0; i < fwd_progress; i++)
2746 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
2747 unregister_oom_notifier(&rcutorture_oom_nb);
2748 mutex_lock(&rcu_fwd_mutex);
2749 rfp = rcu_fwds;
2750 rcu_fwds = NULL;
2751 mutex_unlock(&rcu_fwd_mutex);
2752 kfree(rfp);
2753 kfree(fwd_prog_tasks);
2754 fwd_prog_tasks = NULL;
2755 }
2756
2757
2758 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2759 {
2760 atomic_inc(&barrier_cbs_invoked);
2761 }
2762
2763
2764 static void rcu_torture_barrier1cb(void *rcu_void)
2765 {
2766 struct rcu_head *rhp = rcu_void;
2767
2768 cur_ops->call(rhp, rcu_torture_barrier_cbf);
2769 }
2770
2771
2772 static int rcu_torture_barrier_cbs(void *arg)
2773 {
2774 long myid = (long)arg;
2775 bool lastphase = false;
2776 bool newphase;
2777 struct rcu_head rcu;
2778
2779 init_rcu_head_on_stack(&rcu);
2780 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2781 set_user_nice(current, MAX_NICE);
2782 do {
2783 wait_event(barrier_cbs_wq[myid],
2784 (newphase =
2785 smp_load_acquire(&barrier_phase)) != lastphase ||
2786 torture_must_stop());
2787 lastphase = newphase;
2788 if (torture_must_stop())
2789 break;
2790
2791
2792
2793
2794 if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2795 &rcu, 1)) {
2796
2797 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2798 }
2799 if (atomic_dec_and_test(&barrier_cbs_count))
2800 wake_up(&barrier_wq);
2801 } while (!torture_must_stop());
2802 if (cur_ops->cb_barrier != NULL)
2803 cur_ops->cb_barrier();
2804 destroy_rcu_head_on_stack(&rcu);
2805 torture_kthread_stopping("rcu_torture_barrier_cbs");
2806 return 0;
2807 }
2808
2809
2810 static int rcu_torture_barrier(void *arg)
2811 {
2812 int i;
2813
2814 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2815 do {
2816 atomic_set(&barrier_cbs_invoked, 0);
2817 atomic_set(&barrier_cbs_count, n_barrier_cbs);
2818
2819 smp_store_release(&barrier_phase, !barrier_phase);
2820 for (i = 0; i < n_barrier_cbs; i++)
2821 wake_up(&barrier_cbs_wq[i]);
2822 wait_event(barrier_wq,
2823 atomic_read(&barrier_cbs_count) == 0 ||
2824 torture_must_stop());
2825 if (torture_must_stop())
2826 break;
2827 n_barrier_attempts++;
2828 cur_ops->cb_barrier();
2829 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2830 n_rcu_torture_barrier_error++;
2831 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2832 atomic_read(&barrier_cbs_invoked),
2833 n_barrier_cbs);
2834 WARN_ON(1);
2835
2836 i = 0;
2837 do {
2838 if (WARN_ON(i++ > HZ))
2839 i = INT_MIN;
2840 schedule_timeout_interruptible(1);
2841 cur_ops->cb_barrier();
2842 } while (atomic_read(&barrier_cbs_invoked) !=
2843 n_barrier_cbs &&
2844 !torture_must_stop());
2845 smp_mb();
2846 if (!torture_must_stop())
2847 pr_err("Recovered: barrier_cbs_invoked = %d\n",
2848 atomic_read(&barrier_cbs_invoked));
2849 } else {
2850 n_barrier_successes++;
2851 }
2852 schedule_timeout_interruptible(HZ / 10);
2853 } while (!torture_must_stop());
2854 torture_kthread_stopping("rcu_torture_barrier");
2855 return 0;
2856 }
2857
2858
2859 static int rcu_torture_barrier_init(void)
2860 {
2861 int i;
2862 int ret;
2863
2864 if (n_barrier_cbs <= 0)
2865 return 0;
2866 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2867 pr_alert("%s" TORTURE_FLAG
2868 " Call or barrier ops missing for %s,\n",
2869 torture_type, cur_ops->name);
2870 pr_alert("%s" TORTURE_FLAG
2871 " RCU barrier testing omitted from run.\n",
2872 torture_type);
2873 return 0;
2874 }
2875 atomic_set(&barrier_cbs_count, 0);
2876 atomic_set(&barrier_cbs_invoked, 0);
2877 barrier_cbs_tasks =
2878 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2879 GFP_KERNEL);
2880 barrier_cbs_wq =
2881 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2882 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
2883 return -ENOMEM;
2884 for (i = 0; i < n_barrier_cbs; i++) {
2885 init_waitqueue_head(&barrier_cbs_wq[i]);
2886 ret = torture_create_kthread(rcu_torture_barrier_cbs,
2887 (void *)(long)i,
2888 barrier_cbs_tasks[i]);
2889 if (ret)
2890 return ret;
2891 }
2892 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
2893 }
2894
2895
2896 static void rcu_torture_barrier_cleanup(void)
2897 {
2898 int i;
2899
2900 torture_stop_kthread(rcu_torture_barrier, barrier_task);
2901 if (barrier_cbs_tasks != NULL) {
2902 for (i = 0; i < n_barrier_cbs; i++)
2903 torture_stop_kthread(rcu_torture_barrier_cbs,
2904 barrier_cbs_tasks[i]);
2905 kfree(barrier_cbs_tasks);
2906 barrier_cbs_tasks = NULL;
2907 }
2908 if (barrier_cbs_wq != NULL) {
2909 kfree(barrier_cbs_wq);
2910 barrier_cbs_wq = NULL;
2911 }
2912 }
2913
2914 static bool rcu_torture_can_boost(void)
2915 {
2916 static int boost_warn_once;
2917 int prio;
2918
2919 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2920 return false;
2921 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
2922 return false;
2923
2924 prio = rcu_get_gp_kthreads_prio();
2925 if (!prio)
2926 return false;
2927
2928 if (prio < 2) {
2929 if (boost_warn_once == 1)
2930 return false;
2931
2932 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2933 boost_warn_once = 1;
2934 return false;
2935 }
2936
2937 return true;
2938 }
2939
2940 static bool read_exit_child_stop;
2941 static bool read_exit_child_stopped;
2942 static wait_queue_head_t read_exit_wq;
2943
2944
2945 static int rcu_torture_read_exit_child(void *trsp_in)
2946 {
2947 struct torture_random_state *trsp = trsp_in;
2948
2949 set_user_nice(current, MAX_NICE);
2950
2951 while (!kthread_should_stop())
2952 schedule_timeout_uninterruptible(1);
2953 (void)rcu_torture_one_read(trsp, -1);
2954 return 0;
2955 }
2956
2957
2958 static int rcu_torture_read_exit(void *unused)
2959 {
2960 bool errexit = false;
2961 int i;
2962 struct task_struct *tsp;
2963 DEFINE_TORTURE_RANDOM(trs);
2964
2965
2966 set_user_nice(current, MAX_NICE);
2967 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2968
2969
2970 do {
2971 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2972 for (i = 0; i < read_exit_burst; i++) {
2973 if (READ_ONCE(read_exit_child_stop))
2974 break;
2975 stutter_wait("rcu_torture_read_exit");
2976
2977 tsp = kthread_run(rcu_torture_read_exit_child,
2978 &trs, "%s", "rcu_torture_read_exit_child");
2979 if (IS_ERR(tsp)) {
2980 TOROUT_ERRSTRING("out of memory");
2981 errexit = true;
2982 break;
2983 }
2984 cond_resched();
2985 kthread_stop(tsp);
2986 n_read_exits++;
2987 }
2988 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2989 rcu_barrier();
2990 i = 0;
2991 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
2992 schedule_timeout_uninterruptible(HZ);
2993 } while (!errexit && !READ_ONCE(read_exit_child_stop));
2994
2995
2996 smp_store_release(&read_exit_child_stopped, true);
2997 smp_mb();
2998 wake_up(&read_exit_wq);
2999 while (!torture_must_stop())
3000 schedule_timeout_uninterruptible(1);
3001 torture_kthread_stopping("rcu_torture_read_exit");
3002 return 0;
3003 }
3004
3005 static int rcu_torture_read_exit_init(void)
3006 {
3007 if (read_exit_burst <= 0)
3008 return 0;
3009 init_waitqueue_head(&read_exit_wq);
3010 read_exit_child_stop = false;
3011 read_exit_child_stopped = false;
3012 return torture_create_kthread(rcu_torture_read_exit, NULL,
3013 read_exit_task);
3014 }
3015
3016 static void rcu_torture_read_exit_cleanup(void)
3017 {
3018 if (!read_exit_task)
3019 return;
3020 WRITE_ONCE(read_exit_child_stop, true);
3021 smp_mb();
3022 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3023 torture_stop_kthread(rcutorture_read_exit, read_exit_task);
3024 }
3025
3026 static enum cpuhp_state rcutor_hp;
3027
3028 static void
3029 rcu_torture_cleanup(void)
3030 {
3031 int firsttime;
3032 int flags = 0;
3033 unsigned long gp_seq = 0;
3034 int i;
3035
3036 if (torture_cleanup_begin()) {
3037 if (cur_ops->cb_barrier != NULL) {
3038 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3039 cur_ops->cb_barrier();
3040 }
3041 rcu_gp_slow_unregister(NULL);
3042 return;
3043 }
3044 if (!cur_ops) {
3045 torture_cleanup_end();
3046 rcu_gp_slow_unregister(NULL);
3047 return;
3048 }
3049
3050 if (cur_ops->gp_kthread_dbg)
3051 cur_ops->gp_kthread_dbg();
3052 rcu_torture_read_exit_cleanup();
3053 rcu_torture_barrier_cleanup();
3054 rcu_torture_fwd_prog_cleanup();
3055 torture_stop_kthread(rcu_torture_stall, stall_task);
3056 torture_stop_kthread(rcu_torture_writer, writer_task);
3057
3058 if (nocb_tasks) {
3059 for (i = 0; i < nrealnocbers; i++)
3060 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
3061 kfree(nocb_tasks);
3062 nocb_tasks = NULL;
3063 }
3064
3065 if (reader_tasks) {
3066 for (i = 0; i < nrealreaders; i++)
3067 torture_stop_kthread(rcu_torture_reader,
3068 reader_tasks[i]);
3069 kfree(reader_tasks);
3070 reader_tasks = NULL;
3071 }
3072 kfree(rcu_torture_reader_mbchk);
3073 rcu_torture_reader_mbchk = NULL;
3074
3075 if (fakewriter_tasks) {
3076 for (i = 0; i < nfakewriters; i++)
3077 torture_stop_kthread(rcu_torture_fakewriter,
3078 fakewriter_tasks[i]);
3079 kfree(fakewriter_tasks);
3080 fakewriter_tasks = NULL;
3081 }
3082
3083 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3084 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3085 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
3086 cur_ops->name, (long)gp_seq, flags,
3087 rcutorture_seq_diff(gp_seq, start_gp_seq));
3088 torture_stop_kthread(rcu_torture_stats, stats_task);
3089 torture_stop_kthread(rcu_torture_fqs, fqs_task);
3090 if (rcu_torture_can_boost() && rcutor_hp >= 0)
3091 cpuhp_remove_state(rcutor_hp);
3092
3093
3094
3095
3096
3097 if (cur_ops->cb_barrier != NULL) {
3098 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3099 cur_ops->cb_barrier();
3100 }
3101 if (cur_ops->cleanup != NULL)
3102 cur_ops->cleanup();
3103
3104 rcu_torture_mem_dump_obj();
3105
3106 rcu_torture_stats_print();
3107
3108 if (err_segs_recorded) {
3109 pr_alert("Failure/close-call rcutorture reader segments:\n");
3110 if (rt_read_nsegs == 0)
3111 pr_alert("\t: No segments recorded!!!\n");
3112 firsttime = 1;
3113 for (i = 0; i < rt_read_nsegs; i++) {
3114 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
3115 if (err_segs[i].rt_delay_jiffies != 0) {
3116 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
3117 err_segs[i].rt_delay_jiffies);
3118 firsttime = 0;
3119 }
3120 if (err_segs[i].rt_delay_ms != 0) {
3121 pr_cont("%s%ldms", firsttime ? "" : "+",
3122 err_segs[i].rt_delay_ms);
3123 firsttime = 0;
3124 }
3125 if (err_segs[i].rt_delay_us != 0) {
3126 pr_cont("%s%ldus", firsttime ? "" : "+",
3127 err_segs[i].rt_delay_us);
3128 firsttime = 0;
3129 }
3130 pr_cont("%s\n",
3131 err_segs[i].rt_preempted ? "preempted" : "");
3132
3133 }
3134 }
3135 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
3136 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
3137 else if (torture_onoff_failures())
3138 rcu_torture_print_module_parms(cur_ops,
3139 "End of test: RCU_HOTPLUG");
3140 else
3141 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
3142 torture_cleanup_end();
3143 rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
3144 }
3145
3146 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3147 static void rcu_torture_leak_cb(struct rcu_head *rhp)
3148 {
3149 }
3150
3151 static void rcu_torture_err_cb(struct rcu_head *rhp)
3152 {
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
3163 }
3164 #endif
3165
3166
3167
3168
3169
3170
3171 static void rcu_test_debug_objects(void)
3172 {
3173 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3174 struct rcu_head rh1;
3175 struct rcu_head rh2;
3176 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
3177
3178 init_rcu_head_on_stack(&rh1);
3179 init_rcu_head_on_stack(&rh2);
3180 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
3181
3182
3183 preempt_disable();
3184 rcu_read_lock();
3185 call_rcu(&rh1, rcu_torture_leak_cb);
3186 local_irq_disable();
3187 call_rcu(&rh2, rcu_torture_leak_cb);
3188 call_rcu(&rh2, rcu_torture_err_cb);
3189 if (rhp) {
3190 call_rcu(rhp, rcu_torture_leak_cb);
3191 call_rcu(rhp, rcu_torture_err_cb);
3192 }
3193 local_irq_enable();
3194 rcu_read_unlock();
3195 preempt_enable();
3196
3197
3198 rcu_barrier();
3199 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
3200 destroy_rcu_head_on_stack(&rh1);
3201 destroy_rcu_head_on_stack(&rh2);
3202 kfree(rhp);
3203 #else
3204 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
3205 #endif
3206 }
3207
3208 static void rcutorture_sync(void)
3209 {
3210 static unsigned long n;
3211
3212 if (cur_ops->sync && !(++n & 0xfff))
3213 cur_ops->sync();
3214 }
3215
3216 static int __init
3217 rcu_torture_init(void)
3218 {
3219 long i;
3220 int cpu;
3221 int firsterr = 0;
3222 int flags = 0;
3223 unsigned long gp_seq = 0;
3224 static struct rcu_torture_ops *torture_ops[] = {
3225 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
3226 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
3227 &trivial_ops,
3228 };
3229
3230 if (!torture_init_begin(torture_type, verbose))
3231 return -EBUSY;
3232
3233
3234 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
3235 cur_ops = torture_ops[i];
3236 if (strcmp(torture_type, cur_ops->name) == 0)
3237 break;
3238 }
3239 if (i == ARRAY_SIZE(torture_ops)) {
3240 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
3241 torture_type);
3242 pr_alert("rcu-torture types:");
3243 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
3244 pr_cont(" %s", torture_ops[i]->name);
3245 pr_cont("\n");
3246 firsterr = -EINVAL;
3247 cur_ops = NULL;
3248 goto unwind;
3249 }
3250 if (cur_ops->fqs == NULL && fqs_duration != 0) {
3251 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
3252 fqs_duration = 0;
3253 }
3254 if (cur_ops->init)
3255 cur_ops->init();
3256
3257 if (nreaders >= 0) {
3258 nrealreaders = nreaders;
3259 } else {
3260 nrealreaders = num_online_cpus() - 2 - nreaders;
3261 if (nrealreaders <= 0)
3262 nrealreaders = 1;
3263 }
3264 rcu_torture_print_module_parms(cur_ops, "Start of test");
3265 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3266 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3267 start_gp_seq = gp_seq;
3268 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
3269 cur_ops->name, (long)gp_seq, flags);
3270
3271
3272
3273 INIT_LIST_HEAD(&rcu_torture_freelist);
3274 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3275 rcu_tortures[i].rtort_mbtest = 0;
3276 list_add_tail(&rcu_tortures[i].rtort_free,
3277 &rcu_torture_freelist);
3278 }
3279
3280
3281
3282 rcu_torture_current = NULL;
3283 rcu_torture_current_version = 0;
3284 atomic_set(&n_rcu_torture_alloc, 0);
3285 atomic_set(&n_rcu_torture_alloc_fail, 0);
3286 atomic_set(&n_rcu_torture_free, 0);
3287 atomic_set(&n_rcu_torture_mberror, 0);
3288 atomic_set(&n_rcu_torture_mbchk_fail, 0);
3289 atomic_set(&n_rcu_torture_mbchk_tries, 0);
3290 atomic_set(&n_rcu_torture_error, 0);
3291 n_rcu_torture_barrier_error = 0;
3292 n_rcu_torture_boost_ktrerror = 0;
3293 n_rcu_torture_boost_rterror = 0;
3294 n_rcu_torture_boost_failure = 0;
3295 n_rcu_torture_boosts = 0;
3296 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3297 atomic_set(&rcu_torture_wcount[i], 0);
3298 for_each_possible_cpu(cpu) {
3299 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3300 per_cpu(rcu_torture_count, cpu)[i] = 0;
3301 per_cpu(rcu_torture_batch, cpu)[i] = 0;
3302 }
3303 }
3304 err_segs_recorded = 0;
3305 rt_read_nsegs = 0;
3306
3307
3308
3309 rcu_torture_write_types();
3310 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3311 writer_task);
3312 if (torture_init_error(firsterr))
3313 goto unwind;
3314 if (nfakewriters > 0) {
3315 fakewriter_tasks = kcalloc(nfakewriters,
3316 sizeof(fakewriter_tasks[0]),
3317 GFP_KERNEL);
3318 if (fakewriter_tasks == NULL) {
3319 TOROUT_ERRSTRING("out of memory");
3320 firsterr = -ENOMEM;
3321 goto unwind;
3322 }
3323 }
3324 for (i = 0; i < nfakewriters; i++) {
3325 firsterr = torture_create_kthread(rcu_torture_fakewriter,
3326 NULL, fakewriter_tasks[i]);
3327 if (torture_init_error(firsterr))
3328 goto unwind;
3329 }
3330 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3331 GFP_KERNEL);
3332 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3333 GFP_KERNEL);
3334 if (!reader_tasks || !rcu_torture_reader_mbchk) {
3335 TOROUT_ERRSTRING("out of memory");
3336 firsterr = -ENOMEM;
3337 goto unwind;
3338 }
3339 for (i = 0; i < nrealreaders; i++) {
3340 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3341 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3342 reader_tasks[i]);
3343 if (torture_init_error(firsterr))
3344 goto unwind;
3345 }
3346 nrealnocbers = nocbs_nthreads;
3347 if (WARN_ON(nrealnocbers < 0))
3348 nrealnocbers = 1;
3349 if (WARN_ON(nocbs_toggle < 0))
3350 nocbs_toggle = HZ;
3351 if (nrealnocbers > 0) {
3352 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3353 if (nocb_tasks == NULL) {
3354 TOROUT_ERRSTRING("out of memory");
3355 firsterr = -ENOMEM;
3356 goto unwind;
3357 }
3358 } else {
3359 nocb_tasks = NULL;
3360 }
3361 for (i = 0; i < nrealnocbers; i++) {
3362 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3363 if (torture_init_error(firsterr))
3364 goto unwind;
3365 }
3366 if (stat_interval > 0) {
3367 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3368 stats_task);
3369 if (torture_init_error(firsterr))
3370 goto unwind;
3371 }
3372 if (test_no_idle_hz && shuffle_interval > 0) {
3373 firsterr = torture_shuffle_init(shuffle_interval * HZ);
3374 if (torture_init_error(firsterr))
3375 goto unwind;
3376 }
3377 if (stutter < 0)
3378 stutter = 0;
3379 if (stutter) {
3380 int t;
3381
3382 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3383 firsterr = torture_stutter_init(stutter * HZ, t);
3384 if (torture_init_error(firsterr))
3385 goto unwind;
3386 }
3387 if (fqs_duration < 0)
3388 fqs_duration = 0;
3389 if (fqs_duration) {
3390
3391 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3392 fqs_task);
3393 if (torture_init_error(firsterr))
3394 goto unwind;
3395 }
3396 if (test_boost_interval < 1)
3397 test_boost_interval = 1;
3398 if (test_boost_duration < 2)
3399 test_boost_duration = 2;
3400 if (rcu_torture_can_boost()) {
3401
3402 boost_starttime = jiffies + test_boost_interval * HZ;
3403
3404 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3405 rcutorture_booster_init,
3406 rcutorture_booster_cleanup);
3407 rcutor_hp = firsterr;
3408 if (torture_init_error(firsterr))
3409 goto unwind;
3410 }
3411 shutdown_jiffies = jiffies + shutdown_secs * HZ;
3412 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3413 if (torture_init_error(firsterr))
3414 goto unwind;
3415 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3416 rcutorture_sync);
3417 if (torture_init_error(firsterr))
3418 goto unwind;
3419 firsterr = rcu_torture_stall_init();
3420 if (torture_init_error(firsterr))
3421 goto unwind;
3422 firsterr = rcu_torture_fwd_prog_init();
3423 if (torture_init_error(firsterr))
3424 goto unwind;
3425 firsterr = rcu_torture_barrier_init();
3426 if (torture_init_error(firsterr))
3427 goto unwind;
3428 firsterr = rcu_torture_read_exit_init();
3429 if (torture_init_error(firsterr))
3430 goto unwind;
3431 if (object_debug)
3432 rcu_test_debug_objects();
3433 torture_init_end();
3434 rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
3435 return 0;
3436
3437 unwind:
3438 torture_init_end();
3439 rcu_torture_cleanup();
3440 if (shutdown_secs) {
3441 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3442 kernel_power_off();
3443 }
3444 return firsterr;
3445 }
3446
3447 module_init(rcu_torture_init);
3448 module_exit(rcu_torture_cleanup);