Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Common functions for in-kernel torture tests.
0004  *
0005  * Copyright (C) IBM Corporation, 2014
0006  *
0007  * Author: Paul E. McKenney <paulmck@linux.ibm.com>
0008  *  Based on kernel/rcu/torture.c.
0009  */
0010 
0011 #define pr_fmt(fmt) fmt
0012 
0013 #include <linux/types.h>
0014 #include <linux/kernel.h>
0015 #include <linux/init.h>
0016 #include <linux/module.h>
0017 #include <linux/kthread.h>
0018 #include <linux/err.h>
0019 #include <linux/spinlock.h>
0020 #include <linux/smp.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/sched.h>
0023 #include <linux/sched/clock.h>
0024 #include <linux/atomic.h>
0025 #include <linux/bitops.h>
0026 #include <linux/completion.h>
0027 #include <linux/moduleparam.h>
0028 #include <linux/percpu.h>
0029 #include <linux/notifier.h>
0030 #include <linux/reboot.h>
0031 #include <linux/freezer.h>
0032 #include <linux/cpu.h>
0033 #include <linux/delay.h>
0034 #include <linux/stat.h>
0035 #include <linux/slab.h>
0036 #include <linux/trace_clock.h>
0037 #include <linux/ktime.h>
0038 #include <asm/byteorder.h>
0039 #include <linux/torture.h>
0040 #include "rcu/rcu.h"
0041 
0042 MODULE_LICENSE("GPL");
0043 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
0044 
0045 static bool disable_onoff_at_boot;
0046 module_param(disable_onoff_at_boot, bool, 0444);
0047 
0048 static bool ftrace_dump_at_shutdown;
0049 module_param(ftrace_dump_at_shutdown, bool, 0444);
0050 
0051 static int verbose_sleep_frequency;
0052 module_param(verbose_sleep_frequency, int, 0444);
0053 
0054 static int verbose_sleep_duration = 1;
0055 module_param(verbose_sleep_duration, int, 0444);
0056 
0057 static char *torture_type;
0058 static int verbose;
0059 
0060 /* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
0061 #define FULLSTOP_DONTSTOP 0 /* Normal operation. */
0062 #define FULLSTOP_SHUTDOWN 1 /* System shutdown with torture running. */
0063 #define FULLSTOP_RMMOD    2 /* Normal rmmod of torture. */
0064 static int fullstop = FULLSTOP_RMMOD;
0065 static DEFINE_MUTEX(fullstop_mutex);
0066 
0067 static atomic_t verbose_sleep_counter;
0068 
0069 /*
0070  * Sleep if needed from VERBOSE_TOROUT*().
0071  */
0072 void verbose_torout_sleep(void)
0073 {
0074     if (verbose_sleep_frequency > 0 &&
0075         verbose_sleep_duration > 0 &&
0076         !(atomic_inc_return(&verbose_sleep_counter) % verbose_sleep_frequency))
0077         schedule_timeout_uninterruptible(verbose_sleep_duration);
0078 }
0079 EXPORT_SYMBOL_GPL(verbose_torout_sleep);
0080 
0081 /*
0082  * Schedule a high-resolution-timer sleep in nanoseconds, with a 32-bit
0083  * nanosecond random fuzz.  This function and its friends desynchronize
0084  * testing from the timer wheel.
0085  */
0086 int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
0087 {
0088     ktime_t hto = baset_ns;
0089 
0090     if (trsp)
0091         hto += (torture_random(trsp) >> 3) % fuzzt_ns;
0092     set_current_state(TASK_UNINTERRUPTIBLE);
0093     return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
0094 }
0095 EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
0096 
0097 /*
0098  * Schedule a high-resolution-timer sleep in microseconds, with a 32-bit
0099  * nanosecond (not microsecond!) random fuzz.
0100  */
0101 int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp)
0102 {
0103     ktime_t baset_ns = baset_us * NSEC_PER_USEC;
0104 
0105     return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
0106 }
0107 EXPORT_SYMBOL_GPL(torture_hrtimeout_us);
0108 
0109 /*
0110  * Schedule a high-resolution-timer sleep in milliseconds, with a 32-bit
0111  * microsecond (not millisecond!) random fuzz.
0112  */
0113 int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp)
0114 {
0115     ktime_t baset_ns = baset_ms * NSEC_PER_MSEC;
0116     u32 fuzzt_ns;
0117 
0118     if ((u32)~0U / NSEC_PER_USEC < fuzzt_us)
0119         fuzzt_ns = (u32)~0U;
0120     else
0121         fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
0122     return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
0123 }
0124 EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);
0125 
0126 /*
0127  * Schedule a high-resolution-timer sleep in jiffies, with an
0128  * implied one-jiffy random fuzz.  This is intended to replace calls to
0129  * schedule_timeout_interruptible() and friends.
0130  */
0131 int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
0132 {
0133     ktime_t baset_ns = jiffies_to_nsecs(baset_j);
0134 
0135     return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
0136 }
0137 EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);
0138 
0139 /*
0140  * Schedule a high-resolution-timer sleep in milliseconds, with a 32-bit
0141  * millisecond (not second!) random fuzz.
0142  */
0143 int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp)
0144 {
0145     ktime_t baset_ns = baset_s * NSEC_PER_SEC;
0146     u32 fuzzt_ns;
0147 
0148     if ((u32)~0U / NSEC_PER_MSEC < fuzzt_ms)
0149         fuzzt_ns = (u32)~0U;
0150     else
0151         fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
0152     return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
0153 }
0154 EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
0155 
0156 #ifdef CONFIG_HOTPLUG_CPU
0157 
0158 /*
0159  * Variables for online-offline handling.  Only present if CPU hotplug
0160  * is enabled, otherwise does nothing.
0161  */
0162 
0163 static struct task_struct *onoff_task;
0164 static long onoff_holdoff;
0165 static long onoff_interval;
0166 static torture_ofl_func *onoff_f;
0167 static long n_offline_attempts;
0168 static long n_offline_successes;
0169 static unsigned long sum_offline;
0170 static int min_offline = -1;
0171 static int max_offline;
0172 static long n_online_attempts;
0173 static long n_online_successes;
0174 static unsigned long sum_online;
0175 static int min_online = -1;
0176 static int max_online;
0177 
0178 static int torture_online_cpus = NR_CPUS;
0179 
0180 /*
0181  * Some torture testing leverages confusion as to the number of online
0182  * CPUs.  This function returns the torture-testing view of this number,
0183  * which allows torture tests to load-balance appropriately.
0184  */
int torture_num_online_cpus(void)
{
	/* Pairs with the WRITE_ONCE()s in torture_offline()/torture_online(). */
	return READ_ONCE(torture_online_cpus);
}
EXPORT_SYMBOL_GPL(torture_num_online_cpus);
0190 
0191 /*
0192  * Attempt to take a CPU offline.  Return false if the CPU is already
0193  * offline or if it is not subject to CPU-hotplug operations.  The
0194  * caller can detect other failures by looking at the statistics.
0195  */
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
		     unsigned long *sum_offl, int *min_offl, int *max_offl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	/* Nothing to do if already offline or not subject to hotplug. */
	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;
	if (num_online_cpus() <= 1)
		return false;  /* Can't offline the last CPU. */

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: offlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_offl_attempts)++;
	ret = remove_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_offl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offline %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offlined %d\n",
				 torture_type, cpu);
		if (onoff_f)
			onoff_f();	/* Optional caller-supplied post-offline hook. */
		(*n_offl_successes)++;
		delta = jiffies - starttime;	/* Offline latency in jiffies. */
		*sum_offl += delta;
		/* First success: initialize both min and max to this latency. */
		if (*min_offl < 0) {
			*min_offl = delta;
			*max_offl = delta;
		}
		if (*min_offl > delta)
			*min_offl = delta;
		if (*max_offl < delta)
			*max_offl = delta;
		/* Pairs with READ_ONCE() in torture_num_online_cpus(). */
		WRITE_ONCE(torture_online_cpus, torture_online_cpus - 1);
		WARN_ON_ONCE(torture_online_cpus <= 0);
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_offline);
0252 
0253 /*
0254  * Attempt to bring a CPU online.  Return false if the CPU is already
0255  * online or if it is not subject to CPU-hotplug operations.  The
0256  * caller can detect other failures by looking at the statistics.
0257  */
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
		    unsigned long *sum_onl, int *min_onl, int *max_onl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	/* Nothing to do if already online or not subject to hotplug. */
	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: onlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_onl_attempts)++;
	ret = add_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_onl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: online %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: onlined %d\n",
				 torture_type, cpu);
		(*n_onl_successes)++;
		delta = jiffies - starttime;	/* Online latency in jiffies. */
		*sum_onl += delta;
		/* First success: initialize both min and max to this latency. */
		if (*min_onl < 0) {
			*min_onl = delta;
			*max_onl = delta;
		}
		if (*min_onl > delta)
			*min_onl = delta;
		if (*max_onl < delta)
			*max_onl = delta;
		/* Pairs with READ_ONCE() in torture_num_online_cpus(). */
		WRITE_ONCE(torture_online_cpus, torture_online_cpus + 1);
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_online);
0309 
0310 /*
0311  * Get everything online at the beginning and ends of tests.
0312  */
0313 static void torture_online_all(char *phase)
0314 {
0315     int cpu;
0316     int ret;
0317 
0318     for_each_possible_cpu(cpu) {
0319         if (cpu_online(cpu))
0320             continue;
0321         ret = add_cpu(cpu);
0322         if (ret && verbose) {
0323             pr_alert("%s" TORTURE_FLAG
0324                  "%s: %s online %d: errno %d\n",
0325                  __func__, phase, torture_type, cpu, ret);
0326         }
0327     }
0328 }
0329 
0330 /*
0331  * Execute random CPU-hotplug operations at the interval specified
0332  * by the onoff_interval.
0333  */
0334 static int
0335 torture_onoff(void *arg)
0336 {
0337     int cpu;
0338     int maxcpu = -1;
0339     DEFINE_TORTURE_RANDOM(rand);
0340 
0341     VERBOSE_TOROUT_STRING("torture_onoff task started");
0342     for_each_online_cpu(cpu)
0343         maxcpu = cpu;
0344     WARN_ON(maxcpu < 0);
0345     torture_online_all("Initial");
0346     if (maxcpu == 0) {
0347         VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
0348         goto stop;
0349     }
0350 
0351     if (onoff_holdoff > 0) {
0352         VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
0353         schedule_timeout_interruptible(onoff_holdoff);
0354         VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
0355     }
0356     while (!torture_must_stop()) {
0357         if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
0358             schedule_timeout_interruptible(HZ / 10);
0359             continue;
0360         }
0361         cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
0362         if (!torture_offline(cpu,
0363                      &n_offline_attempts, &n_offline_successes,
0364                      &sum_offline, &min_offline, &max_offline))
0365             torture_online(cpu,
0366                        &n_online_attempts, &n_online_successes,
0367                        &sum_online, &min_online, &max_online);
0368         schedule_timeout_interruptible(onoff_interval);
0369     }
0370 
0371 stop:
0372     torture_kthread_stopping("torture_onoff");
0373     torture_online_all("Final");
0374     return 0;
0375 }
0376 
0377 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
0378 
0379 /*
0380  * Initiate online-offline handling.
0381  */
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;	/* Jiffies before hotplug testing starts. */
	onoff_interval = oointerval;	/* Jiffies between hotplug operations. */
	onoff_f = f;			/* Optional hook run after each offline. */
	/* A non-positive interval disables online/offline testing. */
	if (onoff_interval <= 0)
		return 0;
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	/* No CPU hotplug, so nothing to initialize. */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
0396 
0397 /*
0398  * Clean up after online/offline testing.
0399  */
/* Stop the torture_onoff kthread, if it was ever started. */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (!onoff_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
0410 
0411 /*
0412  * Print online/offline testing statistics.
0413  */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	/* Format: successes/attempts (online:offline), min,max latencies, sums. */
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);
0426 
0427 /*
0428  * Were all the online/offline operations successful?
0429  */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	/* Any attempt that did not succeed counts as a failure. */
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	/* No hotplug testing, so no failures. */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);
0440 
0441 #define TORTURE_RANDOM_MULT 39916801  /* prime */
0442 #define TORTURE_RANDOM_ADD  479001701 /* prime */
0443 #define TORTURE_RANDOM_REFRESH  10000
0444 
0445 /*
0446  * Crude but fast random-number generator.  Uses a linear congruential
0447  * generator, with occasional help from cpu_clock().
0448  */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	/* Every TORTURE_RANDOM_REFRESH calls, mix in the scheduler clock. */
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	/* Linear-congruential step with prime multiplier and addend. */
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	/* swahw32() swaps the 16-bit halfwords within each 32-bit word. */
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
0461 
0462 /*
0463  * Variables for shuffling.  The idea is to ensure that each CPU stays
0464  * idle for an extended period to test interactions with dyntick idle,
0465  * as well as interactions with any per-CPU variables.
0466  */
0467 struct shuffle_task {
0468     struct list_head st_l;
0469     struct task_struct *st_t;
0470 };
0471 
0472 static long shuffle_interval;   /* In jiffies. */
0473 static struct task_struct *shuffler_task;
0474 static cpumask_var_t shuffle_tmp_mask;
0475 static int shuffle_idle_cpu;    /* Force all torture tasks off this CPU */
0476 static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
0477 static DEFINE_MUTEX(shuffle_task_mutex);
0478 
0479 /*
0480  * Register a task to be shuffled.  If there is no memory, just splat
0481  * and don't bother registering.
0482  */
0483 void torture_shuffle_task_register(struct task_struct *tp)
0484 {
0485     struct shuffle_task *stp;
0486 
0487     if (WARN_ON_ONCE(tp == NULL))
0488         return;
0489     stp = kmalloc(sizeof(*stp), GFP_KERNEL);
0490     if (WARN_ON_ONCE(stp == NULL))
0491         return;
0492     stp->st_t = tp;
0493     mutex_lock(&shuffle_task_mutex);
0494     list_add(&stp->st_l, &shuffle_task_list);
0495     mutex_unlock(&shuffle_task_mutex);
0496 }
0497 EXPORT_SYMBOL_GPL(torture_shuffle_task_register);
0498 
0499 /*
0500  * Unregister all tasks, for example, at the end of the torture run.
0501  */
0502 static void torture_shuffle_task_unregister_all(void)
0503 {
0504     struct shuffle_task *stp;
0505     struct shuffle_task *p;
0506 
0507     mutex_lock(&shuffle_task_mutex);
0508     list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
0509         list_del(&stp->st_l);
0510         kfree(stp);
0511     }
0512     mutex_unlock(&shuffle_task_mutex);
0513 }
0514 
0515 /* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
0516  * A special case is when shuffle_idle_cpu = -1, in which case we allow
0517  * the tasks to run on all CPUs.
0518  */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	cpus_read_lock();	/* Keep the online set stable while shuffling. */

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		cpus_read_unlock();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;	/* Wrapped: this round, idle nobody. */
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	/* Restrict all registered tasks to every CPU except the idle one. */
	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	cpus_read_unlock();
}
0546 
0547 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
0548  * system to become idle at a time and cut off its timer ticks. This is meant
0549  * to test the support for such tickless idle CPU in RCU.
0550  */
0551 static int torture_shuffle(void *arg)
0552 {
0553     VERBOSE_TOROUT_STRING("torture_shuffle task started");
0554     do {
0555         schedule_timeout_interruptible(shuffle_interval);
0556         torture_shuffle_tasks();
0557         torture_shutdown_absorb("torture_shuffle");
0558     } while (!torture_must_stop());
0559     torture_kthread_stopping("torture_shuffle");
0560     return 0;
0561 }
0562 
0563 /*
0564  * Start the shuffler, with shuffint in jiffies.
0565  */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;	/* Jiffies between shuffles. */

	shuffle_idle_cpu = -1;		/* Start with no CPU forced idle. */

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);
0581 
0582 /*
0583  * Stop the shuffling.
0584  */
0585 static void torture_shuffle_cleanup(void)
0586 {
0587     torture_shuffle_task_unregister_all();
0588     if (shuffler_task) {
0589         VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
0590         kthread_stop(shuffler_task);
0591         free_cpumask_var(shuffle_tmp_mask);
0592     }
0593     shuffler_task = NULL;
0594 }
0595 
0596 /*
0597  * Variables for auto-shutdown.  This allows "lights out" torture runs
0598  * to be fully scripted.
0599  */
0600 static struct task_struct *shutdown_task;
0601 static ktime_t shutdown_time;       /* time to system shutdown. */
0602 static void (*torture_shutdown_hook)(void);
0603 
0604 /*
0605  * Absorb kthreads into a kernel function that won't return, so that
0606  * they won't ever access module text or data again.
0607  */
void torture_shutdown_absorb(const char *title)
{
	/* Park here forever once a system shutdown has been detected. */
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);
0617 
0618 /*
0619  * Cause the torture test to shutdown the system after the test has
0620  * run for the time specified by the shutdown_secs parameter.
0621  */
static int torture_shutdown(void *arg)
{
	ktime_t ktime_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	ktime_snap = ktime_get();
	/* Sleep (absolute-mode hrtimer) until shutdown_time or rmmod. */
	while (ktime_before(ktime_snap, shutdown_time) &&
	       !torture_must_stop()) {
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %llu ms remaining\n",
				 torture_type,
				 ktime_ms_delta(shutdown_time, ktime_snap));
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&shutdown_time, HRTIMER_MODE_ABS);
		ktime_snap = ktime_get();
	}
	/* Woken by rmmod rather than timeout: just stop, don't power off. */
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();	/* Client's cleanup callback. */
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	if (ftrace_dump_at_shutdown)
		rcu_ftrace_dump(DUMP_ALL);
	kernel_power_off();	/* Shut down the system. */
	return 0;
}
0657 
0658 /*
0659  * Start up the shutdown task.
0660  */
0661 int torture_shutdown_init(int ssecs, void (*cleanup)(void))
0662 {
0663     torture_shutdown_hook = cleanup;
0664     if (ssecs > 0) {
0665         shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
0666         return torture_create_kthread(torture_shutdown, NULL,
0667                          shutdown_task);
0668     }
0669     return 0;
0670 }
0671 EXPORT_SYMBOL_GPL(torture_shutdown_init);
0672 
0673 /*
0674  * Detect and respond to a system shutdown.
0675  */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	/* Only transition to SHUTDOWN from normal operation. */
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		/* Already in rmmod: this combination is unsupported. */
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/* Reboot notifier registered by torture_init_end(). */
static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};
0693 
0694 /*
0695  * Shut down the shutdown task.  Say what???  Heh!  This can happen if
0696  * the torture module gets an rmmod before the shutdown time arrives.  ;-)
0697  */
0698 static void torture_shutdown_cleanup(void)
0699 {
0700     unregister_reboot_notifier(&torture_shutdown_nb);
0701     if (shutdown_task != NULL) {
0702         VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
0703         kthread_stop(shutdown_task);
0704     }
0705     shutdown_task = NULL;
0706 }
0707 
0708 /*
0709  * Variables for stuttering, which means to periodically pause and
0710  * restart testing in order to catch bugs that appear when load is
0711  * suddenly applied to or removed from the system.
0712  */
0713 static struct task_struct *stutter_task;
0714 static int stutter_pause_test;
0715 static int stutter;
0716 static int stutter_gap;
0717 
0718 /*
0719  * Block until the stutter interval ends.  This must be called periodically
0720  * by all running kthreads that need to be subject to stuttering.
0721  */
bool stutter_wait(const char *title)
{
	unsigned int i = 0;
	bool ret = false;	/* True iff this call actually paused. */
	int spt;

	cond_resched_tasks_rcu_qs();
	spt = READ_ONCE(stutter_pause_test);
	/* Loop until torture_stutter() clears stutter_pause_test to zero. */
	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
		if (!ret) {
			/* Drop to normal priority once per pause. */
			sched_set_normal(current, MAX_NICE);
			ret = true;
		}
		if (spt == 1) {
			/* Early pause phase: sleep one jiffy at a time. */
			schedule_timeout_interruptible(1);
		} else if (spt == 2) {
			/* Final phase: near-busy-wait, sleeping briefly every 64Ki spins. */
			while (READ_ONCE(stutter_pause_test)) {
				if (!(i++ & 0xffff))
					torture_hrtimeout_us(10, 0, NULL);
				cond_resched();
			}
		} else {
			/* Unexpected value: back off for about a second. */
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		}
		torture_shutdown_absorb(title);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);
0751 
0752 /*
0753  * Cause the torture test to "stutter", starting and stopping all
0754  * threads periodically.
0755  */
static int torture_stutter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);
	int wtime;

	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop() && stutter > 1) {
			wtime = stutter;
			if (stutter > 2) {
				/* Phase 1: stutter_wait() callers sleep in 1-jiffy chunks. */
				WRITE_ONCE(stutter_pause_test, 1);
				wtime = stutter - 3;
				torture_hrtimeout_jiffies(wtime, &rand);
				wtime = 2;
			}
			/* Phase 2: callers switch to near-busy-waiting. */
			WRITE_ONCE(stutter_pause_test, 2);
			torture_hrtimeout_jiffies(wtime, NULL);
		}
		/* End of pause: release all stutter_wait() callers. */
		WRITE_ONCE(stutter_pause_test, 0);
		if (!torture_must_stop())
			torture_hrtimeout_jiffies(stutter_gap, NULL);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}
0782 
0783 /*
0784  * Initialize and kick off the torture_stutter kthread.
0785  */
int torture_stutter_init(const int s, const int sgap)
{
	stutter = s;		/* Pause length in jiffies. */
	stutter_gap = sgap;	/* Run length between pauses, in jiffies. */
	return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);
0793 
0794 /*
0795  * Cleanup after the torture_stutter kthread.
0796  */
0797 static void torture_stutter_cleanup(void)
0798 {
0799     if (!stutter_task)
0800         return;
0801     VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
0802     kthread_stop(stutter_task);
0803     stutter_task = NULL;
0804 }
0805 
0806 /*
0807  * Initialize torture module.  Please note that this is -not- invoked via
0808  * the usual module_init() mechanism, but rather by an explicit call from
0809  * the client torture module.  This call must be paired with a later
0810  * torture_init_end().
0811  *
0812  * The runnable parameter points to a flag that controls whether or not
0813  * the test is currently runnable.  If there is no such flag, pass in NULL.
0814  */
bool torture_init_begin(char *ttype, int v)
{
	/* Held until torture_init_end() to serialize against shutdown/rmmod. */
	mutex_lock(&fullstop_mutex);
	/* Non-NULL torture_type means another torture test is already running. */
	if (torture_type != NULL) {
		pr_alert("%s: Refusing %s init: %s running.\n",
			  __func__, ttype, torture_type);
		pr_alert("%s: One torture test at a time!\n", __func__);
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	fullstop = FULLSTOP_DONTSTOP;
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);
0831 
0832 /*
0833  * Tell the torture module that initialization is complete.
0834  */
void torture_init_end(void)
{
	/* Release the mutex acquired in torture_init_begin(). */
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
0841 
0842 /*
0843  * Clean up torture module.  Please note that this is -not- invoked via
0844  * the usual module_exit() mechanism, but rather by an explicit call from
0845  * the client torture module.  Returns true if a race with system shutdown
0846  * is detected, otherwise, all kthreads started by functions in this file
0847  * will be shut down.
0848  *
0849  * This must be called before the caller starts shutting down its own
0850  * kthreads.
0851  *
0852  * Both torture_cleanup_begin() and torture_cleanup_end() must be paired,
0853  * in order to correctly perform the cleanup. They are separated because
0854  * threads can still need to reference the torture_type type, thus nullify
0855  * only after completing all other relevant calls.
0856  */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	/* If a system shutdown is already in flight, back off and report it. */
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	/* Stop all kthreads started by this file. */
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);
0875 
void torture_cleanup_end(void)
{
	/* Nullify last so other threads can still reference torture_type. */
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);
0883 
0884 /*
0885  * Is it time for the current torture test to stop?
0886  */
bool torture_must_stop(void)
{
	/* Stop on either a fullstop transition or an explicit kthread_stop(). */
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);
0892 
0893 /*
0894  * Is it time for the current torture test to stop?  This is the irq-safe
0895  * version, hence no check for kthread_should_stop().
0896  */
bool torture_must_stop_irq(void)
{
	/* Any state other than DONTSTOP (shutdown or rmmod) means stop. */
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);
0902 
0903 /*
0904  * Each kthread must wait for kthread_should_stop() before returning from
0905  * its top-level function, otherwise segfaults ensue.  This function
0906  * prints a "stopping" message and waits for kthread_should_stop(), and
0907  * should be called from all torture kthreads immediately prior to
0908  * returning.
0909  */
/* Announce that @title is stopping, then spin until kthread_stop() arrives. */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "%s is stopping", title);
	VERBOSE_TOROUT_STRING(buf);
	for (;;) {
		if (kthread_should_stop())
			break;
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);
0922 
0923 /*
0924  * Create a generic torture kthread that is immediately runnable.  If you
0925  * need the kthread to be stopped so that you can do something to it before
0926  * it starts, you will need to open-code your own.
0927  */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);	/* "Creating..." message. */
	*tp = kthread_create(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		TOROUT_ERRSTRING(f);	/* "Failed to create..." message. */
		*tp = NULL;		/* Don't leave an ERR_PTR for callers. */
		return ret;
	}
	wake_up_process(*tp);  // Process is sleeping, so ordering provided.
	/* New kthread participates in CPU shuffling, if enabled. */
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
0946 
0947 /*
0948  * Stop a generic kthread, emitting a message.
0949  */
0950 void _torture_stop_kthread(char *m, struct task_struct **tp)
0951 {
0952     if (*tp == NULL)
0953         return;
0954     VERBOSE_TOROUT_STRING(m);
0955     kthread_stop(*tp);
0956     *tp = NULL;
0957 }
0958 EXPORT_SYMBOL_GPL(_torture_stop_kthread);