0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Read-Copy Update mechanism for mutual exclusion
0004  *
0005  * Copyright IBM Corporation, 2001
0006  *
0007  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
0008  *      Manfred Spraul <manfred@colorfullife.com>
0009  *
0010  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
0011  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
0012  * Papers:
0013  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
0014  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
0015  *
0016  * For detailed explanation of Read-Copy Update mechanism see -
0017  *      http://lse.sourceforge.net/locking/rcupdate.html
0018  *
0019  */
0020 #include <linux/types.h>
0021 #include <linux/kernel.h>
0022 #include <linux/init.h>
0023 #include <linux/spinlock.h>
0024 #include <linux/smp.h>
0025 #include <linux/interrupt.h>
0026 #include <linux/sched/signal.h>
0027 #include <linux/sched/debug.h>
0028 #include <linux/atomic.h>
0029 #include <linux/bitops.h>
0030 #include <linux/percpu.h>
0031 #include <linux/notifier.h>
0032 #include <linux/cpu.h>
0033 #include <linux/mutex.h>
0034 #include <linux/export.h>
0035 #include <linux/hardirq.h>
0036 #include <linux/delay.h>
0037 #include <linux/moduleparam.h>
0038 #include <linux/kthread.h>
0039 #include <linux/tick.h>
0040 #include <linux/rcupdate_wait.h>
0041 #include <linux/sched/isolation.h>
0042 #include <linux/kprobes.h>
0043 #include <linux/slab.h>
0044 #include <linux/irq_work.h>
0045 #include <linux/rcupdate_trace.h>
0046 
0047 #define CREATE_TRACE_POINTS
0048 
0049 #include "rcu.h"
0050 
0051 #ifdef MODULE_PARAM_PREFIX
0052 #undef MODULE_PARAM_PREFIX
0053 #endif
0054 #define MODULE_PARAM_PREFIX "rcupdate."
0055 
0056 #ifndef CONFIG_TINY_RCU
0057 module_param(rcu_expedited, int, 0444);
0058 module_param(rcu_normal, int, 0444);
0059 static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
0060 #if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
0061 module_param(rcu_normal_after_boot, int, 0444);
0062 #endif
0063 #endif /* #ifndef CONFIG_TINY_RCU */
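
/*
 * Illustrative usage (editor's note, not part of the kernel source):
 * because of MODULE_PARAM_PREFIX above, these parameters are given on the
 * kernel command line as, for example:
 *
 *     rcupdate.rcu_expedited=1
 *     rcupdate.rcu_normal_after_boot=1
 *
 * With 0444 permissions they are also visible read-only under
 * /sys/module/rcupdate/parameters/.
 */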
0064 
0065 #ifdef CONFIG_DEBUG_LOCK_ALLOC
0066 /**
0067  * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
0068  * @ret:    Best guess answer if lockdep cannot be relied on
0069  *
0070  * Returns true if lockdep must be ignored, in which case ``*ret`` contains
0071  * the best guess described below.  Otherwise returns false, in which
0072  * case ``*ret`` tells the caller nothing and the caller should instead
0073  * consult lockdep.
0074  *
0075  * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
0076  * RCU-sched read-side critical section.  In the absence of
0077  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
0078  * critical section unless it can prove otherwise.  Note that disabling
0079  * of preemption (including disabling irqs) counts as an RCU-sched
0080  * read-side critical section.  This is useful for debug checks in functions
0081  * that require that they be called within an RCU-sched read-side
0082  * critical section.
0083  *
0084  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
0085  * and while lockdep is disabled.
0086  *
0087  * Note that if the CPU is in the idle loop from an RCU point of view (i.e.,
0088  * that we are in the section between ct_idle_enter() and ct_idle_exit())
0089  * then rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
0090  * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
0091  * in such a section, considering these as in extended quiescent state,
0092  * so such a CPU is effectively never in an RCU read-side critical section
0093  * regardless of what RCU primitives it invokes.  This state of affairs is
0094  * required --- we need to keep an RCU-free window in idle where the CPU may
0095  * possibly enter into low power mode. This way we can report an extended
0096  * quiescent state to other CPUs that have started a grace period. Otherwise
0097  * we would delay any grace period as long as we run in the idle task.
0098  *
0099  * Similarly, we avoid claiming an RCU read lock held if the current
0100  * CPU is offline.
0101  */
0102 static bool rcu_read_lock_held_common(bool *ret)
0103 {
0104     if (!debug_lockdep_rcu_enabled()) {
0105         *ret = true;
0106         return true;
0107     }
0108     if (!rcu_is_watching()) {
0109         *ret = false;
0110         return true;
0111     }
0112     if (!rcu_lockdep_current_cpu_online()) {
0113         *ret = false;
0114         return true;
0115     }
0116     return false;
0117 }
0118 
0119 int rcu_read_lock_sched_held(void)
0120 {
0121     bool ret;
0122 
0123     if (rcu_read_lock_held_common(&ret))
0124         return ret;
0125     return lock_is_held(&rcu_sched_lock_map) || !preemptible();
0126 }
0127 EXPORT_SYMBOL(rcu_read_lock_sched_held);
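
/*
 * Illustrative sketch (editor's example, not part of this file): a typical
 * consumer of rcu_read_lock_sched_held() is a lockdep-based debug check in
 * code that must run with preemption disabled or under rcu_read_lock_sched().
 * "my_sched_table" and "my_sched_lookup()" are hypothetical names.
 */
static void __rcu *my_sched_table[16];

static void *my_sched_lookup(int i)
{
    RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
             "my_sched_lookup() needs rcu_read_lock_sched() protection");
    return rcu_dereference_sched(my_sched_table[i]);
}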
0128 #endif
0129 
0130 #ifndef CONFIG_TINY_RCU
0131 
0132 /*
0133  * Should expedited grace-period primitives always fall back to their
0134  * non-expedited counterparts?  Intended for use within RCU.  Note
0135  * that if the user specifies both rcu_expedited and rcu_normal, then
0136  * rcu_normal wins.  (Except during the boot-time window from
0137  * when the first task is spawned until the rcu_set_runtime_mode()
0138  * core_initcall() is invoked, at which point everything is expedited.)
0139  */
0140 bool rcu_gp_is_normal(void)
0141 {
0142     return READ_ONCE(rcu_normal) &&
0143            rcu_scheduler_active != RCU_SCHEDULER_INIT;
0144 }
0145 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
0146 
0147 static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
0148 
0149 /*
0150  * Should normal grace-period primitives be expedited?  Intended for
0151  * use within RCU.  Note that this function takes the rcu_expedited
0152  * sysfs/boot variable and rcu_scheduler_active into account as well
0153  * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
0154  * until rcu_gp_is_expedited() returns false is a -really- bad idea.
0155  */
0156 bool rcu_gp_is_expedited(void)
0157 {
0158     return rcu_expedited || atomic_read(&rcu_expedited_nesting);
0159 }
0160 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
0161 
0162 /**
0163  * rcu_expedite_gp - Expedite future RCU grace periods
0164  *
0165  * After a call to this function, future calls to synchronize_rcu() and
0166  * friends act as if the corresponding synchronize_rcu_expedited() function
0167  * had instead been called.
0168  */
0169 void rcu_expedite_gp(void)
0170 {
0171     atomic_inc(&rcu_expedited_nesting);
0172 }
0173 EXPORT_SYMBOL_GPL(rcu_expedite_gp);
0174 
0175 /**
0176  * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
0177  *
0178  * Undo a prior call to rcu_expedite_gp().  If all prior calls to
0179  * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
0180  * and if the rcu_expedited sysfs/boot parameter is not set, then all
0181  * subsequent calls to synchronize_rcu() and friends will return to
0182  * their normal non-expedited behavior.
0183  */
0184 void rcu_unexpedite_gp(void)
0185 {
0186     atomic_dec(&rcu_expedited_nesting);
0187 }
0188 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
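
/*
 * Illustrative sketch (editor's example, not part of this file):
 * rcu_expedite_gp() and rcu_unexpedite_gp() nest, so they are used in
 * matched pairs around a phase where grace-period latency matters more
 * than CPU overhead.  "my_fast_reconfig()" is a hypothetical caller.
 */
static void my_fast_reconfig(void)
{
    rcu_expedite_gp();    /* Later synchronize_rcu() calls are expedited. */
    synchronize_rcu();    /* Behaves like synchronize_rcu_expedited(). */
    rcu_unexpedite_gp();  /* Back to normal once the nesting count drops to zero. */
}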
0189 
0190 static bool rcu_boot_ended __read_mostly;
0191 
0192 /*
0193  * Inform RCU of the end of the in-kernel boot sequence.
0194  */
0195 void rcu_end_inkernel_boot(void)
0196 {
0197     rcu_unexpedite_gp();
0198     if (rcu_normal_after_boot)
0199         WRITE_ONCE(rcu_normal, 1);
0200     rcu_boot_ended = true;
0201 }
0202 
0203 /*
0204  * Let rcutorture know when it is OK to turn it up to eleven.
0205  */
0206 bool rcu_inkernel_boot_has_ended(void)
0207 {
0208     return rcu_boot_ended;
0209 }
0210 EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);
0211 
0212 #endif /* #ifndef CONFIG_TINY_RCU */
0213 
0214 /*
0215  * Test each non-SRCU synchronous grace-period wait API.  This is
0216  * useful just after a change in mode for these primitives, and
0217  * during early boot.
0218  */
0219 void rcu_test_sync_prims(void)
0220 {
0221     if (!IS_ENABLED(CONFIG_PROVE_RCU))
0222         return;
0223     synchronize_rcu();
0224     synchronize_rcu_expedited();
0225 }
0226 
0227 #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
0228 
0229 /*
0230  * Switch to run-time mode once RCU has fully initialized.
0231  */
0232 static int __init rcu_set_runtime_mode(void)
0233 {
0234     rcu_test_sync_prims();
0235     rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
0236     kfree_rcu_scheduler_running();
0237     rcu_test_sync_prims();
0238     return 0;
0239 }
0240 core_initcall(rcu_set_runtime_mode);
0241 
0242 #endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
0243 
0244 #ifdef CONFIG_DEBUG_LOCK_ALLOC
0245 static struct lock_class_key rcu_lock_key;
0246 struct lockdep_map rcu_lock_map = {
0247     .name = "rcu_read_lock",
0248     .key = &rcu_lock_key,
0249     .wait_type_outer = LD_WAIT_FREE,
0250     .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT implies PREEMPT_RCU */
0251 };
0252 EXPORT_SYMBOL_GPL(rcu_lock_map);
0253 
0254 static struct lock_class_key rcu_bh_lock_key;
0255 struct lockdep_map rcu_bh_lock_map = {
0256     .name = "rcu_read_lock_bh",
0257     .key = &rcu_bh_lock_key,
0258     .wait_type_outer = LD_WAIT_FREE,
0259     .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
0260 };
0261 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
0262 
0263 static struct lock_class_key rcu_sched_lock_key;
0264 struct lockdep_map rcu_sched_lock_map = {
0265     .name = "rcu_read_lock_sched",
0266     .key = &rcu_sched_lock_key,
0267     .wait_type_outer = LD_WAIT_FREE,
0268     .wait_type_inner = LD_WAIT_SPIN,
0269 };
0270 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
0271 
0272 // Tell lockdep when RCU callbacks are being invoked.
0273 static struct lock_class_key rcu_callback_key;
0274 struct lockdep_map rcu_callback_map =
0275     STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
0276 EXPORT_SYMBOL_GPL(rcu_callback_map);
0277 
0278 noinstr int notrace debug_lockdep_rcu_enabled(void)
0279 {
0280     return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
0281            current->lockdep_recursion == 0;
0282 }
0283 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
0284 
0285 /**
0286  * rcu_read_lock_held() - might we be in RCU read-side critical section?
0287  *
0288  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
0289  * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
0290  * this assumes we are in an RCU read-side critical section unless it can
0291  * prove otherwise.  This is useful for debug checks in functions that
0292  * require that they be called within an RCU read-side critical section.
0293  *
0294  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
0295  * and while lockdep is disabled.
0296  *
0297  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
0298  * occur in the same context, for example, it is illegal to invoke
0299  * rcu_read_unlock() in process context if the matching rcu_read_lock()
0300  * was invoked from within an irq handler.
0301  *
0302  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
0303  * offline from an RCU perspective, so check for those as well.
0304  */
0305 int rcu_read_lock_held(void)
0306 {
0307     bool ret;
0308 
0309     if (rcu_read_lock_held_common(&ret))
0310         return ret;
0311     return lock_is_held(&rcu_lock_map);
0312 }
0313 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
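
/*
 * Illustrative sketch (editor's example, not part of this file):
 * rcu_read_lock_held() is usually consumed indirectly through
 * rcu_dereference_check() or RCU_LOCKDEP_WARN().  Here "my_gp" is a
 * hypothetical RCU-protected pointer whose updaters hold "my_lock",
 * so either form of protection satisfies lockdep.
 */
struct my_data;
static DEFINE_SPINLOCK(my_lock);
static struct my_data __rcu *my_gp;

static struct my_data *my_get(void)
{
    return rcu_dereference_check(my_gp, lockdep_is_held(&my_lock));
}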
0314 
0315 /**
0316  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
0317  *
0318  * Check for bottom half being disabled, which covers both the
0319  * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
0320  * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
0321  * will show the situation.  This is useful for debug checks in functions
0322  * that require that they be called within an RCU read-side critical
0323  * section.
0324  *
0325  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
0326  *
0327  * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
0328  * offline from an RCU perspective, so check for those as well.
0329  */
0330 int rcu_read_lock_bh_held(void)
0331 {
0332     bool ret;
0333 
0334     if (rcu_read_lock_held_common(&ret))
0335         return ret;
0336     return in_softirq() || irqs_disabled();
0337 }
0338 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
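
/*
 * Illustrative sketch (editor's example, not part of this file):
 * rcu_read_lock_bh_held() is the check behind rcu_dereference_bh(), which
 * is typically used by code running in softirq context or under
 * rcu_read_lock_bh().  "my_bh_ptr" is a hypothetical RCU-bh-protected pointer.
 */
static struct my_data __rcu *my_bh_ptr;

static struct my_data *my_bh_get(void)
{
    return rcu_dereference_bh(my_bh_ptr);  /* Lockdep warns if BH is enabled here. */
}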
0339 
0340 int rcu_read_lock_any_held(void)
0341 {
0342     bool ret;
0343 
0344     if (rcu_read_lock_held_common(&ret))
0345         return ret;
0346     if (lock_is_held(&rcu_lock_map) ||
0347         lock_is_held(&rcu_bh_lock_map) ||
0348         lock_is_held(&rcu_sched_lock_map))
0349         return 1;
0350     return !preemptible();
0351 }
0352 EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
0353 
0354 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
0355 
0356 /**
0357  * wakeme_after_rcu() - Callback function to awaken a task after grace period
0358  * @head: Pointer to rcu_head member within rcu_synchronize structure
0359  *
0360  * Awaken the corresponding task now that a grace period has elapsed.
0361  */
0362 void wakeme_after_rcu(struct rcu_head *head)
0363 {
0364     struct rcu_synchronize *rcu;
0365 
0366     rcu = container_of(head, struct rcu_synchronize, head);
0367     complete(&rcu->completion);
0368 }
0369 EXPORT_SYMBOL_GPL(wakeme_after_rcu);
0370 
0371 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
0372            struct rcu_synchronize *rs_array)
0373 {
0374     int i;
0375     int j;
0376 
0377     /* Initialize and register callbacks for each crcu_array element. */
0378     for (i = 0; i < n; i++) {
0379         if (checktiny &&
0380             (crcu_array[i] == call_rcu)) {
0381             might_sleep();
0382             continue;
0383         }
0384         for (j = 0; j < i; j++)
0385             if (crcu_array[j] == crcu_array[i])
0386                 break;
0387         if (j == i) {
0388             init_rcu_head_on_stack(&rs_array[i].head);
0389             init_completion(&rs_array[i].completion);
0390             (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
0391         }
0392     }
0393 
0394     /* Wait for all callbacks to be invoked. */
0395     for (i = 0; i < n; i++) {
0396         if (checktiny &&
0397             (crcu_array[i] == call_rcu))
0398             continue;
0399         for (j = 0; j < i; j++)
0400             if (crcu_array[j] == crcu_array[i])
0401                 break;
0402         if (j == i) {
0403             wait_for_completion(&rs_array[i].completion);
0404             destroy_rcu_head_on_stack(&rs_array[i].head);
0405         }
0406     }
0407 }
0408 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
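
/*
 * Illustrative sketch (editor's example, not part of this file):
 * __wait_rcu_gp() is normally reached via the wait_rcu_gp() and
 * synchronize_rcu_mult() macros.  For a single flavor it boils down to
 * roughly the following open-coded sequence (Tiny-RCU shortcut and
 * duplicate-flavor handling omitted).
 */
static void my_synchronize_one_flavor(void)
{
    struct rcu_synchronize rs;

    init_rcu_head_on_stack(&rs.head);
    init_completion(&rs.completion);
    call_rcu(&rs.head, wakeme_after_rcu);  /* Fires after a grace period elapses. */
    wait_for_completion(&rs.completion);   /* Sleep until the callback runs. */
    destroy_rcu_head_on_stack(&rs.head);
}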
0409 
0410 void finish_rcuwait(struct rcuwait *w)
0411 {
0412     rcu_assign_pointer(w->task, NULL);
0413     __set_current_state(TASK_RUNNING);
0414 }
0415 EXPORT_SYMBOL_GPL(finish_rcuwait);
0416 
0417 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
0418 void init_rcu_head(struct rcu_head *head)
0419 {
0420     debug_object_init(head, &rcuhead_debug_descr);
0421 }
0422 EXPORT_SYMBOL_GPL(init_rcu_head);
0423 
0424 void destroy_rcu_head(struct rcu_head *head)
0425 {
0426     debug_object_free(head, &rcuhead_debug_descr);
0427 }
0428 EXPORT_SYMBOL_GPL(destroy_rcu_head);
0429 
0430 static bool rcuhead_is_static_object(void *addr)
0431 {
0432     return true;
0433 }
0434 
0435 /**
0436  * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
0437  * @head: pointer to rcu_head structure to be initialized
0438  *
0439  * This function informs debugobjects of a new rcu_head structure that
0440  * has been allocated as an auto variable on the stack.  This function
0441  * is not required for rcu_head structures that are statically defined or
0442  * that are dynamically allocated on the heap.  This function has no
0443  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
0444  */
0445 void init_rcu_head_on_stack(struct rcu_head *head)
0446 {
0447     debug_object_init_on_stack(head, &rcuhead_debug_descr);
0448 }
0449 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
0450 
0451 /**
0452  * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
0453  * @head: pointer to rcu_head structure that is about to go out of scope
0454  *
0455  * This function informs debugobjects that an on-stack rcu_head structure
0456  * is about to go out of scope.  As with init_rcu_head_on_stack(), this
0457  * function is not required for rcu_head structures that are statically
0458  * defined or that are dynamically allocated on the heap.  Also as with
0459  * init_rcu_head_on_stack(), this function has no effect for
0460  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
0461  */
0462 void destroy_rcu_head_on_stack(struct rcu_head *head)
0463 {
0464     debug_object_free(head, &rcuhead_debug_descr);
0465 }
0466 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
0467 
0468 const struct debug_obj_descr rcuhead_debug_descr = {
0469     .name = "rcu_head",
0470     .is_static_object = rcuhead_is_static_object,
0471 };
0472 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
0473 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
0474 
0475 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
0476 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
0477                    unsigned long secs,
0478                    unsigned long c_old, unsigned long c)
0479 {
0480     trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
0481 }
0482 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
0483 #else
0484 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
0485     do { } while (0)
0486 #endif
0487 
0488 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
0489 /* Get rcutorture access to sched_setaffinity(). */
0490 long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
0491 {
0492     int ret;
0493 
0494     ret = sched_setaffinity(pid, in_mask);
0495     WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
0496     return ret;
0497 }
0498 EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
0499 #endif
0500 
0501 #ifdef CONFIG_RCU_STALL_COMMON
0502 int rcu_cpu_stall_ftrace_dump __read_mostly;
0503 module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
0504 int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
0505 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
0506 module_param(rcu_cpu_stall_suppress, int, 0644);
0507 int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
0508 module_param(rcu_cpu_stall_timeout, int, 0644);
0509 int rcu_exp_cpu_stall_timeout __read_mostly = CONFIG_RCU_EXP_CPU_STALL_TIMEOUT;
0510 module_param(rcu_exp_cpu_stall_timeout, int, 0644);
0511 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
0512 
0513 // Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
0514 // warnings.  Also used by rcutorture even if stall warnings are excluded.
0515 int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
0516 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
0517 module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);
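
/*
 * Illustrative usage (editor's note, not part of the kernel source): the
 * stall-warning knobs above are set on the kernel command line or, for the
 * 0644 ones, at run time via sysfs, for example:
 *
 *     rcupdate.rcu_cpu_stall_timeout=60
 *     rcupdate.rcu_cpu_stall_suppress_at_boot=1
 *
 *     echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress
 */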
0518 
0519 /**
0520  * get_completed_synchronize_rcu - Return a pre-completed polled state cookie
0521  *
0522  * Returns a value that will always be treated by functions like
0523  * poll_state_synchronize_rcu() as a cookie whose grace period has already
0524  * completed.
0525  */
0526 unsigned long get_completed_synchronize_rcu(void)
0527 {
0528     return RCU_GET_STATE_COMPLETED;
0529 }
0530 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu);
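
/*
 * Illustrative sketch (editor's example, not part of this file): the
 * pre-completed cookie is handy as an initializer for polled grace-period
 * state, so that the very first poll reports "already done".  "my_cookie"
 * and the helpers below are hypothetical.
 */
static unsigned long my_cookie;

static void my_poll_init(void)
{
    my_cookie = get_completed_synchronize_rcu();  /* Counts as already expired. */
}

static void my_start_gp(void)
{
    my_cookie = start_poll_synchronize_rcu();     /* Snapshot and start a grace period. */
}

static bool my_gp_done(void)
{
    return poll_state_synchronize_rcu(my_cookie); /* True once that grace period ends. */
}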
0531 
0532 #ifdef CONFIG_PROVE_RCU
0533 
0534 /*
0535  * Early boot self test parameters.
0536  */
0537 static bool rcu_self_test;
0538 module_param(rcu_self_test, bool, 0444);
0539 
0540 static int rcu_self_test_counter;
0541 
0542 static void test_callback(struct rcu_head *r)
0543 {
0544     rcu_self_test_counter++;
0545     pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
0546 }
0547 
0548 DEFINE_STATIC_SRCU(early_srcu);
0549 static unsigned long early_srcu_cookie;
0550 
0551 struct early_boot_kfree_rcu {
0552     struct rcu_head rh;
0553 };
0554 
0555 static void early_boot_test_call_rcu(void)
0556 {
0557     static struct rcu_head head;
0558     static struct rcu_head shead;
0559     struct early_boot_kfree_rcu *rhp;
0560 
0561     call_rcu(&head, test_callback);
0562     if (IS_ENABLED(CONFIG_SRCU)) {
0563         early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
0564         call_srcu(&early_srcu, &shead, test_callback);
0565     }
0566     rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
0567     if (!WARN_ON_ONCE(!rhp))
0568         kfree_rcu(rhp, rh);
0569 }
0570 
0571 void rcu_early_boot_tests(void)
0572 {
0573     pr_info("Running RCU self tests\n");
0574 
0575     if (rcu_self_test)
0576         early_boot_test_call_rcu();
0577     rcu_test_sync_prims();
0578 }
0579 
0580 static int rcu_verify_early_boot_tests(void)
0581 {
0582     int ret = 0;
0583     int early_boot_test_counter = 0;
0584 
0585     if (rcu_self_test) {
0586         early_boot_test_counter++;
0587         rcu_barrier();
0588         if (IS_ENABLED(CONFIG_SRCU)) {
0589             early_boot_test_counter++;
0590             srcu_barrier(&early_srcu);
0591             WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
0592         }
0593     }
0594     if (rcu_self_test_counter != early_boot_test_counter) {
0595         WARN_ON(1);
0596         ret = -1;
0597     }
0598 
0599     return ret;
0600 }
0601 late_initcall(rcu_verify_early_boot_tests);
0602 #else
0603 void rcu_early_boot_tests(void) {}
0604 #endif /* CONFIG_PROVE_RCU */
0605 
0606 #include "tasks.h"
0607 
0608 #ifndef CONFIG_TINY_RCU
0609 
0610 /*
0611  * Print any significant non-default boot-time settings.
0612  */
0613 void __init rcupdate_announce_bootup_oddness(void)
0614 {
0615     if (rcu_normal)
0616         pr_info("\tNo expedited grace period (rcu_normal).\n");
0617     else if (rcu_normal_after_boot)
0618         pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
0619     else if (rcu_expedited)
0620         pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
0621     if (rcu_cpu_stall_suppress)
0622         pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
0623     if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
0624         pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
0625     rcu_tasks_bootup_oddness();
0626 }
0627 
0628 #endif /* #ifndef CONFIG_TINY_RCU */