Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
0004  *
0005  * Copyright IBM Corporation, 2008
0006  *
0007  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
0008  *      Manfred Spraul <manfred@colorfullife.com>
0009  *      Paul E. McKenney <paulmck@linux.ibm.com>
0010  *
0011  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
0012  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
0013  *
0014  * For detailed explanation of Read-Copy Update mechanism see -
0015  *  Documentation/RCU
0016  */
0017 
0018 #define pr_fmt(fmt) "rcu: " fmt
0019 
0020 #include <linux/types.h>
0021 #include <linux/kernel.h>
0022 #include <linux/init.h>
0023 #include <linux/spinlock.h>
0024 #include <linux/smp.h>
0025 #include <linux/rcupdate_wait.h>
0026 #include <linux/interrupt.h>
0027 #include <linux/sched.h>
0028 #include <linux/sched/debug.h>
0029 #include <linux/nmi.h>
0030 #include <linux/atomic.h>
0031 #include <linux/bitops.h>
0032 #include <linux/export.h>
0033 #include <linux/completion.h>
0034 #include <linux/moduleparam.h>
0035 #include <linux/panic.h>
0036 #include <linux/panic_notifier.h>
0037 #include <linux/percpu.h>
0038 #include <linux/notifier.h>
0039 #include <linux/cpu.h>
0040 #include <linux/mutex.h>
0041 #include <linux/time.h>
0042 #include <linux/kernel_stat.h>
0043 #include <linux/wait.h>
0044 #include <linux/kthread.h>
0045 #include <uapi/linux/sched/types.h>
0046 #include <linux/prefetch.h>
0047 #include <linux/delay.h>
0048 #include <linux/random.h>
0049 #include <linux/trace_events.h>
0050 #include <linux/suspend.h>
0051 #include <linux/ftrace.h>
0052 #include <linux/tick.h>
0053 #include <linux/sysrq.h>
0054 #include <linux/kprobes.h>
0055 #include <linux/gfp.h>
0056 #include <linux/oom.h>
0057 #include <linux/smpboot.h>
0058 #include <linux/jiffies.h>
0059 #include <linux/slab.h>
0060 #include <linux/sched/isolation.h>
0061 #include <linux/sched/clock.h>
0062 #include <linux/vmalloc.h>
0063 #include <linux/mm.h>
0064 #include <linux/kasan.h>
0065 #include <linux/context_tracking.h>
0066 #include "../time/tick-internal.h"
0067 
0068 #include "tree.h"
0069 #include "rcu.h"
0070 
0071 #ifdef MODULE_PARAM_PREFIX
0072 #undef MODULE_PARAM_PREFIX
0073 #endif
0074 #define MODULE_PARAM_PREFIX "rcutree."
0075 
0076 /* Data structures. */
0077 
0078 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
0079 #ifdef CONFIG_RCU_NOCB_CPU
0080     .cblist.flags = SEGCBLIST_RCU_CORE,
0081 #endif
0082 };
0083 static struct rcu_state rcu_state = {
0084     .level = { &rcu_state.node[0] },
0085     .gp_state = RCU_GP_IDLE,
0086     .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
0087     .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
0088     .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
0089     .name = RCU_NAME,
0090     .abbr = RCU_ABBR,
0091     .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
0092     .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
0093     .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
0094 };
0095 
0096 /* Dump rcu_node combining tree at boot to verify correct setup. */
0097 static bool dump_tree;
0098 module_param(dump_tree, bool, 0444);
0099 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
0100 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
0101 #ifndef CONFIG_PREEMPT_RT
0102 module_param(use_softirq, bool, 0444);
0103 #endif
0104 /* Control rcu_node-tree auto-balancing at boot time. */
0105 static bool rcu_fanout_exact;
0106 module_param(rcu_fanout_exact, bool, 0444);
0107 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
0108 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
0109 module_param(rcu_fanout_leaf, int, 0444);
0110 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
0111 /* Number of rcu_nodes at specified level. */
0112 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
0113 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
0114 
0115 /*
0116  * The rcu_scheduler_active variable is initialized to the value
0117  * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
0118  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
0119  * RCU can assume that there is but one task, allowing RCU to (for example)
0120  * optimize synchronize_rcu() to a simple barrier().  When this variable
0121  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
0122  * to detect real grace periods.  This variable is also used to suppress
0123  * boot-time false positives from lockdep-RCU error checking.  Finally, it
0124  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
0125  * is fully initialized, including all of its kthreads having been spawned.
0126  */
0127 int rcu_scheduler_active __read_mostly;
0128 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
0129 
0130 /*
0131  * The rcu_scheduler_fully_active variable transitions from zero to one
0132  * during the early_initcall() processing, which is after the scheduler
0133  * is capable of creating new tasks.  So RCU processing (for example,
0134  * creating tasks for RCU priority boosting) must be delayed until after
0135  * rcu_scheduler_fully_active transitions from zero to one.  We also
0136  * currently delay invocation of any RCU callbacks until after this point.
0137  *
0138  * It might later prove better for people registering RCU callbacks during
0139  * early boot to take responsibility for these callbacks, but one step at
0140  * a time.
0141  */
0142 static int rcu_scheduler_fully_active __read_mostly;
0143 
0144 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
0145                   unsigned long gps, unsigned long flags);
0146 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
0147 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
0148 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
0149 static void invoke_rcu_core(void);
0150 static void rcu_report_exp_rdp(struct rcu_data *rdp);
0151 static void sync_sched_exp_online_cleanup(int cpu);
0152 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
0153 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
0154 
0155 /*
0156  * rcuc/rcub/rcuop kthread realtime priority. The "rcuop"
0157  * real-time priority(enabling/disabling) is controlled by
0158  * the extra CONFIG_RCU_NOCB_CPU_CB_BOOST configuration.
0159  */
0160 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
0161 module_param(kthread_prio, int, 0444);
0162 
0163 /* Delay in jiffies for grace-period initialization delays, debug only. */
0164 
0165 static int gp_preinit_delay;
0166 module_param(gp_preinit_delay, int, 0444);
0167 static int gp_init_delay;
0168 module_param(gp_init_delay, int, 0444);
0169 static int gp_cleanup_delay;
0170 module_param(gp_cleanup_delay, int, 0444);
0171 
0172 // Add delay to rcu_read_unlock() for strict grace periods.
0173 static int rcu_unlock_delay;
0174 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
0175 module_param(rcu_unlock_delay, int, 0444);
0176 #endif
0177 
0178 /*
0179  * This rcu parameter is runtime-read-only. It reflects
0180  * a minimum allowed number of objects which can be cached
0181  * per-CPU. Object size is equal to one page. This value
0182  * can be changed at boot time.
0183  */
0184 static int rcu_min_cached_objs = 5;
0185 module_param(rcu_min_cached_objs, int, 0444);
0186 
0187 // A page shrinker can ask for pages to be freed to make them
0188 // available for other parts of the system. This usually happens
0189 // under low memory conditions, and in that case we should also
0190 // defer page-cache filling for a short time period.
0191 //
0192 // The default value is 5 seconds, which is long enough to reduce
0193 // interference with the shrinker while it asks other systems to
0194 // drain their caches.
0195 static int rcu_delay_page_cache_fill_msec = 5000;
0196 module_param(rcu_delay_page_cache_fill_msec, int, 0444);
0197 
0198 /* Retrieve RCU kthreads priority for rcutorture */
0199 int rcu_get_gp_kthreads_prio(void)
0200 {
0201     return kthread_prio;
0202 }
0203 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
0204 
0205 /*
0206  * Number of grace periods between delays, normalized by the duration of
0207  * the delay.  The longer the delay, the more the grace periods between
0208  * each delay.  The reason for this normalization is that it means that,
0209  * for non-zero delays, the overall slowdown of grace periods is constant
0210  * regardless of the duration of the delay.  This arrangement balances
0211  * the need for long delays to increase some race probabilities with the
0212  * need for fast grace periods to increase other race probabilities.
0213  */
0214 #define PER_RCU_NODE_PERIOD 3   /* Number of grace periods between delays for debugging. */
0215 
0216 /*
0217  * Compute the mask of online CPUs for the specified rcu_node structure.
0218  * This will not be stable unless the rcu_node structure's ->lock is
0219  * held, but the bit corresponding to the current CPU will be stable
0220  * in most contexts.
0221  */
0222 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
0223 {
0224     return READ_ONCE(rnp->qsmaskinitnext);
0225 }
0226 
0227 /*
0228  * Is the CPU corresponding to the specified rcu_data structure online
0229  * from RCU's perspective?  This perspective is given by that structure's
0230  * ->qsmaskinitnext field rather than by the global cpu_online_mask.
0231  */
0232 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
0233 {
0234     return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
0235 }
0236 
0237 /*
0238  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
0239  * permit this function to be invoked without holding the root rcu_node
0240  * structure's ->lock, but of course results can be subject to change.
0241  */
0242 static int rcu_gp_in_progress(void)
0243 {
0244     return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
0245 }
0246 
0247 /*
0248  * Return the number of callbacks queued on the specified CPU.
0249  * Handles both the nocbs and normal cases.
0250  */
0251 static long rcu_get_n_cbs_cpu(int cpu)
0252 {
0253     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
0254 
0255     if (rcu_segcblist_is_enabled(&rdp->cblist))
0256         return rcu_segcblist_n_cbs(&rdp->cblist);
0257     return 0;
0258 }
0259 
0260 void rcu_softirq_qs(void)
0261 {
0262     rcu_qs();
0263     rcu_preempt_deferred_qs(current);
0264     rcu_tasks_qs(current, false);
0265 }
0266 
0267 /*
0268  * Reset the current CPU's ->dynticks counter to indicate that the
0269  * newly onlined CPU is no longer in an extended quiescent state.
0270  * This will either leave the counter unchanged, or increment it
0271  * to the next non-quiescent value.
0272  *
0273  * The non-atomic test/increment sequence works because the upper bits
0274  * of the ->dynticks counter are manipulated only by the corresponding CPU,
0275  * or when the corresponding CPU is offline.
0276  */
0277 static void rcu_dynticks_eqs_online(void)
0278 {
0279     if (ct_dynticks() & RCU_DYNTICKS_IDX)
0280         return;
0281     ct_state_inc(RCU_DYNTICKS_IDX);
0282 }
0283 
0284 /*
0285  * Snapshot the ->dynticks counter with full ordering so as to allow
0286  * stable comparison of this counter with past and future snapshots.
0287  */
0288 static int rcu_dynticks_snap(int cpu)
0289 {
0290     smp_mb();  // Fundamental RCU ordering guarantee.
0291     return ct_dynticks_cpu_acquire(cpu);
0292 }
0293 
0294 /*
0295  * Return true if the snapshot returned from rcu_dynticks_snap()
0296  * indicates that RCU is in an extended quiescent state.
0297  */
0298 static bool rcu_dynticks_in_eqs(int snap)
0299 {
0300     return !(snap & RCU_DYNTICKS_IDX);
0301 }
0302 
0303 /* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
0304 bool rcu_is_idle_cpu(int cpu)
0305 {
0306     return rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
0307 }
0308 
0309 /*
0310  * Return true if the CPU corresponding to the specified rcu_data
0311  * structure has spent some time in an extended quiescent state since
0312  * rcu_dynticks_snap() returned the specified snapshot.
0313  */
0314 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
0315 {
0316     return snap != rcu_dynticks_snap(rdp->cpu);
0317 }
0318 
0319 /*
0320  * Return true if the referenced integer is zero while the specified
0321  * CPU remains within a single extended quiescent state.
0322  */
0323 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
0324 {
0325     int snap;
0326 
0327     // If not quiescent, force back to earlier extended quiescent state.
0328     snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
0329     smp_rmb(); // Order ->dynticks and *vp reads.
0330     if (READ_ONCE(*vp))
0331         return false;  // Non-zero, so report failure;
0332     smp_rmb(); // Order *vp read and ->dynticks re-read.
0333 
0334     // If still in the same extended quiescent state, we are good!
0335     return snap == ct_dynticks_cpu(cpu);
0336 }
0337 
0338 /*
0339  * Let the RCU core know that this CPU has gone through the scheduler,
0340  * which is a quiescent state.  This is called when the need for a
0341  * quiescent state is urgent, so we burn an atomic operation and full
0342  * memory barriers to let the RCU core know about it, regardless of what
0343  * this CPU might (or might not) do in the near future.
0344  *
0345  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
0346  *
0347  * The caller must have disabled interrupts and must not be idle.
0348  */
0349 notrace void rcu_momentary_dyntick_idle(void)
0350 {
0351     int seq;
0352 
0353     raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
0354     seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
0355     /* It is illegal to call this from idle state. */
0356     WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
0357     rcu_preempt_deferred_qs(current);
0358 }
0359 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
0360 
0361 /**
0362  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
0363  *
0364  * If the current CPU is idle and running at a first-level (not nested)
0365  * interrupt, or directly, from idle, return true.
0366  *
0367  * The caller must have at least disabled IRQs.
0368  */
0369 static int rcu_is_cpu_rrupt_from_idle(void)
0370 {
0371     long nesting;
0372 
0373     /*
0374      * Usually called from the tick; but also used from smp_function_call()
0375      * for expedited grace periods. This latter can result in running from
0376      * the idle task, instead of an actual IPI.
0377      */
0378     lockdep_assert_irqs_disabled();
0379 
0380     /* Check for counter underflows */
0381     RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
0382              "RCU dynticks_nesting counter underflow!");
0383     RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
0384              "RCU dynticks_nmi_nesting counter underflow/zero!");
0385 
0386     /* Are we at first interrupt nesting level? */
0387     nesting = ct_dynticks_nmi_nesting();
0388     if (nesting > 1)
0389         return false;
0390 
0391     /*
0392      * If we're not in an interrupt, we must be in the idle task!
0393      */
0394     WARN_ON_ONCE(!nesting && !is_idle_task(current));
0395 
0396     /* Does CPU appear to be idle from an RCU standpoint? */
0397     return ct_dynticks_nesting() == 0;
0398 }
0399 
0400 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
0401                 // Maximum callbacks per rcu_do_batch ...
0402 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
0403 static long blimit = DEFAULT_RCU_BLIMIT;
0404 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
0405 static long qhimark = DEFAULT_RCU_QHIMARK;
0406 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
0407 static long qlowmark = DEFAULT_RCU_QLOMARK;
0408 #define DEFAULT_RCU_QOVLD_MULT 2
0409 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
0410 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
0411 static long qovld_calc = -1;      // No pre-initialization lock acquisitions!
0412 
0413 module_param(blimit, long, 0444);
0414 module_param(qhimark, long, 0444);
0415 module_param(qlowmark, long, 0444);
0416 module_param(qovld, long, 0444);
0417 
0418 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
0419 static ulong jiffies_till_next_fqs = ULONG_MAX;
0420 static bool rcu_kick_kthreads;
0421 static int rcu_divisor = 7;
0422 module_param(rcu_divisor, int, 0644);
0423 
0424 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
0425 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
0426 module_param(rcu_resched_ns, long, 0644);
0427 
0428 /*
0429  * How long the grace period must be before we start recruiting
0430  * quiescent-state help from rcu_note_context_switch().
0431  */
0432 static ulong jiffies_till_sched_qs = ULONG_MAX;
0433 module_param(jiffies_till_sched_qs, ulong, 0444);
0434 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
0435 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
0436 
0437 /*
0438  * Make sure that we give the grace-period kthread time to detect any
0439  * idle CPUs before taking active measures to force quiescent states.
0440  * However, don't go below 100 milliseconds, adjusted upwards for really
0441  * large systems.
0442  */
0443 static void adjust_jiffies_till_sched_qs(void)
0444 {
0445     unsigned long j;
0446 
0447     /* If jiffies_till_sched_qs was specified, respect the request. */
0448     if (jiffies_till_sched_qs != ULONG_MAX) {
0449         WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
0450         return;
0451     }
0452     /* Otherwise, set to third fqs scan, but bound below on large system. */
0453     j = READ_ONCE(jiffies_till_first_fqs) +
0454               2 * READ_ONCE(jiffies_till_next_fqs);
0455     if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
0456         j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
0457     pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
0458     WRITE_ONCE(jiffies_to_sched_qs, j);
0459 }
0460 
0461 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
0462 {
0463     ulong j;
0464     int ret = kstrtoul(val, 0, &j);
0465 
0466     if (!ret) {
0467         WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
0468         adjust_jiffies_till_sched_qs();
0469     }
0470     return ret;
0471 }
0472 
0473 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
0474 {
0475     ulong j;
0476     int ret = kstrtoul(val, 0, &j);
0477 
0478     if (!ret) {
0479         WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
0480         adjust_jiffies_till_sched_qs();
0481     }
0482     return ret;
0483 }
0484 
0485 static const struct kernel_param_ops first_fqs_jiffies_ops = {
0486     .set = param_set_first_fqs_jiffies,
0487     .get = param_get_ulong,
0488 };
0489 
0490 static const struct kernel_param_ops next_fqs_jiffies_ops = {
0491     .set = param_set_next_fqs_jiffies,
0492     .get = param_get_ulong,
0493 };
0494 
0495 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
0496 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
0497 module_param(rcu_kick_kthreads, bool, 0644);
0498 
0499 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
0500 static int rcu_pending(int user);
0501 
0502 /*
0503  * Return the number of RCU GPs completed thus far for debug & stats.
0504  */
0505 unsigned long rcu_get_gp_seq(void)
0506 {
0507     return READ_ONCE(rcu_state.gp_seq);
0508 }
0509 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
0510 
0511 /*
0512  * Return the number of RCU expedited batches completed thus far for
0513  * debug & stats.  Odd numbers mean that a batch is in progress, even
0514  * numbers mean idle.  The value returned will thus be roughly double
0515  * the cumulative batches since boot.
0516  */
0517 unsigned long rcu_exp_batches_completed(void)
0518 {
0519     return rcu_state.expedited_sequence;
0520 }
0521 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
0522 
0523 /*
0524  * Return the root node of the rcu_state structure.
0525  */
0526 static struct rcu_node *rcu_get_root(void)
0527 {
0528     return &rcu_state.node[0];
0529 }
0530 
0531 /*
0532  * Send along grace-period-related data for rcutorture diagnostics.
0533  */
0534 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
0535                 unsigned long *gp_seq)
0536 {
0537     switch (test_type) {
0538     case RCU_FLAVOR:
0539         *flags = READ_ONCE(rcu_state.gp_flags);
0540         *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
0541         break;
0542     default:
0543         break;
0544     }
0545 }
0546 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
0547 
0548 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
0549 /*
0550  * An empty function that will trigger a reschedule on
0551  * IRQ tail once IRQs get re-enabled on userspace/guest resume.
0552  */
0553 static void late_wakeup_func(struct irq_work *work)
0554 {
0555 }
0556 
0557 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
0558     IRQ_WORK_INIT(late_wakeup_func);
0559 
0560 /*
0561  * If either:
0562  *
0563  * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
0564  * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
0565  *
0566  * In these cases the late RCU wake ups aren't supported in the resched loops and our
0567  * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
0568  * get re-enabled again.
0569  */
0570 noinstr void rcu_irq_work_resched(void)
0571 {
0572     struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
0573 
0574     if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
0575         return;
0576 
0577     if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
0578         return;
0579 
0580     instrumentation_begin();
0581     if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
0582         irq_work_queue(this_cpu_ptr(&late_wakeup_work));
0583     }
0584     instrumentation_end();
0585 }
0586 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
0587 
0588 #ifdef CONFIG_PROVE_RCU
0589 /**
0590  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
0591  */
0592 void rcu_irq_exit_check_preempt(void)
0593 {
0594     lockdep_assert_irqs_disabled();
0595 
0596     RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
0597              "RCU dynticks_nesting counter underflow/zero!");
0598     RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
0599              DYNTICK_IRQ_NONIDLE,
0600              "Bad RCU  dynticks_nmi_nesting counter\n");
0601     RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
0602              "RCU in extended quiescent state!");
0603 }
0604 #endif /* #ifdef CONFIG_PROVE_RCU */
0605 
0606 #ifdef CONFIG_NO_HZ_FULL
0607 /**
0608  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
0609  *
0610  * The scheduler tick is not normally enabled when CPUs enter the kernel
0611  * from nohz_full userspace execution.  After all, nohz_full userspace
0612  * execution is an RCU quiescent state and the time executing in the kernel
0613  * is quite short.  Except of course when it isn't.  And it is not hard to
0614  * cause a large system to spend tens of seconds or even minutes looping
0615  * in the kernel, which can cause a number of problems, include RCU CPU
0616  * stall warnings.
0617  *
0618  * Therefore, if a nohz_full CPU fails to report a quiescent state
0619  * in a timely manner, the RCU grace-period kthread sets that CPU's
0620  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
0621  * exception will invoke this function, which will turn on the scheduler
0622  * tick, which will enable RCU to detect that CPU's quiescent states,
0623  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
0624  * The tick will be disabled once a quiescent state is reported for
0625  * this CPU.
0626  *
0627  * Of course, in carefully tuned systems, there might never be an
0628  * interrupt or exception.  In that case, the RCU grace-period kthread
0629  * will eventually cause one to happen.  However, in less carefully
0630  * controlled environments, this function allows RCU to get what it
0631  * needs without creating otherwise useless interruptions.
0632  */
0633 void __rcu_irq_enter_check_tick(void)
0634 {
0635     struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
0636 
0637     // If we're here from NMI there's nothing to do.
0638     if (in_nmi())
0639         return;
0640 
0641     RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
0642              "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
0643 
0644     if (!tick_nohz_full_cpu(rdp->cpu) ||
0645         !READ_ONCE(rdp->rcu_urgent_qs) ||
0646         READ_ONCE(rdp->rcu_forced_tick)) {
0647         // RCU doesn't need nohz_full help from this CPU, or it is
0648         // already getting that help.
0649         return;
0650     }
0651 
0652     // We get here only when not in an extended quiescent state and
0653     // from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
0654     // already watching and (2) The fact that we are in an interrupt
0655     // handler and that the rcu_node lock is an irq-disabled lock
0656     // prevents self-deadlock.  So we can safely recheck under the lock.
0657     // Note that the nohz_full state currently cannot change.
0658     raw_spin_lock_rcu_node(rdp->mynode);
0659     if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
0660         // A nohz_full CPU is in the kernel and RCU needs a
0661         // quiescent state.  Turn on the tick!
0662         WRITE_ONCE(rdp->rcu_forced_tick, true);
0663         tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
0664     }
0665     raw_spin_unlock_rcu_node(rdp->mynode);
0666 }
0667 #endif /* CONFIG_NO_HZ_FULL */
0668 
0669 /*
0670  * Check to see if any future non-offloaded RCU-related work will need
0671  * to be done by the current CPU, even if none need be done immediately,
0672  * returning 1 if so.  This function is part of the RCU implementation;
0673  * it is -not- an exported member of the RCU API.  This is used by
0674  * the idle-entry code to figure out whether it is safe to disable the
0675  * scheduler-clock interrupt.
0676  *
0677  * Just check whether or not this CPU has non-offloaded RCU callbacks
0678  * queued.
0679  */
0680 int rcu_needs_cpu(void)
0681 {
0682     return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
0683         !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
0684 }
0685 
0686 /*
0687  * If any sort of urgency was applied to the current CPU (for example,
0688  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
0689  * to get to a quiescent state, disable it.
0690  */
0691 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
0692 {
0693     raw_lockdep_assert_held_rcu_node(rdp->mynode);
0694     WRITE_ONCE(rdp->rcu_urgent_qs, false);
0695     WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
0696     if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
0697         tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
0698         WRITE_ONCE(rdp->rcu_forced_tick, false);
0699     }
0700 }
0701 
0702 /**
0703  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
0704  *
0705  * Return true if RCU is watching the running CPU, which means that this
0706  * CPU can safely enter RCU read-side critical sections.  In other words,
0707  * if the current CPU is not in its idle loop or is in an interrupt or
0708  * NMI handler, return true.
0709  *
0710  * Make notrace because it can be called by the internal functions of
0711  * ftrace, and making this notrace removes unnecessary recursion calls.
0712  */
0713 notrace bool rcu_is_watching(void)
0714 {
0715     bool ret;
0716 
0717     preempt_disable_notrace();
0718     ret = !rcu_dynticks_curr_cpu_in_eqs();
0719     preempt_enable_notrace();
0720     return ret;
0721 }
0722 EXPORT_SYMBOL_GPL(rcu_is_watching);
0723 
0724 /*
0725  * If a holdout task is actually running, request an urgent quiescent
0726  * state from its CPU.  This is unsynchronized, so migrations can cause
0727  * the request to go to the wrong CPU.  Which is OK, all that will happen
0728  * is that the CPU's next context switch will be a bit slower and next
0729  * time around this task will generate another request.
0730  */
0731 void rcu_request_urgent_qs_task(struct task_struct *t)
0732 {
0733     int cpu;
0734 
0735     barrier();
0736     cpu = task_cpu(t);
0737     if (!task_curr(t))
0738         return; /* This task is not running on that CPU. */
0739     smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
0740 }
0741 
0742 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
0743 
0744 /*
0745  * Is the current CPU online as far as RCU is concerned?
0746  *
0747  * Disable preemption to avoid false positives that could otherwise
0748  * happen due to the current CPU number being sampled, this task being
0749  * preempted, its old CPU being taken offline, resuming on some other CPU,
0750  * then determining that its old CPU is now offline.
0751  *
0752  * Disable checking if in an NMI handler because we cannot safely
0753  * report errors from NMI handlers anyway.  In addition, it is OK to use
0754  * RCU on an offline processor during initial boot, hence the check for
0755  * rcu_scheduler_fully_active.
0756  */
0757 bool rcu_lockdep_current_cpu_online(void)
0758 {
0759     struct rcu_data *rdp;
0760     bool ret = false;
0761 
0762     if (in_nmi() || !rcu_scheduler_fully_active)
0763         return true;
0764     preempt_disable_notrace();
0765     rdp = this_cpu_ptr(&rcu_data);
0766     /*
0767      * Strictly, we care here about the case where the current CPU is
0768      * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
0769      * not being up to date. So arch_spin_is_locked() might have a
0770      * false positive if it's held by some *other* CPU, but that's
0771      * OK because that just means a false *negative* on the warning.
0772      */
0773     if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
0774         ret = true;
0775     preempt_enable_notrace();
0776     return ret;
0777 }
0778 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
0779 
0780 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
0781 
0782 /*
0783  * When trying to report a quiescent state on behalf of some other CPU,
0784  * it is our responsibility to check for and handle potential overflow
0785  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
0786  * After all, the CPU might be in deep idle state, and thus executing no
0787  * code whatsoever.
0788  */
0789 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
0790 {
0791     raw_lockdep_assert_held_rcu_node(rnp);
0792     if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
0793              rnp->gp_seq))
0794         WRITE_ONCE(rdp->gpwrap, true);
0795     if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
0796         rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
0797 }
0798 
0799 /*
0800  * Snapshot the specified CPU's dynticks counter so that we can later
0801  * credit them with an implicit quiescent state.  Return 1 if this CPU
0802  * is in dynticks idle mode, which is an extended quiescent state.
0803  */
0804 static int dyntick_save_progress_counter(struct rcu_data *rdp)
0805 {
0806     rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
0807     if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
0808         trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
0809         rcu_gpnum_ovf(rdp->mynode, rdp);
0810         return 1;
0811     }
0812     return 0;
0813 }
0814 
0815 /*
0816  * Return true if the specified CPU has passed through a quiescent
0817  * state by virtue of being in or having passed through an dynticks
0818  * idle state since the last call to dyntick_save_progress_counter()
0819  * for this same CPU, or by virtue of having been offline.
0820  */
0821 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
0822 {
0823     unsigned long jtsq;
0824     struct rcu_node *rnp = rdp->mynode;
0825 
0826     /*
0827      * If the CPU passed through or entered a dynticks idle phase with
0828      * no active irq/NMI handlers, then we can safely pretend that the CPU
0829      * already acknowledged the request to pass through a quiescent
0830      * state.  Either way, that CPU cannot possibly be in an RCU
0831      * read-side critical section that started before the beginning
0832      * of the current RCU grace period.
0833      */
0834     if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
0835         trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
0836         rcu_gpnum_ovf(rnp, rdp);
0837         return 1;
0838     }
0839 
0840     /*
0841      * Complain if a CPU that is considered to be offline from RCU's
0842      * perspective has not yet reported a quiescent state.  After all,
0843      * the offline CPU should have reported a quiescent state during
0844      * the CPU-offline process, or, failing that, by rcu_gp_init()
0845      * if it ran concurrently with either the CPU going offline or the
0846      * last task on a leaf rcu_node structure exiting its RCU read-side
0847      * critical section while all CPUs corresponding to that structure
0848      * are offline.  This added warning detects bugs in any of these
0849      * code paths.
0850      *
0851      * The rcu_node structure's ->lock is held here, which excludes
0852      * the relevant portions the CPU-hotplug code, the grace-period
0853      * initialization code, and the rcu_read_unlock() code paths.
0854      *
0855      * For more detail, please refer to the "Hotplug CPU" section
0856      * of RCU's Requirements documentation.
0857      */
0858     if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
0859         struct rcu_node *rnp1;
0860 
0861         pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
0862             __func__, rnp->grplo, rnp->grphi, rnp->level,
0863             (long)rnp->gp_seq, (long)rnp->completedqs);
0864         for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
0865             pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
0866                 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
0867         pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
0868             __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
0869             (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
0870             (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
0871         return 1; /* Break things loose after complaining. */
0872     }
0873 
0874     /*
0875      * A CPU running for an extended time within the kernel can
0876      * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
0877      * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
0878      * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
0879      * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
0880      * variable are safe because the assignments are repeated if this
0881      * CPU failed to pass through a quiescent state.  This code
0882      * also checks .jiffies_resched in case jiffies_to_sched_qs
0883      * is set way high.
0884      */
0885     jtsq = READ_ONCE(jiffies_to_sched_qs);
0886     if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
0887         (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
0888          time_after(jiffies, rcu_state.jiffies_resched) ||
0889          rcu_state.cbovld)) {
0890         WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
0891         /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
0892         smp_store_release(&rdp->rcu_urgent_qs, true);
0893     } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
0894         WRITE_ONCE(rdp->rcu_urgent_qs, true);
0895     }
0896 
0897     /*
0898      * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
0899      * The above code handles this, but only for straight cond_resched().
0900      * And some in-kernel loops check need_resched() before calling
0901      * cond_resched(), which defeats the above code for CPUs that are
0902      * running in-kernel with scheduling-clock interrupts disabled.
0903      * So hit them over the head with the resched_cpu() hammer!
0904      */
0905     if (tick_nohz_full_cpu(rdp->cpu) &&
0906         (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
0907          rcu_state.cbovld)) {
0908         WRITE_ONCE(rdp->rcu_urgent_qs, true);
0909         resched_cpu(rdp->cpu);
0910         WRITE_ONCE(rdp->last_fqs_resched, jiffies);
0911     }
0912 
0913     /*
0914      * If more than halfway to RCU CPU stall-warning time, invoke
0915      * resched_cpu() more frequently to try to loosen things up a bit.
0916      * Also check to see if the CPU is getting hammered with interrupts,
0917      * but only once per grace period, just to keep the IPIs down to
0918      * a dull roar.
0919      */
0920     if (time_after(jiffies, rcu_state.jiffies_resched)) {
0921         if (time_after(jiffies,
0922                    READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
0923             resched_cpu(rdp->cpu);
0924             WRITE_ONCE(rdp->last_fqs_resched, jiffies);
0925         }
0926         if (IS_ENABLED(CONFIG_IRQ_WORK) &&
0927             !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
0928             (rnp->ffmask & rdp->grpmask)) {
0929             rdp->rcu_iw_pending = true;
0930             rdp->rcu_iw_gp_seq = rnp->gp_seq;
0931             irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
0932         }
0933     }
0934 
0935     return 0;
0936 }
0937 
0938 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
0939 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
0940                   unsigned long gp_seq_req, const char *s)
0941 {
0942     trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
0943                       gp_seq_req, rnp->level,
0944                       rnp->grplo, rnp->grphi, s);
0945 }
0946 
0947 /*
0948  * rcu_start_this_gp - Request the start of a particular grace period
0949  * @rnp_start: The leaf node of the CPU from which to start.
0950  * @rdp: The rcu_data corresponding to the CPU from which to start.
0951  * @gp_seq_req: The gp_seq of the grace period to start.
0952  *
0953  * Start the specified grace period, as needed to handle newly arrived
0954  * callbacks.  The required future grace periods are recorded in each
0955  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
0956  * is reason to awaken the grace-period kthread.
0957  *
0958  * The caller must hold the specified rcu_node structure's ->lock, which
0959  * is why the caller is responsible for waking the grace-period kthread.
0960  *
0961  * Returns true if the GP thread needs to be awakened else false.
0962  */
0963 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
0964                   unsigned long gp_seq_req)
0965 {
0966     bool ret = false;
0967     struct rcu_node *rnp;
0968 
0969     /*
0970      * Use funnel locking to either acquire the root rcu_node
0971      * structure's lock or bail out if the need for this grace period
0972      * has already been recorded -- or if that grace period has in
0973      * fact already started.  If there is already a grace period in
0974      * progress in a non-leaf node, no recording is needed because the
0975      * end of the grace period will scan the leaf rcu_node structures.
0976      * Note that rnp_start->lock must not be released.
0977      */
0978     raw_lockdep_assert_held_rcu_node(rnp_start);
0979     trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
0980     for (rnp = rnp_start; 1; rnp = rnp->parent) {
0981         if (rnp != rnp_start)
0982             raw_spin_lock_rcu_node(rnp);
0983         if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
0984             rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
0985             (rnp != rnp_start &&
0986              rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
0987             trace_rcu_this_gp(rnp, rdp, gp_seq_req,
0988                       TPS("Prestarted"));
0989             goto unlock_out;
0990         }
0991         WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
0992         if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
0993             /*
0994              * We just marked the leaf or internal node, and a
0995              * grace period is in progress, which means that
0996              * rcu_gp_cleanup() will see the marking.  Bail to
0997              * reduce contention.
0998              */
0999             trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1000                       TPS("Startedleaf"));
1001             goto unlock_out;
1002         }
1003         if (rnp != rnp_start && rnp->parent != NULL)
1004             raw_spin_unlock_rcu_node(rnp);
1005         if (!rnp->parent)
1006             break;  /* At root, and perhaps also leaf. */
1007     }
1008 
1009     /* If GP already in progress, just leave, otherwise start one. */
1010     if (rcu_gp_in_progress()) {
1011         trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1012         goto unlock_out;
1013     }
1014     trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1015     WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1016     WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1017     if (!READ_ONCE(rcu_state.gp_kthread)) {
1018         trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1019         goto unlock_out;
1020     }
1021     trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1022     ret = true;  /* Caller must wake GP kthread. */
1023 unlock_out:
1024     /* Push furthest requested GP to leaf node and rcu_data structure. */
1025     if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1026         WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1027         WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1028     }
1029     if (rnp != rnp_start)
1030         raw_spin_unlock_rcu_node(rnp);
1031     return ret;
1032 }
1033 
1034 /*
1035  * Clean up any old requests for the just-ended grace period.  Also return
1036  * whether any additional grace periods have been requested.
1037  */
1038 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1039 {
1040     bool needmore;
1041     struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1042 
1043     needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1044     if (!needmore)
1045         rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1046     trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1047               needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1048     return needmore;
1049 }
1050 
1051 /*
1052  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1053  * interrupt or softirq handler, in which case we just might immediately
1054  * sleep upon return, resulting in a grace-period hang), and don't bother
1055  * awakening when there is nothing for the grace-period kthread to do
1056  * (as in several CPUs raced to awaken, we lost), and finally don't try
1057  * to awaken a kthread that has not yet been created.  If all those checks
1058  * are passed, track some debug information and awaken.
1059  *
1060  * So why do the self-wakeup when in an interrupt or softirq handler
1061  * in the grace-period kthread's context?  Because the kthread might have
1062  * been interrupted just as it was going to sleep, and just after the final
1063  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1064  * is required, and is therefore supplied.
1065  */
1066 static void rcu_gp_kthread_wake(void)
1067 {
1068     struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1069 
1070     if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1071         !READ_ONCE(rcu_state.gp_flags) || !t)
1072         return;
1073     WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1074     WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1075     swake_up_one(&rcu_state.gp_wq);
1076 }
1077 
1078 /*
1079  * If there is room, assign a ->gp_seq number to any callbacks on this
1080  * CPU that have not already been assigned.  Also accelerate any callbacks
1081  * that were previously assigned a ->gp_seq number that has since proven
1082  * to be too conservative, which can happen if callbacks get assigned a
1083  * ->gp_seq number while RCU is idle, but with reference to a non-root
1084  * rcu_node structure.  This function is idempotent, so it does not hurt
1085  * to call it repeatedly.  Returns an flag saying that we should awaken
1086  * the RCU grace-period kthread.
1087  *
1088  * The caller must hold rnp->lock with interrupts disabled.
1089  */
1090 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1091 {
1092     unsigned long gp_seq_req;
1093     bool ret = false;
1094 
1095     rcu_lockdep_assert_cblist_protected(rdp);
1096     raw_lockdep_assert_held_rcu_node(rnp);
1097 
1098     /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1099     if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1100         return false;
1101 
1102     trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1103 
1104     /*
1105      * Callbacks are often registered with incomplete grace-period
1106      * information.  Something about the fact that getting exact
1107      * information requires acquiring a global lock...  RCU therefore
1108      * makes a conservative estimate of the grace period number at which
1109      * a given callback will become ready to invoke.    The following
1110      * code checks this estimate and improves it when possible, thus
1111      * accelerating callback invocation to an earlier grace-period
1112      * number.
1113      */
1114     gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1115     if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1116         ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1117 
1118     /* Trace depending on how much we were able to accelerate. */
1119     if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1120         trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1121     else
1122         trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1123 
1124     trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1125 
1126     return ret;
1127 }
1128 
1129 /*
1130  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1131  * rcu_node structure's ->lock be held.  It consults the cached value
1132  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1133  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1134  * while holding the leaf rcu_node structure's ->lock.
1135  */
1136 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1137                     struct rcu_data *rdp)
1138 {
1139     unsigned long c;
1140     bool needwake;
1141 
1142     rcu_lockdep_assert_cblist_protected(rdp);
1143     c = rcu_seq_snap(&rcu_state.gp_seq);
1144     if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1145         /* Old request still live, so mark recent callbacks. */
1146         (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1147         return;
1148     }
1149     raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1150     needwake = rcu_accelerate_cbs(rnp, rdp);
1151     raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1152     if (needwake)
1153         rcu_gp_kthread_wake();
1154 }
1155 
1156 /*
1157  * Move any callbacks whose grace period has completed to the
1158  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1159  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1160  * sublist.  This function is idempotent, so it does not hurt to
1161  * invoke it repeatedly.  As long as it is not invoked -too- often...
1162  * Returns true if the RCU grace-period kthread needs to be awakened.
1163  *
1164  * The caller must hold rnp->lock with interrupts disabled.
1165  */
1166 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1167 {
1168     rcu_lockdep_assert_cblist_protected(rdp);
1169     raw_lockdep_assert_held_rcu_node(rnp);
1170 
1171     /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1172     if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1173         return false;
1174 
1175     /*
1176      * Find all callbacks whose ->gp_seq numbers indicate that they
1177      * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1178      */
1179     rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1180 
1181     /* Classify any remaining callbacks. */
1182     return rcu_accelerate_cbs(rnp, rdp);
1183 }
1184 
1185 /*
1186  * Move and classify callbacks, but only if doing so won't require
1187  * that the RCU grace-period kthread be awakened.
1188  */
1189 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1190                           struct rcu_data *rdp)
1191 {
1192     rcu_lockdep_assert_cblist_protected(rdp);
1193     if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1194         return;
1195     // The grace period cannot end while we hold the rcu_node lock.
1196     if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1197         WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1198     raw_spin_unlock_rcu_node(rnp);
1199 }
1200 
1201 /*
1202  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1203  * quiescent state.  This is intended to be invoked when the CPU notices
1204  * a new grace period.
1205  */
1206 static void rcu_strict_gp_check_qs(void)
1207 {
1208     if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1209         rcu_read_lock();
1210         rcu_read_unlock();
1211     }
1212 }
1213 
1214 /*
1215  * Update CPU-local rcu_data state to record the beginnings and ends of
1216  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1217  * structure corresponding to the current CPU, and must have irqs disabled.
1218  * Returns true if the grace-period kthread needs to be awakened.
1219  */
1220 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1221 {
1222     bool ret = false;
1223     bool need_qs;
1224     const bool offloaded = rcu_rdp_is_offloaded(rdp);
1225 
1226     raw_lockdep_assert_held_rcu_node(rnp);
1227 
1228     if (rdp->gp_seq == rnp->gp_seq)
1229         return false; /* Nothing to do. */
1230 
1231     /* Handle the ends of any preceding grace periods first. */
1232     if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1233         unlikely(READ_ONCE(rdp->gpwrap))) {
1234         if (!offloaded)
1235             ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1236         rdp->core_needs_qs = false;
1237         trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1238     } else {
1239         if (!offloaded)
1240             ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1241         if (rdp->core_needs_qs)
1242             rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1243     }
1244 
1245     /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1246     if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1247         unlikely(READ_ONCE(rdp->gpwrap))) {
1248         /*
1249          * If the current grace period is waiting for this CPU,
1250          * set up to detect a quiescent state, otherwise don't
1251          * go looking for one.
1252          */
1253         trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1254         need_qs = !!(rnp->qsmask & rdp->grpmask);
1255         rdp->cpu_no_qs.b.norm = need_qs;
1256         rdp->core_needs_qs = need_qs;
1257         zero_cpu_stall_ticks(rdp);
1258     }
1259     rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1260     if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1261         WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1262     if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1263         WRITE_ONCE(rdp->last_sched_clock, jiffies);
1264     WRITE_ONCE(rdp->gpwrap, false);
1265     rcu_gpnum_ovf(rnp, rdp);
1266     return ret;
1267 }
1268 
1269 static void note_gp_changes(struct rcu_data *rdp)
1270 {
1271     unsigned long flags;
1272     bool needwake;
1273     struct rcu_node *rnp;
1274 
1275     local_irq_save(flags);
1276     rnp = rdp->mynode;
1277     if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1278          !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1279         !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1280         local_irq_restore(flags);
1281         return;
1282     }
1283     needwake = __note_gp_changes(rnp, rdp);
1284     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1285     rcu_strict_gp_check_qs();
1286     if (needwake)
1287         rcu_gp_kthread_wake();
1288 }
1289 
1290 static atomic_t *rcu_gp_slow_suppress;
1291 
1292 /* Register a counter to suppress debugging grace-period delays. */
1293 void rcu_gp_slow_register(atomic_t *rgssp)
1294 {
1295     WARN_ON_ONCE(rcu_gp_slow_suppress);
1296 
1297     WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1298 }
1299 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1300 
1301 /* Unregister a counter, with NULL for not caring which. */
1302 void rcu_gp_slow_unregister(atomic_t *rgssp)
1303 {
1304     WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
1305 
1306     WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1307 }
1308 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1309 
1310 static bool rcu_gp_slow_is_suppressed(void)
1311 {
1312     atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1313 
1314     return rgssp && atomic_read(rgssp);
1315 }
1316 
1317 static void rcu_gp_slow(int delay)
1318 {
1319     if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1320         !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1321         schedule_timeout_idle(delay);
1322 }
1323 
1324 static unsigned long sleep_duration;
1325 
1326 /* Allow rcutorture to stall the grace-period kthread. */
1327 void rcu_gp_set_torture_wait(int duration)
1328 {
1329     if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1330         WRITE_ONCE(sleep_duration, duration);
1331 }
1332 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1333 
1334 /* Actually implement the aforementioned wait. */
1335 static void rcu_gp_torture_wait(void)
1336 {
1337     unsigned long duration;
1338 
1339     if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1340         return;
1341     duration = xchg(&sleep_duration, 0UL);
1342     if (duration > 0) {
1343         pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1344         schedule_timeout_idle(duration);
1345         pr_alert("%s: Wait complete\n", __func__);
1346     }
1347 }
1348 
1349 /*
1350  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1351  * processing.
1352  */
1353 static void rcu_strict_gp_boundary(void *unused)
1354 {
1355     invoke_rcu_core();
1356 }
1357 
1358 // Has rcu_init() been invoked?  This is used (for example) to determine
1359 // whether spinlocks may be acquired safely.
1360 static bool rcu_init_invoked(void)
1361 {
1362     return !!rcu_state.n_online_cpus;
1363 }
1364 
1365 // Make the polled API aware of the beginning of a grace period.
1366 static void rcu_poll_gp_seq_start(unsigned long *snap)
1367 {
1368     struct rcu_node *rnp = rcu_get_root();
1369 
1370     if (rcu_init_invoked())
1371         raw_lockdep_assert_held_rcu_node(rnp);
1372 
1373     // If RCU was idle, note beginning of GP.
1374     if (!rcu_seq_state(rcu_state.gp_seq_polled))
1375         rcu_seq_start(&rcu_state.gp_seq_polled);
1376 
1377     // Either way, record current state.
1378     *snap = rcu_state.gp_seq_polled;
1379 }
1380 
1381 // Make the polled API aware of the end of a grace period.
1382 static void rcu_poll_gp_seq_end(unsigned long *snap)
1383 {
1384     struct rcu_node *rnp = rcu_get_root();
1385 
1386     if (rcu_init_invoked())
1387         raw_lockdep_assert_held_rcu_node(rnp);
1388 
1389     // If the previously noted GP is still in effect, record the
1390     // end of that GP.  Either way, zero counter to avoid counter-wrap
1391     // problems.
1392     if (*snap && *snap == rcu_state.gp_seq_polled) {
1393         rcu_seq_end(&rcu_state.gp_seq_polled);
1394         rcu_state.gp_seq_polled_snap = 0;
1395         rcu_state.gp_seq_polled_exp_snap = 0;
1396     } else {
1397         *snap = 0;
1398     }
1399 }
1400 
1401 // Make the polled API aware of the beginning of a grace period, but
1402 // where caller does not hold the root rcu_node structure's lock.
1403 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1404 {
1405     struct rcu_node *rnp = rcu_get_root();
1406 
1407     if (rcu_init_invoked()) {
1408         lockdep_assert_irqs_enabled();
1409         raw_spin_lock_irq_rcu_node(rnp);
1410     }
1411     rcu_poll_gp_seq_start(snap);
1412     if (rcu_init_invoked())
1413         raw_spin_unlock_irq_rcu_node(rnp);
1414 }
1415 
1416 // Make the polled API aware of the end of a grace period, but where
1417 // caller does not hold the root rcu_node structure's lock.
1418 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1419 {
1420     struct rcu_node *rnp = rcu_get_root();
1421 
1422     if (rcu_init_invoked()) {
1423         lockdep_assert_irqs_enabled();
1424         raw_spin_lock_irq_rcu_node(rnp);
1425     }
1426     rcu_poll_gp_seq_end(snap);
1427     if (rcu_init_invoked())
1428         raw_spin_unlock_irq_rcu_node(rnp);
1429 }
1430 
1431 /*
1432  * Initialize a new grace period.  Return false if no grace period required.
1433  */
1434 static noinline_for_stack bool rcu_gp_init(void)
1435 {
1436     unsigned long flags;
1437     unsigned long oldmask;
1438     unsigned long mask;
1439     struct rcu_data *rdp;
1440     struct rcu_node *rnp = rcu_get_root();
1441 
1442     WRITE_ONCE(rcu_state.gp_activity, jiffies);
1443     raw_spin_lock_irq_rcu_node(rnp);
1444     if (!READ_ONCE(rcu_state.gp_flags)) {
1445         /* Spurious wakeup, tell caller to go back to sleep.  */
1446         raw_spin_unlock_irq_rcu_node(rnp);
1447         return false;
1448     }
1449     WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1450 
1451     if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1452         /*
1453          * Grace period already in progress, don't start another.
1454          * Not supposed to be able to happen.
1455          */
1456         raw_spin_unlock_irq_rcu_node(rnp);
1457         return false;
1458     }
1459 
1460     /* Advance to a new grace period and initialize state. */
1461     record_gp_stall_check_time();
1462     /* Record GP times before starting GP, hence rcu_seq_start(). */
1463     rcu_seq_start(&rcu_state.gp_seq);
1464     ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1465     trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1466     rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1467     raw_spin_unlock_irq_rcu_node(rnp);
1468 
1469     /*
1470      * Apply per-leaf buffered online and offline operations to
1471      * the rcu_node tree. Note that this new grace period need not
1472      * wait for subsequent online CPUs, and that RCU hooks in the CPU
1473      * offlining path, when combined with checks in this function,
1474      * will handle CPUs that are currently going offline or that will
1475      * go offline later.  Please also refer to "Hotplug CPU" section
1476      * of RCU's Requirements documentation.
1477      */
1478     WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1479     /* Exclude CPU hotplug operations. */
1480     rcu_for_each_leaf_node(rnp) {
1481         local_irq_save(flags);
1482         arch_spin_lock(&rcu_state.ofl_lock);
1483         raw_spin_lock_rcu_node(rnp);
1484         if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1485             !rnp->wait_blkd_tasks) {
1486             /* Nothing to do on this leaf rcu_node structure. */
1487             raw_spin_unlock_rcu_node(rnp);
1488             arch_spin_unlock(&rcu_state.ofl_lock);
1489             local_irq_restore(flags);
1490             continue;
1491         }
1492 
1493         /* Record old state, apply changes to ->qsmaskinit field. */
1494         oldmask = rnp->qsmaskinit;
1495         rnp->qsmaskinit = rnp->qsmaskinitnext;
1496 
1497         /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1498         if (!oldmask != !rnp->qsmaskinit) {
1499             if (!oldmask) { /* First online CPU for rcu_node. */
1500                 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1501                     rcu_init_new_rnp(rnp);
1502             } else if (rcu_preempt_has_tasks(rnp)) {
1503                 rnp->wait_blkd_tasks = true; /* blocked tasks */
1504             } else { /* Last offline CPU and can propagate. */
1505                 rcu_cleanup_dead_rnp(rnp);
1506             }
1507         }
1508 
1509         /*
1510          * If all waited-on tasks from prior grace period are
1511          * done, and if all this rcu_node structure's CPUs are
1512          * still offline, propagate up the rcu_node tree and
1513          * clear ->wait_blkd_tasks.  Otherwise, if one of this
1514          * rcu_node structure's CPUs has since come back online,
1515          * simply clear ->wait_blkd_tasks.
1516          */
1517         if (rnp->wait_blkd_tasks &&
1518             (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1519             rnp->wait_blkd_tasks = false;
1520             if (!rnp->qsmaskinit)
1521                 rcu_cleanup_dead_rnp(rnp);
1522         }
1523 
1524         raw_spin_unlock_rcu_node(rnp);
1525         arch_spin_unlock(&rcu_state.ofl_lock);
1526         local_irq_restore(flags);
1527     }
1528     rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1529 
1530     /*
1531      * Set the quiescent-state-needed bits in all the rcu_node
1532      * structures for all currently online CPUs in breadth-first
1533      * order, starting from the root rcu_node structure, relying on the
1534      * layout of the tree within the rcu_state.node[] array.  Note that
1535      * other CPUs will access only the leaves of the hierarchy, thus
1536      * seeing that no grace period is in progress, at least until the
1537      * corresponding leaf node has been initialized.
1538      *
1539      * The grace period cannot complete until the initialization
1540      * process finishes, because this kthread handles both.
1541      */
1542     WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1543     rcu_for_each_node_breadth_first(rnp) {
1544         rcu_gp_slow(gp_init_delay);
1545         raw_spin_lock_irqsave_rcu_node(rnp, flags);
1546         rdp = this_cpu_ptr(&rcu_data);
1547         rcu_preempt_check_blocked_tasks(rnp);
1548         rnp->qsmask = rnp->qsmaskinit;
1549         WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1550         if (rnp == rdp->mynode)
1551             (void)__note_gp_changes(rnp, rdp);
1552         rcu_preempt_boost_start_gp(rnp);
1553         trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1554                         rnp->level, rnp->grplo,
1555                         rnp->grphi, rnp->qsmask);
1556         /* Quiescent states for tasks on any now-offline CPUs. */
1557         mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1558         rnp->rcu_gp_init_mask = mask;
1559         if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1560             rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1561         else
1562             raw_spin_unlock_irq_rcu_node(rnp);
1563         cond_resched_tasks_rcu_qs();
1564         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1565     }
1566 
1567     // If strict, make all CPUs aware of new grace period.
1568     if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1569         on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1570 
1571     return true;
1572 }
1573 
1574 /*
1575  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1576  * time.
1577  */
1578 static bool rcu_gp_fqs_check_wake(int *gfp)
1579 {
1580     struct rcu_node *rnp = rcu_get_root();
1581 
1582     // If under overload conditions, force an immediate FQS scan.
1583     if (*gfp & RCU_GP_FLAG_OVLD)
1584         return true;
1585 
1586     // Someone like call_rcu() requested a force-quiescent-state scan.
1587     *gfp = READ_ONCE(rcu_state.gp_flags);
1588     if (*gfp & RCU_GP_FLAG_FQS)
1589         return true;
1590 
1591     // The current grace period has completed.
1592     if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1593         return true;
1594 
1595     return false;
1596 }
1597 
1598 /*
1599  * Do one round of quiescent-state forcing.
1600  */
1601 static void rcu_gp_fqs(bool first_time)
1602 {
1603     struct rcu_node *rnp = rcu_get_root();
1604 
1605     WRITE_ONCE(rcu_state.gp_activity, jiffies);
1606     WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1607     if (first_time) {
1608         /* Collect dyntick-idle snapshots. */
1609         force_qs_rnp(dyntick_save_progress_counter);
1610     } else {
1611         /* Handle dyntick-idle and offline CPUs. */
1612         force_qs_rnp(rcu_implicit_dynticks_qs);
1613     }
1614     /* Clear flag to prevent immediate re-entry. */
1615     if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1616         raw_spin_lock_irq_rcu_node(rnp);
1617         WRITE_ONCE(rcu_state.gp_flags,
1618                READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1619         raw_spin_unlock_irq_rcu_node(rnp);
1620     }
1621 }
1622 
1623 /*
1624  * Loop doing repeated quiescent-state forcing until the grace period ends.
1625  */
1626 static noinline_for_stack void rcu_gp_fqs_loop(void)
1627 {
1628     bool first_gp_fqs = true;
1629     int gf = 0;
1630     unsigned long j;
1631     int ret;
1632     struct rcu_node *rnp = rcu_get_root();
1633 
1634     j = READ_ONCE(jiffies_till_first_fqs);
1635     if (rcu_state.cbovld)
1636         gf = RCU_GP_FLAG_OVLD;
1637     ret = 0;
1638     for (;;) {
1639         if (rcu_state.cbovld) {
1640             j = (j + 2) / 3;
1641             if (j <= 0)
1642                 j = 1;
1643         }
1644         if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1645             WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1646             /*
1647              * jiffies_force_qs before RCU_GP_WAIT_FQS state
1648              * update; required for stall checks.
1649              */
1650             smp_wmb();
1651             WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1652                    jiffies + (j ? 3 * j : 2));
1653         }
1654         trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1655                        TPS("fqswait"));
1656         WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1657         (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1658                  rcu_gp_fqs_check_wake(&gf), j);
1659         rcu_gp_torture_wait();
1660         WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1661         /* Locking provides needed memory barriers. */
1662         /*
1663          * Exit the loop if the root rcu_node structure indicates that the grace period
1664          * has ended, leave the loop.  The rcu_preempt_blocked_readers_cgp(rnp) check
1665          * is required only for single-node rcu_node trees because readers blocking
1666          * the current grace period are queued only on leaf rcu_node structures.
1667          * For multi-node trees, checking the root node's ->qsmask suffices, because a
1668          * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1669          * the corresponding leaf nodes have passed through their quiescent state.
1670          */
1671         if (!READ_ONCE(rnp->qsmask) &&
1672             !rcu_preempt_blocked_readers_cgp(rnp))
1673             break;
1674         /* If time for quiescent-state forcing, do it. */
1675         if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1676             (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1677             trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1678                            TPS("fqsstart"));
1679             rcu_gp_fqs(first_gp_fqs);
1680             gf = 0;
1681             if (first_gp_fqs) {
1682                 first_gp_fqs = false;
1683                 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1684             }
1685             trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1686                            TPS("fqsend"));
1687             cond_resched_tasks_rcu_qs();
1688             WRITE_ONCE(rcu_state.gp_activity, jiffies);
1689             ret = 0; /* Force full wait till next FQS. */
1690             j = READ_ONCE(jiffies_till_next_fqs);
1691         } else {
1692             /* Deal with stray signal. */
1693             cond_resched_tasks_rcu_qs();
1694             WRITE_ONCE(rcu_state.gp_activity, jiffies);
1695             WARN_ON(signal_pending(current));
1696             trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1697                            TPS("fqswaitsig"));
1698             ret = 1; /* Keep old FQS timing. */
1699             j = jiffies;
1700             if (time_after(jiffies, rcu_state.jiffies_force_qs))
1701                 j = 1;
1702             else
1703                 j = rcu_state.jiffies_force_qs - j;
1704             gf = 0;
1705         }
1706     }
1707 }
1708 
1709 /*
1710  * Clean up after the old grace period.
1711  */
1712 static noinline void rcu_gp_cleanup(void)
1713 {
1714     int cpu;
1715     bool needgp = false;
1716     unsigned long gp_duration;
1717     unsigned long new_gp_seq;
1718     bool offloaded;
1719     struct rcu_data *rdp;
1720     struct rcu_node *rnp = rcu_get_root();
1721     struct swait_queue_head *sq;
1722 
1723     WRITE_ONCE(rcu_state.gp_activity, jiffies);
1724     raw_spin_lock_irq_rcu_node(rnp);
1725     rcu_state.gp_end = jiffies;
1726     gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1727     if (gp_duration > rcu_state.gp_max)
1728         rcu_state.gp_max = gp_duration;
1729 
1730     /*
1731      * We know the grace period is complete, but to everyone else
1732      * it appears to still be ongoing.  But it is also the case
1733      * that to everyone else it looks like there is nothing that
1734      * they can do to advance the grace period.  It is therefore
1735      * safe for us to drop the lock in order to mark the grace
1736      * period as completed in all of the rcu_node structures.
1737      */
1738     rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1739     raw_spin_unlock_irq_rcu_node(rnp);
1740 
1741     /*
1742      * Propagate new ->gp_seq value to rcu_node structures so that
1743      * other CPUs don't have to wait until the start of the next grace
1744      * period to process their callbacks.  This also avoids some nasty
1745      * RCU grace-period initialization races by forcing the end of
1746      * the current grace period to be completely recorded in all of
1747      * the rcu_node structures before the beginning of the next grace
1748      * period is recorded in any of the rcu_node structures.
1749      */
1750     new_gp_seq = rcu_state.gp_seq;
1751     rcu_seq_end(&new_gp_seq);
1752     rcu_for_each_node_breadth_first(rnp) {
1753         raw_spin_lock_irq_rcu_node(rnp);
1754         if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1755             dump_blkd_tasks(rnp, 10);
1756         WARN_ON_ONCE(rnp->qsmask);
1757         WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1758         rdp = this_cpu_ptr(&rcu_data);
1759         if (rnp == rdp->mynode)
1760             needgp = __note_gp_changes(rnp, rdp) || needgp;
1761         /* smp_mb() provided by prior unlock-lock pair. */
1762         needgp = rcu_future_gp_cleanup(rnp) || needgp;
1763         // Reset overload indication for CPUs no longer overloaded
1764         if (rcu_is_leaf_node(rnp))
1765             for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1766                 rdp = per_cpu_ptr(&rcu_data, cpu);
1767                 check_cb_ovld_locked(rdp, rnp);
1768             }
1769         sq = rcu_nocb_gp_get(rnp);
1770         raw_spin_unlock_irq_rcu_node(rnp);
1771         rcu_nocb_gp_cleanup(sq);
1772         cond_resched_tasks_rcu_qs();
1773         WRITE_ONCE(rcu_state.gp_activity, jiffies);
1774         rcu_gp_slow(gp_cleanup_delay);
1775     }
1776     rnp = rcu_get_root();
1777     raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1778 
1779     /* Declare grace period done, trace first to use old GP number. */
1780     trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1781     rcu_seq_end(&rcu_state.gp_seq);
1782     ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1783     WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1784     /* Check for GP requests since above loop. */
1785     rdp = this_cpu_ptr(&rcu_data);
1786     if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1787         trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1788                   TPS("CleanupMore"));
1789         needgp = true;
1790     }
1791     /* Advance CBs to reduce false positives below. */
1792     offloaded = rcu_rdp_is_offloaded(rdp);
1793     if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1794 
1795         // We get here if a grace period was needed (“needgp”)
1796         // and the above call to rcu_accelerate_cbs() did not set
1797         // the RCU_GP_FLAG_INIT bit in ->gp_state (which records
1798         // the need for another grace period).  The purpose
1799         // of the “offloaded” check is to avoid invoking
1800         // rcu_accelerate_cbs() on an offloaded CPU because we do not
1801         // hold the ->nocb_lock needed to safely access an offloaded
1802         // ->cblist.  We do not want to acquire that lock because
1803         // it can be heavily contended during callback floods.
1804 
1805         WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1806         WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1807         trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1808     } else {
1809 
1810         // We get here either if there is no need for an
1811         // additional grace period or if rcu_accelerate_cbs() has
1812         // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
1813         // So all we need to do is to clear all of the other
1814         // ->gp_flags bits.
1815 
1816         WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1817     }
1818     raw_spin_unlock_irq_rcu_node(rnp);
1819 
1820     // If strict, make all CPUs aware of the end of the old grace period.
1821     if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1822         on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1823 }
1824 
1825 /*
1826  * Body of kthread that handles grace periods.
1827  */
1828 static int __noreturn rcu_gp_kthread(void *unused)
1829 {
1830     rcu_bind_gp_kthread();
1831     for (;;) {
1832 
1833         /* Handle grace-period start. */
1834         for (;;) {
1835             trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1836                            TPS("reqwait"));
1837             WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1838             swait_event_idle_exclusive(rcu_state.gp_wq,
1839                      READ_ONCE(rcu_state.gp_flags) &
1840                      RCU_GP_FLAG_INIT);
1841             rcu_gp_torture_wait();
1842             WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1843             /* Locking provides needed memory barrier. */
1844             if (rcu_gp_init())
1845                 break;
1846             cond_resched_tasks_rcu_qs();
1847             WRITE_ONCE(rcu_state.gp_activity, jiffies);
1848             WARN_ON(signal_pending(current));
1849             trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1850                            TPS("reqwaitsig"));
1851         }
1852 
1853         /* Handle quiescent-state forcing. */
1854         rcu_gp_fqs_loop();
1855 
1856         /* Handle grace-period end. */
1857         WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1858         rcu_gp_cleanup();
1859         WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1860     }
1861 }
1862 
1863 /*
1864  * Report a full set of quiescent states to the rcu_state data structure.
1865  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1866  * another grace period is required.  Whether we wake the grace-period
1867  * kthread or it awakens itself for the next round of quiescent-state
1868  * forcing, that kthread will clean up after the just-completed grace
1869  * period.  Note that the caller must hold rnp->lock, which is released
1870  * before return.
1871  */
1872 static void rcu_report_qs_rsp(unsigned long flags)
1873     __releases(rcu_get_root()->lock)
1874 {
1875     raw_lockdep_assert_held_rcu_node(rcu_get_root());
1876     WARN_ON_ONCE(!rcu_gp_in_progress());
1877     WRITE_ONCE(rcu_state.gp_flags,
1878            READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1879     raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1880     rcu_gp_kthread_wake();
1881 }
1882 
1883 /*
1884  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1885  * Allows quiescent states for a group of CPUs to be reported at one go
1886  * to the specified rcu_node structure, though all the CPUs in the group
1887  * must be represented by the same rcu_node structure (which need not be a
1888  * leaf rcu_node structure, though it often will be).  The gps parameter
1889  * is the grace-period snapshot, which means that the quiescent states
1890  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
1891  * must be held upon entry, and it is released before return.
1892  *
1893  * As a special case, if mask is zero, the bit-already-cleared check is
1894  * disabled.  This allows propagating quiescent state due to resumed tasks
1895  * during grace-period initialization.
1896  */
1897 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1898                   unsigned long gps, unsigned long flags)
1899     __releases(rnp->lock)
1900 {
1901     unsigned long oldmask = 0;
1902     struct rcu_node *rnp_c;
1903 
1904     raw_lockdep_assert_held_rcu_node(rnp);
1905 
1906     /* Walk up the rcu_node hierarchy. */
1907     for (;;) {
1908         if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1909 
1910             /*
1911              * Our bit has already been cleared, or the
1912              * relevant grace period is already over, so done.
1913              */
1914             raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1915             return;
1916         }
1917         WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1918         WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1919                  rcu_preempt_blocked_readers_cgp(rnp));
1920         WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1921         trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1922                          mask, rnp->qsmask, rnp->level,
1923                          rnp->grplo, rnp->grphi,
1924                          !!rnp->gp_tasks);
1925         if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1926 
1927             /* Other bits still set at this level, so done. */
1928             raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1929             return;
1930         }
1931         rnp->completedqs = rnp->gp_seq;
1932         mask = rnp->grpmask;
1933         if (rnp->parent == NULL) {
1934 
1935             /* No more levels.  Exit loop holding root lock. */
1936 
1937             break;
1938         }
1939         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1940         rnp_c = rnp;
1941         rnp = rnp->parent;
1942         raw_spin_lock_irqsave_rcu_node(rnp, flags);
1943         oldmask = READ_ONCE(rnp_c->qsmask);
1944     }
1945 
1946     /*
1947      * Get here if we are the last CPU to pass through a quiescent
1948      * state for this grace period.  Invoke rcu_report_qs_rsp()
1949      * to clean up and start the next grace period if one is needed.
1950      */
1951     rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1952 }
1953 
1954 /*
1955  * Record a quiescent state for all tasks that were previously queued
1956  * on the specified rcu_node structure and that were blocking the current
1957  * RCU grace period.  The caller must hold the corresponding rnp->lock with
1958  * irqs disabled, and this lock is released upon return, but irqs remain
1959  * disabled.
1960  */
1961 static void __maybe_unused
1962 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1963     __releases(rnp->lock)
1964 {
1965     unsigned long gps;
1966     unsigned long mask;
1967     struct rcu_node *rnp_p;
1968 
1969     raw_lockdep_assert_held_rcu_node(rnp);
1970     if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1971         WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1972         rnp->qsmask != 0) {
1973         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1974         return;  /* Still need more quiescent states! */
1975     }
1976 
1977     rnp->completedqs = rnp->gp_seq;
1978     rnp_p = rnp->parent;
1979     if (rnp_p == NULL) {
1980         /*
1981          * Only one rcu_node structure in the tree, so don't
1982          * try to report up to its nonexistent parent!
1983          */
1984         rcu_report_qs_rsp(flags);
1985         return;
1986     }
1987 
1988     /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1989     gps = rnp->gp_seq;
1990     mask = rnp->grpmask;
1991     raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
1992     raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
1993     rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1994 }
1995 
1996 /*
1997  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1998  * structure.  This must be called from the specified CPU.
1999  */
2000 static void
2001 rcu_report_qs_rdp(struct rcu_data *rdp)
2002 {
2003     unsigned long flags;
2004     unsigned long mask;
2005     bool needwake = false;
2006     bool needacc = false;
2007     struct rcu_node *rnp;
2008 
2009     WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2010     rnp = rdp->mynode;
2011     raw_spin_lock_irqsave_rcu_node(rnp, flags);
2012     if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2013         rdp->gpwrap) {
2014 
2015         /*
2016          * The grace period in which this quiescent state was
2017          * recorded has ended, so don't report it upwards.
2018          * We will instead need a new quiescent state that lies
2019          * within the current grace period.
2020          */
2021         rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
2022         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2023         return;
2024     }
2025     mask = rdp->grpmask;
2026     rdp->core_needs_qs = false;
2027     if ((rnp->qsmask & mask) == 0) {
2028         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2029     } else {
2030         /*
2031          * This GP can't end until cpu checks in, so all of our
2032          * callbacks can be processed during the next GP.
2033          *
2034          * NOCB kthreads have their own way to deal with that...
2035          */
2036         if (!rcu_rdp_is_offloaded(rdp)) {
2037             needwake = rcu_accelerate_cbs(rnp, rdp);
2038         } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2039             /*
2040              * ...but NOCB kthreads may miss or delay callbacks acceleration
2041              * if in the middle of a (de-)offloading process.
2042              */
2043             needacc = true;
2044         }
2045 
2046         rcu_disable_urgency_upon_qs(rdp);
2047         rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2048         /* ^^^ Released rnp->lock */
2049         if (needwake)
2050             rcu_gp_kthread_wake();
2051 
2052         if (needacc) {
2053             rcu_nocb_lock_irqsave(rdp, flags);
2054             rcu_accelerate_cbs_unlocked(rnp, rdp);
2055             rcu_nocb_unlock_irqrestore(rdp, flags);
2056         }
2057     }
2058 }
2059 
2060 /*
2061  * Check to see if there is a new grace period of which this CPU
2062  * is not yet aware, and if so, set up local rcu_data state for it.
2063  * Otherwise, see if this CPU has just passed through its first
2064  * quiescent state for this grace period, and record that fact if so.
2065  */
2066 static void
2067 rcu_check_quiescent_state(struct rcu_data *rdp)
2068 {
2069     /* Check for grace-period ends and beginnings. */
2070     note_gp_changes(rdp);
2071 
2072     /*
2073      * Does this CPU still need to do its part for current grace period?
2074      * If no, return and let the other CPUs do their part as well.
2075      */
2076     if (!rdp->core_needs_qs)
2077         return;
2078 
2079     /*
2080      * Was there a quiescent state since the beginning of the grace
2081      * period? If no, then exit and wait for the next call.
2082      */
2083     if (rdp->cpu_no_qs.b.norm)
2084         return;
2085 
2086     /*
2087      * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2088      * judge of that).
2089      */
2090     rcu_report_qs_rdp(rdp);
2091 }
2092 
2093 /*
2094  * Near the end of the offline process.  Trace the fact that this CPU
2095  * is going offline.
2096  */
2097 int rcutree_dying_cpu(unsigned int cpu)
2098 {
2099     bool blkd;
2100     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2101     struct rcu_node *rnp = rdp->mynode;
2102 
2103     if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2104         return 0;
2105 
2106     blkd = !!(rnp->qsmask & rdp->grpmask);
2107     trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2108                    blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2109     return 0;
2110 }
2111 
2112 /*
2113  * All CPUs for the specified rcu_node structure have gone offline,
2114  * and all tasks that were preempted within an RCU read-side critical
2115  * section while running on one of those CPUs have since exited their RCU
2116  * read-side critical section.  Some other CPU is reporting this fact with
2117  * the specified rcu_node structure's ->lock held and interrupts disabled.
2118  * This function therefore goes up the tree of rcu_node structures,
2119  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2120  * the leaf rcu_node structure's ->qsmaskinit field has already been
2121  * updated.
2122  *
2123  * This function does check that the specified rcu_node structure has
2124  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2125  * prematurely.  That said, invoking it after the fact will cost you
2126  * a needless lock acquisition.  So once it has done its work, don't
2127  * invoke it again.
2128  */
2129 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2130 {
2131     long mask;
2132     struct rcu_node *rnp = rnp_leaf;
2133 
2134     raw_lockdep_assert_held_rcu_node(rnp_leaf);
2135     if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2136         WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2137         WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2138         return;
2139     for (;;) {
2140         mask = rnp->grpmask;
2141         rnp = rnp->parent;
2142         if (!rnp)
2143             break;
2144         raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2145         rnp->qsmaskinit &= ~mask;
2146         /* Between grace periods, so better already be zero! */
2147         WARN_ON_ONCE(rnp->qsmask);
2148         if (rnp->qsmaskinit) {
2149             raw_spin_unlock_rcu_node(rnp);
2150             /* irqs remain disabled. */
2151             return;
2152         }
2153         raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2154     }
2155 }
2156 
2157 /*
2158  * The CPU has been completely removed, and some other CPU is reporting
2159  * this fact from process context.  Do the remainder of the cleanup.
2160  * There can only be one CPU hotplug operation at a time, so no need for
2161  * explicit locking.
2162  */
2163 int rcutree_dead_cpu(unsigned int cpu)
2164 {
2165     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2166     struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2167 
2168     if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2169         return 0;
2170 
2171     WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2172     /* Adjust any no-longer-needed kthreads. */
2173     rcu_boost_kthread_setaffinity(rnp, -1);
2174     // Stop-machine done, so allow nohz_full to disable tick.
2175     tick_dep_clear(TICK_DEP_BIT_RCU);
2176     return 0;
2177 }
2178 
2179 /*
2180  * Invoke any RCU callbacks that have made it to the end of their grace
2181  * period.  Throttle as specified by rdp->blimit.
2182  */
2183 static void rcu_do_batch(struct rcu_data *rdp)
2184 {
2185     int div;
2186     bool __maybe_unused empty;
2187     unsigned long flags;
2188     struct rcu_head *rhp;
2189     struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2190     long bl, count = 0;
2191     long pending, tlimit = 0;
2192 
2193     /* If no callbacks are ready, just return. */
2194     if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2195         trace_rcu_batch_start(rcu_state.name,
2196                       rcu_segcblist_n_cbs(&rdp->cblist), 0);
2197         trace_rcu_batch_end(rcu_state.name, 0,
2198                     !rcu_segcblist_empty(&rdp->cblist),
2199                     need_resched(), is_idle_task(current),
2200                     rcu_is_callbacks_kthread(rdp));
2201         return;
2202     }
2203 
2204     /*
2205      * Extract the list of ready callbacks, disabling IRQs to prevent
2206      * races with call_rcu() from interrupt handlers.  Leave the
2207      * callback counts, as rcu_barrier() needs to be conservative.
2208      */
2209     rcu_nocb_lock_irqsave(rdp, flags);
2210     WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2211     pending = rcu_segcblist_n_cbs(&rdp->cblist);
2212     div = READ_ONCE(rcu_divisor);
2213     div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2214     bl = max(rdp->blimit, pending >> div);
2215     if (in_serving_softirq() && unlikely(bl > 100)) {
2216         long rrn = READ_ONCE(rcu_resched_ns);
2217 
2218         rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2219         tlimit = local_clock() + rrn;
2220     }
2221     trace_rcu_batch_start(rcu_state.name,
2222                   rcu_segcblist_n_cbs(&rdp->cblist), bl);
2223     rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2224     if (rcu_rdp_is_offloaded(rdp))
2225         rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2226 
2227     trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2228     rcu_nocb_unlock_irqrestore(rdp, flags);
2229 
2230     /* Invoke callbacks. */
2231     tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2232     rhp = rcu_cblist_dequeue(&rcl);
2233 
2234     for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2235         rcu_callback_t f;
2236 
2237         count++;
2238         debug_rcu_head_unqueue(rhp);
2239 
2240         rcu_lock_acquire(&rcu_callback_map);
2241         trace_rcu_invoke_callback(rcu_state.name, rhp);
2242 
2243         f = rhp->func;
2244         WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2245         f(rhp);
2246 
2247         rcu_lock_release(&rcu_callback_map);
2248 
2249         /*
2250          * Stop only if limit reached and CPU has something to do.
2251          */
2252         if (in_serving_softirq()) {
2253             if (count >= bl && (need_resched() || !is_idle_task(current)))
2254                 break;
2255             /*
2256              * Make sure we don't spend too much time here and deprive other
2257              * softirq vectors of CPU cycles.
2258              */
2259             if (unlikely(tlimit)) {
2260                 /* only call local_clock() every 32 callbacks */
2261                 if (likely((count & 31) || local_clock() < tlimit))
2262                     continue;
2263                 /* Exceeded the time limit, so leave. */
2264                 break;
2265             }
2266         } else {
2267             local_bh_enable();
2268             lockdep_assert_irqs_enabled();
2269             cond_resched_tasks_rcu_qs();
2270             lockdep_assert_irqs_enabled();
2271             local_bh_disable();
2272         }
2273     }
2274 
2275     rcu_nocb_lock_irqsave(rdp, flags);
2276     rdp->n_cbs_invoked += count;
2277     trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2278                 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2279 
2280     /* Update counts and requeue any remaining callbacks. */
2281     rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2282     rcu_segcblist_add_len(&rdp->cblist, -count);
2283 
2284     /* Reinstate batch limit if we have worked down the excess. */
2285     count = rcu_segcblist_n_cbs(&rdp->cblist);
2286     if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2287         rdp->blimit = blimit;
2288 
2289     /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2290     if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2291         rdp->qlen_last_fqs_check = 0;
2292         rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2293     } else if (count < rdp->qlen_last_fqs_check - qhimark)
2294         rdp->qlen_last_fqs_check = count;
2295 
2296     /*
2297      * The following usually indicates a double call_rcu().  To track
2298      * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2299      */
2300     empty = rcu_segcblist_empty(&rdp->cblist);
2301     WARN_ON_ONCE(count == 0 && !empty);
2302     WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2303              count != 0 && empty);
2304     WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2305     WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2306 
2307     rcu_nocb_unlock_irqrestore(rdp, flags);
2308 
2309     tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2310 }
2311 
2312 /*
2313  * This function is invoked from each scheduling-clock interrupt,
2314  * and checks to see if this CPU is in a non-context-switch quiescent
2315  * state, for example, user mode or idle loop.  It also schedules RCU
2316  * core processing.  If the current grace period has gone on too long,
2317  * it will ask the scheduler to manufacture a context switch for the sole
2318  * purpose of providing the needed quiescent state.
2319  */
2320 void rcu_sched_clock_irq(int user)
2321 {
2322     unsigned long j;
2323 
2324     if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2325         j = jiffies;
2326         WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2327         __this_cpu_write(rcu_data.last_sched_clock, j);
2328     }
2329     trace_rcu_utilization(TPS("Start scheduler-tick"));
2330     lockdep_assert_irqs_disabled();
2331     raw_cpu_inc(rcu_data.ticks_this_gp);
2332     /* The load-acquire pairs with the store-release setting to true. */
2333     if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2334         /* Idle and userspace execution already are quiescent states. */
2335         if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2336             set_tsk_need_resched(current);
2337             set_preempt_need_resched();
2338         }
2339         __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2340     }
2341     rcu_flavor_sched_clock_irq(user);
2342     if (rcu_pending(user))
2343         invoke_rcu_core();
2344     if (user)
2345         rcu_tasks_classic_qs(current, false);
2346     lockdep_assert_irqs_disabled();
2347 
2348     trace_rcu_utilization(TPS("End scheduler-tick"));
2349 }
2350 
2351 /*
2352  * Scan the leaf rcu_node structures.  For each structure on which all
2353  * CPUs have reported a quiescent state and on which there are tasks
2354  * blocking the current grace period, initiate RCU priority boosting.
2355  * Otherwise, invoke the specified function to check dyntick state for
2356  * each CPU that has not yet reported a quiescent state.
2357  */
2358 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2359 {
2360     int cpu;
2361     unsigned long flags;
2362     unsigned long mask;
2363     struct rcu_data *rdp;
2364     struct rcu_node *rnp;
2365 
2366     rcu_state.cbovld = rcu_state.cbovldnext;
2367     rcu_state.cbovldnext = false;
2368     rcu_for_each_leaf_node(rnp) {
2369         cond_resched_tasks_rcu_qs();
2370         mask = 0;
2371         raw_spin_lock_irqsave_rcu_node(rnp, flags);
2372         rcu_state.cbovldnext |= !!rnp->cbovldmask;
2373         if (rnp->qsmask == 0) {
2374             if (rcu_preempt_blocked_readers_cgp(rnp)) {
2375                 /*
2376                  * No point in scanning bits because they
2377                  * are all zero.  But we might need to
2378                  * priority-boost blocked readers.
2379                  */
2380                 rcu_initiate_boost(rnp, flags);
2381                 /* rcu_initiate_boost() releases rnp->lock */
2382                 continue;
2383             }
2384             raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2385             continue;
2386         }
2387         for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2388             rdp = per_cpu_ptr(&rcu_data, cpu);
2389             if (f(rdp)) {
2390                 mask |= rdp->grpmask;
2391                 rcu_disable_urgency_upon_qs(rdp);
2392             }
2393         }
2394         if (mask != 0) {
2395             /* Idle/offline CPUs, report (releases rnp->lock). */
2396             rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2397         } else {
2398             /* Nothing to do here, so just drop the lock. */
2399             raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2400         }
2401     }
2402 }
2403 
2404 /*
2405  * Force quiescent states on reluctant CPUs, and also detect which
2406  * CPUs are in dyntick-idle mode.
2407  */
2408 void rcu_force_quiescent_state(void)
2409 {
2410     unsigned long flags;
2411     bool ret;
2412     struct rcu_node *rnp;
2413     struct rcu_node *rnp_old = NULL;
2414 
2415     /* Funnel through hierarchy to reduce memory contention. */
2416     rnp = __this_cpu_read(rcu_data.mynode);
2417     for (; rnp != NULL; rnp = rnp->parent) {
2418         ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2419                !raw_spin_trylock(&rnp->fqslock);
2420         if (rnp_old != NULL)
2421             raw_spin_unlock(&rnp_old->fqslock);
2422         if (ret)
2423             return;
2424         rnp_old = rnp;
2425     }
2426     /* rnp_old == rcu_get_root(), rnp == NULL. */
2427 
2428     /* Reached the root of the rcu_node tree, acquire lock. */
2429     raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2430     raw_spin_unlock(&rnp_old->fqslock);
2431     if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2432         raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2433         return;  /* Someone beat us to it. */
2434     }
2435     WRITE_ONCE(rcu_state.gp_flags,
2436            READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2437     raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2438     rcu_gp_kthread_wake();
2439 }
2440 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2441 
2442 // Workqueue handler for an RCU reader for kernels enforcing struct RCU
2443 // grace periods.
2444 static void strict_work_handler(struct work_struct *work)
2445 {
2446     rcu_read_lock();
2447     rcu_read_unlock();
2448 }
2449 
2450 /* Perform RCU core processing work for the current CPU.  */
2451 static __latent_entropy void rcu_core(void)
2452 {
2453     unsigned long flags;
2454     struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2455     struct rcu_node *rnp = rdp->mynode;
2456     /*
2457      * On RT rcu_core() can be preempted when IRQs aren't disabled.
2458      * Therefore this function can race with concurrent NOCB (de-)offloading
2459      * on this CPU and the below condition must be considered volatile.
2460      * However if we race with:
2461      *
2462      * _ Offloading:   In the worst case we accelerate or process callbacks
2463      *                 concurrently with NOCB kthreads. We are guaranteed to
2464      *                 call rcu_nocb_lock() if that happens.
2465      *
2466      * _ Deoffloading: In the worst case we miss callbacks acceleration or
2467      *                 processing. This is fine because the early stage
2468      *                 of deoffloading invokes rcu_core() after setting
2469      *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2470      *                 what could have been dismissed without the need to wait
2471      *                 for the next rcu_pending() check in the next jiffy.
2472      */
2473     const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2474 
2475     if (cpu_is_offline(smp_processor_id()))
2476         return;
2477     trace_rcu_utilization(TPS("Start RCU core"));
2478     WARN_ON_ONCE(!rdp->beenonline);
2479 
2480     /* Report any deferred quiescent states if preemption enabled. */
2481     if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2482         rcu_preempt_deferred_qs(current);
2483     } else if (rcu_preempt_need_deferred_qs(current)) {
2484         set_tsk_need_resched(current);
2485         set_preempt_need_resched();
2486     }
2487 
2488     /* Update RCU state based on any recent quiescent states. */
2489     rcu_check_quiescent_state(rdp);
2490 
2491     /* No grace period and unregistered callbacks? */
2492     if (!rcu_gp_in_progress() &&
2493         rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2494         rcu_nocb_lock_irqsave(rdp, flags);
2495         if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2496             rcu_accelerate_cbs_unlocked(rnp, rdp);
2497         rcu_nocb_unlock_irqrestore(rdp, flags);
2498     }
2499 
2500     rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2501 
2502     /* If there are callbacks ready, invoke them. */
2503     if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2504         likely(READ_ONCE(rcu_scheduler_fully_active))) {
2505         rcu_do_batch(rdp);
2506         /* Re-invoke RCU core processing if there are callbacks remaining. */
2507         if (rcu_segcblist_ready_cbs(&rdp->cblist))
2508             invoke_rcu_core();
2509     }
2510 
2511     /* Do any needed deferred wakeups of rcuo kthreads. */
2512     do_nocb_deferred_wakeup(rdp);
2513     trace_rcu_utilization(TPS("End RCU core"));
2514 
2515     // If strict GPs, schedule an RCU reader in a clean environment.
2516     if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2517         queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2518 }
2519 
2520 static void rcu_core_si(struct softirq_action *h)
2521 {
2522     rcu_core();
2523 }
2524 
2525 static void rcu_wake_cond(struct task_struct *t, int status)
2526 {
2527     /*
2528      * If the thread is yielding, only wake it when this
2529      * is invoked from idle
2530      */
2531     if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2532         wake_up_process(t);
2533 }
2534 
2535 static void invoke_rcu_core_kthread(void)
2536 {
2537     struct task_struct *t;
2538     unsigned long flags;
2539 
2540     local_irq_save(flags);
2541     __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2542     t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2543     if (t != NULL && t != current)
2544         rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2545     local_irq_restore(flags);
2546 }
2547 
2548 /*
2549  * Wake up this CPU's rcuc kthread to do RCU core processing.
2550  */
2551 static void invoke_rcu_core(void)
2552 {
2553     if (!cpu_online(smp_processor_id()))
2554         return;
2555     if (use_softirq)
2556         raise_softirq(RCU_SOFTIRQ);
2557     else
2558         invoke_rcu_core_kthread();
2559 }
2560 
2561 static void rcu_cpu_kthread_park(unsigned int cpu)
2562 {
2563     per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2564 }
2565 
2566 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2567 {
2568     return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2569 }
2570 
2571 /*
2572  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2573  * the RCU softirq used in configurations of RCU that do not support RCU
2574  * priority boosting.
2575  */
2576 static void rcu_cpu_kthread(unsigned int cpu)
2577 {
2578     unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2579     char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2580     unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2581     int spincnt;
2582 
2583     trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2584     for (spincnt = 0; spincnt < 10; spincnt++) {
2585         WRITE_ONCE(*j, jiffies);
2586         local_bh_disable();
2587         *statusp = RCU_KTHREAD_RUNNING;
2588         local_irq_disable();
2589         work = *workp;
2590         *workp = 0;
2591         local_irq_enable();
2592         if (work)
2593             rcu_core();
2594         local_bh_enable();
2595         if (*workp == 0) {
2596             trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2597             *statusp = RCU_KTHREAD_WAITING;
2598             return;
2599         }
2600     }
2601     *statusp = RCU_KTHREAD_YIELDING;
2602     trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2603     schedule_timeout_idle(2);
2604     trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2605     *statusp = RCU_KTHREAD_WAITING;
2606     WRITE_ONCE(*j, jiffies);
2607 }
2608 
2609 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2610     .store          = &rcu_data.rcu_cpu_kthread_task,
2611     .thread_should_run  = rcu_cpu_kthread_should_run,
2612     .thread_fn      = rcu_cpu_kthread,
2613     .thread_comm        = "rcuc/%u",
2614     .setup          = rcu_cpu_kthread_setup,
2615     .park           = rcu_cpu_kthread_park,
2616 };
2617 
2618 /*
2619  * Spawn per-CPU RCU core processing kthreads.
2620  */
2621 static int __init rcu_spawn_core_kthreads(void)
2622 {
2623     int cpu;
2624 
2625     for_each_possible_cpu(cpu)
2626         per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2627     if (use_softirq)
2628         return 0;
2629     WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2630           "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2631     return 0;
2632 }
2633 
2634 /*
2635  * Handle any core-RCU processing required by a call_rcu() invocation.
2636  */
2637 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2638                 unsigned long flags)
2639 {
2640     /*
2641      * If called from an extended quiescent state, invoke the RCU
2642      * core in order to force a re-evaluation of RCU's idleness.
2643      */
2644     if (!rcu_is_watching())
2645         invoke_rcu_core();
2646 
2647     /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2648     if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2649         return;
2650 
2651     /*
2652      * Force the grace period if too many callbacks or too long waiting.
2653      * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2654      * if some other CPU has recently done so.  Also, don't bother
2655      * invoking rcu_force_quiescent_state() if the newly enqueued callback
2656      * is the only one waiting for a grace period to complete.
2657      */
2658     if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2659              rdp->qlen_last_fqs_check + qhimark)) {
2660 
2661         /* Are we ignoring a completed grace period? */
2662         note_gp_changes(rdp);
2663 
2664         /* Start a new grace period if one not already started. */
2665         if (!rcu_gp_in_progress()) {
2666             rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2667         } else {
2668             /* Give the grace period a kick. */
2669             rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2670             if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2671                 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2672                 rcu_force_quiescent_state();
2673             rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2674             rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2675         }
2676     }
2677 }
2678 
2679 /*
2680  * RCU callback function to leak a callback.
2681  */
2682 static void rcu_leak_callback(struct rcu_head *rhp)
2683 {
2684 }
2685 
2686 /*
2687  * Check and if necessary update the leaf rcu_node structure's
2688  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2689  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2690  * structure's ->lock.
2691  */
2692 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2693 {
2694     raw_lockdep_assert_held_rcu_node(rnp);
2695     if (qovld_calc <= 0)
2696         return; // Early boot and wildcard value set.
2697     if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2698         WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2699     else
2700         WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2701 }
2702 
2703 /*
2704  * Check and if necessary update the leaf rcu_node structure's
2705  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2706  * number of queued RCU callbacks.  No locks need be held, but the
2707  * caller must have disabled interrupts.
2708  *
2709  * Note that this function ignores the possibility that there are a lot
2710  * of callbacks all of which have already seen the end of their respective
2711  * grace periods.  This omission is due to the need for no-CBs CPUs to
2712  * be holding ->nocb_lock to do this check, which is too heavy for a
2713  * common-case operation.
2714  */
2715 static void check_cb_ovld(struct rcu_data *rdp)
2716 {
2717     struct rcu_node *const rnp = rdp->mynode;
2718 
2719     if (qovld_calc <= 0 ||
2720         ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2721          !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2722         return; // Early boot wildcard value or already set correctly.
2723     raw_spin_lock_rcu_node(rnp);
2724     check_cb_ovld_locked(rdp, rnp);
2725     raw_spin_unlock_rcu_node(rnp);
2726 }
2727 
2728 /**
2729  * call_rcu() - Queue an RCU callback for invocation after a grace period.
2730  * @head: structure to be used for queueing the RCU updates.
2731  * @func: actual callback function to be invoked after the grace period
2732  *
2733  * The callback function will be invoked some time after a full grace
2734  * period elapses, in other words after all pre-existing RCU read-side
2735  * critical sections have completed.  However, the callback function
2736  * might well execute concurrently with RCU read-side critical sections
2737  * that started after call_rcu() was invoked.
2738  *
2739  * RCU read-side critical sections are delimited by rcu_read_lock()
2740  * and rcu_read_unlock(), and may be nested.  In addition, but only in
2741  * v5.0 and later, regions of code across which interrupts, preemption,
2742  * or softirqs have been disabled also serve as RCU read-side critical
2743  * sections.  This includes hardware interrupt handlers, softirq handlers,
2744  * and NMI handlers.
2745  *
2746  * Note that all CPUs must agree that the grace period extended beyond
2747  * all pre-existing RCU read-side critical section.  On systems with more
2748  * than one CPU, this means that when "func()" is invoked, each CPU is
2749  * guaranteed to have executed a full memory barrier since the end of its
2750  * last RCU read-side critical section whose beginning preceded the call
2751  * to call_rcu().  It also means that each CPU executing an RCU read-side
2752  * critical section that continues beyond the start of "func()" must have
2753  * executed a memory barrier after the call_rcu() but before the beginning
2754  * of that RCU read-side critical section.  Note that these guarantees
2755  * include CPUs that are offline, idle, or executing in user mode, as
2756  * well as CPUs that are executing in the kernel.
2757  *
2758  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2759  * resulting RCU callback function "func()", then both CPU A and CPU B are
2760  * guaranteed to execute a full memory barrier during the time interval
2761  * between the call to call_rcu() and the invocation of "func()" -- even
2762  * if CPU A and CPU B are the same CPU (but again only if the system has
2763  * more than one CPU).
2764  *
2765  * Implementation of these memory-ordering guarantees is described here:
2766  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2767  */
2768 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2769 {
2770     static atomic_t doublefrees;
2771     unsigned long flags;
2772     struct rcu_data *rdp;
2773     bool was_alldone;
2774 
2775     /* Misaligned rcu_head! */
2776     WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2777 
2778     if (debug_rcu_head_queue(head)) {
2779         /*
2780          * Probable double call_rcu(), so leak the callback.
2781          * Use rcu:rcu_callback trace event to find the previous
2782          * time callback was passed to call_rcu().
2783          */
2784         if (atomic_inc_return(&doublefrees) < 4) {
2785             pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
2786             mem_dump_obj(head);
2787         }
2788         WRITE_ONCE(head->func, rcu_leak_callback);
2789         return;
2790     }
2791     head->func = func;
2792     head->next = NULL;
2793     kasan_record_aux_stack_noalloc(head);
2794     local_irq_save(flags);
2795     rdp = this_cpu_ptr(&rcu_data);
2796 
2797     /* Add the callback to our list. */
2798     if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2799         // This can trigger due to call_rcu() from offline CPU:
2800         WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2801         WARN_ON_ONCE(!rcu_is_watching());
2802         // Very early boot, before rcu_init().  Initialize if needed
2803         // and then drop through to queue the callback.
2804         if (rcu_segcblist_empty(&rdp->cblist))
2805             rcu_segcblist_init(&rdp->cblist);
2806     }
2807 
2808     check_cb_ovld(rdp);
2809     if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2810         return; // Enqueued onto ->nocb_bypass, so just leave.
2811     // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2812     rcu_segcblist_enqueue(&rdp->cblist, head);
2813     if (__is_kvfree_rcu_offset((unsigned long)func))
2814         trace_rcu_kvfree_callback(rcu_state.name, head,
2815                      (unsigned long)func,
2816                      rcu_segcblist_n_cbs(&rdp->cblist));
2817     else
2818         trace_rcu_callback(rcu_state.name, head,
2819                    rcu_segcblist_n_cbs(&rdp->cblist));
2820 
2821     trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2822 
2823     /* Go handle any RCU core processing required. */
2824     if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2825         __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2826     } else {
2827         __call_rcu_core(rdp, head, flags);
2828         local_irq_restore(flags);
2829     }
2830 }
2831 EXPORT_SYMBOL_GPL(call_rcu);
2832 
2833 
2834 /* Maximum number of jiffies to wait before draining a batch. */
2835 #define KFREE_DRAIN_JIFFIES (HZ / 50)
2836 #define KFREE_N_BATCHES 2
2837 #define FREE_N_CHANNELS 2
2838 
2839 /**
2840  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2841  * @nr_records: Number of active pointers in the array
2842  * @next: Next bulk object in the block chain
2843  * @records: Array of the kvfree_rcu() pointers
2844  */
2845 struct kvfree_rcu_bulk_data {
2846     unsigned long nr_records;
2847     struct kvfree_rcu_bulk_data *next;
2848     void *records[];
2849 };
2850 
2851 /*
2852  * This macro defines how many entries the "records" array
2853  * will contain. It is based on the fact that the size of
2854  * kvfree_rcu_bulk_data structure becomes exactly one page.
2855  */
2856 #define KVFREE_BULK_MAX_ENTR \
2857     ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
2858 
2859 /**
2860  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2861  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2862  * @head_free: List of kfree_rcu() objects waiting for a grace period
2863  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2864  * @krcp: Pointer to @kfree_rcu_cpu structure
2865  */
2866 
2867 struct kfree_rcu_cpu_work {
2868     struct rcu_work rcu_work;
2869     struct rcu_head *head_free;
2870     struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
2871     struct kfree_rcu_cpu *krcp;
2872 };
2873 
2874 /**
2875  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2876  * @head: List of kfree_rcu() objects not yet waiting for a grace period
2877  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2878  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2879  * @lock: Synchronize access to this structure
2880  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2881  * @initialized: The @rcu_work fields have been initialized
2882  * @count: Number of objects for which GP not started
2883  * @bkvcache:
2884  *  A simple cache list that contains objects for reuse purpose.
2885  *  In order to save some per-cpu space the list is singular.
2886  *  Even though it is lockless an access has to be protected by the
2887  *  per-cpu lock.
2888  * @page_cache_work: A work to refill the cache when it is empty
2889  * @backoff_page_cache_fill: Delay cache refills
2890  * @work_in_progress: Indicates that page_cache_work is running
2891  * @hrtimer: A hrtimer for scheduling a page_cache_work
2892  * @nr_bkv_objs: number of allocated objects at @bkvcache.
2893  *
2894  * This is a per-CPU structure.  The reason that it is not included in
2895  * the rcu_data structure is to permit this code to be extracted from
2896  * the RCU files.  Such extraction could allow further optimization of
2897  * the interactions with the slab allocators.
2898  */
2899 struct kfree_rcu_cpu {
2900     struct rcu_head *head;
2901     struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
2902     struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2903     raw_spinlock_t lock;
2904     struct delayed_work monitor_work;
2905     bool initialized;
2906     int count;
2907 
2908     struct delayed_work page_cache_work;
2909     atomic_t backoff_page_cache_fill;
2910     atomic_t work_in_progress;
2911     struct hrtimer hrtimer;
2912 
2913     struct llist_head bkvcache;
2914     int nr_bkv_objs;
2915 };
2916 
2917 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2918     .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2919 };
2920 
2921 static __always_inline void
2922 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2923 {
2924 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2925     int i;
2926 
2927     for (i = 0; i < bhead->nr_records; i++)
2928         debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2929 #endif
2930 }
2931 
2932 static inline struct kfree_rcu_cpu *
2933 krc_this_cpu_lock(unsigned long *flags)
2934 {
2935     struct kfree_rcu_cpu *krcp;
2936 
2937     local_irq_save(*flags); // For safely calling this_cpu_ptr().
2938     krcp = this_cpu_ptr(&krc);
2939     raw_spin_lock(&krcp->lock);
2940 
2941     return krcp;
2942 }
2943 
2944 static inline void
2945 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2946 {
2947     raw_spin_unlock_irqrestore(&krcp->lock, flags);
2948 }
2949 
2950 static inline struct kvfree_rcu_bulk_data *
2951 get_cached_bnode(struct kfree_rcu_cpu *krcp)
2952 {
2953     if (!krcp->nr_bkv_objs)
2954         return NULL;
2955 
2956     WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2957     return (struct kvfree_rcu_bulk_data *)
2958         llist_del_first(&krcp->bkvcache);
2959 }
2960 
2961 static inline bool
2962 put_cached_bnode(struct kfree_rcu_cpu *krcp,
2963     struct kvfree_rcu_bulk_data *bnode)
2964 {
2965     // Check the limit.
2966     if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
2967         return false;
2968 
2969     llist_add((struct llist_node *) bnode, &krcp->bkvcache);
2970     WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
2971     return true;
2972 }
2973 
2974 static int
2975 drain_page_cache(struct kfree_rcu_cpu *krcp)
2976 {
2977     unsigned long flags;
2978     struct llist_node *page_list, *pos, *n;
2979     int freed = 0;
2980 
2981     raw_spin_lock_irqsave(&krcp->lock, flags);
2982     page_list = llist_del_all(&krcp->bkvcache);
2983     WRITE_ONCE(krcp->nr_bkv_objs, 0);
2984     raw_spin_unlock_irqrestore(&krcp->lock, flags);
2985 
2986     llist_for_each_safe(pos, n, page_list) {
2987         free_page((unsigned long)pos);
2988         freed++;
2989     }
2990 
2991     return freed;
2992 }
2993 
2994 /*
2995  * This function is invoked in workqueue context after a grace period.
2996  * It frees all the objects queued on ->bkvhead_free or ->head_free.
2997  */
2998 static void kfree_rcu_work(struct work_struct *work)
2999 {
3000     unsigned long flags;
3001     struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3002     struct rcu_head *head, *next;
3003     struct kfree_rcu_cpu *krcp;
3004     struct kfree_rcu_cpu_work *krwp;
3005     int i, j;
3006 
3007     krwp = container_of(to_rcu_work(work),
3008                 struct kfree_rcu_cpu_work, rcu_work);
3009     krcp = krwp->krcp;
3010 
3011     raw_spin_lock_irqsave(&krcp->lock, flags);
3012     // Channels 1 and 2.
3013     for (i = 0; i < FREE_N_CHANNELS; i++) {
3014         bkvhead[i] = krwp->bkvhead_free[i];
3015         krwp->bkvhead_free[i] = NULL;
3016     }
3017 
3018     // Channel 3.
3019     head = krwp->head_free;
3020     krwp->head_free = NULL;
3021     raw_spin_unlock_irqrestore(&krcp->lock, flags);
3022 
3023     // Handle the first two channels.
3024     for (i = 0; i < FREE_N_CHANNELS; i++) {
3025         for (; bkvhead[i]; bkvhead[i] = bnext) {
3026             bnext = bkvhead[i]->next;
3027             debug_rcu_bhead_unqueue(bkvhead[i]);
3028 
3029             rcu_lock_acquire(&rcu_callback_map);
3030             if (i == 0) { // kmalloc() / kfree().
3031                 trace_rcu_invoke_kfree_bulk_callback(
3032                     rcu_state.name, bkvhead[i]->nr_records,
3033                     bkvhead[i]->records);
3034 
3035                 kfree_bulk(bkvhead[i]->nr_records,
3036                     bkvhead[i]->records);
3037             } else { // vmalloc() / vfree().
3038                 for (j = 0; j < bkvhead[i]->nr_records; j++) {
3039                     trace_rcu_invoke_kvfree_callback(
3040                         rcu_state.name,
3041                         bkvhead[i]->records[j], 0);
3042 
3043                     vfree(bkvhead[i]->records[j]);
3044                 }
3045             }
3046             rcu_lock_release(&rcu_callback_map);
3047 
3048             raw_spin_lock_irqsave(&krcp->lock, flags);
3049             if (put_cached_bnode(krcp, bkvhead[i]))
3050                 bkvhead[i] = NULL;
3051             raw_spin_unlock_irqrestore(&krcp->lock, flags);
3052 
3053             if (bkvhead[i])
3054                 free_page((unsigned long) bkvhead[i]);
3055 
3056             cond_resched_tasks_rcu_qs();
3057         }
3058     }
3059 
3060     /*
3061      * This is used when the "bulk" path can not be used for the
3062      * double-argument of kvfree_rcu().  This happens when the
3063      * page-cache is empty, which means that objects are instead
3064      * queued on a linked list through their rcu_head structures.
3065      * This list is named "Channel 3".
3066      */
3067     for (; head; head = next) {
3068         unsigned long offset = (unsigned long)head->func;
3069         void *ptr = (void *)head - offset;
3070 
3071         next = head->next;
3072         debug_rcu_head_unqueue((struct rcu_head *)ptr);
3073         rcu_lock_acquire(&rcu_callback_map);
3074         trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3075 
3076         if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3077             kvfree(ptr);
3078 
3079         rcu_lock_release(&rcu_callback_map);
3080         cond_resched_tasks_rcu_qs();
3081     }
3082 }
3083 
3084 static bool
3085 need_offload_krc(struct kfree_rcu_cpu *krcp)
3086 {
3087     int i;
3088 
3089     for (i = 0; i < FREE_N_CHANNELS; i++)
3090         if (krcp->bkvhead[i])
3091             return true;
3092 
3093     return !!krcp->head;
3094 }
3095 
3096 /*
3097  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3098  */
3099 static void kfree_rcu_monitor(struct work_struct *work)
3100 {
3101     struct kfree_rcu_cpu *krcp = container_of(work,
3102         struct kfree_rcu_cpu, monitor_work.work);
3103     unsigned long flags;
3104     int i, j;
3105 
3106     raw_spin_lock_irqsave(&krcp->lock, flags);
3107 
3108     // Attempt to start a new batch.
3109     for (i = 0; i < KFREE_N_BATCHES; i++) {
3110         struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3111 
3112         // Try to detach bkvhead or head and attach it over any
3113         // available corresponding free channel. It can be that
3114         // a previous RCU batch is in progress, it means that
3115         // immediately to queue another one is not possible so
3116         // in that case the monitor work is rearmed.
3117         if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3118             (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3119                 (krcp->head && !krwp->head_free)) {
3120             // Channel 1 corresponds to the SLAB-pointer bulk path.
3121             // Channel 2 corresponds to vmalloc-pointer bulk path.
3122             for (j = 0; j < FREE_N_CHANNELS; j++) {
3123                 if (!krwp->bkvhead_free[j]) {
3124                     krwp->bkvhead_free[j] = krcp->bkvhead[j];
3125                     krcp->bkvhead[j] = NULL;
3126                 }
3127             }
3128 
3129             // Channel 3 corresponds to both SLAB and vmalloc
3130             // objects queued on the linked list.
3131             if (!krwp->head_free) {
3132                 krwp->head_free = krcp->head;
3133                 krcp->head = NULL;
3134             }
3135 
3136             WRITE_ONCE(krcp->count, 0);
3137 
3138             // One work is per one batch, so there are three
3139             // "free channels", the batch can handle. It can
3140             // be that the work is in the pending state when
3141             // channels have been detached following by each
3142             // other.
3143             queue_rcu_work(system_wq, &krwp->rcu_work);
3144         }
3145     }
3146 
3147     // If there is nothing to detach, it means that our job is
3148     // successfully done here. In case of having at least one
3149     // of the channels that is still busy we should rearm the
3150     // work to repeat an attempt. Because previous batches are
3151     // still in progress.
3152     if (need_offload_krc(krcp))
3153         schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3154 
3155     raw_spin_unlock_irqrestore(&krcp->lock, flags);
3156 }
3157 
3158 static enum hrtimer_restart
3159 schedule_page_work_fn(struct hrtimer *t)
3160 {
3161     struct kfree_rcu_cpu *krcp =
3162         container_of(t, struct kfree_rcu_cpu, hrtimer);
3163 
3164     queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3165     return HRTIMER_NORESTART;
3166 }
3167 
3168 static void fill_page_cache_func(struct work_struct *work)
3169 {
3170     struct kvfree_rcu_bulk_data *bnode;
3171     struct kfree_rcu_cpu *krcp =
3172         container_of(work, struct kfree_rcu_cpu,
3173             page_cache_work.work);
3174     unsigned long flags;
3175     int nr_pages;
3176     bool pushed;
3177     int i;
3178 
3179     nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3180         1 : rcu_min_cached_objs;
3181 
3182     for (i = 0; i < nr_pages; i++) {
3183         bnode = (struct kvfree_rcu_bulk_data *)
3184             __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3185 
3186         if (bnode) {
3187             raw_spin_lock_irqsave(&krcp->lock, flags);
3188             pushed = put_cached_bnode(krcp, bnode);
3189             raw_spin_unlock_irqrestore(&krcp->lock, flags);
3190 
3191             if (!pushed) {
3192                 free_page((unsigned long) bnode);
3193                 break;
3194             }
3195         }
3196     }
3197 
3198     atomic_set(&krcp->work_in_progress, 0);
3199     atomic_set(&krcp->backoff_page_cache_fill, 0);
3200 }
3201 
3202 static void
3203 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3204 {
3205     if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3206             !atomic_xchg(&krcp->work_in_progress, 1)) {
3207         if (atomic_read(&krcp->backoff_page_cache_fill)) {
3208             queue_delayed_work(system_wq,
3209                 &krcp->page_cache_work,
3210                     msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3211         } else {
3212             hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3213             krcp->hrtimer.function = schedule_page_work_fn;
3214             hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3215         }
3216     }
3217 }
3218 
3219 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3220 // state specified by flags.  If can_alloc is true, the caller must
3221 // be schedulable and not be holding any locks or mutexes that might be
3222 // acquired by the memory allocator or anything that it might invoke.
3223 // Returns true if ptr was successfully recorded, else the caller must
3224 // use a fallback.
3225 static inline bool
3226 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3227     unsigned long *flags, void *ptr, bool can_alloc)
3228 {
3229     struct kvfree_rcu_bulk_data *bnode;
3230     int idx;
3231 
3232     *krcp = krc_this_cpu_lock(flags);
3233     if (unlikely(!(*krcp)->initialized))
3234         return false;
3235 
3236     idx = !!is_vmalloc_addr(ptr);
3237 
3238     /* Check if a new block is required. */
3239     if (!(*krcp)->bkvhead[idx] ||
3240             (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3241         bnode = get_cached_bnode(*krcp);
3242         if (!bnode && can_alloc) {
3243             krc_this_cpu_unlock(*krcp, *flags);
3244 
3245             // __GFP_NORETRY - allows a light-weight direct reclaim
3246             // what is OK from minimizing of fallback hitting point of
3247             // view. Apart of that it forbids any OOM invoking what is
3248             // also beneficial since we are about to release memory soon.
3249             //
3250             // __GFP_NOMEMALLOC - prevents from consuming of all the
3251             // memory reserves. Please note we have a fallback path.
3252             //
3253             // __GFP_NOWARN - it is supposed that an allocation can
3254             // be failed under low memory or high memory pressure
3255             // scenarios.
3256             bnode = (struct kvfree_rcu_bulk_data *)
3257                 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3258             *krcp = krc_this_cpu_lock(flags);
3259         }
3260 
3261         if (!bnode)
3262             return false;
3263 
3264         /* Initialize the new block. */
3265         bnode->nr_records = 0;
3266         bnode->next = (*krcp)->bkvhead[idx];
3267 
3268         /* Attach it to the head. */
3269         (*krcp)->bkvhead[idx] = bnode;
3270     }
3271 
3272     /* Finally insert. */
3273     (*krcp)->bkvhead[idx]->records
3274         [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3275 
3276     return true;
3277 }
3278 
3279 /*
3280  * Queue a request for lazy invocation of the appropriate free routine
3281  * after a grace period.  Please note that three paths are maintained,
3282  * two for the common case using arrays of pointers and a third one that
3283  * is used only when the main paths cannot be used, for example, due to
3284  * memory pressure.
3285  *
3286  * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3287  * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
3288  * be free'd in workqueue context. This allows us to: batch requests together to
3289  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3290  */
3291 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3292 {
3293     unsigned long flags;
3294     struct kfree_rcu_cpu *krcp;
3295     bool success;
3296     void *ptr;
3297 
3298     if (head) {
3299         ptr = (void *) head - (unsigned long) func;
3300     } else {
3301         /*
3302          * Please note there is a limitation for the head-less
3303          * variant, that is why there is a clear rule for such
3304          * objects: it can be used from might_sleep() context
3305          * only. For other places please embed an rcu_head to
3306          * your data.
3307          */
3308         might_sleep();
3309         ptr = (unsigned long *) func;
3310     }
3311 
3312     // Queue the object but don't yet schedule the batch.
3313     if (debug_rcu_head_queue(ptr)) {
3314         // Probable double kfree_rcu(), just leak.
3315         WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3316               __func__, head);
3317 
3318         // Mark as success and leave.
3319         return;
3320     }
3321 
3322     kasan_record_aux_stack_noalloc(ptr);
3323     success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3324     if (!success) {
3325         run_page_cache_worker(krcp);
3326 
3327         if (head == NULL)
3328             // Inline if kvfree_rcu(one_arg) call.
3329             goto unlock_return;
3330 
3331         head->func = func;
3332         head->next = krcp->head;
3333         krcp->head = head;
3334         success = true;
3335     }
3336 
3337     WRITE_ONCE(krcp->count, krcp->count + 1);
3338 
3339     // Set timer to drain after KFREE_DRAIN_JIFFIES.
3340     if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3341         schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3342 
3343 unlock_return:
3344     krc_this_cpu_unlock(krcp, flags);
3345 
3346     /*
3347      * Inline kvfree() after synchronize_rcu(). We can do
3348      * it from might_sleep() context only, so the current
3349      * CPU can pass the QS state.
3350      */
3351     if (!success) {
3352         debug_rcu_head_unqueue((struct rcu_head *) ptr);
3353         synchronize_rcu();
3354         kvfree(ptr);
3355     }
3356 }
3357 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
3358 
3359 static unsigned long
3360 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3361 {
3362     int cpu;
3363     unsigned long count = 0;
3364 
3365     /* Snapshot count of all CPUs */
3366     for_each_possible_cpu(cpu) {
3367         struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3368 
3369         count += READ_ONCE(krcp->count);
3370         count += READ_ONCE(krcp->nr_bkv_objs);
3371         atomic_set(&krcp->backoff_page_cache_fill, 1);
3372     }
3373 
3374     return count;
3375 }
3376 
3377 static unsigned long
3378 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3379 {
3380     int cpu, freed = 0;
3381 
3382     for_each_possible_cpu(cpu) {
3383         int count;
3384         struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3385 
3386         count = krcp->count;
3387         count += drain_page_cache(krcp);
3388         kfree_rcu_monitor(&krcp->monitor_work.work);
3389 
3390         sc->nr_to_scan -= count;
3391         freed += count;
3392 
3393         if (sc->nr_to_scan <= 0)
3394             break;
3395     }
3396 
3397     return freed == 0 ? SHRINK_STOP : freed;
3398 }
3399 
3400 static struct shrinker kfree_rcu_shrinker = {
3401     .count_objects = kfree_rcu_shrink_count,
3402     .scan_objects = kfree_rcu_shrink_scan,
3403     .batch = 0,
3404     .seeks = DEFAULT_SEEKS,
3405 };
3406 
3407 void __init kfree_rcu_scheduler_running(void)
3408 {
3409     int cpu;
3410     unsigned long flags;
3411 
3412     for_each_possible_cpu(cpu) {
3413         struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3414 
3415         raw_spin_lock_irqsave(&krcp->lock, flags);
3416         if (need_offload_krc(krcp))
3417             schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3418         raw_spin_unlock_irqrestore(&krcp->lock, flags);
3419     }
3420 }
3421 
3422 /*
3423  * During early boot, any blocking grace-period wait automatically
3424  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3425  *
3426  * However, because a context switch is a grace period for !PREEMPTION, any
3427  * blocking grace-period wait automatically implies a grace period if
3428  * there is only one CPU online at any point time during execution of
3429  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3430  * occasionally incorrectly indicate that there are multiple CPUs online
3431  * when there was in fact only one the whole time, as this just adds some
3432  * overhead: RCU still operates correctly.
3433  */
3434 static int rcu_blocking_is_gp(void)
3435 {
3436     int ret;
3437 
3438     // Invoking preempt_model_*() too early gets a splat.
3439     if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE ||
3440         preempt_model_full() || preempt_model_rt())
3441         return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3442     might_sleep();  /* Check for RCU read-side critical section. */
3443     preempt_disable();
3444     /*
3445      * If the rcu_state.n_online_cpus counter is equal to one,
3446      * there is only one CPU, and that CPU sees all prior accesses
3447      * made by any CPU that was online at the time of its access.
3448      * Furthermore, if this counter is equal to one, its value cannot
3449      * change until after the preempt_enable() below.
3450      *
3451      * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3452      * all later CPUs (both this one and any that come online later
3453      * on) are guaranteed to see all accesses prior to this point
3454      * in the code, without the need for additional memory barriers.
3455      * Those memory barriers are provided by CPU-hotplug code.
3456      */
3457     ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3458     preempt_enable();
3459     return ret;
3460 }
3461 
3462 /**
3463  * synchronize_rcu - wait until a grace period has elapsed.
3464  *
3465  * Control will return to the caller some time after a full grace
3466  * period has elapsed, in other words after all currently executing RCU
3467  * read-side critical sections have completed.  Note, however, that
3468  * upon return from synchronize_rcu(), the caller might well be executing
3469  * concurrently with new RCU read-side critical sections that began while
3470  * synchronize_rcu() was waiting.
3471  *
3472  * RCU read-side critical sections are delimited by rcu_read_lock()
3473  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3474  * v5.0 and later, regions of code across which interrupts, preemption,
3475  * or softirqs have been disabled also serve as RCU read-side critical
3476  * sections.  This includes hardware interrupt handlers, softirq handlers,
3477  * and NMI handlers.
3478  *
3479  * Note that this guarantee implies further memory-ordering guarantees.
3480  * On systems with more than one CPU, when synchronize_rcu() returns,
3481  * each CPU is guaranteed to have executed a full memory barrier since
3482  * the end of its last RCU read-side critical section whose beginning
3483  * preceded the call to synchronize_rcu().  In addition, each CPU having
3484  * an RCU read-side critical section that extends beyond the return from
3485  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3486  * after the beginning of synchronize_rcu() and before the beginning of
3487  * that RCU read-side critical section.  Note that these guarantees include
3488  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3489  * that are executing in the kernel.
3490  *
3491  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3492  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3493  * to have executed a full memory barrier during the execution of
3494  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3495  * again only if the system has more than one CPU).
3496  *
3497  * Implementation of these memory-ordering guarantees is described here:
3498  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3499  */
3500 void synchronize_rcu(void)
3501 {
3502     RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3503              lock_is_held(&rcu_lock_map) ||
3504              lock_is_held(&rcu_sched_lock_map),
3505              "Illegal synchronize_rcu() in RCU read-side critical section");
3506     if (rcu_blocking_is_gp()) {
3507         // Note well that this code runs with !PREEMPT && !SMP.
3508         // In addition, all code that advances grace periods runs at
3509         // process level.  Therefore, this normal GP overlaps with
3510         // other normal GPs only by being fully nested within them,
3511         // which allows reuse of ->gp_seq_polled_snap.
3512         rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3513         rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3514         if (rcu_init_invoked())
3515             cond_resched_tasks_rcu_qs();
3516         return;  // Context allows vacuous grace periods.
3517     }
3518     if (rcu_gp_is_expedited())
3519         synchronize_rcu_expedited();
3520     else
3521         wait_rcu_gp(call_rcu);
3522 }
3523 EXPORT_SYMBOL_GPL(synchronize_rcu);
3524 
3525 /**
3526  * get_state_synchronize_rcu - Snapshot current RCU state
3527  *
3528  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3529  * or poll_state_synchronize_rcu() to determine whether or not a full
3530  * grace period has elapsed in the meantime.
3531  */
3532 unsigned long get_state_synchronize_rcu(void)
3533 {
3534     /*
3535      * Any prior manipulation of RCU-protected data must happen
3536      * before the load from ->gp_seq.
3537      */
3538     smp_mb();  /* ^^^ */
3539     return rcu_seq_snap(&rcu_state.gp_seq_polled);
3540 }
3541 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3542 
3543 /**
3544  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3545  *
3546  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3547  * or poll_state_synchronize_rcu() to determine whether or not a full
3548  * grace period has elapsed in the meantime.  If the needed grace period
3549  * is not already slated to start, notifies RCU core of the need for that
3550  * grace period.
3551  *
3552  * Interrupts must be enabled for the case where it is necessary to awaken
3553  * the grace-period kthread.
3554  */
3555 unsigned long start_poll_synchronize_rcu(void)
3556 {
3557     unsigned long flags;
3558     unsigned long gp_seq = get_state_synchronize_rcu();
3559     bool needwake;
3560     struct rcu_data *rdp;
3561     struct rcu_node *rnp;
3562 
3563     lockdep_assert_irqs_enabled();
3564     local_irq_save(flags);
3565     rdp = this_cpu_ptr(&rcu_data);
3566     rnp = rdp->mynode;
3567     raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3568     // Note it is possible for a grace period to have elapsed between
3569     // the above call to get_state_synchronize_rcu() and the below call
3570     // to rcu_seq_snap.  This is OK, the worst that happens is that we
3571     // get a grace period that no one needed.  These accesses are ordered
3572     // by smp_mb(), and we are accessing them in the opposite order
3573     // from which they are updated at grace-period start, as required.
3574     needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3575     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3576     if (needwake)
3577         rcu_gp_kthread_wake();
3578     return gp_seq;
3579 }
3580 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3581 
3582 /**
3583  * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
3584  *
3585  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3586  *
3587  * If a full RCU grace period has elapsed since the earlier call from
3588  * which oldstate was obtained, return @true, otherwise return @false.
3589  * If @false is returned, it is the caller's responsibility to invoke this
3590  * function later on until it does return @true.  Alternatively, the caller
3591  * can explicitly wait for a grace period, for example, by passing @oldstate
3592  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3593  *
3594  * Yes, this function does not take counter wrap into account.
3595  * But counter wrap is harmless.  If the counter wraps, we have waited for
3596  * more than a billion grace periods (and way more on a 64-bit system!).
3597  * Those needing to keep oldstate values for very long time periods
3598  * (many hours even on 32-bit systems) should check them occasionally
3599  * and either refresh them or set a flag indicating that the grace period
3600  * has completed.
3601  *
3602  * This function provides the same memory-ordering guarantees that
3603  * would be provided by a synchronize_rcu() that was invoked at the call
3604  * to the function that provided @oldstate, and that returned at the end
3605  * of this function.
3606  */
3607 bool poll_state_synchronize_rcu(unsigned long oldstate)
3608 {
3609     if (oldstate == RCU_GET_STATE_COMPLETED ||
3610         rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3611         smp_mb(); /* Ensure GP ends before subsequent accesses. */
3612         return true;
3613     }
3614     return false;
3615 }
3616 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3617 
3618 /**
3619  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3620  *
3621  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3622  *
3623  * If a full RCU grace period has elapsed since the earlier call to
3624  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3625  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3626  *
3627  * Yes, this function does not take counter wrap into account.
3628  * But counter wrap is harmless.  If the counter wraps, we have waited for
3629  * more than 2 billion grace periods (and way more on a 64-bit system!),
3630  * so waiting for a couple of additional grace periods should be just fine.
3631  *
3632  * This function provides the same memory-ordering guarantees that
3633  * would be provided by a synchronize_rcu() that was invoked at the call
3634  * to the function that provided @oldstate and that returned at the end
3635  * of this function.
3636  */
3637 void cond_synchronize_rcu(unsigned long oldstate)
3638 {
3639     if (!poll_state_synchronize_rcu(oldstate))
3640         synchronize_rcu();
3641 }
3642 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3643 
3644 /*
3645  * Check to see if there is any immediate RCU-related work to be done by
3646  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3647  * in order of increasing expense: checks that can be carried out against
3648  * CPU-local state are performed first.  However, we must check for CPU
3649  * stalls first, else we might not get a chance.
3650  */
3651 static int rcu_pending(int user)
3652 {
3653     bool gp_in_progress;
3654     struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3655     struct rcu_node *rnp = rdp->mynode;
3656 
3657     lockdep_assert_irqs_disabled();
3658 
3659     /* Check for CPU stalls, if enabled. */
3660     check_cpu_stall(rdp);
3661 
3662     /* Does this CPU need a deferred NOCB wakeup? */
3663     if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3664         return 1;
3665 
3666     /* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3667     if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3668         return 0;
3669 
3670     /* Is the RCU core waiting for a quiescent state from this CPU? */
3671     gp_in_progress = rcu_gp_in_progress();
3672     if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3673         return 1;
3674 
3675     /* Does this CPU have callbacks ready to invoke? */
3676     if (!rcu_rdp_is_offloaded(rdp) &&
3677         rcu_segcblist_ready_cbs(&rdp->cblist))
3678         return 1;
3679 
3680     /* Has RCU gone idle with this CPU needing another grace period? */
3681     if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3682         !rcu_rdp_is_offloaded(rdp) &&
3683         !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3684         return 1;
3685 
3686     /* Have RCU grace period completed or started?  */
3687     if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3688         unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3689         return 1;
3690 
3691     /* nothing to do */
3692     return 0;
3693 }
3694 
3695 /*
3696  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3697  * the compiler is expected to optimize this away.
3698  */
3699 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3700 {
3701     trace_rcu_barrier(rcu_state.name, s, cpu,
3702               atomic_read(&rcu_state.barrier_cpu_count), done);
3703 }
3704 
3705 /*
3706  * RCU callback function for rcu_barrier().  If we are last, wake
3707  * up the task executing rcu_barrier().
3708  *
3709  * Note that the value of rcu_state.barrier_sequence must be captured
3710  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3711  * other CPUs might count the value down to zero before this CPU gets
3712  * around to invoking rcu_barrier_trace(), which might result in bogus
3713  * data from the next instance of rcu_barrier().
3714  */
3715 static void rcu_barrier_callback(struct rcu_head *rhp)
3716 {
3717     unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3718 
3719     if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3720         rcu_barrier_trace(TPS("LastCB"), -1, s);
3721         complete(&rcu_state.barrier_completion);
3722     } else {
3723         rcu_barrier_trace(TPS("CB"), -1, s);
3724     }
3725 }
3726 
3727 /*
3728  * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3729  */
3730 static void rcu_barrier_entrain(struct rcu_data *rdp)
3731 {
3732     unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3733     unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3734 
3735     lockdep_assert_held(&rcu_state.barrier_lock);
3736     if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3737         return;
3738     rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3739     rdp->barrier_head.func = rcu_barrier_callback;
3740     debug_rcu_head_queue(&rdp->barrier_head);
3741     rcu_nocb_lock(rdp);
3742     WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3743     if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3744         atomic_inc(&rcu_state.barrier_cpu_count);
3745     } else {
3746         debug_rcu_head_unqueue(&rdp->barrier_head);
3747         rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3748     }
3749     rcu_nocb_unlock(rdp);
3750     smp_store_release(&rdp->barrier_seq_snap, gseq);
3751 }
3752 
3753 /*
3754  * Called with preemption disabled, and from cross-cpu IRQ context.
3755  */
3756 static void rcu_barrier_handler(void *cpu_in)
3757 {
3758     uintptr_t cpu = (uintptr_t)cpu_in;
3759     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3760 
3761     lockdep_assert_irqs_disabled();
3762     WARN_ON_ONCE(cpu != rdp->cpu);
3763     WARN_ON_ONCE(cpu != smp_processor_id());
3764     raw_spin_lock(&rcu_state.barrier_lock);
3765     rcu_barrier_entrain(rdp);
3766     raw_spin_unlock(&rcu_state.barrier_lock);
3767 }
3768 
3769 /**
3770  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3771  *
3772  * Note that this primitive does not necessarily wait for an RCU grace period
3773  * to complete.  For example, if there are no RCU callbacks queued anywhere
3774  * in the system, then rcu_barrier() is within its rights to return
3775  * immediately, without waiting for anything, much less an RCU grace period.
3776  */
3777 void rcu_barrier(void)
3778 {
3779     uintptr_t cpu;
3780     unsigned long flags;
3781     unsigned long gseq;
3782     struct rcu_data *rdp;
3783     unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3784 
3785     rcu_barrier_trace(TPS("Begin"), -1, s);
3786 
3787     /* Take mutex to serialize concurrent rcu_barrier() requests. */
3788     mutex_lock(&rcu_state.barrier_mutex);
3789 
3790     /* Did someone else do our work for us? */
3791     if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3792         rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
3793         smp_mb(); /* caller's subsequent code after above check. */
3794         mutex_unlock(&rcu_state.barrier_mutex);
3795         return;
3796     }
3797 
3798     /* Mark the start of the barrier operation. */
3799     raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3800     rcu_seq_start(&rcu_state.barrier_sequence);
3801     gseq = rcu_state.barrier_sequence;
3802     rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3803 
3804     /*
3805      * Initialize the count to two rather than to zero in order
3806      * to avoid a too-soon return to zero in case of an immediate
3807      * invocation of the just-enqueued callback (or preemption of
3808      * this task).  Exclude CPU-hotplug operations to ensure that no
3809      * offline non-offloaded CPU has callbacks queued.
3810      */
3811     init_completion(&rcu_state.barrier_completion);
3812     atomic_set(&rcu_state.barrier_cpu_count, 2);
3813     raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3814 
3815     /*
3816      * Force each CPU with callbacks to register a new callback.
3817      * When that callback is invoked, we will know that all of the
3818      * corresponding CPU's preceding callbacks have been invoked.
3819      */
3820     for_each_possible_cpu(cpu) {
3821         rdp = per_cpu_ptr(&rcu_data, cpu);
3822 retry:
3823         if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
3824             continue;
3825         raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3826         if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
3827             WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3828             raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3829             rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
3830             continue;
3831         }
3832         if (!rcu_rdp_cpu_online(rdp)) {
3833             rcu_barrier_entrain(rdp);
3834             WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3835             raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3836             rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
3837             continue;
3838         }
3839         raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3840         if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
3841             schedule_timeout_uninterruptible(1);
3842             goto retry;
3843         }
3844         WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3845         rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
3846     }
3847 
3848     /*
3849      * Now that we have an rcu_barrier_callback() callback on each
3850      * CPU, and thus each counted, remove the initial count.
3851      */
3852     if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3853         complete(&rcu_state.barrier_completion);
3854 
3855     /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3856     wait_for_completion(&rcu_state.barrier_completion);
3857 
3858     /* Mark the end of the barrier operation. */
3859     rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3860     rcu_seq_end(&rcu_state.barrier_sequence);
3861     gseq = rcu_state.barrier_sequence;
3862     for_each_possible_cpu(cpu) {
3863         rdp = per_cpu_ptr(&rcu_data, cpu);
3864 
3865         WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3866     }
3867 
3868     /* Other rcu_barrier() invocations can now safely proceed. */
3869     mutex_unlock(&rcu_state.barrier_mutex);
3870 }
3871 EXPORT_SYMBOL_GPL(rcu_barrier);
3872 
3873 /*
3874  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
3875  * first CPU in a given leaf rcu_node structure coming online.  The caller
3876  * must hold the corresponding leaf rcu_node ->lock with interrupts
3877  * disabled.
3878  */
3879 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3880 {
3881     long mask;
3882     long oldmask;
3883     struct rcu_node *rnp = rnp_leaf;
3884 
3885     raw_lockdep_assert_held_rcu_node(rnp_leaf);
3886     WARN_ON_ONCE(rnp->wait_blkd_tasks);
3887     for (;;) {
3888         mask = rnp->grpmask;
3889         rnp = rnp->parent;
3890         if (rnp == NULL)
3891             return;
3892         raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3893         oldmask = rnp->qsmaskinit;
3894         rnp->qsmaskinit |= mask;
3895         raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3896         if (oldmask)
3897             return;
3898     }
3899 }
3900 
3901 /*
3902  * Do boot-time initialization of a CPU's per-CPU RCU data.
3903  */
3904 static void __init
3905 rcu_boot_init_percpu_data(int cpu)
3906 {
3907     struct context_tracking *ct = this_cpu_ptr(&context_tracking);
3908     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3909 
3910     /* Set up local state, ensuring consistent view of global state. */
3911     rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3912     INIT_WORK(&rdp->strict_work, strict_work_handler);
3913     WARN_ON_ONCE(ct->dynticks_nesting != 1);
3914     WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
3915     rdp->barrier_seq_snap = rcu_state.barrier_sequence;
3916     rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
3917     rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3918     rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
3919     rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3920     rdp->last_sched_clock = jiffies;
3921     rdp->cpu = cpu;
3922     rcu_boot_init_nocb_percpu_data(rdp);
3923 }
3924 
3925 /*
3926  * Invoked early in the CPU-online process, when pretty much all services
3927  * are available.  The incoming CPU is not present.
3928  *
3929  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
3930  * offline event can be happening at a given time.  Note also that we can
3931  * accept some slop in the rsp->gp_seq access due to the fact that this
3932  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3933  * And any offloaded callbacks are being numbered elsewhere.
3934  */
3935 int rcutree_prepare_cpu(unsigned int cpu)
3936 {
3937     unsigned long flags;
3938     struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
3939     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3940     struct rcu_node *rnp = rcu_get_root();
3941 
3942     /* Set up local state, ensuring consistent view of global state. */
3943     raw_spin_lock_irqsave_rcu_node(rnp, flags);
3944     rdp->qlen_last_fqs_check = 0;
3945     rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
3946     rdp->blimit = blimit;
3947     ct->dynticks_nesting = 1;   /* CPU not up, no tearing. */
3948     raw_spin_unlock_rcu_node(rnp);      /* irqs remain disabled. */
3949 
3950     /*
3951      * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
3952      * (re-)initialized.
3953      */
3954     if (!rcu_segcblist_is_enabled(&rdp->cblist))
3955         rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
3956 
3957     /*
3958      * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
3959      * propagation up the rcu_node tree will happen at the beginning
3960      * of the next grace period.
3961      */
3962     rnp = rdp->mynode;
3963     raw_spin_lock_rcu_node(rnp);        /* irqs already disabled. */
3964     rdp->beenonline = true;  /* We have now been online. */
3965     rdp->gp_seq = READ_ONCE(rnp->gp_seq);
3966     rdp->gp_seq_needed = rdp->gp_seq;
3967     rdp->cpu_no_qs.b.norm = true;
3968     rdp->core_needs_qs = false;
3969     rdp->rcu_iw_pending = false;
3970     rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
3971     rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
3972     trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
3973     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3974     rcu_spawn_one_boost_kthread(rnp);
3975     rcu_spawn_cpu_nocb_kthread(cpu);
3976     WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
3977 
3978     return 0;
3979 }
3980 
3981 /*
3982  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
3983  */
3984 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3985 {
3986     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3987 
3988     rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3989 }
3990 
3991 /*
3992  * Near the end of the CPU-online process.  Pretty much all services
3993  * enabled, and the CPU is now very much alive.
3994  */
3995 int rcutree_online_cpu(unsigned int cpu)
3996 {
3997     unsigned long flags;
3998     struct rcu_data *rdp;
3999     struct rcu_node *rnp;
4000 
4001     rdp = per_cpu_ptr(&rcu_data, cpu);
4002     rnp = rdp->mynode;
4003     raw_spin_lock_irqsave_rcu_node(rnp, flags);
4004     rnp->ffmask |= rdp->grpmask;
4005     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4006     if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4007         return 0; /* Too early in boot for scheduler work. */
4008     sync_sched_exp_online_cleanup(cpu);
4009     rcutree_affinity_setting(cpu, -1);
4010 
4011     // Stop-machine done, so allow nohz_full to disable tick.
4012     tick_dep_clear(TICK_DEP_BIT_RCU);
4013     return 0;
4014 }
4015 
4016 /*
4017  * Near the beginning of the process.  The CPU is still very much alive
4018  * with pretty much all services enabled.
4019  */
4020 int rcutree_offline_cpu(unsigned int cpu)
4021 {
4022     unsigned long flags;
4023     struct rcu_data *rdp;
4024     struct rcu_node *rnp;
4025 
4026     rdp = per_cpu_ptr(&rcu_data, cpu);
4027     rnp = rdp->mynode;
4028     raw_spin_lock_irqsave_rcu_node(rnp, flags);
4029     rnp->ffmask &= ~rdp->grpmask;
4030     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4031 
4032     rcutree_affinity_setting(cpu, cpu);
4033 
4034     // nohz_full CPUs need the tick for stop-machine to work quickly
4035     tick_dep_set(TICK_DEP_BIT_RCU);
4036     return 0;
4037 }
4038 
4039 /*
4040  * Mark the specified CPU as being online so that subsequent grace periods
4041  * (both expedited and normal) will wait on it.  Note that this means that
4042  * incoming CPUs are not allowed to use RCU read-side critical sections
4043  * until this function is called.  Failing to observe this restriction
4044  * will result in lockdep splats.
4045  *
4046  * Note that this function is special in that it is invoked directly
4047  * from the incoming CPU rather than from the cpuhp_step mechanism.
4048  * This is because this function must be invoked at a precise location.
4049  */
4050 void rcu_cpu_starting(unsigned int cpu)
4051 {
4052     unsigned long flags;
4053     unsigned long mask;
4054     struct rcu_data *rdp;
4055     struct rcu_node *rnp;
4056     bool newcpu;
4057 
4058     rdp = per_cpu_ptr(&rcu_data, cpu);
4059     if (rdp->cpu_started)
4060         return;
4061     rdp->cpu_started = true;
4062 
4063     rnp = rdp->mynode;
4064     mask = rdp->grpmask;
4065     local_irq_save(flags);
4066     arch_spin_lock(&rcu_state.ofl_lock);
4067     rcu_dynticks_eqs_online();
4068     raw_spin_lock(&rcu_state.barrier_lock);
4069     raw_spin_lock_rcu_node(rnp);
4070     WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4071     raw_spin_unlock(&rcu_state.barrier_lock);
4072     newcpu = !(rnp->expmaskinitnext & mask);
4073     rnp->expmaskinitnext |= mask;
4074     /* Allow lockless access for expedited grace periods. */
4075     smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4076     ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4077     rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4078     rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4079     rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4080 
4081     /* An incoming CPU should never be blocking a grace period. */
4082     if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4083         /* rcu_report_qs_rnp() *really* wants some flags to restore */
4084         unsigned long flags2;
4085 
4086         local_irq_save(flags2);
4087         rcu_disable_urgency_upon_qs(rdp);
4088         /* Report QS -after- changing ->qsmaskinitnext! */
4089         rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
4090     } else {
4091         raw_spin_unlock_rcu_node(rnp);
4092     }
4093     arch_spin_unlock(&rcu_state.ofl_lock);
4094     local_irq_restore(flags);
4095     smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4096 }
4097 
4098 /*
4099  * The outgoing function has no further need of RCU, so remove it from
4100  * the rcu_node tree's ->qsmaskinitnext bit masks.
4101  *
4102  * Note that this function is special in that it is invoked directly
4103  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4104  * This is because this function must be invoked at a precise location.
4105  */
4106 void rcu_report_dead(unsigned int cpu)
4107 {
4108     unsigned long flags, seq_flags;
4109     unsigned long mask;
4110     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4111     struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4112 
4113     // Do any dangling deferred wakeups.
4114     do_nocb_deferred_wakeup(rdp);
4115 
4116     /* QS for any half-done expedited grace period. */
4117     rcu_report_exp_rdp(rdp);
4118     rcu_preempt_deferred_qs(current);
4119 
4120     /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4121     mask = rdp->grpmask;
4122     local_irq_save(seq_flags);
4123     arch_spin_lock(&rcu_state.ofl_lock);
4124     raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4125     rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4126     rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4127     if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4128         /* Report quiescent state -before- changing ->qsmaskinitnext! */
4129         rcu_disable_urgency_upon_qs(rdp);
4130         rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4131         raw_spin_lock_irqsave_rcu_node(rnp, flags);
4132     }
4133     WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4134     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4135     arch_spin_unlock(&rcu_state.ofl_lock);
4136     local_irq_restore(seq_flags);
4137 
4138     rdp->cpu_started = false;
4139 }
4140 
4141 #ifdef CONFIG_HOTPLUG_CPU
4142 /*
4143  * The outgoing CPU has just passed through the dying-idle state, and we
4144  * are being invoked from the CPU that was IPIed to continue the offline
4145  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4146  */
4147 void rcutree_migrate_callbacks(int cpu)
4148 {
4149     unsigned long flags;
4150     struct rcu_data *my_rdp;
4151     struct rcu_node *my_rnp;
4152     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4153     bool needwake;
4154 
4155     if (rcu_rdp_is_offloaded(rdp) ||
4156         rcu_segcblist_empty(&rdp->cblist))
4157         return;  /* No callbacks to migrate. */
4158 
4159     raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4160     WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4161     rcu_barrier_entrain(rdp);
4162     my_rdp = this_cpu_ptr(&rcu_data);
4163     my_rnp = my_rdp->mynode;
4164     rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4165     WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4166     raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4167     /* Leverage recent GPs and set GP for new callbacks. */
4168     needwake = rcu_advance_cbs(my_rnp, rdp) ||
4169            rcu_advance_cbs(my_rnp, my_rdp);
4170     rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4171     raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4172     needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4173     rcu_segcblist_disable(&rdp->cblist);
4174     WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4175     check_cb_ovld_locked(my_rdp, my_rnp);
4176     if (rcu_rdp_is_offloaded(my_rdp)) {
4177         raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4178         __call_rcu_nocb_wake(my_rdp, true, flags);
4179     } else {
4180         rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4181         raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4182     }
4183     if (needwake)
4184         rcu_gp_kthread_wake();
4185     lockdep_assert_irqs_enabled();
4186     WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4187           !rcu_segcblist_empty(&rdp->cblist),
4188           "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4189           cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4190           rcu_segcblist_first_cb(&rdp->cblist));
4191 }
4192 #endif
4193 
4194 /*
4195  * On non-huge systems, use expedited RCU grace periods to make suspend
4196  * and hibernation run faster.
4197  */
4198 static int rcu_pm_notify(struct notifier_block *self,
4199              unsigned long action, void *hcpu)
4200 {
4201     switch (action) {
4202     case PM_HIBERNATION_PREPARE:
4203     case PM_SUSPEND_PREPARE:
4204         rcu_expedite_gp();
4205         break;
4206     case PM_POST_HIBERNATION:
4207     case PM_POST_SUSPEND:
4208         rcu_unexpedite_gp();
4209         break;
4210     default:
4211         break;
4212     }
4213     return NOTIFY_OK;
4214 }
4215 
4216 #ifdef CONFIG_RCU_EXP_KTHREAD
4217 struct kthread_worker *rcu_exp_gp_kworker;
4218 struct kthread_worker *rcu_exp_par_gp_kworker;
4219 
4220 static void __init rcu_start_exp_gp_kworkers(void)
4221 {
4222     const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4223     const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4224     struct sched_param param = { .sched_priority = kthread_prio };
4225 
4226     rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4227     if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4228         pr_err("Failed to create %s!\n", gp_kworker_name);
4229         return;
4230     }
4231 
4232     rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4233     if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4234         pr_err("Failed to create %s!\n", par_gp_kworker_name);
4235         kthread_destroy_worker(rcu_exp_gp_kworker);
4236         return;
4237     }
4238 
4239     sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4240     sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4241                    &param);
4242 }
4243 
4244 static inline void rcu_alloc_par_gp_wq(void)
4245 {
4246 }
4247 #else /* !CONFIG_RCU_EXP_KTHREAD */
4248 struct workqueue_struct *rcu_par_gp_wq;
4249 
4250 static void __init rcu_start_exp_gp_kworkers(void)
4251 {
4252 }
4253 
4254 static inline void rcu_alloc_par_gp_wq(void)
4255 {
4256     rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4257     WARN_ON(!rcu_par_gp_wq);
4258 }
4259 #endif /* CONFIG_RCU_EXP_KTHREAD */
4260 
4261 /*
4262  * Spawn the kthreads that handle RCU's grace periods.
4263  */
4264 static int __init rcu_spawn_gp_kthread(void)
4265 {
4266     unsigned long flags;
4267     struct rcu_node *rnp;
4268     struct sched_param sp;
4269     struct task_struct *t;
4270     struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4271 
4272     rcu_scheduler_fully_active = 1;
4273     t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4274     if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4275         return 0;
4276     if (kthread_prio) {
4277         sp.sched_priority = kthread_prio;
4278         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4279     }
4280     rnp = rcu_get_root();
4281     raw_spin_lock_irqsave_rcu_node(rnp, flags);
4282     WRITE_ONCE(rcu_state.gp_activity, jiffies);
4283     WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4284     // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4285     smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4286     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4287     wake_up_process(t);
4288     /* This is a pre-SMP initcall, we expect a single CPU */
4289     WARN_ON(num_online_cpus() > 1);
4290     /*
4291      * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
4292      * due to rcu_scheduler_fully_active.
4293      */
4294     rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4295     rcu_spawn_one_boost_kthread(rdp->mynode);
4296     rcu_spawn_core_kthreads();
4297     /* Create kthread worker for expedited GPs */
4298     rcu_start_exp_gp_kworkers();
4299     return 0;
4300 }
4301 early_initcall(rcu_spawn_gp_kthread);
4302 
4303 /*
4304  * This function is invoked towards the end of the scheduler's
4305  * initialization process.  Before this is called, the idle task might
4306  * contain synchronous grace-period primitives (during which time, this idle
4307  * task is booting the system, and such primitives are no-ops).  After this
4308  * function is called, any synchronous grace-period primitives are run as
4309  * expedited, with the requesting task driving the grace period forward.
4310  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4311  * runtime RCU functionality.
4312  */
4313 void rcu_scheduler_starting(void)
4314 {
4315     WARN_ON(num_online_cpus() != 1);
4316     WARN_ON(nr_context_switches() > 0);
4317     rcu_test_sync_prims();
4318     rcu_scheduler_active = RCU_SCHEDULER_INIT;
4319     rcu_test_sync_prims();
4320 }
4321 
4322 /*
4323  * Helper function for rcu_init() that initializes the rcu_state structure.
4324  */
4325 static void __init rcu_init_one(void)
4326 {
4327     static const char * const buf[] = RCU_NODE_NAME_INIT;
4328     static const char * const fqs[] = RCU_FQS_NAME_INIT;
4329     static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4330     static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4331 
4332     int levelspread[RCU_NUM_LVLS];      /* kids/node in each level. */
4333     int cpustride = 1;
4334     int i;
4335     int j;
4336     struct rcu_node *rnp;
4337 
4338     BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4339 
4340     /* Silence gcc 4.8 false positive about array index out of range. */
4341     if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4342         panic("rcu_init_one: rcu_num_lvls out of range");
4343 
4344     /* Initialize the level-tracking arrays. */
4345 
4346     for (i = 1; i < rcu_num_lvls; i++)
4347         rcu_state.level[i] =
4348             rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4349     rcu_init_levelspread(levelspread, num_rcu_lvl);
4350 
4351     /* Initialize the elements themselves, starting from the leaves. */
4352 
4353     for (i = rcu_num_lvls - 1; i >= 0; i--) {
4354         cpustride *= levelspread[i];
4355         rnp = rcu_state.level[i];
4356         for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4357             raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4358             lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4359                            &rcu_node_class[i], buf[i]);
4360             raw_spin_lock_init(&rnp->fqslock);
4361             lockdep_set_class_and_name(&rnp->fqslock,
4362                            &rcu_fqs_class[i], fqs[i]);
4363             rnp->gp_seq = rcu_state.gp_seq;
4364             rnp->gp_seq_needed = rcu_state.gp_seq;
4365             rnp->completedqs = rcu_state.gp_seq;
4366             rnp->qsmask = 0;
4367             rnp->qsmaskinit = 0;
4368             rnp->grplo = j * cpustride;
4369             rnp->grphi = (j + 1) * cpustride - 1;
4370             if (rnp->grphi >= nr_cpu_ids)
4371                 rnp->grphi = nr_cpu_ids - 1;
4372             if (i == 0) {
4373                 rnp->grpnum = 0;
4374                 rnp->grpmask = 0;
4375                 rnp->parent = NULL;
4376             } else {
4377                 rnp->grpnum = j % levelspread[i - 1];
4378                 rnp->grpmask = BIT(rnp->grpnum);
4379                 rnp->parent = rcu_state.level[i - 1] +
4380                           j / levelspread[i - 1];
4381             }
4382             rnp->level = i;
4383             INIT_LIST_HEAD(&rnp->blkd_tasks);
4384             rcu_init_one_nocb(rnp);
4385             init_waitqueue_head(&rnp->exp_wq[0]);
4386             init_waitqueue_head(&rnp->exp_wq[1]);
4387             init_waitqueue_head(&rnp->exp_wq[2]);
4388             init_waitqueue_head(&rnp->exp_wq[3]);
4389             spin_lock_init(&rnp->exp_lock);
4390             mutex_init(&rnp->boost_kthread_mutex);
4391             raw_spin_lock_init(&rnp->exp_poll_lock);
4392             rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4393             INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4394         }
4395     }
4396 
4397     init_swait_queue_head(&rcu_state.gp_wq);
4398     init_swait_queue_head(&rcu_state.expedited_wq);
4399     rnp = rcu_first_leaf_node();
4400     for_each_possible_cpu(i) {
4401         while (i > rnp->grphi)
4402             rnp++;
4403         per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4404         rcu_boot_init_percpu_data(i);
4405     }
4406 }
4407 
4408 /*
4409  * Force priority from the kernel command-line into range.
4410  */
4411 static void __init sanitize_kthread_prio(void)
4412 {
4413     int kthread_prio_in = kthread_prio;
4414 
4415     if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4416         && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4417         kthread_prio = 2;
4418     else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4419         kthread_prio = 1;
4420     else if (kthread_prio < 0)
4421         kthread_prio = 0;
4422     else if (kthread_prio > 99)
4423         kthread_prio = 99;
4424 
4425     if (kthread_prio != kthread_prio_in)
4426         pr_alert("%s: Limited prio to %d from %d\n",
4427              __func__, kthread_prio, kthread_prio_in);
4428 }
4429 
4430 /*
4431  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4432  * replace the definitions in tree.h because those are needed to size
4433  * the ->node array in the rcu_state structure.
4434  */
4435 void rcu_init_geometry(void)
4436 {
4437     ulong d;
4438     int i;
4439     static unsigned long old_nr_cpu_ids;
4440     int rcu_capacity[RCU_NUM_LVLS];
4441     static bool initialized;
4442 
4443     if (initialized) {
4444         /*
4445          * Warn if setup_nr_cpu_ids() had not yet been invoked,
4446          * unless nr_cpus_ids == NR_CPUS, in which case who cares?
4447          */
4448         WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4449         return;
4450     }
4451 
4452     old_nr_cpu_ids = nr_cpu_ids;
4453     initialized = true;
4454 
4455     /*
4456      * Initialize any unspecified boot parameters.
4457      * The default values of jiffies_till_first_fqs and
4458      * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4459      * value, which is a function of HZ, then adding one for each
4460      * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4461      */
4462     d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4463     if (jiffies_till_first_fqs == ULONG_MAX)
4464         jiffies_till_first_fqs = d;
4465     if (jiffies_till_next_fqs == ULONG_MAX)
4466         jiffies_till_next_fqs = d;
4467     adjust_jiffies_till_sched_qs();
4468 
4469     /* If the compile-time values are accurate, just leave. */
4470     if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4471         nr_cpu_ids == NR_CPUS)
4472         return;
4473     pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4474         rcu_fanout_leaf, nr_cpu_ids);
4475 
4476     /*
4477      * The boot-time rcu_fanout_leaf parameter must be at least two
4478      * and cannot exceed the number of bits in the rcu_node masks.
4479      * Complain and fall back to the compile-time values if this
4480      * limit is exceeded.
4481      */
4482     if (rcu_fanout_leaf < 2 ||
4483         rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4484         rcu_fanout_leaf = RCU_FANOUT_LEAF;
4485         WARN_ON(1);
4486         return;
4487     }
4488 
4489     /*
4490      * Compute number of nodes that can be handled an rcu_node tree
4491      * with the given number of levels.
4492      */
4493     rcu_capacity[0] = rcu_fanout_leaf;
4494     for (i = 1; i < RCU_NUM_LVLS; i++)
4495         rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4496 
4497     /*
4498      * The tree must be able to accommodate the configured number of CPUs.
4499      * If this limit is exceeded, fall back to the compile-time values.
4500      */
4501     if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4502         rcu_fanout_leaf = RCU_FANOUT_LEAF;
4503         WARN_ON(1);
4504         return;
4505     }
4506 
4507     /* Calculate the number of levels in the tree. */
4508     for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4509     }
4510     rcu_num_lvls = i + 1;
4511 
4512     /* Calculate the number of rcu_nodes at each level of the tree. */
4513     for (i = 0; i < rcu_num_lvls; i++) {
4514         int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4515         num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4516     }
4517 
4518     /* Calculate the total number of rcu_node structures. */
4519     rcu_num_nodes = 0;
4520     for (i = 0; i < rcu_num_lvls; i++)
4521         rcu_num_nodes += num_rcu_lvl[i];
4522 }
4523 
4524 /*
4525  * Dump out the structure of the rcu_node combining tree associated
4526  * with the rcu_state structure.
4527  */
4528 static void __init rcu_dump_rcu_node_tree(void)
4529 {
4530     int level = 0;
4531     struct rcu_node *rnp;
4532 
4533     pr_info("rcu_node tree layout dump\n");
4534     pr_info(" ");
4535     rcu_for_each_node_breadth_first(rnp) {
4536         if (rnp->level != level) {
4537             pr_cont("\n");
4538             pr_info(" ");
4539             level = rnp->level;
4540         }
4541         pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4542     }
4543     pr_cont("\n");
4544 }
4545 
4546 struct workqueue_struct *rcu_gp_wq;
4547 
4548 static void __init kfree_rcu_batch_init(void)
4549 {
4550     int cpu;
4551     int i;
4552 
4553     /* Clamp it to [0:100] seconds interval. */
4554     if (rcu_delay_page_cache_fill_msec < 0 ||
4555         rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4556 
4557         rcu_delay_page_cache_fill_msec =
4558             clamp(rcu_delay_page_cache_fill_msec, 0,
4559                 (int) (100 * MSEC_PER_SEC));
4560 
4561         pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4562             rcu_delay_page_cache_fill_msec);
4563     }
4564 
4565     for_each_possible_cpu(cpu) {
4566         struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4567 
4568         for (i = 0; i < KFREE_N_BATCHES; i++) {
4569             INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4570             krcp->krw_arr[i].krcp = krcp;
4571         }
4572 
4573         INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4574         INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4575         krcp->initialized = true;
4576     }
4577     if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
4578         pr_err("Failed to register kfree_rcu() shrinker!\n");
4579 }
4580 
4581 void __init rcu_init(void)
4582 {
4583     int cpu = smp_processor_id();
4584 
4585     rcu_early_boot_tests();
4586 
4587     kfree_rcu_batch_init();
4588     rcu_bootup_announce();
4589     sanitize_kthread_prio();
4590     rcu_init_geometry();
4591     rcu_init_one();
4592     if (dump_tree)
4593         rcu_dump_rcu_node_tree();
4594     if (use_softirq)
4595         open_softirq(RCU_SOFTIRQ, rcu_core_si);
4596 
4597     /*
4598      * We don't need protection against CPU-hotplug here because
4599      * this is called early in boot, before either interrupts
4600      * or the scheduler are operational.
4601      */
4602     pm_notifier(rcu_pm_notify, 0);
4603     WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4604     rcutree_prepare_cpu(cpu);
4605     rcu_cpu_starting(cpu);
4606     rcutree_online_cpu(cpu);
4607 
4608     /* Create workqueue for Tree SRCU and for expedited GPs. */
4609     rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4610     WARN_ON(!rcu_gp_wq);
4611     rcu_alloc_par_gp_wq();
4612 
4613     /* Fill in default value for rcutree.qovld boot parameter. */
4614     /* -After- the rcu_node ->lock fields are initialized! */
4615     if (qovld < 0)
4616         qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4617     else
4618         qovld_calc = qovld;
4619 
4620     // Kick-start any polled grace periods that started early.
4621     if (!(per_cpu_ptr(&rcu_data, cpu)->mynode->exp_seq_poll_rq & 0x1))
4622         (void)start_poll_synchronize_rcu_expedited();
4623 }
4624 
4625 #include "tree_stall.h"
4626 #include "tree_exp.h"
4627 #include "tree_nocb.h"
4628 #include "tree_plugin.h"