// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *     Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 *    0: Don't convert at all.
 *    1: Convert at init_srcu_struct() time.
 *    2: Convert when rcutorture invokes srcu_torture_stats_print().
 *    3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
#define SRCU_SIZING_NONE    0
#define SRCU_SIZING_INIT    1
#define SRCU_SIZING_TORTURE 2
#define SRCU_SIZING_AUTO    3
#define SRCU_SIZING_CONTEND 0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);
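
/*
 * Editorial example (not in the original source): all of the sizing
 * knobs above are ordinary module parameters, so a policy can be
 * selected on the kernel command line with the usual srcutree.<param>
 * syntax.  The values below are purely illustrative:
 *
 *    srcutree.convert_to_big=0            // never convert to SRCU_SIZE_BIG
 *    srcutree.convert_to_big=0x11         // convert on excessive contention
 *    srcutree.big_cpu_lim=64              // auto-convert at >= 64 CPUs
 *    srcutree.small_contention_lim=50     // contention events/jiffy threshold
 */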

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)                           \
do {                                        \
    spin_lock(&ACCESS_PRIVATE(p, lock));                    \
    smp_mb__after_unlock_lock();                        \
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)                       \
do {                                        \
    spin_lock_irq(&ACCESS_PRIVATE(p, lock));                \
    smp_mb__after_unlock_lock();                        \
} while (0)

#define spin_unlock_irq_rcu_node(p)                     \
    spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)                    \
do {                                        \
    spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);         \
    smp_mb__after_unlock_lock();                        \
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)                 \
({                                      \
    bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
                                        \
    if (___locked)                              \
        smp_mb__after_unlock_lock();                    \
    ___locked;                              \
})

#define spin_unlock_irqrestore_rcu_node(p, flags)               \
    spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  Consequently, the
 * ->srcu_lock_count[] and ->srcu_unlock_count[] arrays are deliberately
 * never initialized here.
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
    int cpu;
    struct srcu_data *sdp;

    /*
     * Initialize the per-CPU srcu_data array, which feeds into the
     * leaves of the srcu_node tree.
     */
    WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
             ARRAY_SIZE(sdp->srcu_unlock_count));
    for_each_possible_cpu(cpu) {
        sdp = per_cpu_ptr(ssp->sda, cpu);
        spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
        rcu_segcblist_init(&sdp->srcu_cblist);
        sdp->srcu_cblist_invoking = false;
        sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
        sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
        sdp->mynode = NULL;
        sdp->cpu = cpu;
        INIT_WORK(&sdp->work, srcu_invoke_callbacks);
        timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
        sdp->ssp = ssp;
    }
}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ       0x2

/*
 * Check whether the sequence number corresponding to an snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
    return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
    int cpu;
    int i;
    int level = 0;
    int levelspread[RCU_NUM_LVLS];
    struct srcu_data *sdp;
    struct srcu_node *snp;
    struct srcu_node *snp_first;

    /* Initialize geometry if it has not already been initialized. */
    rcu_init_geometry();
    ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
    if (!ssp->node)
        return false;

    /* Work out the overall tree geometry. */
    ssp->level[0] = &ssp->node[0];
    for (i = 1; i < rcu_num_lvls; i++)
        ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
    rcu_init_levelspread(levelspread, num_rcu_lvl);

    /* Each pass through this loop initializes one srcu_node structure. */
    srcu_for_each_node_breadth_first(ssp, snp) {
        spin_lock_init(&ACCESS_PRIVATE(snp, lock));
        WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
                 ARRAY_SIZE(snp->srcu_data_have_cbs));
        for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
            snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
            snp->srcu_data_have_cbs[i] = 0;
        }
        snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
        snp->grplo = -1;
        snp->grphi = -1;
        if (snp == &ssp->node[0]) {
            /* Root node, special case. */
            snp->srcu_parent = NULL;
            continue;
        }

        /* Non-root node. */
        if (snp == ssp->level[level + 1])
            level++;
        snp->srcu_parent = ssp->level[level - 1] +
                   (snp - ssp->level[level]) /
                   levelspread[level - 1];
    }

    /*
     * Initialize the per-CPU srcu_data array, which feeds into the
     * leaves of the srcu_node tree.
     */
    level = rcu_num_lvls - 1;
    snp_first = ssp->level[level];
    for_each_possible_cpu(cpu) {
        sdp = per_cpu_ptr(ssp->sda, cpu);
        sdp->mynode = &snp_first[cpu / levelspread[level]];
        for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
            if (snp->grplo < 0)
                snp->grplo = cpu;
            snp->grphi = cpu;
        }
        sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
    }
    smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
    return true;
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
    ssp->srcu_size_state = SRCU_SIZE_SMALL;
    ssp->node = NULL;
    mutex_init(&ssp->srcu_cb_mutex);
    mutex_init(&ssp->srcu_gp_mutex);
    ssp->srcu_idx = 0;
    ssp->srcu_gp_seq = 0;
    ssp->srcu_barrier_seq = 0;
    mutex_init(&ssp->srcu_barrier_mutex);
    atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
    INIT_DELAYED_WORK(&ssp->work, process_srcu);
    ssp->sda_is_static = is_static;
    if (!is_static)
        ssp->sda = alloc_percpu(struct srcu_data);
    if (!ssp->sda)
        return -ENOMEM;
    init_srcu_struct_data(ssp);
    ssp->srcu_gp_seq_needed_exp = 0;
    ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
    if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
        if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
            if (!ssp->sda_is_static) {
                free_percpu(ssp->sda);
                ssp->sda = NULL;
                return -ENOMEM;
            }
        } else {
            WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
        }
    }
    smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
    return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
               struct lock_class_key *key)
{
    /* Don't re-initialize a lock while it is held. */
    debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
    lockdep_init_map(&ssp->dep_map, name, key, 0);
    spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
    return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
    spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
    return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
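
/*
 * Editorial sketch (not in the original source): a typical dynamic
 * lifetime of an srcu_struct, assuming a hypothetical "struct foo"
 * with an embedded SRCU domain:
 *
 *    struct foo {
 *        struct srcu_struct srcu;
 *        ...
 *    };
 *
 *    static int foo_create(struct foo *fp)
 *    {
 *        return init_srcu_struct(&fp->srcu);    // 0 or -ENOMEM
 *    }
 *
 *    static void foo_destroy(struct foo *fp)
 *    {
 *        // Readers must be done, and srcu_barrier() must be
 *        // invoked first if call_srcu() was ever used.
 *        cleanup_srcu_struct(&fp->srcu);
 *    }
 *
 * Statically allocated domains would instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU() and skip the explicit initialization.
 */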

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
    lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
    smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
    unsigned long flags;

    /* Double-checked locking on ->srcu_size_state. */
    if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
        return;
    spin_lock_irqsave_rcu_node(ssp, flags);
    if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
        spin_unlock_irqrestore_rcu_node(ssp, flags);
        return;
    }
    __srcu_transition_to_big(ssp);
    spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
    unsigned long j;

    if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
        return;
    j = jiffies;
    if (ssp->srcu_size_jiffies != j) {
        ssp->srcu_size_jiffies = j;
        ssp->srcu_n_lock_retries = 0;
    }
    if (++ssp->srcu_n_lock_retries <= small_contention_lim)
        return;
    __srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
    struct srcu_struct *ssp = sdp->ssp;

    if (spin_trylock_irqsave_rcu_node(sdp, *flags))
        return;
    spin_lock_irqsave_rcu_node(ssp, *flags);
    spin_lock_irqsave_check_contention(ssp);
    spin_unlock_irqrestore_rcu_node(ssp, *flags);
    spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
    if (spin_trylock_irqsave_rcu_node(ssp, *flags))
        return;
    spin_lock_irqsave_rcu_node(ssp, *flags);
    spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
    unsigned long flags;

    /* The smp_load_acquire() pairs with the smp_store_release(). */
    if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
        return; /* Already initialized. */
    spin_lock_irqsave_rcu_node(ssp, flags);
    if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
        spin_unlock_irqrestore_rcu_node(ssp, flags);
        return;
    }
    init_srcu_struct_fields(ssp, true);
    spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
    int cpu;
    unsigned long sum = 0;

    for_each_possible_cpu(cpu) {
        struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

        sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
    }
    return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
    int cpu;
    unsigned long sum = 0;

    for_each_possible_cpu(cpu) {
        struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

        sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
    }
    return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
    unsigned long unlocks;

    unlocks = srcu_readers_unlock_idx(ssp, idx);

    /*
     * Make sure that a lock is always counted if the corresponding
     * unlock is counted. Needs to be a smp_mb() as the read side may
     * contain a read from a variable that is written to before the
     * synchronize_srcu() in the write side. In this case smp_mb()s
     * A and B act like the store buffering pattern.
     *
     * This smp_mb() also pairs with smp_mb() C to prevent accesses
     * after the synchronize_srcu() from being executed before the
     * grace period ends.
     */
    smp_mb(); /* A */

    /*
     * If the locks are the same as the unlocks, then there must have
     * been no readers on this index at some time in between. This does
     * not mean that there are no more readers, as one could have read
     * the current index but not have incremented the lock counter yet.
     *
     * So suppose that the updater is preempted here for so long
     * that more than ULONG_MAX non-nested readers come and go in
     * the meantime.  It turns out that this cannot result in overflow
     * because if a reader modifies its unlock count after we read it
     * above, then that reader's next load of ->srcu_idx is guaranteed
     * to get the new value, which will cause it to operate on the
     * other bank of counters, where it cannot contribute to the
     * overflow of these counters.  This means that there is a maximum
     * of 2*NR_CPUS increments, which cannot overflow given current
     * systems, especially not on 64-bit systems.
     *
     * OK, how about nesting?  This does impose a limit on nesting
     * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
     * especially on 64-bit systems.
     */
    return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
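
/*
 * Editorial sketch (not in the original source): the store-buffering
 * shape that smp_mb() A above and smp_mb() B in __srcu_read_lock()
 * rule out, with X standing for some SRCU-protected variable:
 *
 *    Reader                           Updater
 *    ------                           -------
 *    inc ->srcu_lock_count[idx]       store to X
 *    smp_mb(); // B                   read ->srcu_unlock_count[idx]
 *    read X                           smp_mb(); // A
 *                                     read ->srcu_lock_count[idx]
 *
 * Without A and B, both sides' reads could bypass the other side's
 * write, letting the updater see balanced counters while the reader
 * still reads X inside its critical section.
 */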

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
    int cpu;
    unsigned long sum = 0;

    for_each_possible_cpu(cpu) {
        struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

        sum += READ_ONCE(cpuc->srcu_lock_count[0]);
        sum += READ_ONCE(cpuc->srcu_lock_count[1]);
        sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
        sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
    }
    return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY      5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL       1       // Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL   10      // Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO   3UL // Lowmark on default per-GP-phase
                            // no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI   1000UL  // Highmark on default per-GP-phase
                            // no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)  ((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high) ((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)   SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// per-GP-phase no-delay instances adjusted to allow a non-sleeping poll of
// up to one jiffy.  The multiplication by 2 factors in the srcu_get_delay()
// call from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED \
    (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE  \
    SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,  \
              SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,    \
              SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
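
/*
 * Worked example (added for exposition): with HZ=1000 and the default
 * SRCU_DEFAULT_RETRY_CHECK_DELAY of 5 microseconds,
 *
 *    SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED
 *        = 2 * USEC_PER_SEC / HZ / 5
 *        = 2 * 1000000 / 1000 / 5 = 400,
 *
 * which already lies within [3, 1000], so SRCU_DEFAULT_MAX_NODELAY_PHASE
 * is 400.  With HZ=100 the adjusted value would be 4000, and the clamp
 * caps it at the 1000 highmark.
 */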

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY    (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
                     SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
    unsigned long gpstart;
    unsigned long j;
    unsigned long jbase = SRCU_INTERVAL;

    if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
        jbase = 0;
    if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
        j = jiffies - 1;
        gpstart = READ_ONCE(ssp->srcu_gp_start);
        if (time_after(j, gpstart))
            jbase += j - gpstart;
        if (!jbase) {
            WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
            if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
                jbase = 1;
        }
    }
    return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
    int cpu;

    if (WARN_ON(!srcu_get_delay(ssp)))
        return; /* Just leak it! */
    if (WARN_ON(srcu_readers_active(ssp)))
        return; /* Just leak it! */
    flush_delayed_work(&ssp->work);
    for_each_possible_cpu(cpu) {
        struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

        del_timer_sync(&sdp->delay_work);
        flush_work(&sdp->work);
        if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
            return; /* Forgot srcu_barrier(), so just leak it! */
    }
    if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
        WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
        WARN_ON(srcu_readers_active(ssp))) {
        pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
            __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
            rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
        return; /* Caller forgot to stop doing call_srcu()? */
    }
    if (!ssp->sda_is_static) {
        free_percpu(ssp->sda);
        ssp->sda = NULL;
    }
    kfree(ssp->node);
    ssp->node = NULL;
    ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
    int idx;

    idx = READ_ONCE(ssp->srcu_idx) & 0x1;
    this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
    smp_mb(); /* B */  /* Avoid leaking the critical section. */
    return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
    smp_mb(); /* C */  /* Avoid leaking the critical section. */
    this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
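
/*
 * Editorial usage sketch (not in the original source): the pair above
 * is normally reached through the srcu_read_lock()/srcu_read_unlock()
 * wrappers.  "my_srcu", "my_ptr", and do_something_with() are
 * hypothetical:
 *
 *    int idx;
 *    struct my_data *p;
 *
 *    idx = srcu_read_lock(&my_srcu);
 *    p = srcu_dereference(my_ptr, &my_srcu);
 *    if (p)
 *        do_something_with(p);    // may block: this is sleepable RCU
 *    srcu_read_unlock(&my_srcu, idx);
 *
 * The index returned by srcu_read_lock() must be passed unchanged to
 * the matching srcu_read_unlock().
 */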

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
    struct srcu_data *sdp;
    int state;

    if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
        sdp = per_cpu_ptr(ssp->sda, 0);
    else
        sdp = this_cpu_ptr(ssp->sda);
    lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
    WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
    spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
    rcu_segcblist_advance(&sdp->srcu_cblist,
                  rcu_seq_current(&ssp->srcu_gp_seq));
    (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
                       rcu_seq_snap(&ssp->srcu_gp_seq));
    spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
    WRITE_ONCE(ssp->srcu_gp_start, jiffies);
    WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
    smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
    rcu_seq_start(&ssp->srcu_gp_seq);
    state = rcu_seq_state(ssp->srcu_gp_seq);
    WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
    struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

    queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
                       unsigned long delay)
{
    if (!delay) {
        queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
        return;
    }

    timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
    srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
                  unsigned long mask, unsigned long delay)
{
    int cpu;

    for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
        if (!(mask & (1 << (cpu - snp->grplo))))
            continue;
        srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
    }
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
    unsigned long cbdelay = 1;
    bool cbs;
    bool last_lvl;
    int cpu;
    unsigned long flags;
    unsigned long gpseq;
    int idx;
    unsigned long mask;
    struct srcu_data *sdp;
    unsigned long sgsne;
    struct srcu_node *snp;
    int ss_state;

    /* Prevent more than one additional grace period. */
    mutex_lock(&ssp->srcu_cb_mutex);

    /* End the current grace period. */
    spin_lock_irq_rcu_node(ssp);
    idx = rcu_seq_state(ssp->srcu_gp_seq);
    WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
    if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
        cbdelay = 0;

    WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
    rcu_seq_end(&ssp->srcu_gp_seq);
    gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
    if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
        WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
    spin_unlock_irq_rcu_node(ssp);
    mutex_unlock(&ssp->srcu_gp_mutex);
    /* A new grace period can start at this point.  But only one. */

    /* Initiate callback invocation as needed. */
    ss_state = smp_load_acquire(&ssp->srcu_size_state);
    if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
        srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
    } else {
        idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
        srcu_for_each_node_breadth_first(ssp, snp) {
            spin_lock_irq_rcu_node(snp);
            cbs = false;
            last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
            if (last_lvl)
                cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
            snp->srcu_have_cbs[idx] = gpseq;
            rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
            sgsne = snp->srcu_gp_seq_needed_exp;
            if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
                WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
            if (ss_state < SRCU_SIZE_BIG)
                mask = ~0;
            else
                mask = snp->srcu_data_have_cbs[idx];
            snp->srcu_data_have_cbs[idx] = 0;
            spin_unlock_irq_rcu_node(snp);
            if (cbs)
                srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
        }
    }

    /* Occasionally prevent srcu_data counter wrap. */
    if (!(gpseq & counter_wrap_check))
        for_each_possible_cpu(cpu) {
            sdp = per_cpu_ptr(ssp->sda, cpu);
            spin_lock_irqsave_rcu_node(sdp, flags);
            if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
                sdp->srcu_gp_seq_needed = gpseq;
            if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
                sdp->srcu_gp_seq_needed_exp = gpseq;
            spin_unlock_irqrestore_rcu_node(sdp, flags);
        }

    /* Callback initiation done, allow grace periods after next. */
    mutex_unlock(&ssp->srcu_cb_mutex);

    /* Start a new grace period if needed. */
    spin_lock_irq_rcu_node(ssp);
    gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
    if (!rcu_seq_state(gpseq) &&
        ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
        srcu_gp_start(ssp);
        spin_unlock_irq_rcu_node(ssp);
        srcu_reschedule(ssp, 0);
    } else {
        spin_unlock_irq_rcu_node(ssp);
    }

    /* Transition to big if needed. */
    if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
        if (ss_state == SRCU_SIZE_ALLOC)
            init_srcu_struct_nodes(ssp, GFP_KERNEL);
        else
            smp_store_release(&ssp->srcu_size_state, ss_state + 1);
    }
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
                  unsigned long s)
{
    unsigned long flags;
    unsigned long sgsne;

    if (snp)
        for (; snp != NULL; snp = snp->srcu_parent) {
            sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
            if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
                (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
                return;
            spin_lock_irqsave_rcu_node(snp, flags);
            sgsne = snp->srcu_gp_seq_needed_exp;
            if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
                spin_unlock_irqrestore_rcu_node(snp, flags);
                return;
            }
            WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
            spin_unlock_irqrestore_rcu_node(snp, flags);
        }
    spin_lock_irqsave_ssp_contention(ssp, &flags);
    if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
        WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
    spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
                 unsigned long s, bool do_norm)
{
    unsigned long flags;
    int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
    unsigned long sgsne;
    struct srcu_node *snp;
    struct srcu_node *snp_leaf;
    unsigned long snp_seq;

    /* Ensure that snp node tree is fully initialized before traversing it */
    if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
        snp_leaf = NULL;
    else
        snp_leaf = sdp->mynode;

    if (snp_leaf)
        /* Each pass through the loop does one level of the srcu_node tree. */
        for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
            if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
                return; /* GP already done and CBs recorded. */
            spin_lock_irqsave_rcu_node(snp, flags);
            snp_seq = snp->srcu_have_cbs[idx];
            if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
                if (snp == snp_leaf && snp_seq == s)
                    snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                spin_unlock_irqrestore_rcu_node(snp, flags);
                if (snp == snp_leaf && snp_seq != s) {
                    srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
                    return;
                }
                if (!do_norm)
                    srcu_funnel_exp_start(ssp, snp, s);
                return;
            }
            snp->srcu_have_cbs[idx] = s;
            if (snp == snp_leaf)
                snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
            sgsne = snp->srcu_gp_seq_needed_exp;
            if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
                WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
            spin_unlock_irqrestore_rcu_node(snp, flags);
        }

    /* Top of tree, must ensure the grace period will be started. */
    spin_lock_irqsave_ssp_contention(ssp, &flags);
    if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
        /*
         * Record need for grace period s.  Pair with load
         * acquire setting up for initialization.
         */
        smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
    }
    if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
        WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

    /* If grace period not already done and none in progress, start it. */
    if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
        rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
        WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
        srcu_gp_start(ssp);

        // And how can that list_add() in the "else" clause
        // possibly be safe for concurrent execution?  Well,
        // it isn't.  And it does not have to be.  After all, it
        // can only be executed during early boot when there is only
        // the one boot CPU running with interrupts still disabled.
        if (likely(srcu_init_done))
            queue_delayed_work(rcu_gp_wq, &ssp->work,
                       !!srcu_get_delay(ssp));
        else if (list_empty(&ssp->work.work.entry))
            list_add(&ssp->work.work.entry, &srcu_boot_list);
    }
    spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
    unsigned long curdelay;

    curdelay = !srcu_get_delay(ssp);

    for (;;) {
        if (srcu_readers_active_idx_check(ssp, idx))
            return true;
        if ((--trycount + curdelay) <= 0)
            return false;
        udelay(srcu_retry_check_delay);
    }
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
    /*
     * Ensure that if this updater saw a given reader's increment
     * from __srcu_read_lock(), that reader was using an old value
     * of ->srcu_idx.  Also ensure that if a given reader sees the
     * new value of ->srcu_idx, this updater's earlier scans cannot
     * have seen that reader's increments (which is OK, because this
     * grace period need not wait on that reader).
     */
    smp_mb(); /* E */  /* Pairs with B and C. */

    WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

    /*
     * Ensure that if the updater misses an __srcu_read_unlock()
     * increment, that task's next __srcu_read_lock() will see the
     * above counter update.  Note that both this memory barrier
     * and the one in srcu_readers_active_idx_check() provide the
     * guarantee for __srcu_read_lock().
     */
    smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
    unsigned long curseq;
    unsigned long flags;
    struct srcu_data *sdp;
    unsigned long t;
    unsigned long tlast;

    check_init_srcu_struct(ssp);
    /* If the local srcu_data structure has callbacks, not idle.  */
    sdp = raw_cpu_ptr(ssp->sda);
    spin_lock_irqsave_rcu_node(sdp, flags);
    if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
        spin_unlock_irqrestore_rcu_node(sdp, flags);
        return false; /* Callbacks already present, so not idle. */
    }
    spin_unlock_irqrestore_rcu_node(sdp, flags);

    /*
     * No local callbacks, so probabilistically probe global state.
     * Exact information would require acquiring locks, which would
     * kill scalability, hence the probabilistic nature of the probe.
     */

    /* First, see if enough time has passed since the last GP. */
    t = ktime_get_mono_fast_ns();
    tlast = READ_ONCE(ssp->srcu_last_gp_end);
    if (exp_holdoff == 0 ||
        time_in_range_open(t, tlast, tlast + exp_holdoff))
        return false; /* Too soon after last GP. */

    /* Next, check for probable idleness. */
    curseq = rcu_seq_current(&ssp->srcu_gp_seq);
    smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
    if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
        return false; /* Grace period in progress, so not idle. */
    smp_mb(); /* Order ->srcu_gp_seq with prior access. */
    if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
        return false; /* GP # changed, so not idle. */
    return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
                         struct rcu_head *rhp, bool do_norm)
{
    unsigned long flags;
    int idx;
    bool needexp = false;
    bool needgp = false;
    unsigned long s;
    struct srcu_data *sdp;
    struct srcu_node *sdp_mynode;
    int ss_state;

    check_init_srcu_struct(ssp);
    idx = srcu_read_lock(ssp);
    ss_state = smp_load_acquire(&ssp->srcu_size_state);
    if (ss_state < SRCU_SIZE_WAIT_CALL)
        sdp = per_cpu_ptr(ssp->sda, 0);
    else
        sdp = raw_cpu_ptr(ssp->sda);
    spin_lock_irqsave_sdp_contention(sdp, &flags);
    if (rhp)
        rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
    rcu_segcblist_advance(&sdp->srcu_cblist,
                  rcu_seq_current(&ssp->srcu_gp_seq));
    s = rcu_seq_snap(&ssp->srcu_gp_seq);
    (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
    if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
        sdp->srcu_gp_seq_needed = s;
        needgp = true;
    }
    if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
        sdp->srcu_gp_seq_needed_exp = s;
        needexp = true;
    }
    spin_unlock_irqrestore_rcu_node(sdp, flags);

    /* Ensure that snp node tree is fully initialized before traversing it */
    if (ss_state < SRCU_SIZE_WAIT_BARRIER)
        sdp_mynode = NULL;
    else
        sdp_mynode = sdp->mynode;

    if (needgp)
        srcu_funnel_gp_start(ssp, sdp, s, do_norm);
    else if (needexp)
        srcu_funnel_exp_start(ssp, sdp_mynode, s);
    srcu_read_unlock(ssp, idx);
    return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
            rcu_callback_t func, bool do_norm)
{
    if (debug_rcu_head_queue(rhp)) {
        /* Probable double call_srcu(), so leak the callback. */
        WRITE_ONCE(rhp->func, srcu_leak_callback);
        WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
        return;
    }
    rhp->func = func;
    (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
           rcu_callback_t func)
{
    __call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
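
/*
 * Editorial usage sketch (not in the original source): the usual
 * call_srcu() pattern embeds the rcu_head in the protected object and
 * recovers the object with container_of() in the callback.  "struct foo",
 * "my_srcu", "foo_lock", and foo_reclaim() are hypothetical:
 *
 *    struct foo {
 *        struct list_head list;
 *        struct rcu_head rh;
 *    };
 *
 *    static void foo_reclaim(struct rcu_head *rhp)
 *    {
 *        kfree(container_of(rhp, struct foo, rh));
 *    }
 *
 *    // Updater: unlink, then defer the free past a grace period.
 *    spin_lock(&foo_lock);
 *    list_del_rcu(&fp->list);
 *    spin_unlock(&foo_lock);
 *    call_srcu(&my_srcu, &fp->rh, foo_reclaim);
 */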

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
    struct rcu_synchronize rcu;

    RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
             lock_is_held(&rcu_bh_lock_map) ||
             lock_is_held(&rcu_lock_map) ||
             lock_is_held(&rcu_sched_lock_map),
             "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

    if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
        return;
    might_sleep();
    check_init_srcu_struct(ssp);
    init_completion(&rcu.completion);
    init_rcu_head_on_stack(&rcu.head);
    __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
    wait_for_completion(&rcu.completion);
    destroy_rcu_head_on_stack(&rcu.head);

    /*
     * Make sure that later code is ordered after the SRCU grace
     * period.  This pairs with the spin_lock_irq_rcu_node()
     * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
     * because the current CPU might have been totally uninvolved with
     * (and thus unordered against) that grace period.
     */
    smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
    __synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of index=((->srcu_idx & 1) ^ 1) to drain to zero, then flips
 * ->srcu_idx and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
    if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
        synchronize_srcu_expedited(ssp);
    else
        __synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
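
/*
 * Editorial usage sketch (not in the original source): the synchronous
 * analog of the call_srcu() pattern above, again with a hypothetical
 * "struct foo" list protected by "my_srcu" and "foo_lock":
 *
 *    spin_lock(&foo_lock);
 *    list_del_rcu(&fp->list);
 *    spin_unlock(&foo_lock);
 *    synchronize_srcu(&my_srcu);    // wait out pre-existing readers
 *    kfree(fp);                     // no reader can still hold a reference
 */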
1299 
1300 /**
1301  * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
1302  * @ssp: srcu_struct to provide cookie for.
1303  *
1304  * This function returns a cookie that can be passed to
1305  * poll_state_synchronize_srcu(), which will return true if a full grace
1306  * period has elapsed in the meantime.  It is the caller's responsibility
1307  * to make sure that grace period happens, for example, by invoking
1308  * call_srcu() after return from get_state_synchronize_srcu().
1309  */
1310 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
1311 {
1312     // Any prior manipulation of SRCU-protected data must happen
1313     // before the load from ->srcu_gp_seq.
1314     smp_mb();
1315     return rcu_seq_snap(&ssp->srcu_gp_seq);
1316 }
1317 EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
1318 
1319 /**
1320  * start_poll_synchronize_srcu - Provide cookie and start grace period
1321  * @ssp: srcu_struct to provide cookie for.
1322  *
1323  * This function returns a cookie that can be passed to
1324  * poll_state_synchronize_srcu(), which will return true if a full grace
1325  * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
1326  * this function also ensures that any needed SRCU grace period will be
1327  * started.  This convenience does come at a cost in terms of CPU overhead.
1328  */
1329 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
1330 {
1331     return srcu_gp_start_if_needed(ssp, NULL, true);
1332 }
1333 EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
1334 
1335 /**
1336  * poll_state_synchronize_srcu - Has cookie's grace period ended?
1337  * @ssp: srcu_struct to provide cookie for.
1338  * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
1339  *
1340  * This function takes the cookie that was returned from either
1341  * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
1342  * returns @true if an SRCU grace period elapsed since the time that the
1343  * cookie was created.
1344  *
1345  * Because cookies are finite in size, wrapping/overflow is possible.
1346  * This is more pronounced on 32-bit systems, where cookies are 32 bits
1347  * and where in theory wrapping could happen in about 14 hours assuming
1348  * 25-microsecond expedited SRCU grace periods.  However, a more likely
1349  * overflow lower bound is on the order of 24 days in the case of
1350  * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
1351  * system requires geologic timespans, as in more than seven million years
1352  * even for expedited SRCU grace periods.
1353  *
1354  * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
1355  * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
1356  * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
1357  * few minutes.  If this proves to be a problem, this counter will be
1358  * expanded to the same size as for Tree SRCU.
1359  */
1360 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
1361 {
1362     if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
1363         return false;
1364     // Ensure that the end of the SRCU grace period happens before
1365     // any subsequent code that the caller might execute.
1366     smp_mb(); // ^^^
1367     return true;
1368 }
1369 EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
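
/*
 * A hedged sketch of how the polled grace-period functions combine.
 * The my_* bookkeeping below is hypothetical, and locking of the
 * deferred list is omitted for brevity:
 *
 *	struct my_deferred {
 *		struct list_head node;
 *		unsigned long gp_cookie;
 *		void *obj;
 *	};
 *
 *	static LIST_HEAD(my_deferred_list);
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	// Retire an object without blocking.  The cookie records the
 *	// grace period that must elapse before the object may be freed;
 *	// start_poll_synchronize_srcu() also starts that grace period.
 *	// (A caller that instead guarantees the grace period itself,
 *	// for example via call_srcu(), could use
 *	// get_state_synchronize_srcu() and avoid that overhead.)
 *	static void my_retire(struct my_deferred *d, void *obj)
 *	{
 *		d->obj = obj;
 *		d->gp_cookie = start_poll_synchronize_srcu(&my_srcu);
 *		list_add_tail(&d->node, &my_deferred_list);
 *	}
 *
 *	// Periodic reclaim: free only objects whose grace period has
 *	// fully elapsed.  FIFO order means the first unfinished cookie
 *	// ends the pass.
 *	static void my_reclaim(void)
 *	{
 *		struct my_deferred *d, *tmp;
 *
 *		list_for_each_entry_safe(d, tmp, &my_deferred_list, node) {
 *			if (!poll_state_synchronize_srcu(&my_srcu, d->gp_cookie))
 *				break;
 *			list_del(&d->node);
 *			kfree(d->obj);
 *			kfree(d);
 *		}
 *	}
 */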
1370 
1371 /*
1372  * Callback function for srcu_barrier() use.
1373  */
1374 static void srcu_barrier_cb(struct rcu_head *rhp)
1375 {
1376     struct srcu_data *sdp;
1377     struct srcu_struct *ssp;
1378 
1379     sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1380     ssp = sdp->ssp;
1381     if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1382         complete(&ssp->srcu_barrier_completion);
1383 }
1384 
1385 /*
1386  * Enqueue an srcu_barrier() callback on the specified srcu_data
1387  * structure's ->cblist, but only if that ->cblist already has at least one
1388  * callback enqueued.  Note that if a CPU already has callbacks enqueued,
1389  * it must have already registered the need for a future grace period,
1390  * so all we need do is enqueue a callback that will use the same grace
1391  * period as the last callback already in the queue.
1392  */
1393 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1394 {
1395     spin_lock_irq_rcu_node(sdp);
1396     atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1397     sdp->srcu_barrier_head.func = srcu_barrier_cb;
1398     debug_rcu_head_queue(&sdp->srcu_barrier_head);
1399     if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1400                    &sdp->srcu_barrier_head)) {
1401         debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1402         atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1403     }
1404     spin_unlock_irq_rcu_node(sdp);
1405 }
1406 
1407 /**
1408  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1409  * @ssp: srcu_struct on which to wait for in-flight callbacks.
1410  */
1411 void srcu_barrier(struct srcu_struct *ssp)
1412 {
1413     int cpu;
1414     int idx;
1415     unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1416 
1417     check_init_srcu_struct(ssp);
1418     mutex_lock(&ssp->srcu_barrier_mutex);
1419     if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1420         smp_mb(); /* Force ordering following return. */
1421         mutex_unlock(&ssp->srcu_barrier_mutex);
1422         return; /* Someone else did our work for us. */
1423     }
1424     rcu_seq_start(&ssp->srcu_barrier_seq);
1425     init_completion(&ssp->srcu_barrier_completion);
1426 
1427     /* Initial count prevents reaching zero until all CBs are posted. */
1428     atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1429 
1430     idx = srcu_read_lock(ssp);
1431     if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1432         srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
1433     else
1434         for_each_possible_cpu(cpu)
1435             srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1436     srcu_read_unlock(ssp, idx);
1437 
1438     /* Remove the initial count, at which point reaching zero can happen. */
1439     if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1440         complete(&ssp->srcu_barrier_completion);
1441     wait_for_completion(&ssp->srcu_barrier_completion);
1442 
1443     rcu_seq_end(&ssp->srcu_barrier_seq);
1444     mutex_unlock(&ssp->srcu_barrier_mutex);
1445 }
1446 EXPORT_SYMBOL_GPL(srcu_barrier);
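
/*
 * A hedged sketch of srcu_barrier()'s usual call site, namely subsystem
 * or module teardown.  my_srcu (previously set up via init_srcu_struct())
 * and my_stop_new_callbacks() are hypothetical:
 *
 *	static void my_teardown(void)
 *	{
 *		my_stop_new_callbacks();	// No call_srcu() after this point.
 *		srcu_barrier(&my_srcu);		// Wait for queued callbacks to run.
 *		cleanup_srcu_struct(&my_srcu);	// Safe: no callbacks remain.
 *	}
 *
 * Note that srcu_barrier() waits only for callbacks already queued via
 * call_srcu(); it neither waits for readers nor prevents new callbacks
 * from being queued concurrently.
 */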
1447 
1448 /**
1449  * srcu_batches_completed - return batches completed.
1450  * @ssp: srcu_struct on which to report batch completion.
1451  *
1452  * Report the number of batches, correlated with, but not necessarily
1453  * precisely the same as, the number of grace periods that have elapsed.
1454  */
1455 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1456 {
1457     return READ_ONCE(ssp->srcu_idx);
1458 }
1459 EXPORT_SYMBOL_GPL(srcu_batches_completed);
1460 
1461 /*
1462  * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1463  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when the scan has
1464  * completed in that state.
1465  */
1466 static void srcu_advance_state(struct srcu_struct *ssp)
1467 {
1468     int idx;
1469 
1470     mutex_lock(&ssp->srcu_gp_mutex);
1471 
1472     /*
1473      * Because readers might be delayed for an extended period after
1474      * fetching ->srcu_idx for their index, at any point in time there
1475      * might well be readers using both idx=0 and idx=1.  We therefore
1476      * need to wait for readers to clear from both index values before
1477      * invoking a callback.
1478      *
1479      * The load-acquire ensures that we see the accesses performed
1480      * by the prior grace period.
1481      */
1482     idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1483     if (idx == SRCU_STATE_IDLE) {
1484         spin_lock_irq_rcu_node(ssp);
1485         if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1486             WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1487             spin_unlock_irq_rcu_node(ssp);
1488             mutex_unlock(&ssp->srcu_gp_mutex);
1489             return;
1490         }
1491         idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1492         if (idx == SRCU_STATE_IDLE)
1493             srcu_gp_start(ssp);
1494         spin_unlock_irq_rcu_node(ssp);
1495         if (idx != SRCU_STATE_IDLE) {
1496             mutex_unlock(&ssp->srcu_gp_mutex);
1497             return; /* Someone else started the grace period. */
1498         }
1499     }
1500 
1501     if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1502         idx = 1 ^ (ssp->srcu_idx & 1);
1503         if (!try_check_zero(ssp, idx, 1)) {
1504             mutex_unlock(&ssp->srcu_gp_mutex);
1505             return; /* readers present, retry later. */
1506         }
1507         srcu_flip(ssp);
1508         spin_lock_irq_rcu_node(ssp);
1509         rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1510         ssp->srcu_n_exp_nodelay = 0;
1511         spin_unlock_irq_rcu_node(ssp);
1512     }
1513 
1514     if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1515 
1516         /*
1517          * SRCU read-side critical sections are normally short,
1518          * so check at least twice in quick succession after a flip.
1519          */
1520         idx = 1 ^ (ssp->srcu_idx & 1);
1521         if (!try_check_zero(ssp, idx, 2)) {
1522             mutex_unlock(&ssp->srcu_gp_mutex);
1523             return; /* readers present, retry later. */
1524         }
1525         ssp->srcu_n_exp_nodelay = 0;
1526         srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
1527     }
1528 }
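
/*
 * A hedged sketch of the per-index condition that each try_check_zero()
 * call above is waiting for, phrased in terms of the per-CPU counters.
 * This is a simplification: the real test in
 * srcu_readers_active_idx_check() must also worry about counter wrap
 * and about readers migrating between the two sums:
 *
 *	static bool idx_drained(struct srcu_struct *ssp, int idx)
 *	{
 *		unsigned long locks = 0, unlocks = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			unlocks += READ_ONCE(per_cpu_ptr(ssp->sda, cpu)->srcu_unlock_count[idx]);
 *		smp_mb();  // Sum unlocks before locks, as the real code does.
 *		for_each_possible_cpu(cpu)
 *			locks += READ_ONCE(per_cpu_ptr(ssp->sda, cpu)->srcu_lock_count[idx]);
 *		return locks == unlocks;
 *	}
 */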
1529 
1530 /*
1531  * Invoke a limited number of SRCU callbacks that have passed through
1532  * their grace period.  If there are more to do, SRCU will reschedule
1533  * the workqueue.  Note that needed memory barriers have been executed
1534  * in this task's context by srcu_readers_active_idx_check().
1535  */
1536 static void srcu_invoke_callbacks(struct work_struct *work)
1537 {
1538     long len;
1539     bool more;
1540     struct rcu_cblist ready_cbs;
1541     struct rcu_head *rhp;
1542     struct srcu_data *sdp;
1543     struct srcu_struct *ssp;
1544 
1545     sdp = container_of(work, struct srcu_data, work);
1546 
1547     ssp = sdp->ssp;
1548     rcu_cblist_init(&ready_cbs);
1549     spin_lock_irq_rcu_node(sdp);
1550     rcu_segcblist_advance(&sdp->srcu_cblist,
1551                   rcu_seq_current(&ssp->srcu_gp_seq));
1552     if (sdp->srcu_cblist_invoking ||
1553         !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1554         spin_unlock_irq_rcu_node(sdp);
1555         return;  /* Someone else on the job or nothing to do. */
1556     }
1557 
1558     /* We are on the job!  Extract and invoke ready callbacks. */
1559     sdp->srcu_cblist_invoking = true;
1560     rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1561     len = ready_cbs.len;
1562     spin_unlock_irq_rcu_node(sdp);
1563     rhp = rcu_cblist_dequeue(&ready_cbs);
1564     for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1565         debug_rcu_head_unqueue(rhp);
1566         local_bh_disable();
1567         rhp->func(rhp);
1568         local_bh_enable();
1569     }
1570     WARN_ON_ONCE(ready_cbs.len);
1571 
1572     /*
1573      * Update counts, accelerate new callbacks, and if needed,
1574      * schedule another round of callback invocation.
1575      */
1576     spin_lock_irq_rcu_node(sdp);
1577     rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1578     (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1579                        rcu_seq_snap(&ssp->srcu_gp_seq));
1580     sdp->srcu_cblist_invoking = false;
1581     more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1582     spin_unlock_irq_rcu_node(sdp);
1583     if (more)
1584         srcu_schedule_cbs_sdp(sdp, 0);
1585 }
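
/*
 * For reference, a hedged sketch of the sort of callback that the
 * rhp->func(rhp) invocation above ends up running.  struct my_obj and
 * my_free_cb() are hypothetical:
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		// ... payload ...
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_obj *p = container_of(rhp, struct my_obj, rh);
 *
 *		kfree(p);	// Invoked with BH disabled, per the loop above.
 *	}
 *
 *	// Queued earlier as: call_srcu(&my_srcu, &p->rh, my_free_cb);
 */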
1586 
1587 /*
1588  * One round of the SRCU grace period has finished.  Start another round if
1589  * more SRCU callbacks are queued; otherwise put SRCU into a not-running state.
1590  */
1591 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1592 {
1593     bool pushgp = true;
1594 
1595     spin_lock_irq_rcu_node(ssp);
1596     if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1597         if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1598             /* All requests fulfilled, time to go idle. */
1599             pushgp = false;
1600         }
1601     } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1602         /* Outstanding request and no GP.  Start one. */
1603         srcu_gp_start(ssp);
1604     }
1605     spin_unlock_irq_rcu_node(ssp);
1606 
1607     if (pushgp)
1608         queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1609 }
1610 
1611 /*
1612  * This is the work-queue function that handles SRCU grace periods.
1613  */
1614 static void process_srcu(struct work_struct *work)
1615 {
1616     unsigned long curdelay;
1617     unsigned long j;
1618     struct srcu_struct *ssp;
1619 
1620     ssp = container_of(work, struct srcu_struct, work.work);
1621 
1622     srcu_advance_state(ssp);
1623     curdelay = srcu_get_delay(ssp);
1624     if (curdelay) {
1625         WRITE_ONCE(ssp->reschedule_count, 0);
1626     } else {
1627         j = jiffies;
1628         if (READ_ONCE(ssp->reschedule_jiffies) == j) {
1629             WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
1630             if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
1631                 curdelay = 1;
1632         } else {
1633             WRITE_ONCE(ssp->reschedule_count, 1);
1634             WRITE_ONCE(ssp->reschedule_jiffies, j);
1635         }
1636     }
1637     srcu_reschedule(ssp, curdelay);
1638 }
1639 
1640 void srcutorture_get_gp_data(enum rcutorture_type test_type,
1641                  struct srcu_struct *ssp, int *flags,
1642                  unsigned long *gp_seq)
1643 {
1644     if (test_type != SRCU_FLAVOR)
1645         return;
1646     *flags = 0;
1647     *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1648 }
1649 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1650 
1651 static const char * const srcu_size_state_name[] = {
1652     "SRCU_SIZE_SMALL",
1653     "SRCU_SIZE_ALLOC",
1654     "SRCU_SIZE_WAIT_BARRIER",
1655     "SRCU_SIZE_WAIT_CALL",
1656     "SRCU_SIZE_WAIT_CBS1",
1657     "SRCU_SIZE_WAIT_CBS2",
1658     "SRCU_SIZE_WAIT_CBS3",
1659     "SRCU_SIZE_WAIT_CBS4",
1660     "SRCU_SIZE_BIG",
1661     "SRCU_SIZE_???",
1662 };
1663 
1664 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1665 {
1666     int cpu;
1667     int idx;
1668     unsigned long s0 = 0, s1 = 0;
1669     int ss_state = READ_ONCE(ssp->srcu_size_state);
1670     int ss_state_idx = ss_state;
1671 
1672     idx = ssp->srcu_idx & 0x1;
1673     if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
1674         ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
1675     pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
1676          tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
1677          srcu_size_state_name[ss_state_idx]);
1678     if (!ssp->sda) {
1679         // Called after cleanup_srcu_struct(), perhaps.
1680         pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
1681     } else {
1682         pr_cont(" per-CPU(idx=%d):", idx);
1683         for_each_possible_cpu(cpu) {
1684             unsigned long l0, l1;
1685             unsigned long u0, u1;
1686             long c0, c1;
1687             struct srcu_data *sdp;
1688 
1689             sdp = per_cpu_ptr(ssp->sda, cpu);
1690             u0 = data_race(sdp->srcu_unlock_count[!idx]);
1691             u1 = data_race(sdp->srcu_unlock_count[idx]);
1692 
1693             /*
1694              * Make sure that a lock is always counted if the corresponding
1695              * unlock is counted.
1696              */
1697             smp_rmb();
1698 
1699             l0 = data_race(sdp->srcu_lock_count[!idx]);
1700             l1 = data_race(sdp->srcu_lock_count[idx]);
1701 
1702             c0 = l0 - u0;
1703             c1 = l1 - u1;
1704             pr_cont(" %d(%ld,%ld %c)",
1705                 cpu, c0, c1,
1706                 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1707             s0 += c0;
1708             s1 += c1;
1709         }
1710         pr_cont(" T(%ld,%ld)\n", s0, s1);
1711     }
1712     if (SRCU_SIZING_IS_TORTURE())
1713         srcu_transition_to_big(ssp);
1714 }
1715 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1716 
1717 static int __init srcu_bootup_announce(void)
1718 {
1719     pr_info("Hierarchical SRCU implementation.\n");
1720     if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1721         pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1722     if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
1723         pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
1724     if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
1725         pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
1726     pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
1727     return 0;
1728 }
1729 early_initcall(srcu_bootup_announce);
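
/*
 * The parameters reported above are read-only at runtime (perm 0444)
 * but can be set on the kernel command line, for example (values
 * purely illustrative):
 *
 *	srcutree.exp_holdoff=50000 srcutree.big_cpu_lim=64 srcutree.convert_to_big=2
 */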
1730 
1731 void __init srcu_init(void)
1732 {
1733     struct srcu_struct *ssp;
1734 
1735     /* Decide on srcu_struct-size strategy. */
1736     if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
1737         if (nr_cpu_ids >= big_cpu_lim) {
1738             convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
1739             pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
1740         } else {
1741             convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
1742             pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
1743         }
1744     }
1745 
1746     /*
1747      * Once srcu_init_done is set, call_srcu() can follow the normal path
1748      * and queue delayed work.  This must therefore happen only after the
1749      * RCU workqueues have been created and timers have been initialized.
1750      */
1751     srcu_init_done = true;
1752     while (!list_empty(&srcu_boot_list)) {
1753         ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1754                       work.work.entry);
1755         list_del_init(&ssp->work.work.entry);
1756         if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
1757             ssp->srcu_size_state = SRCU_SIZE_ALLOC;
1758         queue_work(rcu_gp_wq, &ssp->work.work);
1759     }
1760 }
1761 
1762 #ifdef CONFIG_MODULES
1763 
1764 /* Initialize any global-scope srcu_struct structures used by this module. */
1765 static int srcu_module_coming(struct module *mod)
1766 {
1767     int i;
1768     struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1769     int ret;
1770 
1771     for (i = 0; i < mod->num_srcu_structs; i++) {
1772         ret = init_srcu_struct(*(sspp++));
1773         if (WARN_ON_ONCE(ret))
1774             return ret;
1775     }
1776     return 0;
1777 }
1778 
1779 /* Clean up any global-scope srcu_struct structures used by this module. */
1780 static void srcu_module_going(struct module *mod)
1781 {
1782     int i;
1783     struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1784 
1785     for (i = 0; i < mod->num_srcu_structs; i++)
1786         cleanup_srcu_struct(*(sspp++));
1787 }
1788 
1789 /* Handle one module, either coming or going. */
1790 static int srcu_module_notify(struct notifier_block *self,
1791                   unsigned long val, void *data)
1792 {
1793     struct module *mod = data;
1794     int ret = 0;
1795 
1796     switch (val) {
1797     case MODULE_STATE_COMING:
1798         ret = srcu_module_coming(mod);
1799         break;
1800     case MODULE_STATE_GOING:
1801         srcu_module_going(mod);
1802         break;
1803     default:
1804         break;
1805     }
1806     return ret;
1807 }
1808 
1809 static struct notifier_block srcu_module_nb = {
1810     .notifier_call = srcu_module_notify,
1811     .priority = 0,
1812 };
1813 
1814 static __init int init_srcu_module_notifier(void)
1815 {
1816     int ret;
1817 
1818     ret = register_module_notifier(&srcu_module_nb);
1819     if (ret)
1820         pr_warn("Failed to register srcu module notifier\n");
1821     return ret;
1822 }
1823 late_initcall(init_srcu_module_notifier);
1824 
1825 #endif /* #ifdef CONFIG_MODULES */
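
/*
 * A hedged sketch of what the notifier above automates for modules: a
 * statically defined srcu_struct needs no explicit init_srcu_struct()
 * or cleanup_srcu_struct() calls, because srcu_module_coming() and
 * srcu_module_going() perform them at load and unload time.
 * my_mod_srcu and the reader below are hypothetical:
 *
 *	DEFINE_STATIC_SRCU(my_mod_srcu);
 *
 *	static int my_mod_read(void)
 *	{
 *		int idx = srcu_read_lock(&my_mod_srcu);
 *		// ... sleepable read-side work ...
 *		srcu_read_unlock(&my_mod_srcu, idx);
 *		return 0;
 *	}
 */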