/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT   2
#define RCU_SEQ_STATE_MASK  ((1 << RCU_SEQ_CTR_SHIFT) - 1)
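
/*
 * Editorial illustration (not part of the original header): a grace-period
 * sequence number packs a grace-period counter in its upper bits and a
 * state field in its RCU_SEQ_CTR_SHIFT low-order bits.  With
 * RCU_SEQ_CTR_SHIFT == 2 and RCU_SEQ_STATE_MASK == 0x3, for example:
 *
 *     s == 0x8: counter 2, state 0 (no grace period in progress)
 *     s == 0x9: counter 2, state 1 (grace period in progress)
 *
 * so rcu_seq_ctr(0x9) == 2 and rcu_seq_state(0x9) == 1 below.
 */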

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED 0x1

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
    return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
    return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
    WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
    WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
    WRITE_ONCE(*sp, *sp + 1);
    smp_mb(); /* Ensure update-side operation after counter increment. */
    WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
    return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
    smp_mb(); /* Ensure update-side operation before counter increment. */
    WARN_ON_ONCE(!rcu_seq_state(*sp));
    WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
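
/*
 * Worked example (editorial sketch, not in the original source): given a
 * local "unsigned long sp = 0x8;" (counter 2, state 0), the helpers above
 * behave as follows:
 *
 *     rcu_seq_start(&sp);     -> sp == 0x9 (counter 2, state 1)
 *     rcu_seq_endval(&sp);    -> returns (0x9 | 0x3) + 1 == 0xc
 *     rcu_seq_end(&sp);       -> sp == 0xc (counter 3, state 0)
 *
 * One complete update-side operation thus advances the counter portion by
 * one and returns the state bits to zero.
 */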

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time. This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
    unsigned long s;

    s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
    smp_mb(); /* Above access must not bleed into critical section. */
    return s;
}
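
/*
 * Worked example (editorial sketch): with RCU_SEQ_STATE_MASK == 0x3 the
 * snapshot computed above is (*sp + 0x7) & ~0x3, so:
 *
 *     *sp == 0x8 (counter 2, idle)           -> snapshot 0xc  (counter 3)
 *     *sp == 0x9 (counter 2, GP in progress) -> snapshot 0x10 (counter 4)
 *
 * When a grace period is already in progress it might have started before
 * the caller's readers, so the snapshot skips ahead to the end of the
 * following grace period.
 */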

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
    return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
    return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
    return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
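
/*
 * Typical polling pattern (editorial sketch; "gp_seq" here is a stand-in
 * for the update side's sequence counter, and the callers are
 * hypothetical):
 *
 *     unsigned long s = rcu_seq_snap(&gp_seq);   - cookie to check later
 *     ...
 *     if (rcu_seq_started(&gp_seq, s))
 *         ...   - the needed grace period has at least started
 *     if (rcu_seq_done(&gp_seq, s))
 *         ...   - a full grace period has elapsed since the snapshot
 */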

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
    unsigned long cur_s = READ_ONCE(*sp);

    return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
    return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
    return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
                new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
    unsigned long rnd_diff;

    if (old == new)
        return 0;
    /*
     * Compute the number of grace periods (still shifted up), plus
     * one if either of new and old is not an exact grace period.
     */
    rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
           ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
           ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
    if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
        return 1; /* Definitely no grace period has elapsed. */
    return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
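
/*
 * Arithmetic trace (editorial sketch): with old == 0x8 and new == 0x10,
 * both having zero state bits, rnd_diff == 0x10 - 0x8 + 0 == 0x8, which
 * exceeds RCU_SEQ_STATE_MASK, so the function returns
 * ((0x8 - 0x3 - 1) >> 2) + 2 == 3.  As the comment above says, this is
 * only a rough count of the grace periods spanned by the two values.
 */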

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY   0
# define STATE_RCU_HEAD_QUEUED  1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
    int r1;

    r1 = debug_object_activate(head, &rcuhead_debug_descr);
    debug_object_active_state(head, &rcuhead_debug_descr,
                  STATE_RCU_HEAD_READY,
                  STATE_RCU_HEAD_QUEUED);
    return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
    debug_object_active_state(head, &rcuhead_debug_descr,
                  STATE_RCU_HEAD_QUEUED,
                  STATE_RCU_HEAD_READY);
    debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else   /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
    return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
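
/*
 * Usage sketch (editorial, hedged): callback queuing and invocation are
 * expected to pair these roughly as
 *
 *     debug_rcu_head_queue(head);     - when the callback is enqueued
 *     ...                             - grace period elapses
 *     debug_rcu_head_unqueue(head);   - just before head->func(head)
 *
 * so that debugobjects can flag a second call_rcu() on an rcu_head that
 * is still queued, or invocation of a callback that was never queued.
 */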

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
    return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
    return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
    if (!rcu_cpu_stall_suppress) \
        rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
    if (rcu_cpu_stall_suppress == 3) \
        rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
    return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
    static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
    \
    if (!atomic_read(&___rfd_beenhere) && \
        !atomic_xchg(&___rfd_beenhere, 1)) { \
        tracing_off(); \
        rcu_ftrace_dump_stall_suppress(); \
        ftrace_dump(oops_dump_mode); \
        rcu_ftrace_dump_stall_unsuppress(); \
    } \
} while (0)
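
/*
 * The once-only guard above uses a per-callsite static: the cheap
 * atomic_read() filters the common case and atomic_xchg() guarantees that
 * only one of any racing callers performs the dump.  A minimal caller
 * (editorial sketch; "stall_detected" is hypothetical) would be:
 *
 *     if (stall_detected)
 *         rcu_ftrace_dump(DUMP_ALL);
 */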

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
    int i;

    for (i = 0; i < RCU_NUM_LVLS; i++)
        levelspread[i] = INT_MIN;
    if (rcu_fanout_exact) {
        levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
        for (i = rcu_num_lvls - 2; i >= 0; i--)
            levelspread[i] = RCU_FANOUT;
    } else {
        int ccur;
        int cprv;

        cprv = nr_cpu_ids;
        for (i = rcu_num_lvls - 1; i >= 0; i--) {
            ccur = levelcnt[i];
            levelspread[i] = (cprv + ccur - 1) / ccur;
            cprv = ccur;
        }
    }
}
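
/*
 * Worked example (editorial sketch): assume a two-level tree built for
 * nr_cpu_ids == 40 with three leaf rcu_node structures under one root,
 * i.e. levelcnt == { 1, 3 }, and rcu_fanout_exact clear.  The balanced
 * path then computes:
 *
 *     i == 1: levelspread[1] = (40 + 3 - 1) / 3 == 14   (CPUs per leaf)
 *     i == 0: levelspread[0] = (3 + 1 - 1) / 1 == 3     (leaves under root)
 *
 * spreading CPUs as evenly as possible across the leaves rather than
 * packing RCU_FANOUT_LEAF CPUs into each leaf in turn.
 */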

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
    for ((rnp) = &(sp)->node[0]; \
         (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
    srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
    for ((rnp) = rcu_first_leaf_node(); \
         (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
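
/*
 * Usage sketch (editorial): the breadth-first walk visits every rcu_node,
 * root first, while the leaf iterator visits only the last level.  With a
 * hypothetical helper do_something_with():
 *
 *     struct rcu_node *rnp;
 *
 *     rcu_for_each_node_breadth_first(rnp)
 *         do_something_with(rnp);
 *     rcu_for_each_leaf_node(rnp)
 *         do_something_with(rnp);
 */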

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
    for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
         (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
         (cpu) <= rnp->grphi; \
         (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
    ((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
    for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
         (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
         (cpu) <= rnp->grphi; \
         (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
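
/*
 * Usage sketch (editorial): iterate either all possible CPUs in a leaf's
 * ->grplo..->grphi range, or only those whose bits are set in a
 * caller-supplied mask ("qsmask" and inspect_cpu() below are hypothetical):
 *
 *     int cpu;
 *
 *     for_each_leaf_node_possible_cpu(rnp, cpu)
 *         inspect_cpu(cpu);
 *     for_each_leaf_node_cpu_mask(rnp, cpu, qsmask)
 *         inspect_cpu(cpu);
 */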

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_node structures form a tree, tree traversal takes and
 * releases different locks at different levels.  An UNLOCK of one level
 * followed by a LOCK of another level therefore does not imply a full
 * memory barrier, and, most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, use these wrappers
 * rather than calling raw_spin_{lock,unlock}* directly on ->lock.
 */
#define raw_spin_lock_rcu_node(p)                   \
do {                                    \
    raw_spin_lock(&ACCESS_PRIVATE(p, lock));            \
    smp_mb__after_unlock_lock();                    \
} while (0)

#define raw_spin_unlock_rcu_node(p)                 \
do {                                    \
    lockdep_assert_irqs_disabled();                 \
    raw_spin_unlock(&ACCESS_PRIVATE(p, lock));          \
} while (0)

#define raw_spin_lock_irq_rcu_node(p)                   \
do {                                    \
    raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));            \
    smp_mb__after_unlock_lock();                    \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)                 \
do {                                    \
    lockdep_assert_irqs_disabled();                 \
    raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock));          \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags)            \
do {                                    \
    raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
    smp_mb__after_unlock_lock();                    \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)           \
do {                                    \
    lockdep_assert_irqs_disabled();                 \
    raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags);    \
} while (0)

#define raw_spin_trylock_rcu_node(p)                    \
({                                  \
    bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));    \
                                    \
    if (___locked)                          \
        smp_mb__after_unlock_lock();                \
    ___locked;                          \
})

#define raw_lockdep_assert_held_rcu_node(p)             \
    lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
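
/*
 * Usage sketch (editorial): update-side code is expected to take and
 * release rcu_node locks only through the wrappers above, for example:
 *
 *     unsigned long flags;
 *
 *     raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *     ...   - updates of rnp fields here are ordered after the previous
 *             holder's critical section by smp_mb__after_unlock_lock()
 *     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */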

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE  0
#define RCU_SCHEDULER_INIT  1
#define RCU_SCHEDULER_RUNNING   2

enum rcutorture_type {
    RCU_FLAVOR,
    RCU_TASKS_FLAVOR,
    RCU_TASKS_RUDE_FLAVOR,
    RCU_TASKS_TRACING_FLAVOR,
    RCU_TRIVIAL_FLAVOR,
    SRCU_FLAVOR,
    INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
                   struct rcu_head *rhp,
                   unsigned long secs,
                   unsigned long c_old,
                   unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
                      int *flags, unsigned long *gp_seq)
{
    *flags = 0;
    *gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                   struct rcu_head *rhp,
                   unsigned long secs,
                   unsigned long c_old,
                   unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
    do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                       struct srcu_struct *sp, int *flags,
                       unsigned long *gp_seq)
{
    if (test_type != SRCU_FLAVOR)
        return;
    *flags = 0;
    *gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
                 struct srcu_struct *sp, int *flags,
                 unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */