0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/cache.h>
0013 #include <linux/kthread.h>
0014 #include <linux/spinlock.h>
0015 #include <linux/rtmutex.h>
0016 #include <linux/threads.h>
0017 #include <linux/cpumask.h>
0018 #include <linux/seqlock.h>
0019 #include <linux/swait.h>
0020 #include <linux/rcu_node_tree.h>
0021
0022 #include "rcu_segcblist.h"
0023
0024
/*
 * Communicate arguments to a kthread or workqueue handler that does
 * expedited grace-period processing.
 */
struct rcu_exp_work {
	unsigned long rew_s;		/* Expedited GP sequence number snapshot. */
#ifdef CONFIG_RCU_EXP_KTHREAD
	struct kthread_work rew_work;	/* Work item when driven by a kthread worker. */
#else
	struct work_struct rew_work;	/* Work item when driven by a workqueue. */
#endif
};
0033
0034
/* RCU's kthread states for tracing (e.g. ->boost_kthread_status). */
#define RCU_KTHREAD_STOPPED 0	/* Kthread not yet running (or stopped). */
#define RCU_KTHREAD_RUNNING 1	/* Kthread doing its work. */
#define RCU_KTHREAD_WAITING 2	/* Kthread sleeping, nothing to do. */
#define RCU_KTHREAD_OFFCPU 3	/* Kthread preempted/migrated off its CPU. */
#define RCU_KTHREAD_YIELDING 4	/* Kthread yielding the CPU. */
#define RCU_KTHREAD_MAX 4	/* Highest valid state value. */
0041
0042
0043
0044
/*
 * Definition for one node within the RCU grace-period-detection hierarchy.
 * Leaf nodes track per-CPU state via rcu_data; interior nodes aggregate
 * their children.  The whole structure is cacheline-aligned to avoid
 * false sharing between nodes.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Protects the fields below; root */
					/*  rcu_node's ->lock also guards some */
					/*  rcu_state fields. */
	unsigned long gp_seq;		/* Grace-period sequence number, tracks */
					/*  rcu_state.gp_seq. */
	unsigned long gp_seq_needed;	/* Furthest-future GP request seen. */
	unsigned long completedqs;	/* GP for which all QSes were seen here. */
	unsigned long qsmask;		/* CPUs or groups that still need to pass */
					/*  through a quiescent state for the */
					/*  current grace period to proceed. */
					/*  In a leaf node each bit corresponds to */
					/*  an rcu_data structure; otherwise each */
					/*  bit corresponds to a child rcu_node. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;	/* Per-GP initial value for qsmask, */
					/*  loaded from ->qsmaskinitnext at the */
					/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;	/* Online CPUs for the next grace period. */
	unsigned long expmask;		/* CPUs or groups that need to check in */
					/*  for the current expedited GP to end. */
	unsigned long expmaskinit;	/* Per-GP initial value for expmask, */
					/*  loaded from ->expmaskinitnext at the */
					/*  start of each expedited GP. */
	unsigned long expmaskinitnext;	/* Online CPUs for the next expedited GP; */
					/*  any CPU that has ever been online has */
					/*  its bit set here. */
	unsigned long cbovldmask;	/* CPUs experiencing callback overload. */
	unsigned long ffmask;		/* Fully functional CPUs. */
	unsigned long grpmask;		/* This node's single bit in the parent's */
					/*  qsmask (and friends). */
	int grplo;			/* Lowest-numbered CPU covered here. */
	int grphi;			/* Highest-numbered CPU covered here. */
	u8 grpnum;			/* Group number for next level up. */
	u8 level;			/* Tree level; root is at level 0. */
	bool wait_blkd_tasks;		/* Must we wait for blocked tasks to */
					/*  exit their RCU read-side critical */
					/*  sections before propagating offline */
					/*  state up the rcu_node tree? */
	struct rcu_node *parent;	/* Parent node, NULL at the root. */
	struct list_head blkd_tasks;	/* Tasks blocked within RCU read-side */
					/*  critical sections. */
	struct list_head *gp_tasks;	/* First task blocking the current normal */
					/*  grace period, or NULL if none. */
	struct list_head *exp_tasks;	/* First task blocking the current */
					/*  expedited grace period, or NULL. */
	struct list_head *boost_tasks;	/* First task needing priority boosting, */
					/*  or NULL if no boosting is required */
					/*  for this rcu_node structure. */
	struct rt_mutex boost_mtx;	/* rt_mutex used to priority-boost */
					/*  blocked readers. */
	unsigned long boost_time;	/* When (in jiffies) to start boosting. */
	struct mutex boost_kthread_mutex;	/* Serializes boost-kthread */
						/*  spawning for this node. */
	struct task_struct *boost_kthread_task;	/* kthread handling priority */
						/*  boosting for this node. */
	unsigned int boost_kthread_status;	/* RCU_KTHREAD_* state, for tracing. */
	unsigned long n_boosts;		/* Number of boosts for this node. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];	/* Where no-CBs GP kthreads wait */
						/*  (indexed by GP sequence parity). */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
					/* Funnel lock for force-quiescent-state. */
	/* Expedited grace-period machinery; own cacheline. */
	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;	/* Least-recently requested exp GP seq. */
	wait_queue_head_t exp_wq[4];	/* Funnel waiters, bucketed by seq. */
	struct rcu_exp_work rew;	/* Per-node expedited work item. */
	bool exp_need_flush;		/* Need to flush the above workitem? */
	raw_spinlock_t exp_poll_lock;	/* Guards the polled-expedited-GP */
					/*  fields below. */
	unsigned long exp_seq_poll_rq;	/* Requested polled expedited GP seq. */
	struct work_struct exp_poll_wq;	/* Work item driving polled exp GPs. */
} ____cacheline_internodealigned_in_smp;
0141
0142
0143
0144
0145
0146
/*
 * Bit masks in a leaf rcu_node cover the CPU-ID interval [grplo, grphi] and
 * are indexed relative to that interval rather than the global CPU ID space.
 * This yields the mask bit corresponding to the specified CPU in that node.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
0148
0149
0150
0151
0152
/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods: testing ->s checks
 * both ->b.norm and ->b.exp at once.
 */
union rcu_noqs {
	struct {
		u8 norm;	/* Normal GP still needs a QS from this CPU. */
		u8 exp;		/* Expedited GP still needs a QS. */
	} b;			/* The individual bits. */
	u16 s;			/* Both bytes viewed as one value. */
};
0160
0161
/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long gp_seq;		/* Track rcu_state.gp_seq. */
	unsigned long gp_seq_needed;	/* Track furthest-future GP request. */
	union rcu_noqs cpu_no_qs;	/* No QSes yet for this CPU. */
	bool core_needs_qs;		/* Core waits for quiescent state. */
	bool beenonline;		/* CPU online at least once. */
	bool gpwrap;			/* Possible ->gp_seq wrap. */
	bool cpu_started;		/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of the hierarchy. */
	unsigned long grpmask;		/* This CPU's bit in leaf's qsmask. */
	unsigned long ticks_this_gp;	/* Number of scheduling-clock ticks */
					/*  this CPU has handled during and */
					/*  after the last grace period it */
					/*  is aware of. */
	struct irq_work defer_qs_iw;	/* Deferred quiescent-state report. */
	bool defer_qs_iw_pending;	/* Is the above irq_work pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long qlen_last_fqs_check;	/* qlen at last check for QS forcing. */
	unsigned long n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long n_force_qs_snap;	/* Did another CPU force QS recently? */
	long blimit;			/* Upper limit on a processed batch. */

	/* 3) dynticks interface. */
	int dynticks_snap;		/* Per-GP dynticks tracking. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*  ... to provide QS for expedited GP. */

	/* 4) rcu_barrier() and expediting. */
	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
	struct rcu_head barrier_head;	/* Callback used by rcu_barrier(). */
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq;	/* For nocb CB kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq;	/* For offloading state changes. */
	struct task_struct *nocb_gp_kthread;	/* This CPU's nocb GP kthread. */
	raw_spinlock_t nocb_lock;	/* Guards the following pair of fields. */
	atomic_t nocb_lock_contended;	/* Contention experienced on nocb_lock. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb kthread */
					/*  (RCU_NOCB_WAKE_* value). */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB advance (jiffies). */
	struct mutex nocb_gp_kthread_mutex;	/* Exclusion for nocb GP kthread */
						/*  spawning. */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first;	/* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last;	/* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by the GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, the ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq;	/* Where nocb GP kthread sleeps. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;	/* This CPU's nocb CB kthread. */
	struct list_head nocb_head_rdp;	/* Head of rcu_data list in wakeup */
					/*  chain, if this is the GP rdp. */
	struct list_head nocb_entry_rdp;	/* This rcu_data's node in the */
						/*  wakeup chain. */
	struct rcu_data *nocb_toggling_rdp;	/* rdp queued for (de-)offloading. */

	/* The following fields are used by the CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* The GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;	/* rcuc per-CPU kthread, */
							/*  or NULL. */
	unsigned int rcu_cpu_kthread_status;	/* RCU_KTHREAD_* state, tracing. */
	char rcu_cpu_has_work;		/* Work pending for the rcuc kthread? */
	unsigned long rcuc_activity;	/* Last rcuc activity (for stall checks). */

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last resched_cpu(). */
	unsigned long last_sched_clock;	/* Jiffies of last scheduling-clock irq. */

	int cpu;			/* CPU number owning this structure. */
};
0268
0269
/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT 0	/* No deferred wakeup pending. */
#define RCU_NOCB_WAKE_BYPASS 1	/* Lazy wakeup for bypass-list flush. */
#define RCU_NOCB_WAKE 2		/* Ordinary deferred wakeup. */
#define RCU_NOCB_WAKE_FORCE 3	/* Urgent wakeup (e.g. prior one pending). */

/* Default force-quiescent-state interval, scaled down for faster HZ. */
#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))

/* Very large systems need more delay between bouts of */
/*  quiescent-state forcing. */
#define RCU_JIFFIES_FQS_DIV 256

/* Allow other CPUs time to take at least one scheduling-clock */
/*  irq before complaining about them in stall warnings. */
#define RCU_STALL_RAT_DELAY 2
0284
0285
0286
/*
 * Sleep interruptibly until cond becomes true, re-checking it after every
 * wakeup.  The state is set before the condition test so a concurrent
 * wakeup between the test and schedule() is not lost.
 */
#define rcu_wait(cond) \
do { \
	for (;;) { \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (cond) \
			break; \
		schedule(); \
	} \
	__set_current_state(TASK_RUNNING); \
} while (0)
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
/*
 * RCU global state, including the node hierarchy.  The hierarchy is
 * laid out in a linear array, with the root first followed by each
 * successive level; ->level[] points to the first node of each level.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];	/* First node of each */
						/*  level (+1 to quiet a */
						/*  bogus compiler warning). */
	int ncpus;			/* # CPUs seen so far. */
	int n_online_cpus;		/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
					/* Grace-period sequence #. */
	unsigned long gp_max;		/* Maximum GP duration in jiffies. */
	struct task_struct *gp_kthread;	/* Task for grace periods. */
	struct swait_queue_head gp_wq;	/* Where GP task waits. */
	short gp_flags;			/* Commands for GP task (RCU_GP_FLAG_*). */
	short gp_state;			/* GP kthread sleep state (RCU_GP_*). */
	unsigned long gp_wake_time;	/* Last GP kthread wake (jiffies). */
	unsigned long gp_wake_seq;	/* ->gp_seq at ^^^. */
	unsigned long gp_seq_polled;	/* GP seq for polled API. */
	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;	/* Guards barrier fields. */
	atomic_t barrier_cpu_count;	/* # CPUs still being waited on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;	/* ++ at start and end of */
					/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	raw_spinlock_t barrier_lock;	/* Protects ->barrier_seq_snap. */

	struct mutex exp_mutex;		/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;	/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;	/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;			/* # CPUs seen last time. */
	u8 cbovld;			/* Callback overload now? */
	u8 cbovldnext;			/* ^        ^  next time? */

	unsigned long jiffies_force_qs;	/* Time at which to invoke */
					/*  quiescent-state forcing. */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
					/*  kthreads, if configured. */
	unsigned long n_force_qs;	/* Number of quiescent-state forcings. */
	unsigned long gp_start;		/* Time at which GP started (jiffies). */
	unsigned long gp_end;		/* Time last GP ended (jiffies). */
	unsigned long gp_activity;	/* Time of last GP kthread activity. */
	unsigned long gp_req_activity;	/* Time of last GP request. */
	unsigned long jiffies_stall;	/* Time at which to check for CPU stalls. */
	unsigned long jiffies_resched;	/* Time at which to resched a */
					/*  reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;		/* Name of this flavor ("rcu_sched"/...). */
	char abbr;			/* Abbreviated flavor name. */

	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
					/* Synchronize offline with GP */
					/*  pre-initialization. */
	int nocb_is_setup;		/* nocb is set up from boot. */
};
0380
0381
/* Bit values for the ->gp_flags field in struct rcu_state. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS 0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */

/* Values for the ->gp_state field in struct rcu_state. */
#define RCU_GP_IDLE 0		/* Initial state; no GP in progress. */
#define RCU_GP_WAIT_GPS 1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS 2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF 3		/* Grace-period initialization hotplug. */
#define RCU_GP_INIT 4		/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS 5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP 7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED 8	/* Grace-period cleanup complete. */
0396
0397
0398
0399
0400
0401
0402
0403
0404
/* Pick the flavor name/abbreviation according to preemption model. */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
/*
 * When tracing is enabled, the name must live in a real object placed in
 * the __tracepoint_string section so that the tracing tools can find it.
 */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */
0419
0420
0421 static void rcu_bootup_announce(void);
0422 static void rcu_qs(void);
0423 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
0424 #ifdef CONFIG_HOTPLUG_CPU
0425 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
0426 #endif
0427 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
0428 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
0429 static void rcu_flavor_sched_clock_irq(int user);
0430 static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
0431 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
0432 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
0433 static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
0434 static void rcu_cpu_kthread_setup(unsigned int cpu);
0435 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
0436 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
0437 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
0438 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
0439 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
0440 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
0441 static void rcu_init_one_nocb(struct rcu_node *rnp);
0442 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
0443 unsigned long j);
0444 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
0445 bool *was_alldone, unsigned long flags);
0446 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
0447 unsigned long flags);
0448 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
0449 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
0450 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
0451 static void rcu_spawn_cpu_nocb_kthread(int cpu);
0452 static void show_rcu_nocb_state(struct rcu_data *rdp);
0453 static void rcu_nocb_lock(struct rcu_data *rdp);
0454 static void rcu_nocb_unlock(struct rcu_data *rdp);
0455 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
0456 unsigned long flags);
0457 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking the offloaded state, and only then take
 * ->nocb_lock, so that the check and the locking cannot race with a
 * concurrent (de-)offloading of this rdp's callback list.
 */
#define rcu_nocb_lock_irqsave(rdp, flags) \
do { \
	local_irq_save(flags); \
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist)) \
		raw_spin_lock(&(rdp)->nocb_lock); \
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
0474
/* nohz_full handling of the grace-period kthread. */
static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);

/* RCU CPU stall-warning helpers. */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);

/* Handler for the exp_poll_wq workqueue item (polled expedited GPs). */
static void sync_rcu_do_polled_gp(struct work_struct *wp);