// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tree-based variant.
 *
 * For detailed explanation of the Read-Copy Update mechanism see
 * Documentation/RCU/.
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 *    0: Don't convert at all.
 *    1: Convert at init_srcu_struct() time.
 *    2: Convert when rcutorture invokes srcu_torture_stats_print().
 *    3: Decide at boot time based on system shape (default).
 * 0x1X: Above plus if excessive contention is encountered.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);
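
/*
 * Usage note (boot-parameter form of the module parameters above, with
 * illustrative values): because this code is normally built in, these
 * parameters are set on the kernel command line with the "srcutree."
 * prefix, for example:
 *
 *	srcutree.convert_to_big=2	// Convert when rcutorture asks.
 *	srcutree.big_cpu_lim=64		// Go big at boot at >= 64 CPUs.
 */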

/* Number of CPUs that triggers boot-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy that trigger transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);

/* Early-boot callback management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)				\
({									\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize the per-CPU srcu_data structures for the specified
 * srcu_struct.
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}

/* Invalid sequence state, used while an srcu_node's fields are being initialized. */
#define SRCU_SNP_INIT_SEQ	0x2

/*
 * Check whether the sequence number corresponding to an srcu_node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns true if
 * allocation succeeded and false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
	if (!ssp->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->node = NULL;
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	ssp->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_data(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
			if (!ssp->sda_is_static) {
				free_percpu(ssp->sda);
				ssp->sda = NULL;
				return -ENOMEM;
			}
		} else {
			WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
		}
	}
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
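
/*
 * Example usage (a minimal sketch; my_srcu, my_init(), and my_exit() are
 * hypothetical names, not part of this file).  An srcu_struct may either
 * be defined statically via DEFINE_SRCU()/DEFINE_STATIC_SRCU() or
 * initialized dynamically:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	int my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);	// 0 or -ENOMEM.
 *	}
 *
 *	void my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);	// After readers and callbacks are done.
 *	}
 */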

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* Double-checked locking on ->srcu_size_state. */
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
		return;
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	__srcu_transition_to_big(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
	unsigned long j;

	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
		return;
	j = jiffies;
	if (ssp->srcu_size_jiffies != j) {
		ssp->srcu_size_jiffies = j;
		ssp->srcu_n_lock_retries = 0;
	}
	if (++ssp->srcu_n_lock_retries <= small_contention_lim)
		return;
	__srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
	struct srcu_struct *ssp = sdp->ssp;

	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, *flags);
	spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
	if (spin_trylock_irqsave_rcu_node(ssp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structures.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed)))
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store-buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some point in between.  This
	 * does not mean that there are no more readers: one could have
	 * read the current index but not yet have incremented its lock
	 * counter.
	 *
	 * Counter wrap is not a concern here: if a reader modifies its
	 * unlock count after we read it above, that reader's next load
	 * of ->srcu_idx is guaranteed to see the new value, so it will
	 * operate on the other rank of counters and cannot contribute
	 * to overflowing this one.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY	5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1	// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10	// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	1000UL	// Highmark on default per-GP-phase
							// no-delay instances.

// clamp() helpers for unsigned long values.
#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))

// Default number of back-to-back no-delay retry checks per grace-period
// phase: the number of srcu_retry_check_delay-microsecond polls that fit
// into roughly two jiffies.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Then clamp the adjusted value into [LO, HI] above.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE	\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
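
/*
 * Worked example (illustrative arithmetic only): with HZ=250 and
 * SRCU_DEFAULT_RETRY_CHECK_DELAY=5, the adjusted value is
 * 2 * 1000000 / 250 / 5 = 1600, which the clamp reduces to the
 * highmark of 1000.  With HZ=1000 it is 2 * 1000000 / 1000 / 5 = 400,
 * which lies within [3, 1000] and is therefore used as-is.
 */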

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY	(SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
					 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	unsigned long gpstart;
	unsigned long j;
	unsigned long jbase = SRCU_INTERVAL;

	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		jbase = 0;
	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
		j = jiffies - 1;
		gpstart = READ_ONCE(ssp->srcu_gp_start);
		if (time_after(j, gpstart))
			jbase += j - gpstart;
		if (!jbase) {
			WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
			if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
				jbase = 1;
		}
	}
	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	if (!ssp->sda_is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}
	kfree(ssp->node);
	ssp->node = NULL;
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
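
/*
 * Reader-side usage example (a minimal sketch; my_srcu, my_ptr, and
 * struct foo are hypothetical names, not part of this file).  Readers
 * normally go through the srcu_read_lock()/srcu_read_unlock() wrappers
 * rather than calling the __srcu_read_lock() primitives above directly:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// May block: this is SRCU.
 *	srcu_read_unlock(&my_srcu, idx);
 */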

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp;
	int state;

	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	WRITE_ONCE(ssp->srcu_gp_start, jiffies);
	WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period (the one corresponding to idx).  If
 * possible, schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay = 1;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	unsigned long sgsne;
	struct srcu_node *snp;
	int ss_state;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		cbdelay = 0;

	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
	} else {
		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
		srcu_for_each_node_breadth_first(ssp, snp) {
			spin_lock_irq_rcu_node(snp);
			cbs = false;
			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
			if (last_lvl)
				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
			snp->srcu_have_cbs[idx] = gpseq;
			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
			if (ss_state < SRCU_SIZE_BIG)
				mask = ~0;
			else
				mask = snp->srcu_data_have_cbs[idx];
			snp->srcu_data_have_cbs[idx] = 0;
			spin_unlock_irq_rcu_node(snp);
			if (cbs)
				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
		}
	}

	/* Occasionally prevent srcu_data counter wrap. */
	if (!(gpseq & counter_wrap_check))
		for_each_possible_cpu(cpu) {
			sdp = per_cpu_ptr(ssp->sda, cpu);
			spin_lock_irqsave_rcu_node(sdp, flags);
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
				sdp->srcu_gp_seq_needed = gpseq;
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
				sdp->srcu_gp_seq_needed_exp = gpseq;
			spin_unlock_irqrestore_rcu_node(sdp, flags);
		}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}

	/* Transition to big if needed. */
	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes(ssp, GFP_KERNEL);
		else
			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() below.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;
	unsigned long sgsne;

	if (snp)
		for (; snp != NULL; snp = snp->srcu_parent) {
			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
				return;
			spin_lock_irqsave_rcu_node(snp, flags);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
				spin_unlock_irqrestore_rcu_node(snp, flags);
				return;
			}
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	unsigned long sgsne;
	struct srcu_node *snp;
	struct srcu_node *snp_leaf;
	unsigned long snp_seq;

	/* Small (node-less) srcu_struct structures skip the combining tree. */
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		snp_leaf = NULL;
	else
		snp_leaf = sdp->mynode;

	if (snp_leaf)
		/* Each pass through the loop does one level of the srcu_node tree. */
		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
				return; /* GP already done and CBs recorded. */
			spin_lock_irqsave_rcu_node(snp, flags);
			snp_seq = snp->srcu_have_cbs[idx];
			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
				if (snp == snp_leaf && snp_seq == s)
					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
				spin_unlock_irqrestore_rcu_node(snp, flags);
				if (snp == snp_leaf && snp_seq != s) {
					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
					return;
				}
				if (!do_norm)
					srcu_funnel_exp_start(ssp, snp, s);
				return;
			}
			snp->srcu_have_cbs[idx] = s;
			if (snp == snp_leaf)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s);
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);

		// And how can that list_add() in the "else" clause
		// possibly be safe for concurrent execution?  Well,
		// it isn't.  And it does not have to be.  After all, it
		// can only be executed during early boot when there is only
		// one CPU, and no scheduler, ergo no concurrency.
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   !!srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	unsigned long curdelay;

	curdelay = !srcu_get_delay(ssp);

	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if ((--trycount + curdelay) <= 0)
			return false;
		udelay(srcu_retry_check_delay);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle.  */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;
	struct srcu_node *sdp_mynode;
	int ss_state;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_CALL)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_sdp_contention(sdp, &flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/* Ensure that the snp node tree is fully initialized before traversing it. */
	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
		sdp_mynode = NULL;
	else
		sdp_mynode = sdp->mynode;

	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp_mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as
 * CPUs that are executing in the kernel.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
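
/*
 * Example usage (a minimal sketch; struct foo, my_srcu, and foo_release()
 * are hypothetical names, not part of this file).  The rcu_head is
 * embedded in the protected structure and recovered via container_of():
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_release(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// After removing p from all reader-visible structures:
 *	call_srcu(&my_srcu, &p->rh, foo_release);
 */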

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flips the srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
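
/*
 * Updater-side usage example (a minimal sketch; my_srcu, my_ptr,
 * my_update_lock, and new are hypothetical names, not part of this file):
 *
 *	struct foo *old;
 *
 *	spin_lock(&my_update_lock);
 *	old = rcu_dereference_protected(my_ptr,
 *			lockdep_is_held(&my_update_lock));
 *	rcu_assign_pointer(my_ptr, new);
 *	spin_unlock(&my_update_lock);
 *
 *	synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
 *	kfree(old);			// Now safe: no reader can hold old.
 */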

/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns true if a full grace period has elapsed in the meantime.  It
 * is the caller's responsibility to make sure that grace period happens,
 * for example, by invoking call_srcu() after return from
 * get_state_synchronize_srcu().
 *
 * Note that because the cookie is a sequence count, a grace period that
 * ends just after this function samples ->srcu_gp_seq will not be
 * detected until a later call.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb();
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
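
/*
 * Polling usage example (a minimal sketch; my_srcu and my_cookie are
 * hypothetical names, not part of this file):
 *
 *	unsigned long my_cookie;
 *
 *	my_cookie = start_poll_synchronize_srcu(&my_srcu);
 *	// ... do other work while the grace period proceeds ...
 *	if (poll_state_synchronize_srcu(&my_srcu, my_cookie)) {
 *		// A full grace period has elapsed: all pre-existing
 *		// readers are done, so stale data may be freed.
 *	}
 */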

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/*
 * Enqueue an srcu_barrier() callback on the specified srcu_data
 * structure's ->cblist, but only if that ->cblist already has at least
 * one callback enqueued.  A CPU with callbacks must already have
 * registered the need for a future grace period, so the barrier
 * callback can simply use the same grace period as the last callback
 * already in the queue.
 */
static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
{
	spin_lock_irq_rcu_node(sdp);
	atomic_inc(&ssp->srcu_barrier_cpu_cnt);
	sdp->srcu_barrier_head.func = srcu_barrier_cb;
	debug_rcu_head_queue(&sdp->srcu_barrier_head);
	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
				   &sdp->srcu_barrier_head)) {
		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
		atomic_dec(&ssp->srcu_barrier_cpu_cnt);
	}
	spin_unlock_irq_rcu_node(sdp);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	int idx;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	idx = srcu_read_lock(ssp);
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
	else
		for_each_possible_cpu(cpu)
			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
	srcu_read_unlock(ssp, idx);

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
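
/*
 * Teardown usage example (a minimal sketch; my_srcu and my_exit() are
 * hypothetical names, not part of this file).  Before an srcu_struct
 * can be cleaned up, all of its callbacks must have been invoked:
 *
 *	void my_exit(void)
 *	{
 *		// Stop posting new call_srcu() callbacks first, then:
 *		srcu_barrier(&my_srcu);
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */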

/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan2
 * completes the grace period.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq));
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		ssp->srcu_n_exp_nodelay = 0;
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		ssp->srcu_n_exp_nodelay = 0;
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	long len;
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);

	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	len = ready_cbs.len;
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}
	WARN_ON_ONCE(ready_cbs.len);

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	unsigned long curdelay;
	unsigned long j;
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	curdelay = srcu_get_delay(ssp);
	if (curdelay) {
		WRITE_ONCE(ssp->reschedule_count, 0);
	} else {
		j = jiffies;
		if (READ_ONCE(ssp->reschedule_jiffies) == j) {
			WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
			if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
				curdelay = 1;
		} else {
			WRITE_ONCE(ssp->reschedule_count, 1);
			WRITE_ONCE(ssp->reschedule_jiffies, j);
		}
	}
	srcu_reschedule(ssp, curdelay);
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

static const char * const srcu_size_state_name[] = {
	"SRCU_SIZE_SMALL",
	"SRCU_SIZE_ALLOC",
	"SRCU_SIZE_WAIT_BARRIER",
	"SRCU_SIZE_WAIT_CALL",
	"SRCU_SIZE_WAIT_CBS1",
	"SRCU_SIZE_WAIT_CBS2",
	"SRCU_SIZE_WAIT_CBS3",
	"SRCU_SIZE_WAIT_CBS4",
	"SRCU_SIZE_BIG",
	"SRCU_SIZE_???",
};

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;
	int ss_state = READ_ONCE(ssp->srcu_size_state);
	int ss_state_idx = ss_state;

	idx = ssp->srcu_idx & 0x1;
	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
		 srcu_size_state_name[ss_state_idx]);
	if (!ssp->sda) {
		/* Called after cleanup_srcu_struct(), perhaps. */
		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
	} else {
		pr_cont(" per-CPU(idx=%d):", idx);
		for_each_possible_cpu(cpu) {
			unsigned long l0, l1;
			unsigned long u0, u1;
			long c0, c1;
			struct srcu_data *sdp;

			sdp = per_cpu_ptr(ssp->sda, cpu);
			u0 = data_race(sdp->srcu_unlock_count[!idx]);
			u1 = data_race(sdp->srcu_unlock_count[idx]);

			/*
			 * Make sure that a lock is always counted if the
			 * corresponding unlock is counted.
			 */
			smp_rmb();

			l0 = data_race(sdp->srcu_lock_count[!idx]);
			l1 = data_race(sdp->srcu_lock_count[idx]);

			c0 = l0 - u0;
			c1 = l1 - u1;
			pr_cont(" %d(%ld,%ld %c)",
				cpu, c0, c1,
				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
			s0 += c0;
			s1 += c1;
		}
		pr_cont(" T(%ld,%ld)\n", s0, s1);
	}
	if (SRCU_SIZING_IS_TORTURE())
		srcu_transition_to_big(ssp);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
		pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
	if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
		pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
	pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	/* Decide on srcu_struct-size strategy. */
	if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
		if (nr_cpu_ids >= big_cpu_lim) {
			convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
			pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
		} else {
			convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
			pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
		}
	}

	/*
	 * Once srcu_init_done is set, call_srcu() can follow the normal
	 * path and queue delayed work.  Until then, early-boot callers
	 * park their srcu_struct on srcu_boot_list, which is drained here
	 * now that workqueues are available.
	 */
	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		list_del_init(&ssp->work.work.entry);
		if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
			ssp->srcu_size_state = SRCU_SIZE_ALLOC;
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
	int ret;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ret = init_srcu_struct(*(sspp++));
		if (WARN_ON_ONCE(ret))
			return ret;
	}
	return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++)
		cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */