// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion.
 *
 * This file holds the RCU infrastructure shared by the kernel's RCU
 * implementations: lockdep hooks for read-side checking, controls for
 * expediting and un-expediting grace periods, debug-objects support for
 * rcu_head structures, RCU CPU stall-warning module parameters, and the
 * early-boot self tests.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>

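// "rcu.h" pulls in <trace/events/rcu.h>, so defining CREATE_TRACE_POINTS
// first instantiates RCU's tracepoints in this translation unit.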
#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
/* Expose the rcu_expedited and rcu_normal knobs as module parameters. */
module_param(rcu_expedited, int, 0444);
module_param(rcu_normal, int, 0444);
/* On PREEMPT_RT, default to normal (non-expedited) grace periods after boot. */
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
module_param(rcu_normal_after_boot, int, 0444);
#endif
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - compute best-guess answer when lockdep cannot help
 * @ret: Best-guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must not be relied on, in which case *ret holds
 * the best guess: assume a read-side critical section is held while lockdep
 * is not yet enabled, and assume it is not held while this CPU is idle or
 * offline from RCU's perspective (where rcu_read_lock() is illegal anyway).
 */
static bool rcu_read_lock_held_common(bool *ret)
{
	if (!debug_lockdep_rcu_enabled()) {
		*ret = true;
		return true;
	}
	if (!rcu_is_watching()) {
		*ret = false;
		return true;
	}
	if (!rcu_lockdep_current_cpu_online()) {
		*ret = false;
		return true;
	}
	return false;
}

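/**
 * rcu_read_lock_sched_held() - might we be in an RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  Otherwise, assume we are in one
 * unless we can prove otherwise.  Note that disabling preemption
 * (including disabling irqs) counts as an RCU-sched read-side critical
 * section.
 */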
int rcu_read_lock_sched_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU
/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note that
 * if the user specifies both rcu_expedited and rcu_normal, rcu_normal
 * wins, except during the mid-boot window when everything is expedited.
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

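/*
 * Depth of rcu_expedite_gp() nesting.  Starts at 1 so that grace periods
 * are expedited throughout boot; rcu_end_inkernel_boot() drops this
 * initial count via rcu_unexpedite_gp().
 */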
static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for use
 * within RCU.  Note that this function takes the rcu_expedited sysfs/boot
 * variable and the rcu_expedite_gp() nesting count into account, so
 * looping on rcu_unexpedite_gp() until rcu_gp_is_expedited() returns
 * false is a -really- bad idea!
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been invoked.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by subsequent calls to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to their
 * normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);

static bool rcu_boot_ended __read_mostly;

/*
 * Inform RCU of the end of the in-kernel boot sequence.  This drops the
 * boot-time expediting and, if rcu_normal_after_boot is set, forces all
 * subsequent grace periods to be non-expedited.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is useful
 * just after a change in mode for these primitives, and during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG,
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG,
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must occur
 * in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

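/*
 * Might we be in *any* flavor of RCU read-side critical section?  Returns
 * nonzero if lockdep shows any of the rcu, rcu_bh, or rcu_sched locks as
 * held, and otherwise falls back to checking for disabled preemption.
 */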
int rcu_read_lock_any_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
		return 1;
	return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

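/*
 * Wait for one grace period from each of the @n call_rcu()-style functions
 * in @crcu_array, posting a callback through the corresponding @rs_array
 * entry and sleeping until all of the callbacks have been invoked.
 * Duplicate entries are waited on only once.  With @checktiny (Tiny RCU),
 * a plain call_rcu() entry needs no callback at all: on a single CPU, the
 * very ability to sleep implies a vanilla RCU grace period.
 */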
void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			init_rcu_head_on_stack(&rs_array[i].head);
			init_completion(&rs_array[i].completion);
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
		}
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			wait_for_completion(&rs_array[i].completion);
			destroy_rcu_head_on_stack(&rs_array[i].head);
		}
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

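/*
 * Finish an rcuwait: clear the wait structure's task pointer so that
 * rcuwait_wake_up() no longer sees this task, then restore the current
 * task to the TASK_RUNNING state.
 */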
void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(finish_rcuwait);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
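/* Inform debugobjects of a new (heap-allocated) rcu_head structure. */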
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

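/* Inform debugobjects that an rcu_head structure is about to be freed. */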
void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

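/*
 * debugobjects ->is_static_object() callback: any memory pattern is a
 * valid rcu_head, so all rcu_head structures may be treated as if they
 * were statically initialized.
 */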
static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

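/* debugobjects descriptor used to track rcu_head structures. */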
const struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
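/* Get rcutorture access to sched_setaffinity(). */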
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
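/* Module parameters controlling RCU CPU stall warnings. */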
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly;	// !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
int rcu_exp_cpu_stall_timeout __read_mostly = CONFIG_RCU_EXP_CPU_STALL_TIMEOUT;
module_param(rcu_exp_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

/**
 * get_completed_synchronize_rcu - Return a pre-completed polled state cookie
 *
 * Returns a value that will always be treated by functions like
 * poll_state_synchronize_rcu() as a cookie whose grace period has already
 * completed.
 */
unsigned long get_completed_synchronize_rcu(void)
{
	return RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu);

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

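/* SRCU structure and polled-grace-period cookie for the early-boot self test. */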
DEFINE_STATIC_SRCU(early_srcu);
static unsigned long early_srcu_cookie;

struct early_boot_kfree_rcu {
	struct rcu_head rh;
};

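/*
 * Queue self-test callbacks through call_rcu() and, if SRCU is configured,
 * call_srcu(), and also exercise kfree_rcu().  The counted callbacks are
 * checked later by rcu_verify_early_boot_tests().
 */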
static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU)) {
		early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
		call_srcu(&early_srcu, &shead, test_callback);
	}
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);
}

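/* Invoked during boot to run the RCU early-boot self tests, if requested. */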
void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

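/*
 * Late initcall verifying the early-boot self tests: wait for the queued
 * callbacks with rcu_barrier()/srcu_barrier(), then complain if the number
 * of callbacks actually invoked does not match expectations.
 */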
static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
			WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* #ifdef CONFIG_PROVE_RCU */

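// The Tasks RCU flavors are implemented in tasks.h, which is built into this file.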
#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time RCU settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */