// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};
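
/*
 * Illustrative sketch (derived from the code below): the callback list is
 * singly linked.  Callbacks from rcucblist up to (but not including)
 * *donetail have already waited out a grace period and may be invoked; the
 * remainder, up to curtail, are still waiting.  Both tails point to ->next
 * fields (or to rcucblist itself when the relevant segment is empty):
 *
 *	rcucblist -> [done CB] -> [done CB] -> [waiting CB] -> NULL
 *	                              ^                ^
 *	                          donetail          curtail
 */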

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};

/* Wait for all outstanding call_rcu() callbacks to be invoked. */
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

/* Record an rcu quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	local_irq_restore(flags);
}
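
/*
 * Note: the grace-period counter advances by 2 per quiescent state.  The
 * polling API below treats any change of the counter as a completed grace
 * period, so a cookie taken before this increment is satisfied as soon as
 * rcu_qs() runs.
 */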

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases
 * or freeing it directly (for kfree).  Return true if kfreeing, false
 * otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
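
/*
 * Illustrative sketch: kvfree_rcu() encodes the offset of the rcu_head
 * within its enclosing object as the "callback" pointer, which
 * __is_kvfree_rcu_offset() recognizes by its small numeric value.  With a
 * hypothetical type:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;	// head->func == offsetof(struct foo, rh)
 *	};
 *
 * rcu_reclaim_tiny() then recovers the original allocation as
 * (void *)head - offset and passes it straight to kvfree().
 */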

/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, aside from
 * updating the grace-period counter.
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
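
/*
 * Typical updater pattern (illustrative sketch; my_lock, my_list, and
 * struct foo are hypothetical):
 *
 *	struct foo *p;
 *
 *	spin_lock(&my_lock);
 *	p = rcu_dereference_protected(my_list, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_list, p->next);
 *	spin_unlock(&my_lock);
 *	synchronize_rcu();	// wait out pre-existing readers
 *	kfree(p);
 */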

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
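
/*
 * Example use (illustrative sketch; struct foo, foo_reclaim(), and "p" are
 * hypothetical):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		kfree(container_of(rp, struct foo, rcu));
 *	}
 *
 *	call_rcu(&p->rcu, foo_reclaim);	// frees p after a grace period
 */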

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
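
/*
 * Polling usage (illustrative sketch; "p" is a hypothetical object that has
 * already been unpublished from its RCU-protected structure):
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *	...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(p);	// a grace period has elapsed
 *	else
 *		...		// not yet; poll again later
 */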

#ifdef CONFIG_KASAN_GENERIC
/*
 * Record a KASAN auxiliary stack trace for the object being queued, then
 * hand it to the common kvfree_rcu path.
 */
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		void *ptr = (void *) head - (unsigned long) func;

		kasan_record_aux_stack_noalloc(ptr);
	}

	__kvfree_call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
#endif

/* Boot-time initialization: register the RCU softirq handler. */
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}