// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
    struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
    struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
    struct rcu_head **curtail;  /* ->next pointer of last CB. */
    unsigned long gp_seq;       /* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
    .donetail   = &rcu_ctrlblk.rcucblist,
    .curtail    = &rcu_ctrlblk.rcucblist,
    .gp_seq     = 0 - 300UL,
};
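
/*
 * Illustrative sketch of the list layout (an added editorial note, not
 * from the original source): with callbacks A and B already past their
 * grace period and C still waiting, the fields above relate as follows:
 *
 *      rcucblist -> A -> B -> C -> NULL
 *      donetail == &B->next   (last callback that is ready to invoke)
 *      curtail  == &C->next   (most recently queued callback)
 *
 * When the list is empty, both tail pointers point back at rcucblist
 * itself; rcu_process_callbacks() below compares donetail against
 * &rcu_ctrlblk.rcucblist to decide whether any callbacks are ready.
 */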

void rcu_barrier(void)
{
    wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);
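
/*
 * Example usage (illustrative sketch; the foo names are hypothetical):
 * code that queues callbacks with call_rcu() typically invokes
 * rcu_barrier() before tearing down the resources those callbacks use,
 * for instance on module unload:
 *
 *      static void __exit foo_exit(void)
 *      {
 *              (stop queueing new callbacks here)
 *              rcu_barrier();
 *              kmem_cache_destroy(foo_cache);
 *      }
 */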

/* Record an rcu quiescent state.  */
void rcu_qs(void)
{
    unsigned long flags;

    local_irq_save(flags);
    if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
        rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
        raise_softirq_irqoff(RCU_SOFTIRQ);
    }
    WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
    local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
    if (user) {
        rcu_qs();
    } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
        set_tsk_need_resched(current);
        set_preempt_need_resched();
    }
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
    rcu_callback_t f;
    unsigned long offset = (unsigned long)head->func;

    rcu_lock_acquire(&rcu_callback_map);
    if (__is_kvfree_rcu_offset(offset)) {
        trace_rcu_invoke_kvfree_callback("", head, offset);
        kvfree((void *)head - offset);
        rcu_lock_release(&rcu_callback_map);
        return true;
    }

    trace_rcu_invoke_callback("", head);
    f = head->func;
    WRITE_ONCE(head->func, (rcu_callback_t)0L);
    f(head);
    rcu_lock_release(&rcu_callback_map);
    return false;
}
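
/*
 * Illustrative sketch of the kvfree_rcu() encoding handled above (struct
 * foo and its rh field are hypothetical, not from this file): for
 *
 *      struct foo {
 *              int a;
 *              struct rcu_head rh;
 *      };
 *
 * kvfree_rcu(fp, rh) queues &fp->rh with ->func holding not a function
 * pointer but (rcu_callback_t)offsetof(struct foo, rh).  That offset is
 * far smaller than any kernel function address, so
 * __is_kvfree_rcu_offset() recognizes it, and the enclosing object is
 * recovered as (void *)head - offset and handed to kvfree().
 */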

/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
    struct rcu_head *next, *list;
    unsigned long flags;

    /* Move the ready-to-invoke callbacks to a local list. */
    local_irq_save(flags);
    if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
        /* No callbacks ready, so just leave. */
        local_irq_restore(flags);
        return;
    }
    list = rcu_ctrlblk.rcucblist;
    rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
    *rcu_ctrlblk.donetail = NULL;
    if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
        rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
    rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
    local_irq_restore(flags);

    /* Invoke the callbacks on the local list. */
    while (list) {
        next = list->next;
        prefetch(next);
        debug_rcu_head_unqueue(list);
        local_bh_disable();
        rcu_reclaim_tiny(list);
        local_bh_enable();
        list = next;
    }
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
    RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
             lock_is_held(&rcu_lock_map) ||
             lock_is_held(&rcu_sched_lock_map),
             "Illegal synchronize_rcu() in RCU read-side critical section");
    WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
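
/*
 * Example usage (illustrative sketch; struct foo, foo_lock, and p are
 * hypothetical): the classic remove-then-free update pattern, where the
 * grace period guarantees that no reader still holds a reference to the
 * removed element when it is freed:
 *
 *      spin_lock(&foo_lock);
 *      list_del_rcu(&p->list);
 *      spin_unlock(&foo_lock);
 *      synchronize_rcu();
 *      kfree(p);
 */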

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
    unsigned long flags;

    debug_rcu_head_queue(head);
    head->func = func;
    head->next = NULL;

    local_irq_save(flags);
    *rcu_ctrlblk.curtail = head;
    rcu_ctrlblk.curtail = &head->next;
    local_irq_restore(flags);

    if (unlikely(is_idle_task(current))) {
        /* force scheduling for rcu_qs() */
        resched_cpu(0);
    }
}
EXPORT_SYMBOL_GPL(call_rcu);
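
/*
 * Example usage (illustrative sketch; struct foo, foo_reclaim(), and p
 * are hypothetical): the asynchronous counterpart of the synchronize_rcu()
 * pattern above, useful when the updater must not block:
 *
 *      struct foo {
 *              struct list_head list;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *rcu)
 *      {
 *              kfree(container_of(rcu, struct foo, rcu));
 *      }
 *
 *      With the update-side lock held:
 *              list_del_rcu(&p->list);
 *              call_rcu(&p->rcu, foo_reclaim);
 */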

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
    return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
    unsigned long gp_seq = get_state_synchronize_rcu();

    if (unlikely(is_idle_task(current))) {
        /* force scheduling for rcu_qs() */
        resched_cpu(0);
    }
    return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
    return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
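
/*
 * Example usage of the polled grace-period API above (illustrative
 * sketch):
 *
 *      unsigned long cookie = start_poll_synchronize_rcu();
 *
 *      Later, possibly from a context that must not block:
 *
 *      if (poll_state_synchronize_rcu(cookie)) {
 *              (a full grace period has elapsed since the cookie was
 *               taken, so previously removed objects may now be freed)
 *      } else {
 *              (retry later, or fall back to call_rcu() or
 *               synchronize_rcu())
 *      }
 */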

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
    if (head) {
        void *ptr = (void *) head - (unsigned long) func;

        kasan_record_aux_stack_noalloc(ptr);
    }

    __kvfree_call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
#endif
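
/*
 * Example usage (illustrative sketch; struct foo and fp are hypothetical):
 * callers normally reach kvfree_call_rcu() through the kvfree_rcu() macro
 * rather than invoking it directly:
 *
 *      struct foo {
 *              int a;
 *              struct rcu_head rh;
 *      };
 *
 *      Once fp is unreachable from any RCU-protected structure:
 *              kvfree_rcu(fp, rh);
 *
 * This frees fp after a grace period without requiring a separate
 * callback function.
 */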

void __init rcu_init(void)
{
    open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
    rcu_early_boot_tests();
}