/* Tiny SRCU implementation (kernel/rcu/srcutiny.c). */
0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Sleepable Read-Copy Update mechanism for mutual exclusion,
0004  *  tiny version for non-preemptible single-CPU use.
0005  *
0006  * Copyright (C) IBM Corporation, 2017
0007  *
0008  * Author: Paul McKenney <paulmck@linux.ibm.com>
0009  */
0010 
0011 #include <linux/export.h>
0012 #include <linux/mutex.h>
0013 #include <linux/preempt.h>
0014 #include <linux/rcupdate_wait.h>
0015 #include <linux/sched.h>
0016 #include <linux/delay.h>
0017 #include <linux/srcu.h>
0018 
0019 #include <linux/rcu_node_tree.h>
0020 #include "rcu_segcblist.h"
0021 #include "rcu.h"
0022 
/* Tracks the boot-to-scheduler transition; set to RCU_SCHEDULER_RUNNING below. */
int rcu_scheduler_active __read_mostly;

/* srcu_struct structures whose grace periods were requested before srcu_init(). */
static LIST_HEAD(srcu_boot_list);

/* True once srcu_init() has run and work may be scheduled directly. */
static bool srcu_init_done;
0026 
0027 static int init_srcu_struct_fields(struct srcu_struct *ssp)
0028 {
0029     ssp->srcu_lock_nesting[0] = 0;
0030     ssp->srcu_lock_nesting[1] = 0;
0031     init_swait_queue_head(&ssp->srcu_wq);
0032     ssp->srcu_cb_head = NULL;
0033     ssp->srcu_cb_tail = &ssp->srcu_cb_head;
0034     ssp->srcu_gp_running = false;
0035     ssp->srcu_gp_waiting = false;
0036     ssp->srcu_idx = 0;
0037     ssp->srcu_idx_max = 0;
0038     INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
0039     INIT_LIST_HEAD(&ssp->srcu_work.entry);
0040     return 0;
0041 }
0042 
0043 #ifdef CONFIG_DEBUG_LOCK_ALLOC
0044 
/*
 * __init_srcu_struct - lockdep-aware initialization of an srcu_struct
 * @ssp: structure to initialize.
 * @name: lock-class name recorded by lockdep.
 * @key: static lock-class key (supplied by the init_srcu_struct() macro).
 *
 * Registers the structure's lockdep map, then performs the common field
 * initialization.  Returns 0 (init_srcu_struct_fields() cannot fail here).
 */
int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
	       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
0054 
0055 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
0056 
/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 *
 * Returns 0 (the Tiny SRCU field initialization cannot fail).
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
0070 
0071 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
0072 
/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	/* No readers may still be inside either index's critical section. */
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	/* Wait for any in-flight srcu_drive_gp() before checking its state. */
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	/* All callbacks must have been invoked: list empty, tail at head. */
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	/* No grace period may still be requested (idx caught up with idx_max). */
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	/* An odd ->srcu_idx would mean a grace period stopped midway. */
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
0092 
/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 *
 * If this was the last reader on @idx and srcu_drive_gp() is waiting,
 * wake it.  The in_task() check avoids calling swake_up_one() from
 * interrupt context; presumably the waiter is then woken by another
 * unlock from task context -- NOTE(review): confirm against srcu_drive_gp().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;

	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
0106 
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;	/* Local list of callbacks to invoke. */
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	/* IRQs off: call_srcu() appends to this list from interrupt level. */
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	/* Bit 1 of ->srcu_idx selects which reader-nesting counter to drain. */
	idx = (ssp->srcu_idx & 0x2) / 2;
	/* First increment: grace period now in progress (->srcu_idx odd). */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	/* Second increment: grace period complete (->srcu_idx even again). */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		/* BH disabled around each callback, matching RCU callback context. */
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(ssp->srcu_gp_running, false);
	if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
0157 
/*
 * Record a request for a grace period ending after the current one and,
 * unless a grace period is already in flight, kick off processing.
 * Before srcu_init() runs, work cannot be scheduled, so the structure is
 * instead parked on srcu_boot_list for srcu_init() to drain.
 */
static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned short cookie;

	cookie = get_state_synchronize_srcu(ssp);
	/* Already requested a grace period at least this far out?  Done. */
	if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
		return;
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
	}
}
0173 
/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * @rhp: callback structure (caller-owned until @func is invoked).
 * @func: function invoked after a full SRCU grace period elapses.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	   rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	/* IRQs off: srcu_drive_gp() and other callers also touch the tail. */
	local_irq_save(flags);
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);
0192 
0193 /*
0194  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
0195  */
0196 void synchronize_srcu(struct srcu_struct *ssp)
0197 {
0198     struct rcu_synchronize rs;
0199 
0200     init_rcu_head_on_stack(&rs.head);
0201     init_completion(&rs.completion);
0202     call_srcu(ssp, &rs.head, wakeme_after_rcu);
0203     wait_for_completion(&rs.completion);
0204     destroy_rcu_head_on_stack(&rs.head);
0205 }
0206 EXPORT_SYMBOL_GPL(synchronize_srcu);
0207 
/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 *
 * ->srcu_idx advances by two per grace period (odd while one is in
 * progress), so (idx + 3) & ~0x1 rounds up to the even counter value at
 * the end of the next full grace period.  Cookies are compared modulo
 * USHRT_MAX with the USHORT_CMP_* helpers.
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret & USHRT_MAX;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
0221 
/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function also requests a grace period, which guarantees that a
 * later poll_state_synchronize_srcu() on the returned cookie will
 * eventually return true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long cookie;

	/* Snapshot the cookie first, then make sure a grace period is queued. */
	cookie = get_state_synchronize_srcu(ssp);
	srcu_gp_start_if_needed(ssp);
	return cookie;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
0237 
/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 *
 * Returns true once ->srcu_idx has advanced past @cookie (modular
 * comparison via USHORT_CMP_GE).  The barrier() orders the check against
 * the caller's subsequent accesses.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);

	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
0249 
/* Lockdep diagnostics: record that the scheduler is fully operational. */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}
0255 
/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	/* From here on, srcu_gp_start_if_needed() schedules work directly. */
	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				  struct srcu_struct, srcu_work.entry);
		/*
		 * list_del_init() before schedule_work(): the entry must be
		 * self-pointing again, as srcu_gp_start_if_needed() tests
		 * list_empty() on it.
		 */
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}
0272 }