// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };

#define rss_lock gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
        memset(rsp, 0, sizeof(*rsp));
        init_waitqueue_head(&rsp->gp_wait);
}

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
        rsp->gp_count++;
        rsp->gp_state = GP_PASSED;
}

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
        call_rcu(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto the slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period). If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
        struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
        unsigned long flags;

        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

        spin_lock_irqsave(&rsp->rss_lock, flags);
        if (rsp->gp_count) {
                /*
                 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
                 */
                WRITE_ONCE(rsp->gp_state, GP_PASSED);
                wake_up_locked(&rsp->gp_wait);
        } else if (rsp->gp_state == GP_REPLAY) {
                /*
                 * A new rcu_sync_exit() has happened; requeue the callback to
                 * catch a later GP.
                 */
                WRITE_ONCE(rsp->gp_state, GP_EXIT);
                rcu_sync_call(rsp);
        } else {
                /*
                 * We're at least a GP after the last rcu_sync_exit(); everybody
                 * will now have observed the write side critical section.
                 * Let 'em rip!
                 */
                WRITE_ONCE(rsp->gp_state, GP_IDLE);
        }
        spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
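
/*
 * In summary, rcu_sync_enter(), rcu_sync_exit() and rcu_sync_func()
 * implement the state machine sketched below, where "GP" denotes a
 * grace-period wait. A callback that runs while gp_count is nonzero
 * (a new updater arrived in the meantime) goes to GP_PASSED instead:
 *
 *      GP_IDLE   --enter()--> GP_ENTER  --GP--> GP_PASSED
 *      GP_PASSED --exit()-->  GP_EXIT   --GP--> GP_IDLE
 *      GP_EXIT   --exit()-->  GP_REPLAY --GP--> GP_EXIT --GP--> GP_IDLE
 */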

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update. After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths. A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
        int gp_state;

        spin_lock_irq(&rsp->rss_lock);
        gp_state = rsp->gp_state;
        if (gp_state == GP_IDLE) {
                WRITE_ONCE(rsp->gp_state, GP_ENTER);
                WARN_ON_ONCE(rsp->gp_count);
                /*
                 * Note that we could simply do rcu_sync_call(rsp) here and
                 * avoid the "if (gp_state == GP_IDLE)" block below.
                 *
                 * However, synchronize_rcu() can be faster if rcu_expedited
                 * or rcu_blocking_is_gp() is true.
                 *
                 * Another reason is that we can't wait for an RCU callback
                 * if we are called at early boot time, but this shouldn't
                 * happen.
                 */
        }
        rsp->gp_count++;
        spin_unlock_irq(&rsp->rss_lock);

        if (gp_state == GP_IDLE) {
                /*
                 * See the comment above, this simply does the "synchronous"
                 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
                 */
                synchronize_rcu();
                rcu_sync_func(&rsp->cb_head);
                /* Not really needed, wait_event() would see GP_PASSED. */
                return;
        }

        wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed. After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
        WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

        spin_lock_irq(&rsp->rss_lock);
        if (!--rsp->gp_count) {
                if (rsp->gp_state == GP_PASSED) {
                        WRITE_ONCE(rsp->gp_state, GP_EXIT);
                        rcu_sync_call(rsp);
                } else if (rsp->gp_state == GP_EXIT) {
                        WRITE_ONCE(rsp->gp_state, GP_REPLAY);
                }
        }
        spin_unlock_irq(&rsp->rss_lock);
}
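
/*
 * A minimal usage sketch of the API above; my_sync, my_update(),
 * my_fastpath() and my_slowpath() are hypothetical names, not part of
 * this file. An updater brackets its update with rcu_sync_enter() and
 * rcu_sync_exit(), while readers check rcu_sync_is_idle() (defined in
 * <linux/rcu_sync.h>) under rcu_read_lock() to choose a path. This is
 * the pattern used by the in-tree percpu-rwsem implementation.
 */
static struct rcu_sync my_sync;         /* rcu_sync_init(&my_sync) at setup */

static void my_update(void) { }         /* hypothetical write-side work */
static void my_fastpath(void) { }       /* hypothetical lock-free read */
static void my_slowpath(void) { }       /* hypothetical locked read */

static void my_writer(void)
{
        rcu_sync_enter(&my_sync);       /* readers now see non-idle */
        my_update();                    /* readers take my_slowpath() here */
        rcu_sync_exit(&my_sync);        /* fastpaths re-enabled after a GP */
}

static void my_reader(void)
{
        rcu_read_lock();
        if (rcu_sync_is_idle(&my_sync))
                my_fastpath();          /* no updater in progress */
        else
                my_slowpath();          /* updater active or GP pending */
        rcu_read_unlock();
}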

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
        int gp_state;

        WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

        spin_lock_irq(&rsp->rss_lock);
        if (rsp->gp_state == GP_REPLAY)
                WRITE_ONCE(rsp->gp_state, GP_EXIT);
        gp_state = rsp->gp_state;
        spin_unlock_irq(&rsp->rss_lock);

        if (gp_state != GP_IDLE) {
                rcu_barrier();
                WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
        }
}
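
/*
 * Teardown sketch, continuing the hypothetical example above: after the
 * last rcu_sync_exit(), a callback may still be pending, so the object
 * must not be freed until rcu_sync_dtor() has waited for it (via the
 * rcu_barrier() above).
 */
static void my_teardown(void)
{
        rcu_sync_dtor(&my_sync);        /* may block in rcu_barrier() */
        /* now safe to free the object embedding my_sync */
}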