![]() |
|
|||
// SPDX-License-Identifier: GPL-2.0

/*
 * Generic wait-for-completion handler;
 *
 * It differs from semaphores in that their default case is the opposite,
 * wait_for_completion default blocks whereas semaphore default non-block. The
 * interface also makes it easy to 'complete' multiple waiting threads,
 * something which isn't entirely natural for semaphores.
 *
 * But more importantly, the primitive documents the usage. Semaphores would
 * typically be used for exclusion which gives rise to priority inversion.
 * Waiting for completion is a typically sync point, but not an exclusion point.
 */

/**
 * complete: - signals a single thread waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&x->wait.lock, flags);

	/*
	 * ->done saturates at UINT_MAX: that value is reserved by
	 * complete_all() to mean "permanently done", so never increment
	 * past it (which also prevents counter overflow).
	 */
	if (x->done != UINT_MAX)
		x->done++;
	swake_up_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

/**
 * complete_all: - signals all threads waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 *
 * Since complete_all() sets the completion of @x permanently to done
 * to allow multiple waiters to finish, a call to reinit_completion()
 * must be used on @x if @x is to be used again. The code must make
 * sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() can not be used
 * to know if there are still waiters after complete_all() has been called.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	/*
	 * NOTE(review): defined elsewhere; presumably asserts that on
	 * PREEMPT_RT this mass wakeup only happens from a context where
	 * it is permitted — confirm against its definition.
	 */
	lockdep_assert_RT_in_threaded_ctx();

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	/* UINT_MAX marks the completion as permanently done; see complete(). */
	x->done = UINT_MAX;
	swake_up_all_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);

/*
 * Core wait loop, shared by all the wait_for_completion*() variants.
 *
 * Called with x->wait.lock held and interrupts disabled; the lock is
 * dropped around the actual sleep (the @action callback, e.g.
 * schedule_timeout) and re-acquired afterwards.
 *
 * Returns:
 *  -ERESTARTSYS		if a signal interrupted the wait (as allowed
 *				by @state),
 *  remaining @timeout (or 0)	if the wait ended without the completion
 *				being signalled,
 *  a positive value (>= 1)	on success, consuming one ->done unit unless
 *				complete_all() made it permanent (UINT_MAX).
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_SWAITQUEUE(wait);

		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			/*
			 * Queue ourselves and set the task state before
			 * dropping the lock, so a concurrent complete()
			 * cannot miss us.
			 */
			__prepare_to_swait(&x->wait, &wait);
			__set_current_state(state);
			raw_spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			raw_spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__finish_swait(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	/* Consume one 'done' unit unless complete_all() set the sentinel. */
	if (x->done != UINT_MAX)
		x->done--;
	/* Guarantee a positive return even if the timeout just reached 0. */
	return timeout ?: 1;
}

/*
 * Common prologue/epilogue for all waiters: take the wait-queue lock
 * around the core loop, bracketed by the complete_acquire()/
 * complete_release() annotation hooks.
 */
static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	complete_acquire(x);

	raw_spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	raw_spin_unlock_irq(&x->wait.lock);

	complete_release(x);

	return timeout;
}

/* Wait using the plain scheduler (no IO-wait accounting). */
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}

/* As wait_for_common(), but the sleep is accounted as IO wait. */
static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}

/**
 * wait_for_completion: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);

/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);

/**
 * wait_for_completion_io: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);

/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);

/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x: holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);

/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);

/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);

/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);

/**
 * try_wait_for_completion - try to decrement a completion without blocking
 * @x: completion structure
 *
 * Return: 0 if a decrement cannot be done without blocking
 *	   1 if a decrement succeeded.
 *
 * If a completion is being used as a counting completion,
 * attempt to decrement the counter without blocking. This
 * enables us to avoid waiting if the resource the completion
 * is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	bool ret = true;

	/*
	 * Since x->done will need to be locked only
	 * in the non-blocking case, we check x->done
	 * first without taking the lock so we can
	 * return early in the blocking case.
	 */
	if (!READ_ONCE(x->done))
		return false;

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	/* Re-check under the lock; the lockless peek above may be stale. */
	if (!x->done)
		ret = false;
	else if (x->done != UINT_MAX)	/* don't consume the complete_all() sentinel */
		x->done--;
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);

/**
 * completion_done - Test to see if a completion has any waiters
 * @x: completion structure
 *
 * Return: 0 if there are waiters (wait_for_completion() in progress)
 *	   1 if there are no waiters.
 *
 * Note, this will always return true if complete_all() was called on @X.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;

	if (!READ_ONCE(x->done))
		return false;

	/*
	 * If ->done, we need to wait for complete() to release ->wait.lock
	 * otherwise we can end up freeing the completion before complete()
	 * is done referencing it.
	 */
	raw_spin_lock_irqsave(&x->wait.lock, flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return true;
}
EXPORT_SYMBOL(completion_done);
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |