0001
0002
0003
0004
0005
0006
0007
/*
 * Initialize a waitqueue head: set up its spinlock, register a lockdep
 * class/name for that lock (so lockdep reports identify which waitqueue
 * is involved), and start with an empty list of waiters.
 */
void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);
0016
/*
 * Add @wq_entry to @wq_head as a non-exclusive waiter (woken on every
 * wakeup). Takes the queue lock with IRQs disabled, so it is safe to
 * call from any context.
 */
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	/* This API always enqueues non-exclusively; force the flag clear. */
	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
0027
/*
 * Add @wq_entry to @wq_head as an exclusive waiter. Exclusive entries
 * are appended at the tail, so non-exclusive waiters (added via
 * __add_wait_queue) are encountered first during a wakeup scan.
 */
void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
0038
/*
 * Add @wq_entry to @wq_head as an exclusive, priority waiter. It is
 * enqueued via __add_wait_queue (same path as non-exclusive entries,
 * i.e. ahead of tail-added exclusive waiters), and additionally tagged
 * WQ_FLAG_PRIORITY.
 *
 * NOTE(review): the special handling of WQ_FLAG_PRIORITY during wakeup
 * is not visible in this file chunk — presumably the wakeup callbacks
 * honour it; confirm against the wait.h helpers.
 */
void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);
0049
/*
 * Remove @wq_entry from @wq_head under the queue lock (IRQ-safe).
 */
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
0059
0060
0061
0062
0063
0064
/* Max waiters woken per lock hold when a bookmark is used (see below). */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0)
 * just wake everything up. If it's an exclusive wakeup (nr_exclusive ==
 * small +ve number) then we wake that number of exclusive tasks, and
 * potentially all the non-exclusive tasks.
 *
 * The wakeup callback (curr->func) may return:
 *   < 0 — abort the scan entirely;
 *     0 — task was not woken (e.g. already running): the exclusive
 *         budget is NOT consumed and the scan continues;
 *   > 0 — task woken: an exclusive entry consumes one unit of budget.
 *
 * To bound lock hold time on very long queues a caller may pass a
 * @bookmark entry: after WAITQUEUE_WALK_BREAK_CNT wakeups the bookmark
 * is inserted at the current position (flagged WQ_FLAG_BOOKMARK) and
 * the function returns, letting the caller drop/retake the lock and
 * resume the walk from the bookmark. Bookmark entries belonging to
 * other walkers are skipped — they are placeholders, not waiters.
 *
 * Returns the remaining exclusive-wakeup budget.
 * Caller must hold wq_head->lock.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		/* Resume an interrupted walk just past the bookmark. */
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	/* Queue empty (or bookmark was the last entry): nothing to do. */
	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		/* Skip other walkers' bookmarks — not real waiters. */
		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		/* Budget exhausted: park the bookmark before @next and bail. */
		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}
0123
/*
 * Lock-taking wrapper around __wake_up_common(): repeatedly acquires
 * the queue lock and walks a bounded chunk of the queue, using a local
 * on-stack bookmark entry so the lock is dropped between chunks. Loops
 * until a walk finishes without re-parking the bookmark.
 */
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	/* A quiescent bookmark: no flags, no task, no callback, unlinked. */
	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		/* Carry the remaining exclusive budget across chunks. */
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads (task state mask passed to the wakeup callbacks)
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
0159
0160
0161
0162
/*
 * Same as __wake_up() but called with the waitqueue lock already held
 * (enforced by lockdep_assert_held() in __wake_up_common()). No key.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
0168
/*
 * Lock-already-held variant waking a single exclusive waiter, passing
 * @key through to the wakeup callbacks.
 */
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
0174
/*
 * As __wake_up_locked_key(), but lets the caller supply a @bookmark so
 * it can drop the lock between bounded walk chunks and resume later
 * (see __wake_up_common()).
 */
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
/**
 * __wake_up_sync_key - wake up a thread blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The "sync" wakeup (WF_SYNC) tells the scheduler that the waker knows
 * it will schedule away soon, a hint intended to keep waker and wakee
 * on the same CPU. A NULL @wq_head is tolerated as a no-op.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue (caller already holds wq_head->lock)
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * Synchronous (WF_SYNC) single-exclusive wakeup; unlike
 * __wake_up_sync_key() this does not take the lock and does not
 * NULL-check @wq_head.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
0230
0231
0232
0233
/*
 * Convenience wrapper: synchronous wakeup with no key.
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);
0239
/*
 * Wake all waiters with the EPOLLHUP | POLLFREE poll key, which tells
 * poll-style wakeup callbacks that the waitqueue itself is about to be
 * freed and they must remove themselves. Afterwards the queue must be
 * empty; the WARN catches any waiter that failed to handle POLLFREE.
 */
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup happening.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	/* Only enqueue if not already queued (idempotent re-prepare). */
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
0272
0273
/*
 * Exclusive variant of prepare_to_wait(): enqueues at the tail with
 * WQ_FLAG_EXCLUSIVE set. Returns true if the queue was empty before we
 * were added, i.e. the caller became the first waiter — useful for
 * callers that must re-check the wait condition in that case.
 */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	/* State change after the add: see barrier note on prepare_to_wait(). */
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
0291
0292 void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
0293 {
0294 wq_entry->flags = flags;
0295 wq_entry->private = current;
0296 wq_entry->func = autoremove_wake_function;
0297 INIT_LIST_HEAD(&wq_entry->entry);
0298 }
0299 EXPORT_SYMBOL(init_wait_entry);
0300
/*
 * Core of the wait_event*() loops: either queue the entry and set the
 * task state, or — if a signal is pending and the state is
 * interruptible — dequeue and report -ERESTARTSYS.
 */
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
0336
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	/* Queue as exclusive-style tail entry if not already queued. */
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	/* Drop the caller-held lock only across the actual sleep. */
	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);
0360
/*
 * Same as do_wait_intr() for callers that hold the waitqueue lock with
 * interrupts disabled: re-enables IRQs across the sleep via the _irq
 * lock variants. See the locking note above do_wait_intr().
 */
int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
0377
0378
0379
0380
0381
0382
0383
0384
0385
0386
/*
 * Finish waiting: restore TASK_RUNNING and, if the entry is still
 * queued, remove it from the waitqueue.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
0412
0413 int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
0414 {
0415 int ret = default_wake_function(wq_entry, mode, sync, key);
0416
0417 if (ret)
0418 list_del_init_careful(&wq_entry->entry);
0419
0420 return ret;
0421 }
0422 EXPORT_SYMBOL(autoremove_wake_function);
0423
0424 static inline bool is_kthread_should_stop(void)
0425 {
0426 return (current->flags & PF_KTHREAD) && kthread_should_stop();
0427 }
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
/*
 * Sleep until WQ_FLAG_WOKEN is set by woken_wake_function() (or the
 * timeout expires, or a kthread is asked to stop). On return the flag
 * is cleared so the entry can be reused for the next iteration of the
 * caller's wait loop. Returns the remaining timeout.
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
0473
/*
 * Wakeup callback paired with wait_woken(): marks the entry woken and
 * then performs the default wakeup.
 */
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() (B) in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);