/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
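/*
 * Linux wait queue related types and methods
 */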
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
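
/* wait_queue_entry::flags */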
#define WQ_FLAG_EXCLUSIVE       0x01
#define WQ_FLAG_WOKEN           0x02
#define WQ_FLAG_BOOKMARK        0x04
#define WQ_FLAG_CUSTOM          0x08
#define WQ_FLAG_DONE            0x10
#define WQ_FLAG_PRIORITY        0x20
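
/*
 * A single wait-queue entry structure:
 */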
struct wait_queue_entry {
        unsigned int            flags;
        void                    *private;
        wait_queue_func_t       func;
        struct list_head        entry;
};

struct wait_queue_head {
        spinlock_t              lock;
        struct list_head        head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;
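
/*
 * Macros for declaration and initialisation of the datatypes
 */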
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
        .private        = tsk, \
        .func           = default_wake_function, \
        .entry          = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
        struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock), \
        .head           = LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head) \
        do { \
                static struct lock_class_key __key; \
 \
                __init_waitqueue_head((wq_head), #wq_head, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
        wq_entry->flags         = 0;
        wq_entry->private       = p;
        wq_entry->func          = default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
        wq_entry->flags         = 0;
        wq_entry->private       = NULL;
        wq_entry->func          = func;
}
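
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
 *
 * NOTE: this function is lockless and requires care; incorrect usage
 * will lead to sporadic and non-obvious failures.  Use it either while
 * holding wait_queue_head::lock, or for wakeups paired with an explicit
 * full barrier, along the lines of:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Without the explicit smp_mb() the waitqueue_active() load can be
 * hoisted over the @cond store, so the waker may observe an empty wait
 * list while the waiter does not yet observe @cond.
 */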
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
        return !list_empty(&wq_head->head);
}
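
/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has only one sleeper on the list.
 * The same locklessness caveats as for waitqueue_active() apply.
 */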
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
        return list_is_singular(&wq_head->head);
}
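
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has waiting processes.
 * The same locklessness caveats as for waitqueue_active() apply.
 */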
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
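        /*
         * We need to be sure we are in sync with the
         * add_wait_queue() modifications to the wait queue.
         *
         * This memory barrier should be paired with one on the
         * waiting side.
         */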
        smp_mb();
        return waitqueue_active(wq_head);
}

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        struct list_head *head = &wq_head->head;
        struct wait_queue_entry *wq;

        list_for_each_entry(wq, &wq_head->head, entry) {
                if (!(wq->flags & WQ_FLAG_PRIORITY))
                        break;
                head = &wq->entry;
        }
        list_add(&wq_entry->entry, head);
}
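
/*
 * Used for wake-one threads:
 */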
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE)
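
/*
 * Wakeup macros to be used to report events to the targets.
 */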
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
        __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
        __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m) \
        __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
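
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a pollable waitqueue's lifetime is shorter
 * than that of its userspace users, it can be freed while it is still
 * polled.  wake_up_pollfree() signals this by waking all waiters with the
 * POLLFREE key, so that they remove themselves from the queue before it
 * disappears.
 */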
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
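        /*
         * For performance reasons we don't take the queue lock here, so
         * this check can race against someone removing the last entry
         * while they still hold the lock.  Callers are required to hold
         * an RCU read lock across the poll/free handshake, which makes
         * that race benign.
         */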
        if (waitqueue_active(wq_head))
                __wake_up_pollfree(wq_head);
}
#define ___wait_cond_timeout(condition) \
({ \
        bool __cond = (condition); \
        if (__cond && !__ret) \
                __ret = 1; \
        __cond || !__ret; \
})

#define ___wait_is_interruptible(state) \
        (!__builtin_constant_p(state) || \
         state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
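
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */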
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
        __label__ __out; \
        struct wait_queue_entry __wq_entry; \
        long __ret = ret; \
 \
        init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
        for (;;) { \
                long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state); \
 \
                if (condition) \
                        break; \
 \
                if (___wait_is_interruptible(state) && __int) { \
                        __ret = __int; \
                        goto __out; \
                } \
 \
                cmd; \
        } \
        finish_wait(&wq_head, &__wq_entry); \
__out:  __ret; \
})

#define __wait_event(wq_head, condition) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            schedule())
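
/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * A typical pairing looks like this (names are illustrative only):
 *
 *      DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *      int my_flag;
 *
 *      // waiter
 *      wait_event(my_wq, my_flag != 0);
 *
 *      // waker
 *      my_flag = 1;
 *      wake_up(&my_wq);
 */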
#define wait_event(wq_head, condition) \
do { \
        might_sleep(); \
        if (condition) \
                break; \
        __wait_event(wq_head, condition); \
} while (0)

#define __io_wait_event(wq_head, condition) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            io_schedule())
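
/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */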
#define io_wait_event(wq_head, condition) \
do { \
        might_sleep(); \
        if (condition) \
                break; \
        __io_wait_event(wq_head, condition); \
} while (0)

#define __wait_event_freezable(wq_head, condition) \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      freezable_schedule())
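
/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true, and may be frozen
 * by the freezer while sleeping.  The @condition is checked each time the
 * waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */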
#define wait_event_freezable(wq_head, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_freezable(wq_head, condition); \
        __ret; \
})

#define __wait_event_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_UNINTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))
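
/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */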
#define wait_event_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_timeout(wq_head, condition, timeout); \
        __ret; \
})

#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      __ret = freezable_schedule_timeout(__ret))
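
/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */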
#define wait_event_freezable_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
        __ret; \
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
                            cmd1; schedule(); cmd2)

#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
do { \
        if (condition) \
                break; \
        __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            cmd1; schedule(); cmd2)
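
/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */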
#define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
do { \
        if (condition) \
                break; \
        __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_interruptible(wq_head, condition) \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      schedule())
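
/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */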
#define wait_event_interruptible(wq_head, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_interruptible(wq_head, condition); \
        __ret; \
})

#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))
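
/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */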
#define wait_event_interruptible_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_interruptible_timeout(wq_head, \
                                                condition, timeout); \
        __ret; \
})
#define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
({ \
        int __ret = 0; \
        struct hrtimer_sleeper __t; \
 \
        hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
                                      HRTIMER_MODE_REL); \
        if ((timeout) != KTIME_MAX) { \
                hrtimer_set_expires_range_ns(&__t.timer, timeout, \
                                             current->timer_slack_ns); \
                hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
        } \
 \
        __ret = ___wait_event(wq_head, condition, state, 0, 0, \
                if (!__t.task) { \
                        __ret = -ETIME; \
                        break; \
                } \
                schedule()); \
 \
        hrtimer_cancel(&__t.timer); \
        destroy_hrtimer_on_stack(&__t.timer); \
        __ret; \
})
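
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the
 * timeout elapsed.
 */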
#define wait_event_hrtimeout(wq_head, condition, timeout) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
                                               TASK_UNINTERRUPTIBLE); \
        __ret; \
})
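
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */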
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
        long __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_INTERRUPTIBLE); \
        __ret; \
})

#define __wait_event_interruptible_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_interruptible_exclusive(wq, condition); \
        __ret; \
})

#define __wait_event_killable_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
                      schedule())

#define wait_event_killable_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_killable_exclusive(wq, condition); \
        __ret; \
})

#define __wait_event_freezable_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
                      freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_freezable_exclusive(wq, condition); \
        __ret; \
})
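
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates
 * to true.  The @condition is checked each time the waitqueue @wq_head is
 * woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */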
#define wait_event_idle(wq_head, condition) \
do { \
        might_sleep(); \
        if (!(condition)) \
                ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)
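
/**
 * wait_event_idle_exclusive - wait exclusively without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates
 * to true.  The @condition is checked each time the waitqueue @wq_head is
 * woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wakeup of this
 * process does not consider any further processes.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */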
#define wait_event_idle_exclusive(wq_head, condition) \
do { \
        might_sleep(); \
        if (!(condition)) \
                ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_IDLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))
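
/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates
 * to true.  The @condition is checked each time the waitqueue @wq_head is
 * woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */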
#define wait_event_idle_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
        __ret; \
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_IDLE, 1, timeout, \
                      __ret = schedule_timeout(__ret))
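
/**
 * wait_event_idle_exclusive_timeout - sleep exclusively without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates
 * to true, and is queued with the WQ_FLAG_EXCLUSIVE flag set.  The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */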
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout); \
        __ret; \
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
        int __ret; \
        DEFINE_WAIT(__wait); \
        if (exclusive) \
                __wait.flags |= WQ_FLAG_EXCLUSIVE; \
        do { \
                __ret = fn(&(wq), &__wait); \
                if (__ret) \
                        break; \
        } while (!(condition)); \
        __remove_wait_queue(&(wq), &__wait); \
        __set_current_state(TASK_RUNNING); \
        __ret; \
})
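
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the @condition
 * evaluates to true or a signal is received.  The @condition is checked
 * each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held; the lock is dropped with
 * spin_unlock()/spin_lock() while sleeping, @condition is tested with the
 * lock held, and the lock is held again when this macro exits.
 *
 * wake_up_locked() has to be called after changing any variable that
 * could change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */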
#define wait_event_interruptible_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
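
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Like wait_event_interruptible_locked(), but the lock is dropped and
 * reacquired with spin_unlock_irq()/spin_lock_irq(), which must match the
 * way it is locked/unlocked outside of this macro.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */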
#define wait_event_interruptible_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
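
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Like wait_event_interruptible_locked(), but the process is queued with
 * the WQ_FLAG_EXCLUSIVE flag set, so a wakeup of this process does not
 * consider any further processes on the list.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */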
#define wait_event_interruptible_exclusive_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
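
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Like wait_event_interruptible_exclusive_locked(), but the lock is
 * dropped and reacquired with spin_unlock_irq()/spin_lock_irq(), which
 * must match the way it is locked/unlocked outside of this macro.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */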
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))

#define __wait_event_killable(wq, condition) \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
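
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */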
#define wait_event_killable(wq_head, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_killable(wq_head, condition); \
        __ret; \
})

#define __wait_event_killable_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_KILLABLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))
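
/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 * Only kill signals interrupt this process.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 */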
#define wait_event_killable_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_killable_timeout(wq_head, \
                                                condition, timeout); \
        __ret; \
})

#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            spin_unlock_irq(&lock); \
                            cmd; \
                            schedule(); \
                            spin_lock_irq(&lock))
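
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true; the condition
 *                           is checked under @lock, and the macro is expected
 *                           to be called with @lock taken
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before @cmd and
 *        schedule() and reacquired afterwards
 * @cmd: a command which is invoked outside the critical section before sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */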
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq_head, condition, lock, cmd); \
} while (0)
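
/**
 * wait_event_lock_irq - sleep until a condition gets true; the condition is
 *                       checked under @lock, and the macro is expected to be
 *                       called with @lock taken
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */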
#define wait_event_lock_irq(wq_head, condition, lock) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq_head, condition, lock, ); \
} while (0)

#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      spin_unlock_irq(&lock); \
                      cmd; \
                      schedule(); \
                      spin_lock_irq(&lock))
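
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true;
 *              the condition is checked under @lock, and the macro is
 *              expected to be called with @lock taken
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before @cmd and
 *        schedule() and reacquired afterwards
 * @cmd: a command which is invoked outside the critical section before sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.  The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */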
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq_head, \
                                                condition, lock, cmd); \
        __ret; \
})
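
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true;
 *              the condition is checked under @lock, and the macro is
 *              expected to be called with @lock taken
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.  The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */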
#define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq_head, \
                                                condition, lock,); \
        __ret; \
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      state, 0, timeout, \
                      spin_unlock_irq(&lock); \
                      __ret = schedule_timeout(__ret); \
                      spin_lock_irq(&lock))
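
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses; the condition is checked under
 *              @lock, and the macro is expected to be called with @lock taken
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.  The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it was
 * interrupted by a signal, or the remaining jiffies if the condition
 * evaluated to true before the timeout elapsed.
 */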
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
                                                  timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_lock_irq_timeout( \
                                        wq_head, condition, lock, timeout, \
                                        TASK_INTERRUPTIBLE); \
        __ret; \
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_lock_irq_timeout( \
                                        wq_head, condition, lock, timeout, \
                                        TASK_UNINTERRUPTIBLE); \
        __ret; \
})
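
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */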
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
        struct wait_queue_entry name = { \
                .private        = current, \
                .func           = function, \
                .entry          = LIST_HEAD_INIT((name).entry), \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait) \
do { \
        (wait)->private = current; \
        (wait)->func = autoremove_wake_function; \
        INIT_LIST_HEAD(&(wait)->entry); \
        (wait)->flags = 0; \
} while (0)

typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);

#endif /* _LINUX_WAIT_H */