/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H

#include <linux/list.h>		/* list_head, LIST_HEAD_INIT, list_empty() */
#include <linux/stddef.h>
#include <linux/spinlock.h>	/* raw_spinlock_t, __RAW_SPIN_LOCK_UNLOCKED */
#include <linux/wait.h>		/* ___wait_cond_timeout(), ___wait_is_interruptible() */
#include <asm/current.h>	/* current */

/*
 * Simple waitqueues are semantically very different to regular wait queues
 * (wait.h). The most important difference is that the simple waitqueue allows
 * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
 * times.
 *
 * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
 * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
 * priority task a chance to run.
 *
 * Secondly, we had to drop a fair number of features of the other waitqueue
 * code; notably:
 *
 *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
 *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
 *    sleeper state.
 *
 *  - the !exclusive mode; because that leads to O(n) wakeups, everything is
 *    exclusive. As such swake_up_one will only ever awake _one_ waiter.
 *
 *  - custom wake callback functions; because you cannot give any guarantees
 *    about random code. This also allows swait to be used in RT, such that
 *    raw spinlock can be used for the swait queue head.
 *
 * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
 * For all the above, note that simple wait queues should _only_ be used under
 * very specific realtime constraints -- it is best to stick with the regular
 * wait queues in most cases.
 */

struct task_struct;

/*
 * Head of a simple waitqueue.  @lock is a raw spinlock so the head remains
 * usable under RT (see the comment at the top of this file); @task_list
 * anchors the list of queued struct swait_queue waiters.
 */
struct swait_queue_head {
	raw_spinlock_t		lock;
	struct list_head	task_list;
};

/*
 * One waiter entry: @task is the sleeping task (set to current by
 * __SWAITQUEUE_INITIALIZER), @task_list links it into
 * swait_queue_head::task_list.
 */
struct swait_queue {
	struct task_struct	*task;
	struct list_head	task_list;
};

/* Initializer for an on-stack swait_queue, owned by the current task. */
#define __SWAITQUEUE_INITIALIZER(name) {				\
	.task		= current,					\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

/* Declare and initialize an on-stack swait_queue in one statement. */
#define DECLARE_SWAITQUEUE(name)					\
	struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)

/* Compile-time initializer for a statically allocated swait_queue_head. */
#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAIT_QUEUE_HEAD(name)					\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
				    struct lock_class_key *key);

/*
 * Runtime initializer for a swait_queue_head.  The function-local static
 * lock_class_key gives every init site its own key (used by lockdep; cf.
 * the CONFIG_LOCKDEP block below), and #q supplies a human-readable name.
 */
#define init_swait_queue_head(q)				\
	do {							\
		static struct lock_class_key __key;		\
		__init_swait_queue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * On-stack heads are initialized at runtime so each declaration site gets
 * its own lock_class_key via init_swait_queue_head().
 */
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)			\
	({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)			\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
/* Without lockdep the static initializer is sufficient. */
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)			\
	DECLARE_SWAIT_QUEUE_HEAD(name)
#endif

0088 /**
0089  * swait_active -- locklessly test for waiters on the queue
0090  * @wq: the waitqueue to test for waiters
0091  *
0092  * returns true if the wait list is not empty
0093  *
0094  * NOTE: this function is lockless and requires care, incorrect usage _will_
0095  * lead to sporadic and non-obvious failure.
0096  *
0097  * NOTE2: this function has the same above implications as regular waitqueues.
0098  *
0099  * Use either while holding swait_queue_head::lock or when used for wakeups
0100  * with an extra smp_mb() like:
0101  *
0102  *      CPU0 - waker                    CPU1 - waiter
0103  *
0104  *                                      for (;;) {
0105  *      @cond = true;                     prepare_to_swait_exclusive(&wq_head, &wait, state);
0106  *      smp_mb();                         // smp_mb() from set_current_state()
0107  *      if (swait_active(wq_head))        if (@cond)
0108  *        wake_up(wq_head);                      break;
0109  *                                        schedule();
0110  *                                      }
0111  *                                      finish_swait(&wq_head, &wait);
0112  *
0113  * Because without the explicit smp_mb() it's possible for the
0114  * swait_active() load to get hoisted over the @cond store such that we'll
0115  * observe an empty wait list while the waiter might not observe @cond.
0116  * This, in turn, can trigger missing wakeups.
0117  *
0118  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
0119  * which (when the lock is uncontended) are of roughly equal cost.
0120  */
0121 static inline int swait_active(struct swait_queue_head *wq)
0122 {
0123     return !list_empty(&wq->task_list);
0124 }
0125 
0126 /**
0127  * swq_has_sleeper - check if there are any waiting processes
0128  * @wq: the waitqueue to test for waiters
0129  *
0130  * Returns true if @wq has waiting processes
0131  *
0132  * Please refer to the comment for swait_active.
0133  */
0134 static inline bool swq_has_sleeper(struct swait_queue_head *wq)
0135 {
0136     /*
0137      * We need to be sure we are in sync with the list_add()
0138      * modifications to the wait queue (task_list).
0139      *
0140      * This memory barrier should be paired with one on the
0141      * waiting side.
0142      */
0143     smp_mb();
0144     return swait_active(wq);
0145 }
0146 
/*
 * Wakeup side: wake one, respectively all, waiters on @q (everything is
 * exclusive here -- see the header comment above).
 * NOTE(review): swake_up_locked() presumably expects q->lock to be held by
 * the caller -- confirm against kernel/sched/swait.c.
 */
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);

/* Waiter side: enqueue @wait and enter @state; see the usage pattern in the
 * swait_active() comment above. */
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

/* Waiter side: tear down after waiting (see ___swait_event below). */
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);

/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
/*
 * Core wait loop shared by all swait_event* macros: queue an on-stack
 * swait_queue, re-evaluate @condition after every wakeup, and run @cmd
 * (typically schedule() or schedule_timeout()) in between.  For
 * interruptible @state values, a nonzero return from
 * prepare_to_swait_event() (signal pending) aborts the loop and becomes
 * the macro's result.
 * NOTE(review): the abort path jumps past finish_swait();
 * prepare_to_swait_event() is presumably expected to have dequeued the
 * waiter in that case -- confirm in kernel/sched/swait.c.
 */
#define ___swait_event(wq, condition, state, ret, cmd)			\
({									\
	__label__ __out;						\
	struct swait_queue __wait;					\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	for (;;) {							\
		long __int = prepare_to_swait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_swait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __swait_event(wq, condition)					\
	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
			    schedule())

/*
 * Sleep (TASK_UNINTERRUPTIBLE) until @condition is true; the condition is
 * checked first without queueing, then re-checked after every wakeup.
 */
#define swait_event_exclusive(wq, condition)				\
do {									\
	if (condition)							\
		break;							\
	__swait_event(wq, condition);					\
} while (0)

#define __swait_event_timeout(wq, condition, timeout)			\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, timeout,			\
		      __ret = schedule_timeout(__ret))

/*
 * Uninterruptible wait for @condition, giving up after @timeout jiffies.
 * Returns 0 if the timeout elapsed with the condition still false,
 * otherwise the remaining jiffies (at least 1) -- same convention as
 * documented for swait_event_idle_timeout_exclusive() below.
 */
#define swait_event_timeout_exclusive(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __swait_event_interruptible(wq, condition)			\
	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
		      schedule())

/*
 * Interruptible wait for @condition.  Returns 0 once the condition is
 * true, or the nonzero value handed back by prepare_to_swait_event()
 * (signal pending) if the sleep was interrupted.
 */
#define swait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __swait_event_interruptible(wq, condition);	\
	__ret;								\
})

#define __swait_event_interruptible_timeout(wq, condition, timeout)	\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, timeout,			\
		      __ret = schedule_timeout(__ret))

/*
 * Interruptible wait with a @timeout in jiffies.  Returns the remaining
 * jiffies (at least 1) if @condition became true, 0 if the timeout elapsed
 * with it still false, or the nonzero value from prepare_to_swait_event()
 * if a signal interrupted the sleep.
 */
#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

/* TASK_IDLE variant: not interruptible, and (per the kernel-doc below)
 * does not contribute to system load. */
#define __swait_event_idle(wq, condition)				\
	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())

/**
 * swait_event_idle_exclusive - wait without system load contribution
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
 * true. The @condition is checked each time the waitqueue @wq is woken up.
 *
 * This function is mostly used when a kthread or workqueue waits for some
 * condition and doesn't want to contribute to system load. Signals are
 * ignored.
 */
#define swait_event_idle_exclusive(wq, condition)			\
do {									\
	if (condition)							\
		break;							\
	__swait_event_idle(wq, condition);				\
} while (0)

#define __swait_event_idle_timeout(wq, condition, timeout)		\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_IDLE, timeout,				\
		       __ret = schedule_timeout(__ret))

/**
 * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout at which we'll give up in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
 * true. The @condition is checked each time the waitqueue @wq is woken up.
 *
 * This function is mostly used when a kthread or workqueue waits for some
 * condition and doesn't want to contribute to system load. Signals are
 * ignored.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define swait_event_idle_timeout_exclusive(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_idle_timeout(wq,			\
						   condition, timeout);	\
	__ret;								\
})

#endif /* _LINUX_SWAIT_H */