/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef WW_RT

#define MUTEX       mutex
#define MUTEX_WAITER    mutex_waiter

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
    struct mutex_waiter *w;

    w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
    if (list_entry_is_head(w, &lock->wait_list, list))
        return NULL;

    return w;
}

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
    w = list_next_entry(w, list);
    if (list_entry_is_head(w, &lock->wait_list, list))
        return NULL;

    return w;
}

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
    w = list_prev_entry(w, list);
    if (list_entry_is_head(w, &lock->wait_list, list))
        return NULL;

    return w;
}

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
    struct mutex_waiter *w;

    w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
    if (list_entry_is_head(w, &lock->wait_list, list))
        return NULL;

    return w;
}

static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
    struct list_head *p = &lock->wait_list;
    if (pos)
        p = &pos->list;
    __mutex_add_waiter(lock, waiter, p);
}

static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
    return __mutex_owner(lock);
}

static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
    return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}

static inline void lock_wait_lock(struct mutex *lock)
{
    raw_spin_lock(&lock->wait_lock);
}

static inline void unlock_wait_lock(struct mutex *lock)
{
    raw_spin_unlock(&lock->wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
    lockdep_assert_held(&lock->wait_lock);
}

#else /* WW_RT */

#define MUTEX       rt_mutex
#define MUTEX_WAITER    rt_mutex_waiter

static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
    struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
    if (!n)
        return NULL;
    return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
    struct rb_node *n = rb_next(&w->tree_entry);
    if (!n)
        return NULL;
    return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
    struct rb_node *n = rb_prev(&w->tree_entry);
    if (!n)
        return NULL;
    return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
    struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
    if (!n)
        return NULL;
    return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
    /* RT unconditionally adds the waiter first and then removes it on error */
}

static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
    return rt_mutex_owner(&lock->rtmutex);
}

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
    return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock)
{
    raw_spin_lock(&lock->rtmutex.wait_lock);
}

static inline void unlock_wait_lock(struct rt_mutex *lock)
{
    raw_spin_unlock(&lock->rtmutex.wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
    lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif /* WW_RT */

/*
 * Wait-Die:
 *   A newer transaction is killed when:
 *     It (the newer transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
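
/*
 * Illustrative sketch only, not part of the upstream file: the caller-side
 * pattern both algorithms serve. The ww_* calls below are the public API
 * from <linux/ww_mutex.h>; "my_class" and "my_lock_pair" are hypothetical
 * names. On -EDEADLK the younger transaction backs off, sleeps on the lock
 * it lost out on, and retries with that lock held first.
 */
static DEFINE_WW_CLASS(my_class);

static inline int my_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                               struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = ww_mutex_lock(a, ctx);    /* ctx->acquired == 0 here, so no -EDEADLK */
    if (ret)
        return ret;
retry:
    ret = ww_mutex_lock(b, ctx);
    if (ret == -EDEADLK) {
        /* Back off: drop what we hold, sleep on the contended lock, retry. */
        ww_mutex_unlock(a);
        ww_mutex_lock_slow(b, ctx);
        swap(a, b);
        goto retry;
    }
    if (ret)
        ww_mutex_unlock(a);
    return ret;
}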

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
    /*
     * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
     * but released with a normal mutex_unlock in this call.
     *
     * This should never happen, always use ww_mutex_unlock.
     */
    DEBUG_LOCKS_WARN_ON(ww->ctx);

    /*
     * Not quite done after calling ww_acquire_done() ?
     */
    DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

    if (ww_ctx->contending_lock) {
        /*
         * After -EDEADLK you tried to
         * acquire a different ww_mutex? Bad!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

        /*
         * You called ww_mutex_lock after receiving -EDEADLK,
         * but 'forgot' to unlock everything else first?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
        ww_ctx->contending_lock = NULL;
    }

    /*
     * Naughty, using a different class will lead to undefined behavior!
     */
    DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
    ww_ctx->acquired++;
    ww->ctx = ww_ctx;
}

/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
    /* kernel prio; less is more */
    int a_prio = a->task->prio;
    int b_prio = b->task->prio;

    if (rt_prio(a_prio) || rt_prio(b_prio)) {

        if (a_prio > b_prio)
            return true;

        if (a_prio < b_prio)
            return false;

        /* equal static prio */

        if (dl_prio(a_prio)) {
            if (dl_time_before(b->task->dl.deadline,
                       a->task->dl.deadline))
                return true;

            if (dl_time_before(a->task->dl.deadline,
                       b->task->dl.deadline))
                return false;
        }

        /* equal prio */
    }
#endif

    /* FIFO order tie break -- bigger is younger */
    return (signed long)(a->stamp - b->stamp) > 0;
}
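
/*
 * Worked example, for illustration only: the signed subtraction in the tie
 * break above stays correct across stamp counter wrap-around. Using 8-bit
 * arithmetic for brevity, a->stamp == 2 (just after the wrap) and
 * b->stamp == 250 (just before it) give (s8)(2 - 250) == 8 > 0, so @a is
 * still correctly treated as the younger transaction.
 */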

/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
           struct ww_acquire_ctx *ww_ctx)
{
    if (!ww_ctx->is_wait_die)
        return false;

    if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
        debug_mutex_wake_waiter(lock, waiter);
#endif
        wake_up_process(waiter->task);
    }

    return true;
}

/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holder's. While multiple waiters may wound the lock holder,
 * it's sufficient that only one of them does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
                 struct ww_acquire_ctx *ww_ctx,
                 struct ww_acquire_ctx *hold_ctx)
{
    struct task_struct *owner = __ww_mutex_owner(lock);

    lockdep_assert_wait_lock_held(lock);

    /*
     * Possible through __ww_mutex_add_waiter() when we race with
     * ww_mutex_set_context_fastpath(). In that case we'll get here again
     * through __ww_mutex_check_waiters().
     */
    if (!hold_ctx)
        return false;

    /*
     * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
     * it cannot go away because we'll have FLAG_WAITERS set and hold
     * wait_lock.
     */
    if (!owner)
        return false;

    if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
        hold_ctx->wounded = 1;

        /*
         * wake_up_process() paired with set_current_state()
         * inserts sufficient barriers to make sure @owner either sees
         * it's wounded in __ww_mutex_check_kill() or has a
         * wakeup pending to re-read the wounded state.
         */
        if (owner != current)
            wake_up_process(owner);

        return true;
    }

    return false;
}

/*
 * We just acquired @lock under @ww_ctx. If there are more important contexts
 * waiting behind us on the wait-list, check whether they need to die, or
 * whether they should wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die and wound-wait on the same wait-list,
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
    struct MUTEX_WAITER *cur;

    lockdep_assert_wait_lock_held(lock);

    for (cur = __ww_waiter_first(lock); cur;
         cur = __ww_waiter_next(lock, cur)) {

        if (!cur->ww_ctx)
            continue;

        if (__ww_mutex_die(lock, cur, ww_ctx) ||
            __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
            break;
    }
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    ww_mutex_lock_acquired(lock, ctx);

    /*
     * The lock->ctx update should be visible on all cores before
     * the WAITERS check is done, otherwise contended waiters might be
     * missed. A contended waiter will either see ww_ctx == NULL
     * and keep spinning, or it will acquire wait_lock, add itself
     * to the waiter list and sleep.
     */
    smp_mb(); /* See comments above and below. */

    /*
     * [W] ww->ctx = ctx            [W] MUTEX_FLAG_WAITERS
     *     MB                           MB
     * [R] MUTEX_FLAG_WAITERS       [R] ww->ctx
     *
     * The memory barrier above pairs with the memory barrier in
     * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
     * and/or !empty list.
     */
    if (likely(!__ww_mutex_has_waiters(&lock->base)))
        return;

    /*
     * Uh oh, we raced in fastpath, check if any of the waiters need to
     * die or wound us.
     */
    lock_wait_lock(&lock->base);
    __ww_mutex_check_waiters(&lock->base, ctx);
    unlock_wait_lock(&lock->base);
}

static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
    if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
        struct ww_mutex *ww;

        ww = container_of(lock, struct ww_mutex, base);
        DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
        ww_ctx->contending_lock = ww;
#endif
        return -EDEADLK;
    }

    return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
              struct ww_acquire_ctx *ctx)
{
    struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
    struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
    struct MUTEX_WAITER *cur;

    if (ctx->acquired == 0)
        return 0;

    if (!ctx->is_wait_die) {
        if (ctx->wounded)
            return __ww_mutex_kill(lock, ctx);

        return 0;
    }

    if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
        return __ww_mutex_kill(lock, ctx);

    /*
     * If there is a waiter in front of us that has a context, then its
     * stamp is earlier than ours and we must kill ourselves.
     */
    for (cur = __ww_waiter_prev(lock, waiter); cur;
         cur = __ww_waiter_prev(lock, cur)) {

        if (!cur->ww_ctx)
            continue;

        return __ww_mutex_kill(lock, ctx);
    }

    return 0;
}

/*
 * Add @waiter to the wait-list and keep the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the lock
 * over younger contexts.
 *
 * Waiters without a context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting, and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
              struct MUTEX *lock,
              struct ww_acquire_ctx *ww_ctx)
{
    struct MUTEX_WAITER *cur, *pos = NULL;
    bool is_wait_die;

    if (!ww_ctx) {
        __ww_waiter_add(lock, waiter, NULL);
        return 0;
    }

    is_wait_die = ww_ctx->is_wait_die;

    /*
     * Add the waiter before the first waiter with a higher stamp.
     * Waiters without a context are skipped to avoid starving
     * them. Wait-Die waiters may die here. Wound-Wait waiters
     * never die here, but they are sorted in stamp order and
     * may wound the lock holder.
     */
    for (cur = __ww_waiter_last(lock); cur;
         cur = __ww_waiter_prev(lock, cur)) {

        if (!cur->ww_ctx)
            continue;

        if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
            /*
             * Wait-Die: if we find an older context waiting, there
             * is no point in queueing behind it, as we'd have to
             * die the moment it would acquire the lock.
             */
            if (is_wait_die) {
                int ret = __ww_mutex_kill(lock, ww_ctx);

                if (ret)
                    return ret;
            }

            break;
        }

        pos = cur;

        /* Wait-Die: ensure younger waiters die. */
        __ww_mutex_die(lock, cur, ww_ctx);
    }

    __ww_waiter_add(lock, waiter, pos);

    /*
     * Wound-Wait: if we're blocking on a mutex owned by a younger context,
     * wound that such that we might proceed.
     */
    if (!is_wait_die) {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

        /*
         * See ww_mutex_set_context_fastpath(). Orders setting
         * MUTEX_FLAG_WAITERS vs the ww->ctx load,
         * such that either we or the fastpath will wound @ww->ctx.
         */
        smp_mb();
        __ww_mutex_wound(lock, ww_ctx, ww->ctx);
    }

    return 0;
}
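
/*
 * Illustrative wait-list shape, with hypothetical stamps: given contexts
 * stamped 5 and 9 queued around a context-less waiter W,
 *
 *    head -> [ctx 5] -> [W] -> [ctx 9]
 *
 * a new waiter stamped 7 scans backwards from the tail, stops at the first
 * older context ([ctx 5]) and -- assuming it did not have to die first --
 * is added in front of the first younger context, giving
 *
 *    head -> [ctx 5] -> [W] -> [ctx 7] -> [ctx 9]
 *
 * while [ctx 9] gets a Wait-Die wakeup so it can die if it already holds
 * other locks.
 */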

static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
    if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
        DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
        if (lock->ctx->acquired > 0)
            lock->ctx->acquired--;
        lock->ctx = NULL;
    }
}
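
/*
 * Illustrative sketch only, not part of the upstream file: the context
 * lifecycle around the helpers above. Every successful ww_mutex_lock()
 * bumps ctx->acquired via ww_mutex_lock_acquired() and every
 * ww_mutex_unlock() drops it again via __ww_mutex_unlock(), so the count
 * is back to zero by ww_acquire_fini(). "my_transaction" is a hypothetical
 * name and reuses the hypothetical my_class/my_lock_pair from the sketch
 * further up.
 */
static inline int my_transaction(struct ww_mutex *a, struct ww_mutex *b)
{
    struct ww_acquire_ctx ctx;
    int ret;

    ww_acquire_init(&ctx, &my_class);   /* stamp this transaction */

    ret = my_lock_pair(a, b, &ctx);     /* handles -EDEADLK back-off */
    if (ret)
        goto out;

    ww_acquire_done(&ctx);              /* no further locks will be taken */

    /* ... touch the objects protected by @a and @b ... */

    ww_mutex_unlock(b);                 /* ctx.acquired: 2 -> 1 */
    ww_mutex_unlock(a);                 /* ctx.acquired: 1 -> 0 */
out:
    ww_acquire_fini(&ctx);
    return ret;
}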