Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * RT Mutexes: blocking mutual exclusion locks with PI support
0004  *
0005  * started by Ingo Molnar and Thomas Gleixner:
0006  *
0007  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
0008  *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
0009  *
0010  * This file contains the private data structure and API definitions.
0011  */
0012 
0013 #ifndef __KERNEL_RTMUTEX_COMMON_H
0014 #define __KERNEL_RTMUTEX_COMMON_H
0015 
0016 #include <linux/debug_locks.h>
0017 #include <linux/rtmutex.h>
0018 #include <linux/sched/wake_q.h>
0019 
0020 /*
0021  * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
0023  *
0024  * @tree_entry:     pi node to enqueue into the mutex waiters tree
0025  * @pi_tree_entry:  pi node to enqueue into the mutex owner waiters tree
0026  * @task:       task reference to the blocked task
0027  * @lock:       Pointer to the rt_mutex on which the waiter blocks
0028  * @wake_state:     Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
0029  * @prio:       Priority of the waiter
0030  * @deadline:       Deadline of the waiter if applicable
0031  * @ww_ctx:     WW context pointer
0032  */
struct rt_mutex_waiter {
	struct rb_node		tree_entry;	/* node in lock->waiters tree */
	struct rb_node		pi_tree_entry;	/* node in owner's pi_waiters tree */
	struct task_struct	*task;		/* the blocked task */
	struct rt_mutex_base	*lock;		/* lock this waiter blocks on */
	unsigned int		wake_state;	/* TASK_NORMAL or TASK_RTLOCK_WAIT */
	int			prio;		/* priority of the waiter */
	u64			deadline;	/* deadline of the waiter, if applicable */
	struct ww_acquire_ctx	*ww_ctx;	/* ww_mutex acquire context, if any */
};
0043 
0044 /**
0045  * rt_wake_q_head - Wrapper around regular wake_q_head to support
0046  *          "sleeping" spinlocks on RT
0047  * @head:       The regular wake_q_head for sleeping lock variants
0048  * @rtlock_task:    Task pointer for RT lock (spin/rwlock) wakeups
0049  */
struct rt_wake_q_head {
	struct wake_q_head	head;		/* regular wake queue for sleeping-lock wakeups */
	struct task_struct	*rtlock_task;	/* pending RT lock (spin/rwlock) wakeup target */
};
0054 
/*
 * DEFINE_RT_WAKE_Q - declare and initialize an on-stack rt_wake_q_head:
 * an empty regular wake queue and no pending rtlock wakeup.
 */
#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}
0060 
0061 /*
0062  * PI-futex support (proxy locking functions, etc.):
0063  */
/* Set up / tear down lock ownership on behalf of another (proxy) task. */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);

/*
 * Start, wait for, and clean up a proxied lock acquisition for @task.
 * NOTE(review): presumably the __ variant expects the caller to hold
 * lock->wait_lock while the plain variant takes it — confirm in rtmutex.c.
 */
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				   struct hrtimer_sleeper *to,
				   struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
				 struct rt_mutex_waiter *waiter);

/* Futex-specific trylock/unlock entry points. */
extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				struct rt_wake_q_head *wqh);

/* Issue the wakeups collected in @wqh after the lock has been dropped. */
extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
0087 
0088 /*
0089  * Must be guarded because this header is included from rcu/tree_plugin.h
0090  * unconditionally.
0091  */
0092 #ifdef CONFIG_RT_MUTEXES
0093 static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
0094 {
0095     return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
0096 }
0097 
0098 /*
0099  * Lockless speculative check whether @waiter is still the top waiter on
 * @lock. This is solely comparing pointers and not dereferencing the
0101  * leftmost entry which might be about to vanish.
0102  */
0103 static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
0104                          struct rt_mutex_waiter *waiter)
0105 {
0106     struct rb_node *leftmost = rb_first_cached(&lock->waiters);
0107 
0108     return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
0109 }
0110 
0111 static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
0112 {
0113     struct rb_node *leftmost = rb_first_cached(&lock->waiters);
0114     struct rt_mutex_waiter *w = NULL;
0115 
0116     if (leftmost) {
0117         w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
0118         BUG_ON(w->lock != lock);
0119     }
0120     return w;
0121 }
0122 
0123 static inline int task_has_pi_waiters(struct task_struct *p)
0124 {
0125     return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
0126 }
0127 
0128 static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
0129 {
0130     return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
0131             pi_tree_entry);
0132 }
0133 
/* Flag stored in bit 0 of lock->owner: set while the waiter tree is in use. */
#define RT_MUTEX_HAS_WAITERS	1UL

/*
 * rt_mutex_owner - return the owning task of @lock, or NULL if unowned.
 *
 * The owner field encodes the RT_MUTEX_HAS_WAITERS flag in bit 0 of the
 * task pointer, so mask it out before returning the pointer.
 */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
0142 
0143 /*
0144  * Constants for rt mutex functions which have a selectable deadlock
0145  * detection.
0146  *
0147  * RT_MUTEX_MIN_CHAINWALK:  Stops the lock chain walk when there are
0148  *              no further PI adjustments to be made.
0149  *
0150  * RT_MUTEX_FULL_CHAINWALK: Invoke deadlock detection with a full
0151  *              walk of the lock chain.
0152  */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,		/* stop when no further PI adjustment is needed */
	RT_MUTEX_FULL_CHAINWALK,	/* full chain walk with deadlock detection */
};
0157 
0158 static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
0159 {
0160     raw_spin_lock_init(&lock->wait_lock);
0161     lock->waiters = RB_ROOT_CACHED;
0162     lock->owner = NULL;
0163 }
0164 
0165 /* Debug functions */
0166 static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
0167 {
0168     if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
0169         DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
0170 }
0171 
0172 static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
0173 {
0174     if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
0175         DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
0176 }
0177 
0178 static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
0179 {
0180     if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
0181         memset(waiter, 0x11, sizeof(*waiter));
0182 }
0183 
0184 static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
0185 {
0186     if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
0187         memset(waiter, 0x22, sizeof(*waiter));
0188 }
0189 
0190 static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
0191 {
0192     debug_rt_mutex_init_waiter(waiter);
0193     RB_CLEAR_NODE(&waiter->pi_tree_entry);
0194     RB_CLEAR_NODE(&waiter->tree_entry);
0195     waiter->wake_state = TASK_NORMAL;
0196     waiter->task = NULL;
0197 }
0198 
/*
 * Initialize a waiter for an RT "spinning" lock (spinlock/rwlock on RT).
 * Regular init first; the wake_state override must come after it, since
 * rt_mutex_init_waiter() sets wake_state to TASK_NORMAL.
 */
static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}
0204 
0205 #else /* CONFIG_RT_MUTEXES */
0206 /* Used in rcu/tree_plugin.h */
/* Without CONFIG_RT_MUTEXES an rt_mutex can never be owned. */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
0211 #endif  /* !CONFIG_RT_MUTEXES */
0212 
0213 #endif