// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wake up is
 *   missed.
 *
 * - Non RT spin/rwlocks disable preemption and eventually interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

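/*
 * A minimal usage sketch of the caller-visible semantics described in the
 * header comment, assuming a PREEMPT_RT kernel; the names example_lock and
 * example_counter are hypothetical and used only for illustration. The
 * spin_lock() below may block on the underlying rtmutex, yet the critical
 * section still runs migration-disabled and inside an RCU read side
 * critical section, so per-CPU and RCU assumptions continue to hold.
 */
static DEFINE_SPINLOCK(example_lock);
static int example_counter;

static __maybe_unused void example_count(void)
{
    spin_lock(&example_lock);   /* may sleep on contention, state preserving */
    example_counter++;          /* migration disabled, RCU read side held */
    spin_unlock(&example_lock);
}
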
/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS                      \
    (rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched()                      \
    __might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)

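/*
 * A minimal sketch of the legitimate nesting the comment above refers to,
 * assuming a PREEMPT_RT kernel; example_lock and example_counter are the
 * hypothetical names from the sketch following the includes. The RCU
 * nesting depth is folded into RTLOCK_RESCHED_OFFSETS, so taking the lock
 * under rcu_read_lock() does not trigger a __might_resched() warning.
 */
static __maybe_unused void example_nested_in_rcu(void)
{
    rcu_read_lock();
    spin_lock(&example_lock);   /* RCU depth accounted for by the offset above */
    example_counter++;
    spin_unlock(&example_lock);
    rcu_read_unlock();
}
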
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
    if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
        rtlock_slowlock(rtm);
}

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
    rtlock_might_resched();
    rtlock_lock(&lock->lock);
    rcu_read_lock();
    migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)
{
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    __rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
    spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
    __rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
                    struct lockdep_map *nest_lock)
{
    spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
    __rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

void __sched rt_spin_unlock(spinlock_t *lock)
{
    spin_release(&lock->dep_map, _RET_IP_);
    migrate_enable();
    rcu_read_unlock();

    if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
        rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
    spin_lock(lock);
    spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);
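
/*
 * A hypothetical caller-side sketch for the helper above: waiting until any
 * concurrent holder of example_lock (the hypothetical lock from the first
 * sketch) has left its critical section, without doing any work under the
 * lock itself.
 */
static __maybe_unused void example_wait_for_holder(void)
{
    /* Blocks, and schedules under contention, until the owner drops the lock. */
    rt_spin_lock_unlock(&example_lock);
}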

static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
    int ret = 1;

    if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
        ret = rt_mutex_slowtrylock(&lock->lock);

    if (ret) {
        spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        rcu_read_lock();
        migrate_disable();
    }
    return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
    return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
    int ret;

    local_bh_disable();
    ret = __rt_spin_trylock(lock);
    if (!ret)
        local_bh_enable();
    return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);
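
/*
 * A sketch of the expected pairing for the BH variant above, under the
 * assumption that callers use the generic spin_trylock_bh() and
 * spin_unlock_bh() wrappers; example_lock and example_counter are the
 * hypothetical names from the first sketch. On failure rt_spin_trylock_bh()
 * has already re-enabled bottom halves, so only a successful trylock is
 * paired with an unlock.
 */
static __maybe_unused bool example_try_count_bh(void)
{
    if (!spin_trylock_bh(&example_lock))
        return false;               /* lock busy, BHs re-enabled on this path */

    example_counter++;
    spin_unlock_bh(&example_lock);  /* drops the lock and re-enables BHs */
    return true;
}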

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
             struct lock_class_key *key, bool percpu)
{
    u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

    debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
                  LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
#define rwbase_set_and_save_current_state(state)    \
    current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()          \
    current_restore_rtlock_saved_state()

static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
    if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
        rtlock_slowlock(rtm);
    return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
    rtlock_slowlock_locked(rtm);
    return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
    if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
        return;

    rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
    if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
        return 1;

    return rt_mutex_slowtrylock(rtm);
}

#define rwbase_signal_pending_state(state, current) (0)

#define rwbase_schedule()               \
    schedule_rtlock()

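/*
 * The macros and inlines above parameterize the common reader/writer core
 * in rwbase_rt.c, which is shared with the PREEMPT_RT rw_semaphore
 * implementation. Including it here instantiates that core on top of the
 * rtlock wait state and scheduling hooks defined above.
 */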
#include "rwbase_rt.c"
/*
 * The common functions which get wrapped into the rwlock API. A usage
 * sketch follows rt_write_unlock() below.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
    int ret;

    ret = rwbase_read_trylock(&rwlock->rwbase);
    if (ret) {
        rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
        rcu_read_lock();
        migrate_disable();
    }
    return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
    int ret;

    ret = rwbase_write_trylock(&rwlock->rwbase);
    if (ret) {
        rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
        rcu_read_lock();
        migrate_disable();
    }
    return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

void __sched rt_read_lock(rwlock_t *rwlock)
{
    rtlock_might_resched();
    rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
    rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
    rcu_read_lock();
    migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock)
{
    rtlock_might_resched();
    rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
    rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
    rcu_read_lock();
    migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
{
    rtlock_might_resched();
    rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
    rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
    rcu_read_lock();
    migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif

void __sched rt_read_unlock(rwlock_t *rwlock)
{
    rwlock_release(&rwlock->dep_map, _RET_IP_);
    migrate_enable();
    rcu_read_unlock();
    rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock)
{
    rwlock_release(&rwlock->dep_map, _RET_IP_);
    rcu_read_unlock();
    migrate_enable();
    rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);
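
/*
 * A usage sketch for the rwlock API wrapped above, assuming callers go
 * through the generic read_lock()/write_lock() wrappers, which resolve to
 * the rt_*() functions on PREEMPT_RT. The names example_rwlock and
 * example_config are hypothetical.
 */
static DEFINE_RWLOCK(example_rwlock);
static int example_config;

static __maybe_unused int example_read_config(void)
{
    int val;

    read_lock(&example_rwlock);     /* may block on a writer (rt_read_lock()) */
    val = example_config;
    read_unlock(&example_rwlock);
    return val;
}

static __maybe_unused void example_write_config(int val)
{
    write_lock(&example_rwlock);    /* exclusive access (rt_write_lock()) */
    example_config = val;
    write_unlock(&example_rwlock);
}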

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
              struct lock_class_key *key)
{
    debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
    lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif