// SPDX-License-Identifier: GPL-2.0-only
#ifndef __LINUX_SPINLOCK_RT_H
#define __LINUX_SPINLOCK_RT_H

#ifndef __LINUX_SPINLOCK_H
#error Do not include directly. Use spinlock.h
#endif
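
/*
 * On PREEMPT_RT, spinlock_t is not a spinning lock: it is built on top of
 * rt_mutex, so a contended acquisition puts the task to sleep and critical
 * sections stay preemptible. The regular spin_lock_*() names are kept so
 * that common code compiles unchanged.
 */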

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu);
#else
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				       struct lock_class_key *key, bool percpu)
{
}
#endif

#define spin_lock_init(slock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	rt_mutex_base_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key, false);	\
} while (0)

#define local_spin_lock_init(slock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	rt_mutex_base_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key, true);	\
} while (0)
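
/*
 * Example (sketch, not part of this header): initializing an embedded
 * spinlock_t. 'struct foo' and foo_init() are illustrative names only.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int counter;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		f->counter = 0;
 *	}
 */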

extern void rt_spin_lock(spinlock_t *lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
extern void rt_spin_unlock(spinlock_t *lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);
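
/*
 * The rt_spin_*() primitives above are provided by the RT substitution
 * code (kernel/locking/spinlock_rt.c in mainline kernels).
 */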

static __always_inline void spin_lock(spinlock_t *lock)
{
	rt_spin_lock(lock);
}
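
/*
 * Note: rt_spin_lock() leaves preemption enabled. In the mainline RT
 * implementation it disables migration and enters an RCU read-side
 * critical section instead of disabling preemption outright.
 */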

#ifdef CONFIG_LOCKDEP
# define __spin_lock_nested(lock, subclass)				\
	rt_spin_lock_nested(lock, subclass)

# define __spin_lock_nest_lock(lock, nest_lock)				\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)
# define __spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = 0;						\
		__spin_lock_nested(lock, subclass);			\
	} while (0)

#else
 /*
  * Always evaluate the 'subclass' argument to avoid that the compiler
  * warns about set-but-not-used variables when building with
  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  */
# define __spin_lock_nested(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_nest_lock(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(((void)(subclass), (lock)), flags)
#endif

#define spin_lock_nested(lock, subclass)		\
	__spin_lock_nested(lock, subclass)

#define spin_lock_nest_lock(lock, nest_lock)		\
	__spin_lock_nest_lock(lock, nest_lock)

#define spin_lock_irqsave_nested(lock, flags, subclass)	\
	__spin_lock_irqsave_nested(lock, flags, subclass)
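
/*
 * Example (sketch): taking two locks of the same lock class, e.g. when
 * moving data between two hypothetical 'struct foo' instances. The
 * SINGLE_DEPTH_NESTING annotation tells lockdep that the second, nested
 * acquisition is intentional.
 *
 *	spin_lock(&src->lock);
 *	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&dst->lock);
 *	spin_unlock(&src->lock);
 */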

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	/* Disable bottom-half processing before acquiring the lock */
	local_bh_disable();
	rt_spin_lock(lock);
}

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	rt_spin_lock(lock);
}

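/*
 * On PREEMPT_RT the irq/irqsave variants do not disable hardware
 * interrupts: the lock may sleep, so that would be invalid. 'flags' is
 * zeroed only to satisfy the generic calling convention and carries no
 * interrupt state; spin_unlock_irqrestore() ignores it accordingly.
 */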
#define spin_lock_irqsave(lock, flags)			\
do {							\
	typecheck(unsigned long, flags);		\
	flags = 0;					\
	spin_lock(lock);				\
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	rt_spin_unlock(lock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	rt_spin_unlock(lock);
	local_bh_enable();
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	rt_spin_unlock(lock);
}

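/*
 * 'flags' is deliberately unused: no interrupt state was saved by the
 * matching spin_lock_irqsave() on RT, so there is nothing to restore.
 */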
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
						   unsigned long flags)
{
	rt_spin_unlock(lock);
}

#define spin_trylock(lock)				\
	__cond_lock(lock, rt_spin_trylock(lock))

#define spin_trylock_bh(lock)				\
	__cond_lock(lock, rt_spin_trylock_bh(lock))

#define spin_trylock_irq(lock)				\
	__cond_lock(lock, rt_spin_trylock(lock))

#define __spin_trylock_irqsave(lock, flags)		\
({							\
	int __locked;					\
							\
	typecheck(unsigned long, flags);		\
	flags = 0;					\
	__locked = spin_trylock(lock);			\
	__locked;					\
})

#define spin_trylock_irqsave(lock, flags)		\
	__cond_lock(lock, __spin_trylock_irqsave(lock, flags))
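
/*
 * Example (sketch): opportunistic locking with spin_trylock(); 'dev' and
 * its members are illustrative names, not part of this API.
 *
 *	if (spin_trylock(&dev->lock)) {
 *		dev->stats++;
 *		spin_unlock(&dev->lock);
 *	}
 */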

/* RT spinlocks cannot be queried for contention; always report uncontended */
#define spin_is_contended(lock)		(((void)(lock), 0))

static inline int spin_is_locked(spinlock_t *lock)
{
	return rt_mutex_base_is_locked(&lock->lock);
}

#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))

#include <linux/rwlock_rt.h>

#endif