0001 #ifndef __LINUX_SPINLOCK_API_SMP_H
0002 #define __LINUX_SPINLOCK_API_SMP_H
0003 
0004 #ifndef __LINUX_SPINLOCK_H
0005 # error "please don't include this file directly"
0006 #endif
0007 
0008 /*
0009  * include/linux/spinlock_api_smp.h
0010  *
0011  * spinlock API declarations on SMP (and debug)
0012  * (implemented in kernel/spinlock.c)
0013  *
0014  * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
0015  * Released under the General Public License (GPL).
0016  */
0017 
0018 int in_lock_functions(unsigned long addr);
0019 
0020 #define assert_raw_spin_locked(x)   BUG_ON(!raw_spin_is_locked(x))
0021 
0022 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)        __acquires(lock);
0023 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
0024                                 __acquires(lock);
0025 void __lockfunc
0026 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
0027                                 __acquires(lock);
0028 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)     __acquires(lock);
0029 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
0030                                 __acquires(lock);
0031 
0032 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
0033                                 __acquires(lock);
0034 unsigned long __lockfunc
0035 _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
0036                                 __acquires(lock);
0037 int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
0038 int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
0039 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)      __releases(lock);
0040 void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)   __releases(lock);
0041 void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)  __releases(lock);
0042 void __lockfunc
0043 _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
0044                                 __releases(lock);
0045 
0046 #ifdef CONFIG_INLINE_SPIN_LOCK
0047 #define _raw_spin_lock(lock) __raw_spin_lock(lock)
0048 #endif
0049 
0050 #ifdef CONFIG_INLINE_SPIN_LOCK_BH
0051 #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
0052 #endif
0053 
0054 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
0055 #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
0056 #endif
0057 
0058 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
0059 #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
0060 #endif
0061 
0062 #ifdef CONFIG_INLINE_SPIN_TRYLOCK
0063 #define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
0064 #endif
0065 
0066 #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
0067 #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
0068 #endif
0069 
0070 #ifndef CONFIG_UNINLINE_SPIN_UNLOCK
0071 #define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
0072 #endif
0073 
0074 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
0075 #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
0076 #endif
0077 
0078 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
0079 #define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
0080 #endif
0081 
0082 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
0083 #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
0084 #endif
0085 
/*
 * Try to take @lock once without spinning.
 *
 * Returns 1 on success with preemption disabled and the acquisition
 * reported to lockdep; returns 0 on failure with the preempt count
 * restored.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
    /* Preemption must be off before we can own a spinlock. */
    preempt_disable();
    if (do_raw_spin_trylock(lock)) {
        /* trylock=1: tell lockdep not to treat this as a blocking acquire */
        spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        return 1;
    }
    /* Did not get the lock: undo the preempt_disable() above. */
    preempt_enable();
    return 0;
}
0096 
0097 /*
0098  * If lockdep is enabled then we use the non-preemption spin-ops
0099  * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
0100  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
0101  */
0102 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
0103 
/*
 * Acquire @lock with local interrupts disabled.
 *
 * Returns the previous interrupt state in @flags form, for the caller to
 * hand back to __raw_spin_unlock_irqrestore().  Note the ordering:
 * interrupts off first, then preemption off, then the lockdep acquire
 * annotation, then the actual lock acquisition.
 */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
    unsigned long flags;

    local_irq_save(flags);
    preempt_disable();
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    /*
     * LOCK_CONTENDED() tries the fast (trylock) path first so that
     * contention events can be recorded when lock statistics are enabled.
     */
    LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
    return flags;
}
0114 
/*
 * Acquire @lock with local interrupts unconditionally disabled.
 *
 * Unlike the _irqsave variant, the previous interrupt state is not
 * preserved; the matching unlock is __raw_spin_unlock_irq().
 */
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
    local_irq_disable();
    preempt_disable();
    /* Annotate for lockdep before actually taking the lock. */
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
0122 
/*
 * Acquire @lock with bottom halves (softirqs) disabled.
 *
 * __local_bh_disable_ip() with SOFTIRQ_LOCK_OFFSET also covers the
 * preempt-count manipulation, so no separate preempt_disable() is needed.
 */
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
    __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
0129 
/*
 * Acquire @lock, disabling preemption but leaving interrupts and
 * bottom halves untouched.  This is the plain spin_lock() fast path.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
    preempt_disable();
    /* Annotate for lockdep before actually taking the lock. */
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
0136 
0137 #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
0138 
/*
 * Release @lock and re-enable preemption.
 *
 * The lockdep release annotation comes first, then the real unlock;
 * preempt_enable() is last so a pending reschedule can occur only after
 * the lock has actually been dropped.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
    spin_release(&lock->dep_map, _RET_IP_);
    do_raw_spin_unlock(lock);
    preempt_enable();
}
0145 
/*
 * Release @lock and restore the interrupt state saved by
 * __raw_spin_lock_irqsave().
 *
 * @flags: the value previously returned by __raw_spin_lock_irqsave().
 * Interrupts are restored before preempt_enable(), mirroring the
 * acquire-side ordering in reverse.
 */
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
                        unsigned long flags)
{
    spin_release(&lock->dep_map, _RET_IP_);
    do_raw_spin_unlock(lock);
    local_irq_restore(flags);
    preempt_enable();
}
0154 
/*
 * Release @lock and unconditionally re-enable local interrupts.
 * Pairs with __raw_spin_lock_irq(); the unlock happens before
 * interrupts and preemption are re-enabled.
 */
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
    spin_release(&lock->dep_map, _RET_IP_);
    do_raw_spin_unlock(lock);
    local_irq_enable();
    preempt_enable();
}
0162 
/*
 * Release @lock and re-enable bottom halves.
 *
 * __local_bh_enable_ip() with SOFTIRQ_LOCK_OFFSET undoes the matching
 * disable in __raw_spin_lock_bh() (and may process pending softirqs —
 * only after the lock has been dropped).
 */
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
    spin_release(&lock->dep_map, _RET_IP_);
    do_raw_spin_unlock(lock);
    __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
0169 
/*
 * Try to take @lock once with bottom halves disabled.
 *
 * Returns 1 on success with softirqs left disabled; returns 0 on
 * failure with the bottom-half state restored.
 */
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
    /* BHs must be off before the attempt, as in __raw_spin_lock_bh(). */
    __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
    if (do_raw_spin_trylock(lock)) {
        /* trylock=1: tell lockdep not to treat this as a blocking acquire */
        spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        return 1;
    }
    /* Did not get the lock: undo the bottom-half disable above. */
    __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
    return 0;
}
0180 
0181 /* PREEMPT_RT has its own rwlock implementation */
0182 #ifndef CONFIG_PREEMPT_RT
0183 #include <linux/rwlock_api_smp.h>
0184 #endif
0185 
0186 #endif /* __LINUX_SPINLOCK_API_SMP_H */