Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef __LINUX_BIT_SPINLOCK_H
0003 #define __LINUX_BIT_SPINLOCK_H
0004 
0005 #include <linux/kernel.h>
0006 #include <linux/preempt.h>
0007 #include <linux/atomic.h>
0008 #include <linux/bug.h>
0009 
/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
    /*
     * Assuming the lock is uncontended, this never enters
     * the body of the outer loop. If it is contended, then
     * within the inner loop a non-atomic test is used to
     * busywait with less bus contention for a good time to
     * attempt to acquire the lock bit.
     */
    preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    /*
     * test_and_set_bit_lock() has acquire semantics, so a successful
     * (zero) return fully serializes the critical section.
     */
    while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
        /*
         * Lost the race: re-enable preemption while we busy-wait so
         * the lock holder (or anyone else) can run, then spin with a
         * plain non-atomic test_bit() to keep the cacheline shared.
         */
        preempt_enable();
        do {
            cpu_relax();
        } while (test_bit(bitnum, addr));
        /* Bit looks free — disable preemption and retry the atomic op. */
        preempt_disable();
    }
#endif
    /* Sparse annotation only: tell static analysis we hold the lock. */
    __acquire(bitlock);
}
0037 
/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
    /* Disable preemption first; it stays off only if we win the lock. */
    preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
        /* Already held by someone else: restore preempt count, fail. */
        preempt_enable();
        return 0;
    }
#endif
    /*
     * On UP without debug the bit op compiles away: "acquiring" the
     * lock is just disabling preemption. Sparse-only annotation below.
     */
    __acquire(bitlock);
    return 1;
}
0053 
/*
 *  bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
    /* Unlocking a lock we don't hold is a hard bug under debug. */
    BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    /*
     * clear_bit_unlock() has release semantics: all stores inside the
     * critical section are visible before the lock bit clears. This
     * must happen before preempt_enable() below.
     */
    clear_bit_unlock(bitnum, addr);
#endif
    preempt_enable();
    /* Sparse annotation only: lock no longer held. */
    __release(bitlock);
}
0068 
/*
 *  bit-based spin_unlock()
 *  non-atomic version, which can be used eg. if the bit lock itself is
 *  protecting the rest of the flags in the word.
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
    /* Unlocking a lock we don't hold is a hard bug under debug. */
    BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    /*
     * Non-atomic release: safe only when the lock holder is the sole
     * writer of this word (the lock protects the other flag bits), so
     * no RMW atomicity is needed — just the release ordering.
     */
    __clear_bit_unlock(bitnum, addr);
#endif
    preempt_enable();
    /* Sparse annotation only: lock no longer held. */
    __release(bitlock);
}
0085 
/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    /* The lock bit itself is the lock state. */
    return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
    /*
     * UP without debug: the bit is never set; "locked" is approximated
     * by preemption being disabled (nonzero preempt_count()).
     */
    return preempt_count();
#else
    /*
     * UP, !PREEMPT_COUNT: no way to tell — assume held so that
     * assertions of the form BUG_ON(!bit_spin_is_locked()) still pass.
     */
    return 1;
#endif
}
0099 
0100 #endif /* __LINUX_BIT_SPINLOCK_H */
0101