0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _ASM_GENERIC_BITOPS_LOCK_H_
0003 #define _ASM_GENERIC_BITOPS_LOCK_H_
0004 
0005 #include <linux/atomic.h>
0006 #include <linux/compiler.h>
0007 #include <asm/barrier.h>
0008 
0009 /**
0010  * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
0011  * @nr: Bit to set
0012  * @addr: Address to count from
0013  *
0014  * This operation is atomic and provides acquire barrier semantics if
0015  * the returned value is 0.
0016  * It can be used to implement bit locks.
0017  */
0018 static __always_inline int
0019 arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
0020 {
0021     long old;
0022     unsigned long mask = BIT_MASK(nr);
0023 
0024     p += BIT_WORD(nr);
0025     if (READ_ONCE(*p) & mask)
0026         return 1;
0027 
0028     old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
0029     return !!(old & mask);
0030 }
0031 
0032 
/**
 * arch_clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static __always_inline void
arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
    p += BIT_WORD(nr);
    /* The fetched old value is deliberately discarded; only the
     * release-ordered atomic clear of the bit matters here. */
    arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
0046 
/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
 * the bits in the word are protected by this lock some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void
arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
    unsigned long old;

    p += BIT_WORD(nr);
    /*
     * Non-atomic read-modify-write: safe only under the assumption stated
     * above — no other bit in this word may change concurrently, because
     * the lock bit being cleared protects them all.
     */
    old = READ_ONCE(*p);
    old &= ~BIT_MASK(nr);
    /* Release-ordered store publishes the critical section before unlock. */
    arch_atomic_long_set_release((atomic_long_t *)p, old);
}
0068 
0069 /**
0070  * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
0071  *                                          byte is negative, for unlock.
0072  * @nr: the bit to clear
0073  * @addr: the address to start counting from
0074  *
0075  * This is a bit of a one-trick-pony for the filemap code, which clears
0076  * PG_locked and tests PG_waiters,
0077  */
0078 #ifndef arch_clear_bit_unlock_is_negative_byte
0079 static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
0080                               volatile unsigned long *p)
0081 {
0082     long old;
0083     unsigned long mask = BIT_MASK(nr);
0084 
0085     p += BIT_WORD(nr);
0086     old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
0087     return !!(old & BIT(7));
0088 }
0089 #define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
0090 #endif
0091 
0092 #include <asm-generic/bitops/instrumented-lock.h>
0093 
0094 #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */