/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_

#include <linux/bits.h>

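/*
 * All helpers below locate the target bit via BIT_WORD(nr), the index of
 * the unsigned long word that holds bit @nr (nr / BITS_PER_LONG), and
 * BIT_MASK(nr), a mask with only that bit set within its word
 * (1UL << (nr % BITS_PER_LONG)).  Both come from the include above.
 */
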
/**
 * ___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
___set_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

    *p |= mask;
}

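/**
 * ___clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */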
static __always_inline void
___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

    *p &= ~mask;
}

/**
 * ___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
___change_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

    *p ^= mask;
}

/**
 * ___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old | mask;
    return (old & mask) != 0;
}

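/*
 * Illustrative caller pattern (not part of this header): since the
 * read-modify-write above is not atomic, concurrent users are expected
 * to serialise access themselves, e.g.
 *
 *     spin_lock(&map_lock);
 *     was_set = ___test_and_set_bit(nr, bitmap);
 *     spin_unlock(&map_lock);
 *
 * where map_lock and bitmap are hypothetical caller-owned objects.
 */
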
/**
 * ___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old & ~mask;
    return (old & mask) != 0;
}

/* WARNING: non-atomic and can be reordered! */
static __always_inline bool
___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old ^ mask;
    return (old & mask) != 0;
}

/**
 * _test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
    return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

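/*
 * Example (illustrative, not part of this header): the helpers operate on
 * a caller-owned array of unsigned long, where bit @nr lives in word
 * nr / BITS_PER_LONG:
 *
 *     unsigned long map[2] = { 0 };
 *
 *     ___set_bit(5, map);
 *     if (_test_bit(5, map))
 *         ___clear_bit(5, map);
 */
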
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */