/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_BITOPS_H
#define _ASM_X86_SYNC_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic.  The test_and_*() operations return 0 if
 * the bit was clear before the operation and non-zero if it was not.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */
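
/*
 * Example sketch: on x86-64, where BITS_PER_LONG is 64, a bit number
 * past 63 simply indexes into the next word of the bitmap:
 *
 *	unsigned long bitmap[2] = { 0, 0 };
 *
 *	sync_set_bit(65, bitmap);	(sets bit 1 of bitmap[1])
 */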

#include <asm/rmwcc.h>

#define ADDR (*(volatile long *)addr)

/**
 * sync_set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
        asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
                     : "+m" (ADDR)
                     : "Ir" (nr)
                     : "memory");
}
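
/*
 * Usage sketch (shared_pending and post_event() are hypothetical;
 * think of a bitmap in memory shared with another agent such as a
 * hypervisor, which is where the always-locked sync_ variants matter):
 *
 *	static unsigned long shared_pending[BITS_TO_LONGS(256)];
 *
 *	static void post_event(unsigned int port)
 *	{
 *		sync_set_bit(port, shared_pending);
 *	}
 */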

/**
 * sync_clear_bit - Clear a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * sync_clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
        asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
                     : "+m" (ADDR)
                     : "Ir" (nr)
                     : "memory");
}
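
/*
 * Sketch of the barrier pairing described above (flags and LOCK_BIT
 * are hypothetical names):
 *
 *	smp_mb__before_atomic();
 *	sync_clear_bit(LOCK_BIT, &flags);
 *	smp_mb__after_atomic();
 */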

/**
 * sync_change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * sync_change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
        asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
                     : "+m" (ADDR)
                     : "Ir" (nr)
                     : "memory");
}
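
/*
 * Sketch (led_state and LED0 are hypothetical): because the flip is a
 * single locked instruction, concurrent togglers never lose an update:
 *
 *	sync_change_bit(LED0, &led_state);
 */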

/**
 * sync_test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
        return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
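
/*
 * Sketch of claiming a slot exactly once (slot, busy_map and
 * take_slot() are hypothetical); a zero return means the bit was
 * clear before the operation, i.e. we won the race:
 *
 *	if (!sync_test_and_set_bit(slot, busy_map))
 *		take_slot(slot);
 */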

/**
 * sync_test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline bool sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
        return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
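
/*
 * Sketch (pending and handle_port() are hypothetical): clearing the
 * bit and reading its old value in one locked instruction ensures each
 * event is consumed by exactly one CPU:
 *
 *	if (sync_test_and_clear_bit(port, pending))
 *		handle_port(port);
 */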

/**
 * sync_test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline bool sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
        return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
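
/*
 * Sketch (state and PHASE_BIT are hypothetical): the return value
 * reports the old bit, so the caller knows which phase it toggled
 * away from:
 *
 *	bool was_set = sync_test_and_change_bit(PHASE_BIT, &state);
 */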

#define sync_test_bit(nr, addr) test_bit(nr, addr)
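
/*
 * A plain test_bit() is sufficient above: reading a bit is not a
 * read-modify-write, so it needs no lock prefix to be atomic.
 */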

#undef ADDR

#endif /* _ASM_X86_SYNC_BITOPS_H */