/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 *       you need to test for the CX8 feature in boot_cpu_data.
 */

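/*
 * For illustration, a minimal sketch of such a feature test; the variable
 * names and the fallback helper below are hypothetical, not part of this
 * header:
 *
 *	u64 shared_val;
 *
 *	if (boot_cpu_has(X86_FEATURE_CX8))
 *		set_64bit(&shared_val, new_val);
 *	else
 *		store_64bit_under_lock(&shared_val, new_val);
 *
 * boot_cpu_has() tests the feature bit in boot_cpu_data, which is the
 * check the note above refers to.
 */
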
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be executed with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}

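/*
 * A minimal usage sketch (the descriptor variable is hypothetical): a
 * 32-bit caller that must publish a 64-bit value which other CPUs may
 * read concurrently can use set_64bit() so readers never observe a
 * torn, half-written value:
 *
 *	volatile u64 desc;
 *
 *	set_64bit(&desc, ((u64)hi << 32) | lo);
 */
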
#ifdef CONFIG_X86_CMPXCHG64
#define arch_cmpxchg64(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#define arch_try_cmpxchg64(ptr, po, n)					\
	__try_cmpxchg64((ptr), (unsigned long long *)(po),		\
			(unsigned long long)(n))
#endif

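/*
 * A minimal usage sketch, assuming a 64-bit counter variable (the names
 * are illustrative only): arch_cmpxchg64() returns the previous value,
 * so the classic retry loop looks like:
 *
 *	u64 old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (arch_cmpxchg64(&counter, old, new) != old);
 */
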
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

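/*
 * Non-LOCKed variant: cmpxchg8b is still a single instruction, so it is
 * atomic with respect to interrupts on the local CPU, but it provides no
 * atomicity against other CPUs; only use it for CPU-local data.
 */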
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
{
	bool success;
	u64 old = *pold;
	asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
		     CC_SET(z)
		     : CC_OUT(z) (success),
		       [ptr] "+m" (*ptr),
		       "+A" (old)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32))
		     : "memory");

	if (unlikely(!success))
		*pold = old;
	return success;
}

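/*
 * A minimal usage sketch (counter is illustrative): because a failed
 * __try_cmpxchg64() writes the value it actually found back through
 * *pold, a retry loop does not need to re-read the target explicitly:
 *
 *	u64 old = READ_ONCE(counter);
 *	u64 new;
 *
 *	do {
 *		new = old + 1;
 *	} while (!__try_cmpxchg64(&counter, &old, new));
 */
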
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on 80386 and 80486: on those CPUs
 * cmpxchg8b may have to be emulated (via cmpxchg8b_emu below).
 */

#define arch_cmpxchg64(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#define arch_cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)",			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif

#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)

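/*
 * A usage sketch, assuming a caller that keeps both a lockless and a
 * locked update path (the helper names are hypothetical): double-word
 * cmpxchg users such as the SLUB allocator check this at runtime,
 * roughly:
 *
 *	if (system_has_cmpxchg_double())
 *		use_lockless_freelist();
 *	else
 *		use_locked_freelist();
 */
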
#endif /* _ASM_X86_CMPXCHG_32_H */