/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here;
	 * it's a non-inlined function that increases binary size and stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns true if the result is
 * zero, or false for all other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0,
 * or false for all other cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
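
/*
 * Usage sketch (illustrative only; struct obj, obj_put() and obj_free() are
 * hypothetical): dec_and_test() is the classic building block for reference
 * counting, where the caller that drops the last reference frees the object:
 *
 *	struct obj { atomic_t refs; };
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (arch_atomic_dec_and_test(&o->refs))
 *			obj_free(o);
 *	}
 *
 * Only the thread that observes the 1 -> 0 transition sees "true", so the
 * free runs exactly once. (Kernel code would normally go through the
 * instrumented atomic_dec_and_test() wrapper rather than call the arch_
 * helper directly.)
 */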

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is zero,
 * or false for all other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	/* xadd() returns the old value; adding @i yields the new value. */
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return
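
/*
 * Usage sketch (illustrative only; next_id is a hypothetical counter): the
 * *_return() variants hand back the post-operation value, which makes them
 * handy for generating unique, ordered IDs:
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int new_id = arch_atomic_add_return(1, &next_id);
 *
 * Every caller gets a distinct new_id.
 */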

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

/* Atomically add @i to @v, returning the pre-addition value of @v. */
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

/* Atomically subtract @i from @v, returning the pre-subtraction value of @v. */
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

/*
 * If @v == @old, atomically set @v to @new; in all cases, return the value
 * @v held before the operation.
 */
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

/*
 * Like cmpxchg(), but return true on success; on failure, update *@old with
 * the value found in @v.
 */
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
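
/*
 * Usage sketch (illustrative only; example_inc_not_zero() is hypothetical):
 * because try_cmpxchg() refreshes *old with the current value on failure,
 * callers retry in a loop without re-reading @v themselves:
 *
 *	static __always_inline bool example_inc_not_zero(atomic_t *v)
 *	{
 *		int old = arch_atomic_read(v);
 *
 *		do {
 *			if (!old)
 *				return false;
 *		} while (!arch_atomic_try_cmpxchg(v, &old, old + 1));
 *
 *		return true;
 *	}
 *
 * The arch_atomic_fetch_and/or/xor() implementations below follow the same
 * pattern.
 */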

/* Atomically replace the value of @v with @new and return the old value. */
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	/* try_cmpxchg() updates @val on failure, so the loop body stays empty. */
	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
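
/*
 * Usage sketch (illustrative only; MY_FLAG_INITED and state are
 * hypothetical): fetch_or() returns the value *before* the OR, so it can
 * serve as a one-shot test-and-set on a flag bit:
 *
 *	#define MY_FLAG_INITED	0x1
 *
 *	bool already_set = arch_atomic_fetch_or(MY_FLAG_INITED, &state) &
 *			   MY_FLAG_INITED;
 *
 * Only the first caller to set the bit sees already_set == false.
 */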

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */