/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__WRITE_ONCE(v->counter, i);
}

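/*
 * Note: in the asm statements below, LOCK_PREFIX (from <asm/alternative.h>)
 * emits the "lock" instruction prefix on SMP kernels so each read-modify-write
 * is atomic across CPUs, and the "er" constraint allows @i to be passed in a
 * register or as a sign-extended 32-bit immediate.
 */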
/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

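/*
 * The *_and_test() and add_negative() helpers below use the
 * GEN_UNARY_RMWcc()/GEN_BINARY_RMWcc() macros, which wrap a LOCK-prefixed
 * instruction and return the named condition flag as a bool
 * ("e" = result was zero, "s" = result was negative).
 */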
/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

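/*
 * arch_atomic64_add_return() above and the sub/fetch variants below all
 * reduce to xadd(), which atomically adds its operand and returns the value
 * the counter held before the addition: add_return() adds @i to that old
 * value to produce the new one, while the fetch variants return the old
 * value directly.
 */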
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub

static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

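/*
 * x86 has no instruction that atomically performs and/or/xor and also
 * returns the old value, so the fetch variants below are open-coded as a
 * try_cmpxchg() loop: on failure, arch_atomic64_try_cmpxchg() updates @val
 * with the counter's current value and the loop simply retries.
 */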
static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */