/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \
{ \
	u##sz ret; \
	unsigned long tmp; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	prfm	pstl1strm, %2\n" \
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \
	"	cbnz	%w1, 1b\n" \
	"	" #mb, \
	/* LSE atomics */ \
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \
		__nops(3) \
	"	" #nop_lse) \
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \
	: "r" (x) \
	: cl); \
 \
	return ret; \
}

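/*
 * Instantiate an xchg helper for each width and memory-ordering variant.
 * For example, __XCHG_CASE(w, b, acq_, 8, ...) below expands to
 * __xchg_case_acq_8(), which uses ldaxrb/stxrb (or swpab with LSE) to
 * provide acquire semantics on a byte.
 */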
__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

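/*
 * Dispatch to the size-specific helper above based on sizeof(*ptr); any
 * other size is a compile-time error via BUILD_BUG().
 */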
#define __XCHG_GEN(sfx) \
static __always_inline unsigned long __xchg##sfx(unsigned long x, \
						 volatile void *ptr, \
						 int size) \
{ \
	switch (size) { \
	case 1: \
		return __xchg_case##sfx##_8(x, ptr); \
	case 2: \
		return __xchg_case##sfx##_16(x, ptr); \
	case 4: \
		return __xchg_case##sfx##_32(x, ptr); \
	case 8: \
		return __xchg_case##sfx##_64(x, ptr); \
	default: \
		BUILD_BUG(); \
	} \
 \
	unreachable(); \
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

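/*
 * Wrap the size dispatch so callers get a result with the same type as *ptr
 * rather than a bare unsigned long.
 */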
#define __xchg_wrapper(sfx, ptr, x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

/* xchg */
#define arch_xchg_relaxed(...)		__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)		__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)		__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)			__xchg_wrapper( _mb, __VA_ARGS__)

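/*
 * The cmpxchg implementations live in the LL/SC and LSE atomics headers;
 * __lse_ll_sc_body() selects between the __ll_sc_*() and __lse_*() variants
 * depending on whether the CPU supports the LSE atomics.
 */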
#define __CMPXCHG_CASE(name, sz) \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
					      u##sz old, \
					      u##sz new) \
{ \
	return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \
				ptr, old, new); \
}

__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE

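/*
 * cmpxchg_double operates on a pair of adjacent 64-bit words and returns
 * zero on success (the arch_cmpxchg_double() wrappers below invert this to
 * the usual boolean convention).
 */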
#define __CMPXCHG_DBL(name) \
static inline long __cmpxchg_double##name(unsigned long old1, \
					  unsigned long old2, \
					  unsigned long new1, \
					  unsigned long new2, \
					  volatile void *ptr) \
{ \
	return __lse_ll_sc_body(_cmpxchg_double##name, \
				old1, old2, new1, new2, ptr); \
}

__CMPXCHG_DBL(   )
__CMPXCHG_DBL(_mb)

#undef __CMPXCHG_DBL

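/*
 * As with xchg, dispatch on the operand size and reject any other size at
 * compile time.
 */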
#define __CMPXCHG_GEN(sfx) \
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
						    unsigned long old, \
						    unsigned long new, \
						    int size) \
{ \
	switch (size) { \
	case 1: \
		return __cmpxchg_case##sfx##_8(ptr, old, new); \
	case 2: \
		return __cmpxchg_case##sfx##_16(ptr, old, new); \
	case 4: \
		return __cmpxchg_case##sfx##_32(ptr, old, new); \
	case 8: \
		return __cmpxchg_case##sfx##_64(ptr, old, new); \
	default: \
		BUILD_BUG(); \
	} \
 \
	unreachable(); \
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

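/*
 * Pass old/new through unsigned long for the size dispatch and cast the
 * result back to the type of *ptr for the caller.
 */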
#define __cmpxchg_wrapper(sfx, ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg##sfx((ptr), (unsigned long)(o), \
				(unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

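/*
 * cmpxchg_double requires two adjacent 64-bit words: the size is checked at
 * build time and the adjacency (ptr2 == ptr1 + 1) with VM_BUG_ON().
 */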
#define __cmpxchg_double_check(ptr1, ptr2) \
({ \
	if (sizeof(*(ptr1)) != 8) \
		BUILD_BUG(); \
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})

#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({ \
	int __ret; \
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({ \
	int __ret; \
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})

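/*
 * Wait for the value at ptr to change from val: the load-exclusive arms the
 * exclusive monitor, so the second wfe wakes when another CPU writes the
 * location (or on any other event). Callers must re-check the value.
 */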
#define __CMPWAIT_CASE(w, sfx, sz) \
static inline void __cmpwait_case_##sz(volatile void *ptr, \
				       unsigned long val) \
{ \
	unsigned long tmp; \
 \
	asm volatile( \
	"	sevl\n" \
	"	wfe\n" \
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
	"	cbnz	%" #w "[tmp], 1f\n" \
	"	wfe\n" \
	"1:" \
	: [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr) \
	: [val] "r" (val)); \
}

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

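/*
 * Only the relaxed form is needed; the size dispatch mirrors __XCHG_GEN()
 * and __CMPXCHG_GEN() above.
 */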
#define __CMPWAIT_GEN(sfx) \
static __always_inline void __cmpwait##sfx(volatile void *ptr, \
					   unsigned long val, \
					   int size) \
{ \
	switch (size) { \
	case 1: \
		return __cmpwait_case##sfx##_8(ptr, (u8)val); \
	case 2: \
		return __cmpwait_case##sfx##_16(ptr, (u16)val); \
	case 4: \
		return __cmpwait_case##sfx##_32(ptr, val); \
	case 8: \
		return __cmpwait_case##sfx##_64(ptr, val); \
	default: \
		BUILD_BUG(); \
	} \
 \
	unreachable(); \
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

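/* Used by smp_cond_load_relaxed() and friends to wait without spinning. */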
#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))

#endif	/* __ASM_CMPXCHG_H */