/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#include <linux/stringify.h>

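/*
 * When the kernel is built with LSE atomics, the LL/SC sequences below are
 * only used as fallbacks, so __LL_SC_FALLBACK() moves each one out of line:
 * it branches to a local label placed in .subsection 1 and branches back
 * once the fallback has run. Without LSE atomics the sequence is emitted
 * inline, unchanged.
 */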
#ifdef CONFIG_ARM64_LSE_ATOMICS
#define __LL_SC_FALLBACK(asm_ops) \
" b 3f\n" \
" .subsection 1\n" \
"3:\n" \
asm_ops "\n" \
" b 4f\n" \
" .previous\n" \
"4:\n"
#else
#define __LL_SC_FALLBACK(asm_ops) asm_ops
#endif

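/*
 * 'K' restricts an operand to an immediate suitable for a 32-bit logical
 * instruction. If the toolchain is not known to handle 'K' correctly
 * (CONFIG_CC_HAS_K_CONSTRAINT unset), define it away so that such operands
 * simply fall back to the plain "r" register constraint below.
 */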
#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op, constraint) \
static inline void \
__ll_sc_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "\n" \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n") \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: __stringify(constraint) "r" (i)); \
}

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline int \
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "_return" #name "\n" \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" st" #rel "xr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb ) \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: __stringify(constraint) "r" (i) \
	: cl); \
\
	return result; \
}

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline int \
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int val, result; \
\
	asm volatile("// atomic_fetch_" #op #name "\n" \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %w0, %3\n" \
" " #asm_op " %w1, %w0, %w4\n" \
" st" #rel "xr %w2, %w1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb ) \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
	: __stringify(constraint) "r" (i) \
	: cl); \
\
	return result; \
}

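/*
 * The (name, mb, acq, rel, cl) tuples below select the memory ordering:
 * the fully ordered variant uses a store-release exclusive plus a trailing
 * "dmb ish", _relaxed adds no ordering, _acquire uses a load-acquire
 * exclusive and _release a store-release exclusive. Every ordered variant
 * also clobbers "memory" so the compiler cannot cache values across it.
 */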
#define ATOMIC_OPS(...) \
	ATOMIC_OP(__VA_ARGS__) \
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__) \
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__) \
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__) \
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__) \
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

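/*
 * Each ATOMIC_OPS() invocation instantiates the plain op plus its _return
 * and fetch_ forms in all four orderings, e.g. ATOMIC_OPS(add, add, I)
 * generates __ll_sc_atomic_add(), __ll_sc_atomic_add_return*() and
 * __ll_sc_atomic_fetch_add*(). 'I' and 'J' accept immediates suitable for
 * ADD and SUB respectively.
 */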
ATOMIC_OPS(add, add, I)
ATOMIC_OPS(sub, sub, J)

#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
	ATOMIC_OP(__VA_ARGS__) \
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__) \
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and, K)
ATOMIC_OPS(or, orr, K)
ATOMIC_OPS(xor, eor, K)

/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC_OPS(andnot, bic, )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

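/*
 * The 64-bit variants mirror the 32-bit macros above, but operate on
 * atomic64_t/s64 using full X registers (no %w modifier on the data
 * operands) and, for the bitwise ops, the 64-bit logical immediate
 * constraint 'L'.
 */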
#define ATOMIC64_OP(op, asm_op, constraint) \
static inline void \
__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "\n" \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b") \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: __stringify(constraint) "r" (i)); \
}

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline long \
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "_return" #name "\n" \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" st" #rel "xr %w1, %0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb ) \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: __stringify(constraint) "r" (i) \
	: cl); \
\
	return result; \
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline long \
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
	s64 result, val; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_fetch_" #op #name "\n" \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %0, %3\n" \
" " #asm_op " %1, %0, %4\n" \
" st" #rel "xr %w2, %1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb ) \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
	: __stringify(constraint) "r" (i) \
	: cl); \
\
	return result; \
}

#define ATOMIC64_OPS(...) \
	ATOMIC64_OP(__VA_ARGS__) \
	ATOMIC64_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add, I)
ATOMIC64_OPS(sub, sub, J)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
	ATOMIC64_OP(__VA_ARGS__) \
	ATOMIC64_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and, L)
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)

/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC64_OPS(andnot, bic, )

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

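/*
 * Decrement v->counter only if the result stays non-negative: when the
 * decremented value would be negative, the store is skipped and the
 * (negative) result is returned with the counter left unmodified.
 */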
static inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
	__LL_SC_FALLBACK(
" prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.lt 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
" dmb ish\n"
"2:")
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}

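/*
 * __CMPXCHG_CASE() generates a cmpxchg helper for one operand size: 'w'
 * selects 32-bit register views, 'sfx' the load/store size suffix (b/h for
 * 8/16-bit accesses) and 'acq'/'rel'/'mb' the ordering, as above. The
 * helper returns the value observed in memory; the exchange succeeded when
 * that value equals 'old'.
 */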
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
static inline u##sz \
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
				 unsigned long old, \
				 u##sz new) \
{ \
	unsigned long tmp; \
	u##sz oldval; \
\
	/* \
	 * Sub-word sizes require explicit casting so that the compare \
	 * part of the cmpxchg doesn't end up interpreting non-zero \
	 * upper bits of the register containing "old". \
	 */ \
\
	if (sz < 32) \
		old = (u##sz)old; \
\
	asm volatile( \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
" cbnz %" #w "[tmp], 2f\n" \
" st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
"2:") \
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
	  [v] "+Q" (*(u##sz *)ptr) \
	: [old] __stringify(constraint) "r" (old), [new] "r" (new) \
	: cl); \
\
	return oldval; \
}

/*
 * Instantiate a cmpxchg helper for every size and memory ordering. The
 * 8/16/32-bit cases can take a 32-bit logical immediate for 'old' ('K',
 * which may have been defined away above); the 64-bit cases use 'L'.
 */
__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory", L)

#undef __CMPXCHG_CASE

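/*
 * __CMPXCHG_DBL() implements the double-word compare-and-exchange using
 * LDXP/STXP on an adjacent pair of 64-bit words. It returns zero when both
 * words matched and the new pair was stored, and non-zero otherwise.
 */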
#define __CMPXCHG_DBL(name, mb, rel, cl) \
static inline long \
__ll_sc__cmpxchg_double##name(unsigned long old1, \
			      unsigned long old2, \
			      unsigned long new1, \
			      unsigned long new2, \
			      volatile void *ptr) \
{ \
	unsigned long tmp, ret; \
\
	asm volatile("// __cmpxchg_double" #name "\n" \
	__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \
" eor %1, %1, %4\n" \
" orr %1, %0, %1\n" \
" cbnz %1, 2f\n" \
" st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \
" " #mb "\n" \
"2:") \
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
	: cl); \
\
	return ret; \
}

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL
#undef K

#endif	/* __ASM_ATOMIC_LL_SC_H */