/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

/*
 * The *_return_relaxed and {cmp,}xchg_relaxed variants end with a "bne-"
 * instruction, so an isync is enough as the acquire barrier for the fully
 * ordered variants.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	/* %U1/%X1 let the compiler pick the update/indexed form of lwz */
	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
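
/*
 * Illustrative note (not from this header): arch_atomic_read() and
 * arch_atomic_set() are plain word load/store and carry no ordering of
 * their own.  A hypothetical publisher would add an explicit fence:
 *
 *	__atomic_release_fence();	// order earlier stores first
 *	arch_atomic_set(&ready, 1);	// then publish the flag
 */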

#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v)\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op "%I3" suffix " %1,%0,%3\n"				\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)

ATOMIC_OPS(add, add, "c", I, "xer")
ATOMIC_OPS(sub, sub, "c", I, "xer")
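
/*
 * For reference, a hand-expanded sketch (not generated text) of what
 * ATOMIC_OPS(add, add, "c", I, "xer") produces for the void op:
 *
 *	static __inline__ void arch_atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add%I2c	%0,%0,%2\n"	// addc, or addic if 'a' is immediate
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "rI" (a), "r" (&v->counter)
 *		: "cc", "xer");		// carrying forms write XER[CA]
 *	}
 */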

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
	ATOMIC_OP(op, asm_op, suffix, sign)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)

ATOMIC_OPS(and, and, ".", K)
ATOMIC_OPS(or, or, "", K)
ATOMIC_OPS(xor, xor, "", K)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define arch_atomic_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))

/*
 * Don't want to override the generic atomic_try_cmpxchg_acquire, because
 * we add a lock hint to the lwarx, which may not be wanted for the
 * _acquire case (and is not used by the other _acquire variants so it
 * would be a surprise).
 */
static __always_inline bool
arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	/* EH=1 on PPC64 hints that the reservation is for a lock */
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2,%[eh]	# atomic_try_cmpxchg_lock\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"\t"	PPC_ACQUIRE_BARRIER "\n"
"2:\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new), [eh] "n" (eh)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
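
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * a trylock built on the hinted compare-and-swap above:
 *
 *	static inline bool example_trylock(atomic_t *lock)
 *	{
 *		int old = 0;
 *
 *		// succeeds, with acquire ordering, iff *lock was 0
 *		return arch_atomic_try_cmpxchg_lock(lock, &old, 1);
 *	}
 */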

/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n"
"	cmpw	0,%0,%3\n"
"	beq	2f\n"
"	add%I2c	%0,%0,%2\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	sub%I2c	%0,%0,%2\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "rI" (a), "r" (u)
	: "cc", "memory", "xer");

	return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
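
/*
 * Illustrative sketch (hypothetical caller): the classic inc-not-zero
 * pattern, taking a reference only while the object is still live:
 *
 *	static inline bool example_get_ref(atomic_t *refcount)
 *	{
 *		// the old value is returned; 0 means already dead
 *		return arch_atomic_fetch_add_unless(refcount, 1, 0) != 0;
 *	}
 */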

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n"
"	cmpwi	%0,1\n"
"	addi	%0,%0,-1\n"
"	blt-	2f\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n"
"2:"
	: "=&b" (t)		/* "b": addi cannot encode r0 as a source */
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
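
/*
 * Illustrative sketch (hypothetical caller): a counting-semaphore style
 * "try down" that never takes the counter below zero:
 *
 *	static inline bool example_try_down(atomic_t *sem)
 *	{
 *		// result is the old value minus 1; negative means no permit
 *		return arch_atomic_dec_if_positive(sem) >= 0;
 *	}
 */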

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
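
/*
 * Note: the 64-bit macros emit "<op> %0,%2,%0", so sub maps to the raw
 * subf (subtract-from) mnemonic, where subf t,a,t computes t - a.  The
 * 32-bit ops instead use the subc extended mnemonic with natural operand
 * order.  Neither 64-bit form takes an immediate, hence no %I modifier
 * and no "xer" clobber here.
 */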

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_inc arch_atomic64_inc

static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_dec arch_atomic64_dec

static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n"
"	addic.	%0,%0,-1\n"
"	blt-	2f\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#define arch_atomic64_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic64_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic64_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))

/**
 * arch_atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n"
"	cmpd	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
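
/*
 * Illustrative sketch (hypothetical caller): bump a 64-bit counter unless
 * it has already reached a sentinel, e.g. saturating at S64_MAX:
 *
 *	static inline void example_count(atomic64_t *ctr)
 *	{
 *		// once the counter hits S64_MAX it stays there
 *		arch_atomic64_fetch_add_unless(ctr, 1, S64_MAX);
 *	}
 */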

/**
 * arch_atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	/* t1 keeps the old value for the test; t2 holds the increment */
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n"
"	cmpdi	0,%0,0\n"
"	beq-	2f\n"
"	addic	%1,%0,1\n"
"	stdcx.	%1,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"\n"
"2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
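
/*
 * Equivalent formulation (a sketch, not how this header defines it):
 * inc_not_zero is fetch_add_unless(v, 1, 0) != 0:
 *
 *	static inline bool example_inc_not_zero(atomic64_t *v)
 *	{
 *		return arch_atomic64_fetch_add_unless(v, 1, 0) != 0;
 *	}
 */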

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */