/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic_" #op "\n" \
"1:	ldrex	%0, [%3]\n" \
"	" #asm_op "	%0, %0, %4\n" \
"	strex	%1, %0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
}

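/*
 * ATOMIC_OP_RETURN generates arch_atomic_<op>_return_relaxed(), which
 * returns the new value.  Only the _relaxed form is provided here; the
 * generic atomic headers build the acquire/release/fully-ordered
 * variants on top of it.
 */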
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
\
	prefetchw(&v->counter); \
\
	__asm__ __volatile__("@ atomic_" #op "_return\n" \
"1:	ldrex	%0, [%3]\n" \
"	" #asm_op "	%0, %0, %4\n" \
"	strex	%1, %0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
\
	return result; \
}

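/*
 * ATOMIC_FETCH_OP generates arch_atomic_fetch_<op>_relaxed(), which
 * returns the *old* value: "result" keeps what ldrex observed, while
 * "val" holds the updated value handed to strex.
 */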
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result, val; \
\
	prefetchw(&v->counter); \
\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1:	ldrex	%0, [%4]\n" \
"	" #asm_op "	%1, %0, %5\n" \
"	strex	%2, %1, [%4]\n" \
"	teq	%2, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
\
	return result; \
}

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

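/*
 * cmpxchg: "mov %0, #0" preloads the failure flag, and strexeq only
 * runs when the comparison succeeded.  The C loop therefore retries
 * solely when the exclusive store is disturbed (res == 1); a value
 * mismatch leaves res at 0 and returns the observed value.
 */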
static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed

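/*
 * Add "a" to the counter unless it currently equals "u"; the old value
 * is returned.  Note the conditional barriers: the trailing smp_mb() is
 * only issued when the add actually happened, so the operation is fully
 * ordered on success and unordered on failure.
 */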
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

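/*
 * Pre-ARMv6 CPUs lack the exclusive-monitor (ldrex/strex) support, so
 * these variants are uniprocessor-only: atomicity against interrupt
 * handlers is provided by disabling interrupts around a plain
 * read-modify-write.
 */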
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	val = v->counter; \
	raw_local_irq_restore(flags); \
\
	return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
\
	raw_local_irq_save(flags); \
	val = v->counter; \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
\
	return val; \
}

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

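/*
 * Instantiate the operation families.  add/sub get all three forms
 * (void, return, fetch); the bitwise ops below are re-instantiated
 * without ATOMIC_OP_RETURN, as no arch_atomic_<op>_return() variants
 * exist for them.
 */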
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

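/*
 * 64-bit atomics, implemented with ldrexd/strexd (or ldrd/strd under
 * LPAE).  When this block is not available, the kernel is expected to
 * fall back to the generic, lock-based CONFIG_GENERIC_ATOMIC64
 * implementation instead.
 */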
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

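/*
 * With LPAE, 64-bit ldrd/strd accesses to a naturally aligned
 * doubleword are single-copy atomic, so read and set need no exclusive
 * loop.  Without LPAE, a plain strd may be cut in half, hence the
 * ldrexd/strexd sequence in the non-LPAE atomic64_set() below.
 */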
#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

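/*
 * The %Q, %R and %H operand modifiers split a 64-bit operand held in a
 * register pair: %Q names the register with the least significant word,
 * %R the one with the most significant word, and %H the
 * highest-numbered register of the pair, as ldrexd/strexd require.
 */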
#define ATOMIC64_OP(op, op1, op2) \
static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic64_" #op "\n" \
"1:	ldrexd	%0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd	%1, %0, %H0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
}

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
\
	prefetchw(&v->counter); \
\
	__asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1:	ldrexd	%0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd	%1, %0, %H0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
\
	return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result, val; \
	unsigned long tmp; \
\
	prefetchw(&v->counter); \
\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1:	ldrexd	%0, %H0, [%4]\n" \
"	" #op1 " %Q1, %Q0, %Q5\n" \
"	" #op2 " %R1, %R0, %R5\n" \
"	strexd	%2, %1, %H1, [%4]\n" \
"	teq	%2, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
\
	return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_OP_RETURN(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

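/*
 * 64-bit cmpxchg: both halves of the old value must match (teq + teqeq)
 * before strexdeq attempts the store.  As in the 32-bit version, the
 * loop retries only when the exclusive store itself fails.
 */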
static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd	%1, %H1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"teqeq	%H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed

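/*
 * Decrement and return the new value, unless the result would go
 * negative, in which case the store is skipped and the (negative)
 * decremented value is still returned.  The smp_mb() pair makes the
 * operation fully ordered.
 */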
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

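/*
 * 64-bit counterpart of arch_atomic_fetch_add_unless() above: the same
 * conditional-barrier scheme, with the compare and add each carried out
 * as two-word operations (teq/teqeq, adds/adc).
 */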
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */