#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>

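/*
 * BITOFF_CAL() gives the bit offset of a value of the given size located
 * at byte offset 'off' within its aligned 32-bit word, taking the CPU
 * endianness into account.
 */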
#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
#endif

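/*
 * XCHG_GEN() generates a byte or halfword xchg on top of the word-sized
 * lwarx/stwcx. primitives: the operand is shifted into position inside
 * its aligned 32-bit word and only the addressed sub-word is replaced.
 * 'cl' is the clobber list of the generated function.
 */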
#define XCHG_GEN(type, sfx, cl) \
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
{ \
	unsigned int prev, prev_mask, tmp, bitoff, off; \
	\
	off = (unsigned long)p % sizeof(u32); \
	bitoff = BITOFF_CAL(sizeof(type), off); \
	p -= off; \
	val <<= bitoff; \
	prev_mask = (u32)(type)-1 << bitoff; \
	\
	__asm__ __volatile__( \
"1:	lwarx %0,0,%3\n" \
"	andc %1,%0,%5\n" \
"	or %1,%1,%4\n" \
"	stwcx. %1,0,%3\n" \
"	bne- 1b\n" \
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
	: "r" (p), "r" (val), "r" (prev_mask) \
	: "cc", cl); \
	\
	return prev >> bitoff; \
}

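/*
 * CMPXCHG_GEN() generates a byte or halfword cmpxchg using the same masked
 * lwarx/stwcx. technique.  'br' and 'br2' are the entry and exit barriers
 * and 'cl' is the clobber list, so the fully ordered, local, acquire and
 * relaxed variants below are all produced from this one template.
 */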
#define CMPXCHG_GEN(type, sfx, br, br2, cl) \
static inline \
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
{ \
	unsigned int prev, prev_mask, tmp, bitoff, off; \
	\
	off = (unsigned long)p % sizeof(u32); \
	bitoff = BITOFF_CAL(sizeof(type), off); \
	p -= off; \
	old <<= bitoff; \
	new <<= bitoff; \
	prev_mask = (u32)(type)-1 << bitoff; \
	\
	__asm__ __volatile__( \
	br \
"1:	lwarx %0,0,%3\n" \
"	and %1,%0,%6\n" \
"	cmpw 0,%1,%4\n" \
"	bne- 2f\n" \
"	andc %1,%0,%6\n" \
"	or %1,%1,%5\n" \
"	stwcx. %1,0,%3\n" \
"	bne- 1b\n" \
	br2 \
	"\n" \
"2:" \
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
	: "r" (p), "r" (old), "r" (new), "r" (prev_mask) \
	: "cc", cl); \
	\
	return prev >> bitoff; \
}

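/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */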
XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx %0,0,%2 \n"
"	stwcx. %3,0,%2 \n\
	bne- 1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx %0,0,%2\n"
"	stwcx. %3,0,%2\n"
"	bne- 1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx %0,0,%2 \n"
"	stdcx. %3,0,%2 \n\
	bne- 1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx %0,0,%2\n"
"	stdcx. %3,0,%2\n"
"	bne- 1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

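/*
 * __xchg_local() and __xchg_relaxed() dispatch on the operand size; an
 * unsupported size is rejected at build time.
 */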
static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_local(ptr, x);
	case 2:
		return __xchg_u16_local(ptr, x);
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_relaxed(ptr, x);
	case 2:
		return __xchg_u16_relaxed(ptr, x);
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}
#define arch_xchg_local(ptr, x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg_local((ptr), \
			(unsigned long)_x_, sizeof(*(ptr))); \
})

#define arch_xchg_relaxed(ptr, x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
			(unsigned long)_x_, sizeof(*(ptr))); \
})

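/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */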
CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx %0,0,%2	# __cmpxchg_u32\n\
	cmpw 0,%0,%3\n\
	bne- 2f\n"
"	stwcx. %4,0,%2\n\
	bne- 1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
0244 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
	cmpw 0,%0,%3\n\
	bne- 2f\n"
"	stwcx. %4,0,%2\n\
	bne- 1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx %0,0,%2	# __cmpxchg_u32_relaxed\n"
"	cmpw 0,%0,%3\n"
"	bne- 2f\n"
"	stwcx. %4,0,%2\n"
"	bne- 1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

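/*
 * Acquire variants: the acquire barrier is executed only after a
 * successful store; a failed compare branches past it, so a failed
 * cmpxchg_acquire() provides no ordering.
 */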
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx %0,0,%2	# __cmpxchg_u32_acquire\n"
"	cmpw 0,%0,%3\n"
"	bne- 2f\n"
"	stwcx. %4,0,%2\n"
"	bne- 1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx %0,0,%2	# __cmpxchg_u64\n\
	cmpd 0,%0,%3\n\
	bne- 2f\n\
	stdcx. %4,0,%2\n\
	bne- 1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
0336 "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
	cmpd 0,%0,%3\n\
	bne- 2f\n\
	stdcx. %4,0,%2\n\
	bne- 1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx %0,0,%2	# __cmpxchg_u64_relaxed\n"
"	cmpd 0,%0,%3\n"
"	bne- 2f\n"
"	stdcx. %4,0,%2\n"
"	bne- 1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx %0,0,%2	# __cmpxchg_u64_acquire\n"
"	cmpd 0,%0,%3\n"
"	bne- 2f\n"
"	stdcx. %4,0,%2\n"
"	bne- 1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

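/*
 * The __cmpxchg*() helpers dispatch on the operand size and are wrapped
 * by the arch_cmpxchg*() macros below; an unsupported size is rejected
 * at build time.
 */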
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 2:
		return __cmpxchg_u16(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
		unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_local(ptr, old, new);
	case 2:
		return __cmpxchg_u16_local(ptr, old, new);
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_relaxed(ptr, old, new);
	case 2:
		return __cmpxchg_u16_relaxed(ptr, old, new);
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_acquire(ptr, old, new);
	case 2:
		return __cmpxchg_u16_acquire(ptr, old, new);
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}
#define arch_cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
			(unsigned long)_n_, sizeof(*(ptr))); \
})

#define arch_cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
			(unsigned long)_n_, sizeof(*(ptr))); \
})

#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
			(unsigned long)_o_, (unsigned long)_n_, \
			sizeof(*(ptr))); \
})

#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
			(unsigned long)_o_, (unsigned long)_n_, \
			sizeof(*(ptr))); \
})
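
/*
 * cmpxchg64*() needs a native 64-bit cmpxchg, so it is only provided on
 * PPC64; 32-bit kernels fall back to the generic cmpxchg64_local().
 */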
#ifdef CONFIG_PPC64
#define arch_cmpxchg64(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg((ptr), (o), (n)); \
})
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg_local((ptr), (o), (n)); \
})
#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#define arch_cmpxchg64_acquire(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	arch_cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */