/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
#ifndef CONFIG_CPU_V6	/* byte/halfword exclusives need ARCH >= V6K */
	case 1:
		asm volatile("@ __xchg1\n"
		"1: ldrexb %0, [%3]\n"
		" strexb %1, %2, [%3]\n"
		" teq %1, #0\n"
		" bne 1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 2:
		asm volatile("@ __xchg2\n"
		"1: ldrexh %0, [%3]\n"
		" strexh %1, %2, [%3]\n"
		" teq %1, #0\n"
		" bne 1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	case 4:
		asm volatile("@ __xchg4\n"
		"1: ldrex %0, [%3]\n"
		" strex %1, %2, [%3]\n"
		" teq %1, #0\n"
		" bne 1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		" swpb %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		" swp %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

#define arch_xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
				   sizeof(*(ptr)));			\
})
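/*
 * Usage sketch: arch_xchg_relaxed() atomically stores the new value and
 * returns the previous contents of *ptr, with no implied memory ordering.
 * A minimal example, assuming a caller-defined 'state' word (the variable
 * name is illustrative only):
 *
 *	static unsigned int state;
 *
 *	unsigned int old = arch_xchg_relaxed(&state, 1);
 *	if (old == 0)
 *		pr_debug("flag was clear, we set it\n");
 *
 * Generic code normally reaches this through the xchg()/xchg_relaxed()
 * wrappers in <linux/atomic.h> rather than the arch_* macro directly.
 */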

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define arch_xchg arch_xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt the current CPU. Always
 * make them available.
 */
#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__generic_cmpxchg_local((ptr),			\
						(unsigned long)(o),	\
						(unsigned long)(n),	\
						sizeof(*(ptr)));	\
})

#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			" ldrexb %1, [%2]\n"
			" mov %0, #0\n"
			" teq %1, %3\n"
			" strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			" ldrexh %1, [%2]\n"
			" mov %0, #0\n"
			" teq %1, %3\n"
			" strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			" ldrex %1, [%2]\n"
			" mov %0, #0\n"
			" teq %1, %3\n"
			" strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

#define arch_cmpxchg_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
				      (unsigned long)(o),		\
				      (unsigned long)(n),		\
				      sizeof(*(ptr)));			\
})
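/*
 * Usage sketch: __cmpxchg() returns the value that was observed at *ptr, so
 * the update succeeded only if the returned value equals 'old'.  A minimal
 * retry loop, assuming a caller-defined counter 'v' (illustrative only):
 *
 *	static unsigned int v;
 *
 *	unsigned int old, new;
 *	do {
 *		old = READ_ONCE(v);
 *		new = old + 1;
 *	} while (arch_cmpxchg_relaxed(&v, old, new) != old);
 *
 * Callers normally use the cmpxchg()/cmpxchg_relaxed() wrappers from
 * <linux/atomic.h>, which layer the required ordering and instrumentation
 * on top of these arch_* macros.
 */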

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __generic_cmpxchg_local(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
					(unsigned long)(o),		\
					(unsigned long)(n),		\
					sizeof(*(ptr)));		\
})

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
	"1: ldrexd %1, %H1, [%3]\n"
	" teq %1, %4\n"
	" teqeq %H1, %H4\n"
	" bne 2f\n"
	" strexd %0, %5, %H5, [%3]\n"
	" teq %0, #0\n"
	" bne 1b\n"
	"2:"
		: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
		: "r" (ptr), "r" (old), "r" (new)
		: "cc");

	return oldval;
}

#define arch_cmpxchg64_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
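/*
 * The 64-bit variant is built on ldrexd/strexd, so the target must be a
 * naturally aligned 64-bit object.  A minimal sketch, assuming a
 * caller-defined 64-bit field 'tstamp' (illustrative only):
 *
 *	static u64 tstamp;
 *
 *	u64 cur = READ_ONCE(tstamp);
 *	u64 seen = arch_cmpxchg64_relaxed(&tstamp, cur, cur + 1);
 *	if (seen == cur)
 *		pr_debug("update applied\n");
 *	else
 *		pr_debug("lost the race, current value is %llu\n", seen);
 */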

#endif	/* __LINUX_ARM_ARCH__ < 6 */

#endif /* __ASM_ARM_CMPXCHG_H */