/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
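
/*
 * Atomic operations that C can't guarantee us.  Useful for resource
 * counting etc., but slower than plain operations, so use them
 * sparingly.
 *
 * The _relaxed variants below end in smp_mb(): Alpha needs a full
 * barrier there to preserve dependency ordering, and the barriered
 * versions are built from the _relaxed ones, so the acquire and
 * post-full-fence hooks are left empty to avoid redundant
 * back-to-back fences.
 */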
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()

#define ATOMIC64_INIT(i)	{ (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
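
/*
 * To get proper branch prediction for the main line, the ll/sc retry
 * path branches forward to code in .subsection 2 (placed at the end of
 * this object's .text) and then back to restart the operation, so the
 * common case falls straight through.
 */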
#define ATOMIC_OP(op, asm_op) \
static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
	s64 temp; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}

#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
	s64 temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
	s64 temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC_OPS(op) \
	ATOMIC_OP(op, op##l) \
	ATOMIC_OP_RETURN(op, op##l) \
	ATOMIC_FETCH_OP(op, op##l) \
	ATOMIC64_OP(op, op##q) \
	ATOMIC64_OP_RETURN(op, op##q) \
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#define arch_atomic_andnot arch_atomic_andnot
#define arch_atomic64_andnot arch_atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
	ATOMIC_OP(op, asm) \
	ATOMIC_FETCH_OP(op, asm) \
	ATOMIC64_OP(op, asm) \
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))

#define arch_atomic_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
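
/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v.
 */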
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1: ldl_l %[old],%[mem]\n"
	" cmpeq %[old],%[u],%[c]\n"
	" addl %[old],%[a],%[new]\n"
	" bne %[c],2f\n"
	" stl_c %[new],%[mem]\n"
	" beq %[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
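
/**
 * arch_atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v.
 */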
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[old],%[mem]\n"
	" cmpeq %[old],%[u],%[c]\n"
	" addq %[old],%[a],%[new]\n"
	" bne %[c],2f\n"
	" stq_c %[new],%[mem]\n"
	" beq %[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
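
/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 only if the old value was greater than
 * zero.  Returns the old value minus 1, whether or not @v was actually
 * decremented.
 */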
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[old],%[mem]\n"
	" subq %[old],1,%[tmp]\n"
	" ble %[old],2f\n"
	" stq_c %[tmp],%[mem]\n"
	" beq %[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#endif /* _ALPHA_ATOMIC_H */