#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>

#define ATOMIC_OPS(pfx, type) \
static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
{ \
        return READ_ONCE(v->counter); \
} \
\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
{ \
        WRITE_ONCE(v->counter, i); \
} \
\
static __always_inline type \
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
{ \
        return arch_cmpxchg(&v->counter, o, n); \
} \
\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
{ \
        return arch_xchg(&v->counter, n); \
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i) { (i) }
ATOMIC_OPS(atomic64, s64)
#endif
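
/*
 * The instantiations above provide arch_atomic_read()/arch_atomic_set()
 * plus cmpxchg/xchg wrappers for atomic_t and, on 64-bit kernels, the
 * matching arch_atomic64_* accessors operating on s64 counters.
 */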

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
{ \
        type temp; \
\
        if (!kernel_uses_llsc) { \
                unsigned long flags; \
\
                raw_local_irq_save(flags); \
                v->counter c_op i; \
                raw_local_irq_restore(flags); \
                return; \
        } \
\
        __asm__ __volatile__( \
        " .set push \n" \
        " .set " MIPS_ISA_LEVEL " \n" \
        " " __SYNC(full, loongson3_war) " \n" \
        "1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
        " " #asm_op " %0, %2 \n" \
        " " #sc " %0, %1 \n" \
        "\t" __stringify(SC_BEQZ) " %0, 1b \n" \
        " .set pop \n" \
        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
        : "Ir" (i) : __LLSC_CLOBBER); \
}
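
/*
 * For example, ATOMIC_OPS(atomic, add, ...) further below makes ATOMIC_OP()
 * emit arch_atomic_add(): an LL/SC retry loop that loads v->counter with #ll,
 * applies #asm_op, and conditionally stores with #sc, branching back via
 * SC_BEQZ until the store succeeds; without LL/SC it falls back to a plain
 * C update under raw_local_irq_save().
 */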

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ type \
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \
        type temp, result; \
\
        if (!kernel_uses_llsc) { \
                unsigned long flags; \
\
                raw_local_irq_save(flags); \
                result = v->counter; \
                result c_op i; \
                v->counter = result; \
                raw_local_irq_restore(flags); \
                return result; \
        } \
\
        __asm__ __volatile__( \
        " .set push \n" \
        " .set " MIPS_ISA_LEVEL " \n" \
        " " __SYNC(full, loongson3_war) " \n" \
        "1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
        " " #asm_op " %0, %1, %3 \n" \
        " " #sc " %0, %2 \n" \
        "\t" __stringify(SC_BEQZ) " %0, 1b \n" \
        " " #asm_op " %0, %1, %3 \n" \
        " .set pop \n" \
        : "=&r" (result), "=&r" (temp), \
          "+" GCC_OFF_SMALL_ASM() (v->counter) \
        : "Ir" (i) : __LLSC_CLOBBER); \
\
        return result; \
}
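
/*
 * Note that #asm_op appears twice in ATOMIC_OP_RETURN(): the #sc overwrites
 * %0 (result) with the store-conditional success flag, so once the loop
 * exits the new value is recomputed from the loaded value held in %1 (temp).
 */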

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ type \
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \
        type temp, result; \
\
        if (!kernel_uses_llsc) { \
                unsigned long flags; \
\
                raw_local_irq_save(flags); \
                result = v->counter; \
                v->counter c_op i; \
                raw_local_irq_restore(flags); \
                return result; \
        } \
\
        __asm__ __volatile__( \
        " .set push \n" \
        " .set " MIPS_ISA_LEVEL " \n" \
        " " __SYNC(full, loongson3_war) " \n" \
        "1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
        " " #asm_op " %0, %1, %3 \n" \
        " " #sc " %0, %2 \n" \
        "\t" __stringify(SC_BEQZ) " %0, 1b \n" \
        " .set pop \n" \
        " move %0, %1 \n" \
        : "=&r" (result), "=&r" (temp), \
          "+" GCC_OFF_SMALL_ASM() (v->counter) \
        : "Ir" (i) : __LLSC_CLOBBER); \
\
        return result; \
}
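
/*
 * ATOMIC_FETCH_OP() differs from ATOMIC_OP_RETURN() only in its return
 * value: the trailing "move %0, %1" copies the value loaded by #ll, so the
 * caller gets the counter's value from before the operation.
 */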

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
        ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#endif
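
/*
 * The return and fetch forms above are implemented only as _relaxed
 * variants; the generic atomic fallback code derives the acquire, release
 * and fully-ordered versions from them using the architecture's barriers.
 */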

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#endif
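
/*
 * The bitwise operations (and, or, xor) have no *_return form in the
 * atomic API, so ATOMIC_OPS was redefined above without ATOMIC_OP_RETURN
 * and only the fetch variants are generated for them.
 */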

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * arch_{atomic,atomic64}_sub_if_positive - conditionally subtract from an
 * atomic variable
 * @i: value to subtract
 * @v: pointer of type atomic_t (or atomic64_t)
 *
 * Atomically subtracts @i from @v, but only stores the result when it is
 * greater than or equal to zero.  Returns the result of the subtraction
 * whether or not the store was performed.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \
        type temp, result; \
\
        smp_mb__before_atomic(); \
\
        if (!kernel_uses_llsc) { \
                unsigned long flags; \
\
                raw_local_irq_save(flags); \
                result = v->counter; \
                result -= i; \
                if (result >= 0) \
                        v->counter = result; \
                raw_local_irq_restore(flags); \
                smp_mb__after_atomic(); \
                return result; \
        } \
\
        __asm__ __volatile__( \
        " .set push \n" \
        " .set " MIPS_ISA_LEVEL " \n" \
        " " __SYNC(full, loongson3_war) " \n" \
        "1: " #ll " %1, %2 # atomic_sub_if_positive\n" \
        " .set pop \n" \
        " " #op " %0, %1, %3 \n" \
        " move %1, %0 \n" \
        " bltz %0, 2f \n" \
        " .set push \n" \
        " .set " MIPS_ISA_LEVEL " \n" \
        " " #sc " %1, %2 \n" \
        " " __stringify(SC_BEQZ) " %1, 1b \n" \
        "2: " __SYNC(full, loongson3_war) " \n" \
        " .set pop \n" \
        : "=&r" (result), "=&r" (temp), \
          "+" GCC_OFF_SMALL_ASM() (v->counter) \
        : "Ir" (i) \
        : __LLSC_CLOBBER); \
\
        /* \
         * In the Loongson3 workaround case the barrier emitted at 2: above \
         * already provides completion ordering; it is needed there because \
         * the bltz can branch out of the LL/SC loop.  Avoid emitting a \
         * second barrier in that case. \
         */ \
        if (__SYNC_loongson3_war == 0) \
                smp_mb__after_atomic(); \
\
        return result; \
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
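
/*
 * Illustrative use (the variable names here are hypothetical): given an
 * atomic_t v,
 *
 *	if (arch_atomic_dec_if_positive(&v) < 0)
 *		failed = true;
 *
 * takes one unit from v only when one is available, otherwise leaving v
 * untouched and returning a negative value.
 */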

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */