#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__
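
/*
 * On s390 aligned 32-bit and 64-bit loads and stores are atomic, so the
 * plain read/set helpers below only need a single load (l/lg) or store
 * (st/stg) instruction.  The "R", "RT", "Q" and "QS" operand letters are
 * s390 memory constraints selecting short/long displacement addressing.
 */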
static inline int __atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "R" (v->counter));
	return c;
}

static inline void __atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=R" (v->counter) : "d" (i));
}

static inline s64 __atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "RT" (v->counter));
	return c;
}

static inline void __atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=RT" (v->counter) : "d" (i));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
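
/*
 * The z196 interlocked-access facility provides instructions (laa, lan,
 * lao, lax and their 64-bit "g" forms) that atomically update a location
 * in storage and return its previous contents in a register.
 */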
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static inline op_type op_name(op_type val, op_type *ptr) \
{ \
	op_type old; \
	\
	asm volatile( \
		op_string "	%[old],%[val],%[ptr]\n" \
		op_barrier \
		: [old] "=d" (old), [ptr] "+QS" (*ptr) \
		: [val] "d" (val) : "cc", "memory"); \
	return old; \
}
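
/*
 * Generate a plain and a _barrier variant of each operation.  The barrier
 * variant appends "bcr 14,0", which acts as a full serialization (memory
 * barrier) on z196 and newer machines.
 */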
#define __ATOMIC_OPS(op_name, op_type, op_string) \
	__ATOMIC_OP(op_name, op_type, op_string, "\n") \
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")

#undef __ATOMIC_OPS
#undef __ATOMIC_OP
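
/*
 * asi/agsi add a signed 8-bit immediate directly to a location in storage;
 * with the interlocked-access facility the update is performed atomically.
 */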
#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \
static __always_inline void op_name(op_type val, op_type *ptr) \
{ \
	asm volatile( \
		op_string "	%[ptr],%[val]\n" \
		op_barrier \
		: [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory"); \
}

#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
	__ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
	__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")

#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP

#else
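
/*
 * Pre-z196 fallback: emulate the atomic operations with a compare-and-swap
 * (cs) retry loop.  "jl 0b" branches back while the compare fails, i.e.
 * while another CPU changed the value in the meantime.
 */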
#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
	int old, new; \
	\
	asm volatile( \
		"0:	lr	%[new],%[old]\n" \
		op_string "	%[new],%[val]\n" \
		"	cs	%[old],%[new],%[ptr]\n" \
		"	jl	0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr) \
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC_OPS(op_name, op_string) \
	__ATOMIC_OP(op_name, op_string) \
	__ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS
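
/* 64-bit variants of the compare-and-swap loop above, using lgr/csg. */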
#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
	long old, new; \
	\
	asm volatile( \
		"0:	lgr	%[new],%[old]\n" \
		op_string "	%[new],%[val]\n" \
		"	csg	%[old],%[new],%[ptr]\n" \
		"	jl	0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr) \
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC64_OPS(op_name, op_string) \
	__ATOMIC64_OP(op_name, op_string) \
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS
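
/*
 * Without asi/agsi the constant-add helpers simply use the generic add
 * operations; cs/csg already perform serialization, so no extra barrier
 * is needed for the _barrier forms.
 */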
#define __atomic_add_const(val, ptr)		__atomic_add(val, ptr)
#define __atomic_add_const_barrier(val, ptr)	__atomic_add(val, ptr)
#define __atomic64_add_const(val, ptr)		__atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr)	__atomic64_add(val, ptr)

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
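
/*
 * Compare-and-swap helpers: cs/csg atomically replace *ptr with new if it
 * still contains old.  The plain variants return the value found in *ptr,
 * the _bool variants return whether the swap happened.
 *
 * Illustrative use only (v is an atomic_t pointer, FLAG a made-up constant):
 *
 *	int old, new;
 *	do {
 *		old = __atomic_read(v);
 *		new = old | FLAG;
 *	} while (!__atomic_cmpxchg_bool(&v->counter, old, new));
 */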
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old;
}

static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
	int old_expected = old;

	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old == old_expected;
}
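
/* 64-bit compare-and-swap variants, using csg. */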
static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	asm volatile(
		"	csg	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+QS" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old;
}

static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
	long old_expected = old;

	asm volatile(
		"	csg	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+QS" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old == old_expected;
}

#endif /* __ARCH_S390_ATOMIC_OPS__ */