/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#ifdef CONFIG_SMP
#include <asm-generic/atomic64.h>
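/*
 * C-SKY is 32-bit only and has no 64-bit ll/sc pair, so atomic64_t comes
 * from the generic, lock-based implementation (lib/atomic64.c).
 */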

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

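/*
 * Hook the generic acquire/release fences up to C-SKY barrier instructions
 * from <asm/barrier.h>. Going by the bar.* mnemonic convention ("b"efore /
 * "a"fter plus r/w sets), __bar_brarw() orders prior loads against later
 * loads and stores (acquire), and __bar_brwaw() orders prior loads and
 * stores against later stores (release).
 */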
#define __atomic_acquire_fence()    __bar_brarw()

#define __atomic_release_fence()    __bar_brwaw()

static __always_inline int arch_atomic_read(const atomic_t *v)
{
    return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
    WRITE_ONCE(v->counter, i);
}

#define ATOMIC_OP(op)                           \
static __always_inline                          \
void arch_atomic_##op(int i, atomic_t *v)               \
{                                   \
    unsigned long tmp;                      \
    __asm__ __volatile__ (                      \
    "1: ldex.w      %0, (%2)    \n"         \
    "   " #op "     %0, %1      \n"         \
    "   stex.w      %0, (%2)    \n"         \
    "   bez     %0, 1b      \n"         \
    : "=&r" (tmp)                           \
    : "r" (i), "r" (&v->counter)                    \
    : "memory");                            \
}
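
/*
 * ldex.w/stex.w are a load-exclusive/store-exclusive pair: stex.w writes 1
 * back into %0 on success and 0 if the reservation was lost, so
 * "bez %0, 1b" retries until the read-modify-write completes atomically.
 * The instances below paste the op name straight into the asm as the ALU
 * instruction; the leading space in "ATOMIC_OP( or)" is only for column
 * alignment and is stripped by the preprocessor.
 */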

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP( or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

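/*
 * Fetch variants: the same exclusive-store retry loop, except the old value
 * is snapshotted into ret ("mov %1, %0") before the op and returned. These
 * are _relaxed: no fences here; ordered forms are derived generically.
 */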
#define ATOMIC_FETCH_OP(op)                     \
static __always_inline                          \
int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)        \
{                                   \
    register int ret, tmp;                      \
    __asm__ __volatile__ (                      \
    "1: ldex.w      %0, (%3) \n"                \
    "   mov     %1, %0   \n"                \
    "   " #op "     %0, %2   \n"                \
    "   stex.w      %0, (%3) \n"                \
    "   bez     %0, 1b   \n"                \
    : "=&r" (tmp), "=&r" (ret)                  \
    : "r" (i), "r" (&v->counter)                    \
    : "memory");                            \
    return ret;                         \
}

#define ATOMIC_OP_RETURN(op, c_op)                  \
static __always_inline                          \
int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)       \
{                                   \
    return arch_atomic_fetch_##op##_relaxed(i, v) c_op i;       \
}

#define ATOMIC_OPS(op, c_op)                        \
    ATOMIC_FETCH_OP(op)                     \
    ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

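/*
 * For illustration, ATOMIC_OPS(add, +) expands to roughly:
 *
 *     int arch_atomic_fetch_add_relaxed(int i, atomic_t *v);  // ll/sc loop
 *     int arch_atomic_add_return_relaxed(int i, atomic_t *v)
 *     {
 *         return arch_atomic_fetch_add_relaxed(i, v) + i;
 *     }
 */
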
#define arch_atomic_fetch_add_relaxed   arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed   arch_atomic_fetch_sub_relaxed

#define arch_atomic_add_return_relaxed  arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed  arch_atomic_sub_return_relaxed

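/*
 * The self-referencing defines above (and the similar ones below) tell the
 * generic fallback layer behind <linux/atomic.h> that this arch supplies
 * the _relaxed form; the acquire/release/fully-ordered variants are then
 * generated from it using the __atomic_acquire_fence() /
 * __atomic_release_fence() hooks defined at the top of this file.
 */
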
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN

#define ATOMIC_OPS(op)                          \
    ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS( or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and_relaxed   arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed    arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed   arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP

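/*
 * arch_atomic_fetch_add_unless(): atomically add @a to @v unless @v == @u;
 * returns the old value. cmpne sets the condition flag when the loaded
 * value differs from @u, so "bf 2f" (branch if flag clear, i.e. @v == @u)
 * bails out without storing. RELEASE_FENCE before the loop plus FULL_FENCE
 * after a successful store make the success path fully ordered while the
 * failure path stays unordered; both fence strings come from
 * <asm/barrier.h>.
 */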
static __always_inline int
arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
    int prev, tmp;

    __asm__ __volatile__ (
        RELEASE_FENCE
        "1: ldex.w      %0, (%3)    \n"
        "   cmpne       %0, %4      \n"
        "   bf      2f      \n"
        "   mov     %1, %0      \n"
        "   add     %1, %2      \n"
        "   stex.w      %1, (%3)    \n"
        "   bez     %1, 1b      \n"
        FULL_FENCE
        "2:\n"
        : "=&r" (prev), "=&r" (tmp)
        : "r" (a), "r" (&v->counter), "r" (u)
        : "memory");

    return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

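/*
 * arch_atomic_inc_unless_negative(): increment @v unless it is negative;
 * returns true iff the increment happened. rc is the success flag: it is
 * cleared up front and only set to 1 on the path that performs the store.
 */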
static __always_inline bool
arch_atomic_inc_unless_negative(atomic_t *v)
{
    int rc, tmp;

    __asm__ __volatile__ (
        RELEASE_FENCE
        "1: ldex.w      %0, (%2)    \n"
        "   movi        %1, 0       \n"
        "   blz     %0, 2f      \n"
        "   movi        %1, 1       \n"
        "   addi        %0, 1       \n"
        "   stex.w      %0, (%2)    \n"
        "   bez     %0, 1b      \n"
        FULL_FENCE
        "2:\n"
        : "=&r" (tmp), "=&r" (rc)
        : "r" (&v->counter)
        : "memory");

    return rc ? true : false;
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

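/*
 * arch_atomic_dec_unless_positive(): decrement @v unless it is strictly
 * positive (bhz branches out when the loaded value is > 0, so zero and
 * negative values are still decremented); returns true iff the decrement
 * happened, with rc as the success flag as above.
 */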
static __always_inline bool
arch_atomic_dec_unless_positive(atomic_t *v)
{
    int rc, tmp;

    __asm__ __volatile__ (
        RELEASE_FENCE
        "1: ldex.w      %0, (%2)    \n"
        "   movi        %1, 0       \n"
        "   bhz     %0, 2f      \n"
        "   movi        %1, 1       \n"
        "   subi        %0, 1       \n"
        "   stex.w      %0, (%2)    \n"
        "   bez     %0, 1b      \n"
        FULL_FENCE
        "2:\n"
        : "=&r" (tmp), "=&r" (rc)
        : "r" (&v->counter)
        : "memory");

    return rc ? true : false;
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

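/*
 * arch_atomic_dec_if_positive(): decrement @v only if the result would be
 * non-negative, and return (old - 1) on both paths: the three-operand
 * "subi %1, %0, 1" leaves the old value in dec, so "dec - 1" is the stored
 * value on success and a negative value (meaning "not decremented")
 * otherwise.
 */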
static __always_inline int
arch_atomic_dec_if_positive(atomic_t *v)
{
    int dec, tmp;

    __asm__ __volatile__ (
        RELEASE_FENCE
        "1: ldex.w      %0, (%2)    \n"
        "   subi        %1, %0, 1   \n"
        "   blz     %1, 2f      \n"
        "   stex.w      %1, (%2)    \n"
        "   bez     %1, 1b      \n"
        FULL_FENCE
        "2:\n"
        : "=&r" (dec), "=&r" (tmp)
        : "r" (&v->counter)
        : "memory");

    return dec - 1;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive

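/*
 * xchg/cmpxchg on the 32-bit counter are thin wrappers around the
 * word-sized (size 4) helpers from <asm/cmpxchg.h>; the remaining ordering
 * variants are again filled in by the generic fallback layer.
 */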
#define ATOMIC_OP()                         \
static __always_inline                          \
int arch_atomic_xchg_relaxed(atomic_t *v, int n)            \
{                                   \
    return __xchg_relaxed(n, &(v->counter), 4);         \
}                                   \
static __always_inline                          \
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n)      \
{                                   \
    return __cmpxchg_relaxed(&(v->counter), o, n, 4);       \
}                                   \
static __always_inline                          \
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n)      \
{                                   \
    return __cmpxchg_acquire(&(v->counter), o, n, 4);       \
}                                   \
static __always_inline                          \
int arch_atomic_cmpxchg(atomic_t *v, int o, int n)          \
{                                   \
    return __cmpxchg(&(v->counter), o, n, 4);           \
}

#define ATOMIC_OPS()                            \
    ATOMIC_OP()

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed    arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg     arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP

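/*
 * Usage sketch (illustrative only; reclaim_object() is a made-up helper,
 * not a kernel API): generic code reaches these arch_* entry points through
 * the <linux/atomic.h> wrappers, e.g.
 *
 *     static atomic_t refs = ATOMIC_INIT(1);
 *
 *     atomic_inc(&refs);                  // arch_atomic_add(1, &refs)
 *     if (atomic_dec_and_test(&refs))     // built from the fetch/return ops
 *         reclaim_object();
 */
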
#else
#include <asm-generic/atomic.h>
#endif

#endif /* __ASM_CSKY_ATOMIC_H */