/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

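/*
 * Atomic operations implemented with the ARMv8.1 LSE (Large System
 * Extensions) instructions: ST<op>/LD<op>, CAS and CASP. __LSE_PREAMBLE
 * (defined in <asm/lse.h>) switches the assembler to an instruction set
 * that accepts the LSE encodings.
 */
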
#define ATOMIC_OP(op, asm_op)                       \
static inline void __lse_atomic_##op(int i, atomic_t *v)        \
{                                   \
    asm volatile(                           \
    __LSE_PREAMBLE                          \
    "   " #asm_op " %w[i], %[v]\n"              \
    : [v] "+Q" (v->counter)                     \
    : [i] "r" (i));                         \
}

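/*
 * As an example, ATOMIC_OP(add, stadd) below expands to roughly:
 *
 *	static inline void __lse_atomic_add(int i, atomic_t *v)
 *	{
 *		asm volatile(__LSE_PREAMBLE
 *		"	stadd	%w[i], %[v]\n"
 *		: [v] "+Q" (v->counter)
 *		: [i] "r" (i));
 *	}
 */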
ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

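/* LSE has no store/load subtract forms, so sub is an add of the negated value. */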
static inline void __lse_atomic_sub(int i, atomic_t *v)
{
    __lse_atomic_add(-i, v);
}

#undef ATOMIC_OP

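/*
 * name is the ordering suffix appended to the function name, mb the
 * ordering suffix appended to the instruction (a = acquire, l = release,
 * al = both), and cl the clobber list; the ordered variants clobber
 * "memory" so that the asm also acts as a compiler barrier. For example,
 * ATOMIC_FETCH_OP(_acquire, a, add, ldadd, "memory") defines
 * __lse_atomic_fetch_add_acquire() around an ldadda instruction.
 */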
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)            \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{                                   \
    int old;                            \
                                    \
    asm volatile(                           \
    __LSE_PREAMBLE                          \
    "   " #asm_op #mb " %w[i], %w[old], %[v]"           \
    : [v] "+Q" (v->counter),                    \
      [old] "=r" (old)                      \
    : [i] "r" (i)                           \
    : cl);                              \
                                    \
    return old;                         \
}

#define ATOMIC_FETCH_OPS(op, asm_op)                    \
    ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)           \
    ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")     \
    ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")     \
    ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_FETCH_OP_SUB(name)                   \
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)  \
{                                   \
    return __lse_atomic_fetch_add##name(-i, v);         \
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB(        )

#undef ATOMIC_FETCH_OP_SUB

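/*
 * The *_return ops are built from the fetch ops of the same ordering:
 * fetch the old value, then re-apply the operation to it in C.
 */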
#define ATOMIC_OP_ADD_SUB_RETURN(name)                  \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
{                                   \
    return __lse_atomic_fetch_add##name(i, v) + i;          \
}                                   \
                                    \
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
{                                   \
    return __lse_atomic_fetch_sub##name(i, v) - i;          \
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN(        )

#undef ATOMIC_OP_ADD_SUB_RETURN

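/*
 * LSE has no plain AND: STCLR/LDCLR clear the bits that are set in the
 * source register (v &= ~i), so AND is an ANDNOT of the inverted mask.
 */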
static inline void __lse_atomic_and(int i, atomic_t *v)
{
    return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)                \
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)  \
{                                   \
    return __lse_atomic_fetch_andnot##name(~i, v);          \
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

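/*
 * The atomic64_t variants below mirror the 32-bit ones, operating on
 * full 64-bit X registers (no %w operand modifier) and s64 values.
 */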
#define ATOMIC64_OP(op, asm_op)                     \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)        \
{                                   \
    asm volatile(                           \
    __LSE_PREAMBLE                          \
    "   " #asm_op " %[i], %[v]\n"               \
    : [v] "+Q" (v->counter)                     \
    : [i] "r" (i));                         \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
    __lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)          \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{                                   \
    s64 old;                            \
                                    \
    asm volatile(                           \
    __LSE_PREAMBLE                          \
    "   " #asm_op #mb " %[i], %[old], %[v]"         \
    : [v] "+Q" (v->counter),                    \
      [old] "=r" (old)                      \
    : [i] "r" (i)                           \
    : cl);                              \
                                    \
    return old;                         \
}

#define ATOMIC64_FETCH_OPS(op, asm_op)                  \
    ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)         \
    ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")       \
    ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")       \
    ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)                 \
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{                                   \
    return __lse_atomic64_fetch_add##name(-i, v);           \
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB(        )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name)                \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{                                   \
    return __lse_atomic64_fetch_add##name(i, v) + i;        \
}                                   \
                                    \
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{                                   \
    return __lse_atomic64_fetch_sub##name(i, v) - i;        \
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN(        )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
    return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)              \
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{                                   \
    return __lse_atomic64_fetch_andnot##name(~i, v);        \
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

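/*
 * Decrement v->counter unless the result would be negative, using a
 * CAS retry loop. The pointer argument v doubles as the output
 * register: on exit it holds old - 1, which is what is returned.
 * The two subs after the casal check whether the value the CAS read
 * back matches the expected one; if not, the loop retries.
 */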
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
    unsigned long tmp;

    asm volatile(
    __LSE_PREAMBLE
    "1: ldr %x[tmp], %[v]\n"
    "   subs    %[ret], %x[tmp], #1\n"
    "   b.lt    2f\n"
    "   casal   %x[tmp], %[ret], %[v]\n"
    "   sub %x[tmp], %x[tmp], #1\n"
    "   sub %x[tmp], %x[tmp], %[ret]\n"
    "   cbnz    %x[tmp], 1b\n"
    "2:"
    : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
    :
    : "cc", "memory");

    return (long)v;
}

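/*
 * Generate cmpxchg helpers around the CAS instruction. w is the operand
 * width modifier (w or x), sfx the size suffix of the mnemonic (b, h or
 * none), name the infix of the generated function name, sz the operand
 * width in bits, mb the ordering suffix (a, l, al) and cl the clobbers.
 */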
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)         \
static __always_inline u##sz                        \
__lse__cmpxchg_case_##name##sz(volatile void *ptr,          \
                          u##sz old,        \
                          u##sz new)        \
{                                   \
    register unsigned long x0 asm ("x0") = (unsigned long)ptr;  \
    register u##sz x1 asm ("x1") = old;             \
    register u##sz x2 asm ("x2") = new;             \
    unsigned long tmp;                      \
                                    \
    asm volatile(                           \
    __LSE_PREAMBLE                          \
    "   mov %" #w "[tmp], %" #w "[old]\n"           \
    "   cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"    \
    "   mov %" #w "[ret], %" #w "[tmp]"         \
    : [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr),            \
      [tmp] "=&r" (tmp)                     \
    : [old] "r" (x1), [new] "r" (x2)                \
    : cl);                              \
                                    \
    return x0;                          \
}

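/*
 * For example, __CMPXCHG_CASE(w, b, acq_, 8, a, "memory") defines
 * __lse__cmpxchg_case_acq_8(), an 8-bit acquire cmpxchg built around a
 * casab instruction that returns the value observed at *ptr.
 */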
__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

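/*
 * 128-bit cmpxchg on a pair of adjacent 64-bit words using CASP/CASPAL.
 * CASP requires an even/odd register pair for both the compare values
 * and the new values, hence the fixed x0/x1 and x2/x3 register
 * variables. The eor/orr sequence folds the two values read back into a
 * single status in x0: zero if both words matched, non-zero otherwise.
 */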
#define __CMPXCHG_DBL(name, mb, cl...)                  \
static __always_inline long                     \
__lse__cmpxchg_double##name(unsigned long old1,             \
                     unsigned long old2,        \
                     unsigned long new1,        \
                     unsigned long new2,        \
                     volatile void *ptr)        \
{                                   \
    unsigned long oldval1 = old1;                   \
    unsigned long oldval2 = old2;                   \
    register unsigned long x0 asm ("x0") = old1;            \
    register unsigned long x1 asm ("x1") = old2;            \
    register unsigned long x2 asm ("x2") = new1;            \
    register unsigned long x3 asm ("x3") = new2;            \
    register unsigned long x4 asm ("x4") = (unsigned long)ptr;  \
                                    \
    asm volatile(                           \
    __LSE_PREAMBLE                          \
    "   casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
    "   eor %[old1], %[old1], %[oldval1]\n"         \
    "   eor %[old2], %[old2], %[oldval2]\n"         \
    "   orr %[old1], %[old1], %[old2]"          \
    : [old1] "+&r" (x0), [old2] "+&r" (x1),             \
      [v] "+Q" (*(unsigned long *)ptr)              \
    : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),     \
      [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)      \
    : cl);                              \
                                    \
    return x0;                          \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LSE_H */