/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)    { (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
    return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
    __WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
    asm volatile(LOCK_PREFIX "addq %1,%0"
             : "=m" (v->counter)
             : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
    asm volatile(LOCK_PREFIX "subq %1,%0"
             : "=m" (v->counter)
             : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
    return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
    asm volatile(LOCK_PREFIX "incq %0"
             : "=m" (v->counter)
             : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
    asm volatile(LOCK_PREFIX "decq %0"
             : "=m" (v->counter)
             : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
    return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
    return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
    return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
    return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

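/**
 * arch_atomic64_sub_return - subtract and return
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns the new value of @v.
 */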
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
    return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

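/**
 * arch_atomic64_fetch_add - add and return old value
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns the value @v held before the add.
 */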
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
    return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

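/**
 * arch_atomic64_fetch_sub - subtract and return old value
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns the value @v held before
 * the subtraction.
 */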
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
    return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub

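/**
 * arch_atomic64_cmpxchg - compare and exchange
 * @v: pointer to type atomic64_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if @v equals @old, and returns the value
 * @v held before the operation.
 */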
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
    return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

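/**
 * arch_atomic64_try_cmpxchg - compare and exchange, reporting success
 * @v: pointer to type atomic64_t
 * @old: pointer to expected value, updated with the current value on failure
 * @new: new value
 *
 * Atomically sets @v to @new if @v equals *@old and returns true;
 * otherwise stores the current value of @v in *@old and returns false.
 */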
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
    return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

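/**
 * arch_atomic64_xchg - exchange value
 * @v: pointer to type atomic64_t
 * @new: new value
 *
 * Atomically sets @v to @new and returns the value @v held before
 * the exchange.
 */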
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
    return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

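/**
 * arch_atomic64_and - bitwise AND with atomic64 variable
 * @i: integer value to AND into @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v & @i.
 */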
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
    asm volatile(LOCK_PREFIX "andq %1,%0"
            : "+m" (v->counter)
            : "er" (i)
            : "memory");
}

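/**
 * arch_atomic64_fetch_and - bitwise AND and return old value
 * @i: integer value to AND into @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v & @i via a cmpxchg loop and returns the
 * value @v held before the operation.
 */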
static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
    s64 val = arch_atomic64_read(v);

    do {
    } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
    return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

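/**
 * arch_atomic64_or - bitwise OR with atomic64 variable
 * @i: integer value to OR into @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v | @i.
 */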
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
    asm volatile(LOCK_PREFIX "orq %1,%0"
            : "+m" (v->counter)
            : "er" (i)
            : "memory");
}

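/**
 * arch_atomic64_fetch_or - bitwise OR and return old value
 * @i: integer value to OR into @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v | @i via a cmpxchg loop and returns the
 * value @v held before the operation.
 */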
static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
    s64 val = arch_atomic64_read(v);

    do {
    } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
    return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

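/**
 * arch_atomic64_xor - bitwise XOR with atomic64 variable
 * @i: integer value to XOR into @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v ^ @i.
 */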
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
    asm volatile(LOCK_PREFIX "xorq %1,%0"
            : "+m" (v->counter)
            : "er" (i)
            : "memory");
}

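/**
 * arch_atomic64_fetch_xor - bitwise XOR and return old value
 * @i: integer value to XOR into @v
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v ^ @i via a cmpxchg loop and returns the
 * value @v held before the operation.
 */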
static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
    s64 val = arch_atomic64_read(v);

    do {
    } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
    return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */