/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
    /*
     * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
     * it's a non-inlined function that increases binary size and stack usage.
     */
    return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
    __WRITE_ONCE(v->counter, i);
}
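
/*
 * Illustrative sketch, not part of the kernel header: a tiny "resource
 * counter" built only on arch_atomic_set()/arch_atomic_read(). Real kernel
 * code would normally use the generic atomic_set()/atomic_read() wrappers;
 * the helper names below are hypothetical.
 */
static __always_inline void example_counter_init(atomic_t *counter)
{
    arch_atomic_set(counter, 0);        /* plain atomic store */
}

static __always_inline int example_counter_peek(const atomic_t *counter)
{
    return arch_atomic_read(counter);   /* plain atomic load */
}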

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
    asm volatile(LOCK_PREFIX "addl %1,%0"
             : "+m" (v->counter)
             : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
    asm volatile(LOCK_PREFIX "subl %1,%0"
             : "+m" (v->counter)
             : "ir" (i) : "memory");
}
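
/*
 * Illustrative sketch, not part of the kernel header: accumulating byte
 * counts with arch_atomic_add()/arch_atomic_sub(). Neither call returns the
 * new value; use the *_return or fetch_* variants further down when the
 * result is needed. The helper names are hypothetical.
 */
static __always_inline void example_account_rx(atomic_t *bytes, int len)
{
    arch_atomic_add(len, bytes);        /* credit @len received bytes */
}

static __always_inline void example_unaccount_rx(atomic_t *bytes, int len)
{
    arch_atomic_sub(len, bytes);        /* give the bytes back */
}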

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
    return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
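
/*
 * Illustrative sketch, not part of the kernel header: dropping @nr
 * references at once and detecting whether the last one just went away.
 * The helper and callback names are hypothetical.
 */
static __always_inline void example_put_many(atomic_t *refs, int nr,
                                             void (*release)(void))
{
    if (arch_atomic_sub_and_test(nr, refs))
        release();      /* counter hit zero: those were the last references */
}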

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
    asm volatile(LOCK_PREFIX "incl %0"
             : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
    asm volatile(LOCK_PREFIX "decl %0"
             : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
    return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
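
/*
 * Illustrative sketch, not part of the kernel header: the classic get/put
 * reference-count pattern built on arch_atomic_inc() and
 * arch_atomic_dec_and_test(). Real code would use refcount_t or the generic
 * atomic_*() wrappers; the names below are hypothetical.
 */
static __always_inline void example_get(atomic_t *refs)
{
    arch_atomic_inc(refs);
}

static __always_inline bool example_put(atomic_t *refs, void (*release)(void))
{
    if (arch_atomic_dec_and_test(refs)) {
        release();      /* we dropped the final reference */
        return true;
    }
    return false;
}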

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
    return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
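
/*
 * Illustrative sketch, not part of the kernel header: a "debt" counter that
 * starts out negative (one unit per outstanding item) and is incremented as
 * items complete; arch_atomic_inc_and_test() reports the moment the balance
 * reaches zero, i.e. the last completion. The helper names are hypothetical.
 */
static __always_inline void example_begin_batch(atomic_t *pending, int nr_items)
{
    arch_atomic_set(pending, -nr_items);
}

static __always_inline bool example_item_done(atomic_t *pending)
{
    return arch_atomic_inc_and_test(pending);   /* true for the last item */
}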

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
    return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
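
/*
 * Illustrative sketch, not part of the kernel header: charging costs against
 * a budget and noticing the moment it is overdrawn. arch_atomic_add_negative()
 * only reports the sign of the result, not its value. The helper name is
 * hypothetical.
 */
static __always_inline bool example_charge(atomic_t *budget, int cost)
{
    /* true once the running balance drops below zero */
    return arch_atomic_add_negative(-cost, budget);
}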

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
    return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
    return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return
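
/*
 * Illustrative sketch, not part of the kernel header: using the value
 * returned by arch_atomic_add_return()/arch_atomic_sub_return() to track how
 * deep a shared section currently is. The helper names are hypothetical.
 */
static __always_inline int example_enter(atomic_t *nr_active)
{
    return arch_atomic_add_return(1, nr_active);    /* new depth, including us */
}

static __always_inline int example_leave(atomic_t *nr_active)
{
    return arch_atomic_sub_return(1, nr_active);    /* depth after we left */
}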

/* Atomically add @i to @v and return the value @v held before the addition. */
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
    return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

/* Atomically subtract @i from @v and return the value @v held beforehand. */
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
    return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
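
/*
 * Illustrative sketch, not part of the kernel header: handing out strictly
 * increasing ticket numbers with arch_atomic_fetch_add(), which returns the
 * value *before* the addition. The helper name is hypothetical, and real code
 * would also have to consider wrap-around of the 32-bit counter.
 */
static __always_inline int example_next_ticket(atomic_t *next)
{
    return arch_atomic_fetch_add(1, next);      /* our ticket; counter moves on */
}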

/*
 * Atomically compare @v with @old and, if they match, set @v to @new.
 * Returns the value @v held before the operation.
 */
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
    return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

/*
 * Like arch_atomic_cmpxchg(), but returns true on success and, on failure,
 * updates *@old with the value currently in @v.
 */
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
    return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

/* Atomically replace @v with @new and return the previous value. */
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
    return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
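
/*
 * Illustrative sketch, not part of the kernel header: "increment unless
 * zero", the usual read-then-retry loop around arch_atomic_try_cmpxchg().
 * On failure try_cmpxchg refreshes @old with the current value, so the loop
 * needs no explicit re-read. The helper name is hypothetical.
 */
static __always_inline bool example_inc_not_zero(atomic_t *refs)
{
    int old = arch_atomic_read(refs);

    do {
        if (!old)
            return false;       /* object already dead; don't resurrect it */
    } while (!arch_atomic_try_cmpxchg(refs, &old, old + 1));

    return true;
}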

/* Atomically AND @i into @v. No value is returned. */
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
    asm volatile(LOCK_PREFIX "andl %1,%0"
            : "+m" (v->counter)
            : "ir" (i)
            : "memory");
}

/* Atomically AND @i into @v and return the value @v held beforehand. */
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
    int val = arch_atomic_read(v);

    do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

    return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

/* Atomically OR @i into @v. No value is returned. */
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
    asm volatile(LOCK_PREFIX "orl %1,%0"
            : "+m" (v->counter)
            : "ir" (i)
            : "memory");
}

/* Atomically OR @i into @v and return the value @v held beforehand. */
static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
    int val = arch_atomic_read(v);

    do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

    return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

/* Atomically XOR @i into @v. No value is returned. */
static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
    asm volatile(LOCK_PREFIX "xorl %1,%0"
            : "+m" (v->counter)
            : "ir" (i)
            : "memory");
}

/* Atomically XOR @i into @v and return the value @v held beforehand. */
static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
    int val = arch_atomic_read(v);

    do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

    return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
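
/*
 * Illustrative sketch, not part of the kernel header: a one-shot flag set
 * with arch_atomic_fetch_or(); the old value tells the caller whether some
 * other CPU had already claimed it. The macro and helper names are
 * hypothetical.
 */
#define EXAMPLE_FLAG_CLAIMED    0x1

static __always_inline bool example_claim_once(atomic_t *flags)
{
    int old = arch_atomic_fetch_or(EXAMPLE_FLAG_CLAIMED, flags);

    return !(old & EXAMPLE_FLAG_CLAIMED);   /* true only for the first caller */
}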

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */