Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* Atomic operations usable in machine independent code */
0003 #ifndef _LINUX_ATOMIC_H
0004 #define _LINUX_ATOMIC_H
0005 #include <linux/types.h>
0006 
0007 #include <asm/atomic.h>
0008 #include <asm/barrier.h>
0009 
0010 /*
0011  * Relaxed variants of xchg, cmpxchg and some atomic operations.
0012  *
0013  * We support four variants:
0014  *
0015  * - Fully ordered: The default implementation, no suffix required.
0016  * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
0017  * - Release: Provides RELEASE semantics, _release suffix.
0018  * - Relaxed: No ordering guarantees, _relaxed suffix.
0019  *
0020  * For compound atomics performing both a load and a store, ACQUIRE
0021  * semantics apply only to the load and RELEASE semantics only to the
0022  * store portion of the operation. Note that a failed cmpxchg_acquire
0023  * does -not- imply any memory ordering constraints.
0024  *
0025  * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
0026  */
0027 
/*
 * atomic_cond_read_acquire()/_relaxed(): spin-wait on the raw ->counter
 * of an atomic_t @v until condition @c becomes true, returning the value
 * observed. Built directly on smp_cond_load_acquire()/_relaxed(); per
 * the variant rules above, _acquire orders the final load against later
 * accesses while _relaxed provides no ordering guarantee.
 */
0028 #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
0029 #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
0030 
/* Same wrappers for atomic64_t. */
0031 #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
0032 #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
0033 
0034 /*
0035  * The idea here is to build acquire/release variants by adding explicit
0036  * barriers on top of the relaxed variant. In the case where the relaxed
0037  * variant is already fully ordered, no additional barriers are needed.
0038  *
0039  * If an architecture overrides __atomic_acquire_fence() it will probably
0040  * want to define smp_mb__after_spinlock().
0041  */
/*
 * Default fence hooks consumed by the __atomic_op_*() wrappers below.
 * An architecture may provide its own definitions (via <asm/atomic.h>,
 * included earlier); the #ifndef guards make these generic smp_mb__*()
 * based versions pure fallbacks.
 */
0042 #ifndef __atomic_acquire_fence
/* Issued after a relaxed op to upgrade it to ACQUIRE semantics. */
0043 #define __atomic_acquire_fence      smp_mb__after_atomic
0044 #endif
0045 
0046 #ifndef __atomic_release_fence
/* Issued before a relaxed op to upgrade it to RELEASE semantics. */
0047 #define __atomic_release_fence      smp_mb__before_atomic
0048 #endif
0049 
0050 #ifndef __atomic_pre_full_fence
/* First half of the full-barrier bracket around a relaxed op. */
0051 #define __atomic_pre_full_fence     smp_mb__before_atomic
0052 #endif
0053 
0054 #ifndef __atomic_post_full_fence
/* Second half of the full-barrier bracket around a relaxed op. */
0055 #define __atomic_post_full_fence    smp_mb__after_atomic
0056 #endif
0057 
/*
 * Build the ACQUIRE variant of @op from its _relaxed form: perform the
 * relaxed op first, then issue the acquire fence so the loaded value is
 * ordered before all subsequent accesses. Implemented as a GNU
 * statement expression so the whole macro evaluates to the op's return
 * value (__ret).
 */
0058 #define __atomic_op_acquire(op, args...)                \
0059 ({                                  \
0060     typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);     \
0061     __atomic_acquire_fence();                   \
0062     __ret;                              \
0063 })
0064 
/*
 * Build the RELEASE variant of @op from its _relaxed form: issue the
 * release fence first, so all prior accesses are ordered before the
 * store portion of the op, then perform the relaxed op. The op's value
 * is the value of the statement expression.
 */
0065 #define __atomic_op_release(op, args...)                \
0066 ({                                  \
0067     __atomic_release_fence();                   \
0068     op##_relaxed(args);                     \
0069 })
0070 
/*
 * Build the fully ordered variant of @op from its _relaxed form by
 * bracketing the op between the pre/post full fences, making it behave
 * as a full barrier on both sides. Note that typeof() does not evaluate
 * its operand, so op##_relaxed(args) executes exactly once — on the
 * assignment line between the two fences.
 */
0071 #define __atomic_op_fence(op, args...)                  \
0072 ({                                  \
0073     typeof(op##_relaxed(args)) __ret;               \
0074     __atomic_pre_full_fence();                  \
0075     __ret = op##_relaxed(args);                 \
0076     __atomic_post_full_fence();                 \
0077     __ret;                              \
0078 })
0079 
0080 #include <linux/atomic/atomic-arch-fallback.h>
0081 #include <linux/atomic/atomic-long.h>
0082 #include <linux/atomic/atomic-instrumented.h>
0083 
0084 #endif /* _LINUX_ATOMIC_H */