/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
#define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H

/*
 * From tools/perf/perf-sys.h, last modified in:
 * f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
 *
 * XXX: arch/arm64/include/asm/barrier.h in the kernel sources use dsb, is this
 * a case like for arm32 where we do things differently in userspace?
 */

#define mb()        asm volatile("dmb ish" ::: "memory")
#define wmb()       asm volatile("dmb ishst" ::: "memory")
#define rmb()       asm volatile("dmb ishld" ::: "memory")

/*
 * Kernel uses dmb variants on arm64 for smp_*() barriers. Pretty much the same
 * implementation as above mb()/wmb()/rmb(), though for the latter kernel uses
 * dsb. In any case, should above mb()/wmb()/rmb() change, make sure the below
 * smp_*() don't.
 */
#define smp_mb()    asm volatile("dmb ish" ::: "memory")
#define smp_wmb()   asm volatile("dmb ishst" ::: "memory")
#define smp_rmb()   asm volatile("dmb ishld" ::: "memory")

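/*
 * Illustrative sketch, not part of the original header: one way the
 * smp_wmb()/smp_rmb() pair above could be used in a simple message-passing
 * pattern.  The names (example_data, example_ready, example_publish,
 * example_consume) are invented for this example; real code would also use
 * READ_ONCE()/WRITE_ONCE() or the acquire/release helpers defined below.
 */
static int example_data;
static int example_ready;

static inline void example_publish(int v)
{
    example_data = v;
    /* Order the data store before the flag store. */
    smp_wmb();
    example_ready = 1;
}

static inline int example_consume(int *out)
{
    if (!example_ready)
        return 0;
    /* Order the flag load before the data load. */
    smp_rmb();
    *out = example_data;
    return 1;
}
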
#define smp_store_release(p, v)                     \
do {                                    \
    union { typeof(*p) __val; char __c[1]; } __u =          \
        { .__val = (v) };                   \
                                    \
    switch (sizeof(*p)) {                       \
    case 1:                             \
        asm volatile ("stlrb %w1, %0"               \
                : "=Q" (*p)             \
                : "r" (*(__u8_alias_t *)__u.__c)    \
                : "memory");                \
        break;                          \
    case 2:                             \
        asm volatile ("stlrh %w1, %0"               \
                : "=Q" (*p)             \
                : "r" (*(__u16_alias_t *)__u.__c)   \
                : "memory");                \
        break;                          \
    case 4:                             \
        asm volatile ("stlr %w1, %0"                \
                : "=Q" (*p)             \
                : "r" (*(__u32_alias_t *)__u.__c)   \
                : "memory");                \
        break;                          \
    case 8:                             \
        asm volatile ("stlr %1, %0"             \
                : "=Q" (*p)             \
                : "r" (*(__u64_alias_t *)__u.__c)   \
                : "memory");                \
        break;                          \
    default:                            \
        /* Only to shut up gcc ... */               \
        mb();                           \
        break;                          \
    }                               \
} while (0)

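/*
 * Illustrative sketch, not part of the original header: smp_store_release()
 * could be used by a writer to publish a ring-buffer tail after filling in
 * the entries.  `example_ring_tail` and `example_advance_tail` are names
 * invented for this example.
 */
static unsigned long example_ring_tail;

static inline void example_advance_tail(unsigned long new_tail)
{
    /*
     * Every store the writer made before this point (e.g. to the ring
     * entries) is visible to a reader that observes new_tail via an
     * acquire load of example_ring_tail.
     */
    smp_store_release(&example_ring_tail, new_tail);
}
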
#define smp_load_acquire(p)                     \
({                                  \
    union { typeof(*p) __val; char __c[1]; } __u =          \
        { .__c = { 0 } };                   \
                                    \
    switch (sizeof(*p)) {                       \
    case 1:                             \
        asm volatile ("ldarb %w0, %1"               \
            : "=r" (*(__u8_alias_t *)__u.__c)       \
            : "Q" (*p) : "memory");             \
        break;                          \
    case 2:                             \
        asm volatile ("ldarh %w0, %1"               \
            : "=r" (*(__u16_alias_t *)__u.__c)      \
            : "Q" (*p) : "memory");             \
        break;                          \
    case 4:                             \
        asm volatile ("ldar %w0, %1"                \
            : "=r" (*(__u32_alias_t *)__u.__c)      \
            : "Q" (*p) : "memory");             \
        break;                          \
    case 8:                             \
        asm volatile ("ldar %0, %1"             \
            : "=r" (*(__u64_alias_t *)__u.__c)      \
            : "Q" (*p) : "memory");             \
        break;                          \
    default:                            \
        /* Only to shut up gcc ... */               \
        mb();                           \
        break;                          \
    }                               \
    __u.__val;                          \
})

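/*
 * Illustrative sketch, not part of the original header: the acquire-side
 * counterpart to the release store in example_advance_tail() above.
 * `example_read_tail` is a name invented for this example.
 */
static inline unsigned long example_read_tail(void)
{
    /*
     * Loads issued after this acquire see at least everything the writer
     * stored before its smp_store_release() of example_ring_tail.
     */
    return smp_load_acquire(&example_ring_tail);
}
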
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */