/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
#define _TOOLS_LINUX_ASM_X86_BARRIER_H

/*
 * Copied from the Linux kernel sources; the code was also moved out of
 * tools/perf/perf-sys.h so that it lives in a place similar to where it
 * sits in the kernel sources.
 *
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#if defined(__i386__)
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()    asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb()   asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb()   asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
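/*
 * mfence orders both loads and stores, lfence orders loads, and sfence
 * orders stores.
 */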
#define mb()    asm volatile("mfence" ::: "memory")
#define rmb()   asm volatile("lfence" ::: "memory")
#define wmb()   asm volatile("sfence" ::: "memory")
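/*
 * x86 is strongly ordered (TSO): loads are not reordered with other loads
 * and stores are not reordered with other stores, so SMP read/write
 * ordering only needs to stop the compiler.
 */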
#define smp_rmb() barrier()
#define smp_wmb() barrier()
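/*
 * A locked read-modify-write on the stack acts as a full barrier and is
 * generally cheaper than mfence; the -132 offset keeps the access below
 * the 128-byte red zone that user-space code may have live under %rsp.
 */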
#define smp_mb()  asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#endif

#if defined(__x86_64__)
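/*
 * On x86 a plain store already has release semantics and a plain load
 * already has acquire semantics, so a compiler barrier is all that is
 * needed to keep the compiler from reordering around the access.
 */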
#define smp_store_release(p, v)                 \
do {                                            \
        barrier();                              \
        WRITE_ONCE(*p, v);                      \
} while (0)

#define smp_load_acquire(p)                     \
({                                              \
        typeof(*p) ___p1 = READ_ONCE(*p);       \
        barrier();                              \
        ___p1;                                  \
})
#endif /* defined(__x86_64__) */
#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
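
A minimal usage sketch (not part of the header): on x86_64, smp_store_release() and smp_load_acquire() pair up to publish data from a producer to a consumer, much as the perf ring buffer pairs them for its head and tail indices. The struct ring type and the ring_write()/ring_read() helpers below are illustrative assumptions, and the example assumes the tools/include definitions of barrier(), READ_ONCE() and WRITE_ONCE() are on the include path.

#include <linux/compiler.h>     /* barrier(), READ_ONCE(), WRITE_ONCE() */
#include <asm/barrier.h>        /* smp_store_release(), smp_load_acquire() */

#define RING_SIZE 256

struct ring {
        unsigned long head;     /* next slot to write, advanced by the producer */
        unsigned long tail;     /* next slot to read, advanced by the consumer  */
        int data[RING_SIZE];
};

/* Producer: fill the slot first, then publish it with a release store. */
static int ring_write(struct ring *r, int value)
{
        unsigned long head = r->head;

        if (head - smp_load_acquire(&r->tail) >= RING_SIZE)
                return -1;                              /* full */

        r->data[head % RING_SIZE] = value;
        smp_store_release(&r->head, head + 1);          /* publish the slot */
        return 0;
}

/* Consumer: acquire the head so the slot's data is visible before reading it. */
static int ring_read(struct ring *r, int *value)
{
        unsigned long tail = r->tail;

        if (tail == smp_load_acquire(&r->head))
                return -1;                              /* empty */

        *value = r->data[tail % RING_SIZE];
        smp_store_release(&r->tail, tail + 1);          /* free the slot */
        return 0;
}

The release store on head guarantees that the data write is visible to a reader that observes the new head via the acquire load, which is exactly the ordering these macros exist to provide.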