/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
                      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
                       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
                       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define __mb()  asm volatile("mfence":::"memory")
#define __rmb() asm volatile("lfence":::"memory")
#define __wmb() asm volatile("sfence" ::: "memory")
#endif
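
/*
 * Illustrative usage (a sketch, not part of the original header): the
 * classic consumer of wmb() is driver code that fills an in-memory
 * descriptor and then rings a device doorbell, so the device cannot
 * observe the doorbell write before the descriptor contents.  The
 * 'desc' and 'doorbell_reg' names below are hypothetical.
 *
 *    desc->addr = dma_addr;
 *    desc->len  = len;
 *    wmb();                        (order descriptor writes before the doorbell)
 *    writel(1, dev->doorbell_reg);
 */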

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *  bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
        unsigned long size)
{
    unsigned long mask;

    asm volatile ("cmp %1,%2; sbb %0,%0;"
            :"=r" (mask)
            :"g"(size),"r" (index)
            :"cc");
    return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
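
/*
 * Illustrative usage (a sketch, not part of the original header): the
 * generic array_index_nospec() helper in <linux/nospec.h> builds on this
 * mask to clamp an index so that, even when a mispredicted bounds check
 * is executed speculatively, the access stays inside the array.  The
 * 'table' and 'idx' names below are hypothetical.
 *
 *    if (idx < ARRAY_SIZE(table)) {
 *        idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *        val = table[idx];
 *    }
 */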

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
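
/*
 * Illustrative usage (a sketch, not part of the original header):
 * barrier_nospec() is placed after a check whose outcome must not be
 * speculated past, e.g. after validating a user-supplied pointer, so
 * dependent loads are not issued before the check completes.  The names
 * below are hypothetical.
 *
 *    if (!access_ok(uptr, len))
 *        return -EFAULT;
 *    barrier_nospec();
 *    raw_copy_from_user(dst, uptr, len);
 */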

#define __dma_rmb() barrier()
#define __dma_wmb() barrier()

#define __smp_mb()  asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")

#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)                   \
do {                                    \
    compiletime_assert_atomic_type(*p);             \
    barrier();                          \
    WRITE_ONCE(*p, v);                      \
} while (0)

#define __smp_load_acquire(p)                       \
({                                  \
    typeof(*p) ___p1 = READ_ONCE(*p);               \
    compiletime_assert_atomic_type(*p);             \
    barrier();                          \
    ___p1;                              \
})
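
/*
 * Illustrative usage (a sketch, not part of the original header): a
 * minimal producer/consumer handoff through the generic
 * smp_store_release()/smp_load_acquire() wrappers, assuming hypothetical
 * shared variables 'data' and 'ready'.  On x86 both sides reduce to a
 * plain access plus a compiler barrier(), because the hardware memory
 * model already forbids the reorderings these primitives rule out.
 *
 *    CPU 0 (producer)                 CPU 1 (consumer)
 *    data = 42;                       if (smp_load_acquire(&ready))
 *    smp_store_release(&ready, 1);        do_something(data);
 */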

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()   do { } while (0)
#define __smp_mb__after_atomic()    do { } while (0)

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
    asm volatile("mfence; lfence" : : : "memory");
}
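
/*
 * Illustrative usage (a sketch, not part of the original header): the
 * local APIC timer code issues this fence before arming the
 * non-serializing TSC-deadline MSR, so earlier stores are globally
 * visible before the deadline write takes effect.  'delta' below is
 * hypothetical.
 *
 *    weak_wrmsr_fence();
 *    wrmsrl(MSR_IA32_TSC_DEADLINE, rdtsc() + delta);
 */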

#endif /* _ASM_X86_BARRIER_H */