/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>

static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb

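/*
 * Illustrative sketch, not taken from the original header: the classic
 * producer/consumer pairing of wmb() and rmb(). The data/ready variables
 * and the compute()/consume() helpers are hypothetical; real kernel code
 * would normally use the smp_*() variants plus READ_ONCE()/WRITE_ONCE().
 *
 *	int data, ready;
 *
 *	void producer(void)
 *	{
 *		data = compute();
 *		wmb();			// order the data store before the flag store
 *		ready = 1;
 *	}
 *
 *	void consumer(void)
 *	{
 *		while (!ready)
 *			cpu_relax();
 *		rmb();			// order the flag load before the data load
 *		consume(data);
 *	}
 */
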
#define fast_mb()	__sync()

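/*
 * __fast_iob() nudges the CPU write buffer to drain: the uncached load from
 * KSEG1 should not complete until previously buffered writes have been
 * pushed out to the bus.
 */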
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004))	\
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */
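
/*
 * Illustrative sketch (the ctrl_reg pointer and CTRL_RESET value are
 * hypothetical): iob() is typically used to make sure a write to a device
 * register has left the write buffer before the CPU moves on, e.g. before
 * timing a delay against the device.
 *
 *	writel(CTRL_RESET, ctrl_reg);
 *	iob();			// drain the write buffer before waiting
 *	udelay(10);
 */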

#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif

/*
 * When LL/SC does imply order, it must also be a compiler barrier to keep the
 * compiler from reordering where the CPU will not. When it does not imply
 * order, the compiler is free to reorder across the LL/SC loop and ordering
 * will be provided by smp_llsc_mb() and friends. (An illustrative use of
 * these macros is sketched after the #if/#endif block below.)
 */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
# define __WEAK_LLSC_MB		sync
# define smp_llsc_mb() \
	__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
# define __LLSC_CLOBBER
#else
# define __WEAK_LLSC_MB
# define smp_llsc_mb()		do { } while (0)
# define __LLSC_CLOBBER		"memory"
#endif
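
/*
 * Illustrative sketch, loosely modelled on how asm/atomic.h and asm/cmpxchg.h
 * use these macros (the function name and operands are hypothetical): an
 * LL/SC loop passes __LLSC_CLOBBER as its clobber list, and callers that need
 * the result to be ordered follow the loop with smp_llsc_mb().
 *
 *	static inline int example_fetch_add(int i, int *p)
 *	{
 *		int old, tmp;
 *
 *		__asm__ __volatile__(
 *		"1:	ll	%0, %2		\n"
 *		"	addu	%1, %0, %3	\n"
 *		"	sc	%1, %2		\n"
 *		"	beqz	%1, 1b		\n"
 *		: "=&r" (old), "=&r" (tmp), "+m" (*p)
 *		: "r" (i)
 *		: __LLSC_CLOBBER);
 *
 *		smp_llsc_mb();	// completion barrier on weakly reordered LL/SC
 *		return old;
 *	}
 */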

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif
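
/*
 * Illustrative sketch (job_ready is hypothetical): nudge_writes() is useful
 * after a store that another CPU is busy-waiting on, to push the write out
 * of the store buffer promptly.
 *
 *	WRITE_ONCE(job_ready, 1);
 *	nudge_writes();		// make the store visible to the polling CPU quickly
 */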

/*
 * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
 * a completion barrier immediately preceding the LL instruction. Therefore we
 * can skip emitting a barrier from __smp_mb__before_atomic().
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
# define __smp_mb__before_atomic()
#else
# define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#endif

#define __smp_mb__after_atomic()	smp_llsc_mb()
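
/*
 * Illustrative sketch of the generic helpers these back (obj is hypothetical,
 * after the example in Documentation/atomic_t.txt): smp_mb__before_atomic()
 * and smp_mb__after_atomic() add full ordering around atomic ops that do not
 * return a value and hence provide no ordering of their own.
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	// order the store above before the RMW below
 *	atomic_dec(&obj->ref_count);
 */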

static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */