/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()   asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */

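/*
 * Illustrative sketch only (not part of this header): an architecture's
 * asm/barrier.h might supply the __ prefixed primitives itself, e.g. on a
 * hypothetical x86-like port, and then rely on the wrappers below to add the
 * KCSAN instrumentation around them.
 */
#if 0	/* example, never compiled */
#define __mb()	asm volatile("mfence" ::: "memory")
#define __rmb()	asm volatile("lfence" ::: "memory")
#define __wmb()	asm volatile("sfence" ::: "memory")
#include <asm-generic/barrier.h>
#endif
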
#ifdef __mb
#define mb()    do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()   do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb()   do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_mb
#define dma_mb()    do { kcsan_mb(); __dma_mb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb()   do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb()   do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()    barrier()
#endif

#ifndef rmb
#define rmb()   mb()
#endif

#ifndef wmb
#define wmb()   mb()
#endif

#ifndef dma_mb
#define dma_mb()    mb()
#endif

#ifndef dma_rmb
#define dma_rmb()   rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()   wmb()
#endif

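/*
 * Illustrative sketch only: a driver-style use of the mandatory barriers
 * above when talking to a device, which is needed even on UP. The structure,
 * field, and register names are hypothetical.
 */
#if 0	/* example, never compiled */
static void hypothetical_post_buffer(struct hypothetical_dev *dev, u64 addr)
{
	dev->desc->buf_addr = addr;	/* fill in the DMA descriptor ... */
	dma_wmb();			/* ... and make it visible to the device ... */
	dev->desc->valid = 1;		/* ... before marking it valid */
	/* writel() itself orders the doorbell MMIO after the stores above */
	writel(1, dev->mmio + HYPOTHETICAL_DOORBELL);
}
#endif
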
#ifndef __smp_mb
#define __smp_mb()  mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb() rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb() wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()    do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb()   do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb()   do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else   /* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()    barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()   barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()   barrier()
#endif

#endif  /* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()   __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()    __smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)                   \
do {                                    \
    compiletime_assert_atomic_type(*p);             \
    __smp_mb();                         \
    WRITE_ONCE(*p, v);                      \
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)                       \
({                                  \
    __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);       \
    compiletime_assert_atomic_type(*p);             \
    __smp_mb();                         \
    (typeof(*p))___p1;                      \
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()  do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else   /* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()  barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)                     \
do {                                    \
    compiletime_assert_atomic_type(*p);             \
    barrier();                          \
    WRITE_ONCE(*p, v);                      \
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)                     \
({                                  \
    __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);       \
    compiletime_assert_atomic_type(*p);             \
    barrier();                          \
    (typeof(*p))___p1;                      \
})
#endif

#endif  /* CONFIG_SMP */

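/*
 * Illustrative sketch only: message passing with the release/acquire pair
 * defined above. The structure and function names are hypothetical.
 */
#if 0	/* example, never compiled */
struct message {
	int payload;
	int ready;
};

static void publish(struct message *m, int v)
{
	m->payload = v;				/* plain store to the data */
	smp_store_release(&m->ready, 1);	/* orders the payload store before the flag */
}

static int try_consume(struct message *m)
{
	if (smp_load_acquire(&m->ready))	/* orders the flag load before the payload load */
		return m->payload;
	return -1;
}
#endif
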
/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order; the additional RMB
 * provides LOAD->LOAD order. Together they provide LOAD->{LOAD,STORE} order,
 * aka (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()       smp_rmb()
#endif
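
/*
 * Illustrative sketch only: a busy-wait whose control dependency is upgraded
 * to ACQUIRE, as described above. The function name and @flag are hypothetical.
 */
#if 0	/* example, never compiled */
static void wait_for_flag(int *flag)
{
	while (!READ_ONCE(*flag))
		cpu_relax();
	smp_acquire__after_ctrl_dep();	/* later loads/stores cannot be hoisted above the loop */
}
#endif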

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({        \
    typeof(ptr) __PTR = (ptr);              \
    __unqual_scalar_typeof(*ptr) VAL;           \
    for (;;) {                      \
        VAL = READ_ONCE(*__PTR);            \
        if (cond_expr)                  \
            break;                  \
        cpu_relax();                    \
    }                           \
    (typeof(*ptr))VAL;                  \
})
#endif
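
/*
 * Illustrative sketch only: spin until a hypothetical @state variable becomes
 * non-zero. VAL names the freshly loaded value inside the condition.
 */
#if 0	/* example, never compiled */
static unsigned int wait_for_state(unsigned int *state)
{
	return smp_cond_load_relaxed(state, VAL != 0);	/* no ordering implied */
}
#endif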

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({        \
    __unqual_scalar_typeof(*ptr) _val;          \
    _val = smp_cond_load_relaxed(ptr, cond_expr);       \
    smp_acquire__after_ctrl_dep();              \
    (typeof(*ptr))_val;                 \
})
#endif
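
/*
 * Illustrative sketch only: waiting for a hypothetical lock handoff word with
 * ACQUIRE semantics, so the critical section cannot be hoisted above the wait.
 */
#if 0	/* example, never compiled */
static void wait_for_handoff(int *locked)
{
	smp_cond_load_acquire(locked, VAL);	/* spin until non-zero, then ACQUIRE */
}
#endif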

/*
 * pmem_wmb() ensures that all stores whose modifications are written to
 * persistent storage by preceding instructions have updated persistent
 * storage before any data access or data transfer caused by subsequent
 * instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()  wmb()
#endif

/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
 * this kind of memory access, the CPU may wait for prior accesses to be
 * merged with subsequent ones. In some situations such waiting hurts
 * performance. io_stop_wc() can be used to prevent the merging of
 * write-combining memory accesses before this macro with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif
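
/*
 * Illustrative sketch only: two batches of writes to a hypothetical
 * write-combining mapping, with io_stop_wc() keeping the first batch from
 * being merged with the second. All names and sizes are hypothetical.
 */
#if 0	/* example, never compiled */
static void post_two_batches(void __iomem *wc_base, const void *cmd, const void *data, size_t len)
{
	memcpy_toio(wc_base, cmd, 64);		/* wc_base obtained from ioremap_wc() */
	io_stop_wc();				/* do not merge with the writes below */
	memcpy_toio(wc_base + 64, data, len);
}
#endif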

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */