/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

#define __nops(n)   ".rept  " #n "\nnop\n.endr\n"
#define nops(n)     asm volatile(__nops(n))
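
/*
 * Illustrative note (not part of the original header): __nops() stringizes
 * its argument into an assembler .rept block, so nops(3) expands to
 * asm volatile(".rept  3\nnop\n.endr\n"), i.e. exactly three NOP
 * instructions with no compiler-visible side effects or memory clobber.
 */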

#define sev()       asm volatile("sev" : : : "memory")
#define wfe()       asm volatile("wfe" : : : "memory")
#define wfet(val)   asm volatile("msr s0_3_c1_c0_0, %0" \
                     : : "r" (val) : "memory")
#define wfi()       asm volatile("wfi" : : : "memory")
#define wfit(val)   asm volatile("msr s0_3_c1_c0_1, %0" \
                     : : "r" (val) : "memory")

#define isb()       asm volatile("isb" : : : "memory")
#define dmb(opt)    asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)    asm volatile("dsb " #opt : : : "memory")
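
/*
 * Illustrative sketch (not in the original header): a minimal event-wait
 * pairing built only from the primitives above. The flag and both helpers
 * are hypothetical; real code would normally prefer the higher-level
 * smp_cond_load_acquire() defined later in this file.
 *
 *    static int hypothetical_flag;
 *
 *    static void waiter(void)
 *    {
 *        while (!READ_ONCE(hypothetical_flag))
 *            wfe();          // sleep until an event; may wake spuriously
 *        dmb(ishld);         // order the flag read before later accesses
 *    }
 *
 *    static void waker(void)
 *    {
 *        WRITE_ONCE(hypothetical_flag, 1);
 *        dsb(ishst);         // complete the store before signalling
 *        sev();              // wake any CPUs waiting in wfe()
 *    }
 */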

#define psb_csync() asm volatile("hint #17" : : : "memory")
#define __tsb_csync()   asm volatile("hint #18" : : : "memory")
#define csdb()      asm volatile("hint #20" : : : "memory")

/*
 * Data Gathering Hint:
 * This instruction prevents merging memory accesses with Normal-NC or
 * Device-GRE attributes before the hint instruction with any memory accesses
 * appearing after the hint instruction.
 */
#define dgh()       asm volatile("hint #6" : : : "memory")

#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()                      \
    do {                            \
        extern struct static_key_false gic_pmr_sync;    \
                                \
        if (static_branch_unlikely(&gic_pmr_sync))  \
            dsb(sy);                \
    } while(0)
#else
#define pmr_sync()  do {} while (0)
#endif

#define __mb()      dsb(sy)
#define __rmb()     dsb(ld)
#define __wmb()     dsb(st)

#define __dma_mb()  dmb(osh)
#define __dma_rmb() dmb(oshld)
#define __dma_wmb() dmb(oshst)

#define io_stop_wc()    dgh()
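
/*
 * Illustrative sketch (not in the original header): io_stop_wc() maps to
 * dgh() above and stops earlier Normal-NC / Device-GRE writes from being
 * merged with later ones. A hypothetical driver pushing two descriptor
 * words through a write-combining window might use it like this:
 *
 *    static void hypothetical_push_desc(u64 __iomem *wc_win, u64 d0, u64 d1)
 *    {
 *        __raw_writeq(d0, wc_win);
 *        __raw_writeq(d1, wc_win + 1);
 *        io_stop_wc();    // don't merge the writes above with later ones
 *    }
 */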

#define tsb_csync()                             \
    do {                                    \
        /*                              \
         * CPUs affected by Arm Erratum 2054223 or 2067961 need     \
         * another TSB to ensure the trace is flushed. The barriers \
         * don't have to be strictly back to back, as long as the   \
         * CPU is in trace prohibited state.                \
         */                             \
        if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))    \
            __tsb_csync();                      \
        __tsb_csync();                          \
    } while (0)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
                            unsigned long sz)
{
    unsigned long mask;

    asm volatile(
    "   cmp %1, %2\n"
    "   sbc %0, xzr, xzr\n"
    : "=r" (mask)
    : "r" (idx), "Ir" (sz)
    : "cc");

    csdb();
    return mask;
}
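
/*
 * Illustrative sketch (not part of this file): this mask is what the generic
 * array_index_nospec() helper in <linux/nospec.h> applies to clamp an index
 * under speculation. A hypothetical bounds-checked lookup:
 *
 *    static int hypothetical_lookup(const int *table, unsigned long nr,
 *                                   unsigned long idx)
 *    {
 *        if (idx >= nr)
 *            return -EINVAL;
 *        // mask is 0 if we got here speculatively with idx >= nr
 *        idx &= array_index_mask_nospec(idx, nr);
 *        return table[idx];
 *    }
 */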

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
 */
#define arch_counter_enforce_ordering(val) do {             \
    u64 tmp, _val = (val);                      \
                                    \
    asm volatile(                           \
    "   eor %0, %1, %1\n"                   \
    "   add %0, sp, %0\n"                   \
    "   ldr xzr, [%0]"                  \
    : "=r" (tmp) : "r" (_val));                 \
} while (0)
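
/*
 * Illustrative note (not from the original file): the eor of %1 with itself
 * always produces zero, but it makes the dummy load's address depend on the
 * counter value, so the load, and anything ordered after it, cannot be
 * issued before the counter read completes. A hypothetical raw reader
 * (real readers such as __arch_counter_get_cntvct() also place an isb()
 * before the mrs so the read itself is not speculated early):
 *
 *    static inline u64 hypothetical_read_cntvct(void)
 *    {
 *        u64 cnt;
 *
 *        isb();
 *        asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
 *        arch_counter_enforce_ordering(cnt);
 *        return cnt;
 *    }
 */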

#define __smp_mb()  dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)

#define __smp_store_release(p, v)                   \
do {                                    \
    typeof(p) __p = (p);                        \
    union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =  \
        { .__val = (__force __unqual_scalar_typeof(*p)) (v) };  \
    compiletime_assert_atomic_type(*p);             \
    kasan_check_write(__p, sizeof(*p));             \
    switch (sizeof(*p)) {                       \
    case 1:                             \
        asm volatile ("stlrb %w1, %0"               \
                : "=Q" (*__p)               \
                : "r" (*(__u8 *)__u.__c)        \
                : "memory");                \
        break;                          \
    case 2:                             \
        asm volatile ("stlrh %w1, %0"               \
                : "=Q" (*__p)               \
                : "r" (*(__u16 *)__u.__c)       \
                : "memory");                \
        break;                          \
    case 4:                             \
        asm volatile ("stlr %w1, %0"                \
                : "=Q" (*__p)               \
                : "r" (*(__u32 *)__u.__c)       \
                : "memory");                \
        break;                          \
    case 8:                             \
        asm volatile ("stlr %1, %0"             \
                : "=Q" (*__p)               \
                : "r" (*(__u64 *)__u.__c)       \
                : "memory");                \
        break;                          \
    }                               \
} while (0)
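
/*
 * Illustrative sketch (not in the original header): on arm64 the release
 * store above is a single STLR, so publishing data needs no separate write
 * barrier. Hypothetical producer side of a one-slot message:
 *
 *    struct hypothetical_msg {
 *        int payload;
 *        int ready;
 *    };
 *
 *    static void publish(struct hypothetical_msg *m, int val)
 *    {
 *        m->payload = val;                     // plain store
 *        smp_store_release(&m->ready, 1);      // STLR orders payload before ready
 *    }
 */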

#define __smp_load_acquire(p)                       \
({                                  \
    union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;   \
    typeof(p) __p = (p);                        \
    compiletime_assert_atomic_type(*p);             \
    kasan_check_read(__p, sizeof(*p));              \
    switch (sizeof(*p)) {                       \
    case 1:                             \
        asm volatile ("ldarb %w0, %1"               \
            : "=r" (*(__u8 *)__u.__c)           \
            : "Q" (*__p) : "memory");           \
        break;                          \
    case 2:                             \
        asm volatile ("ldarh %w0, %1"               \
            : "=r" (*(__u16 *)__u.__c)          \
            : "Q" (*__p) : "memory");           \
        break;                          \
    case 4:                             \
        asm volatile ("ldar %w0, %1"                \
            : "=r" (*(__u32 *)__u.__c)          \
            : "Q" (*__p) : "memory");           \
        break;                          \
    case 8:                             \
        asm volatile ("ldar %0, %1"             \
            : "=r" (*(__u64 *)__u.__c)          \
            : "Q" (*__p) : "memory");           \
        break;                          \
    }                               \
    (typeof(*p))__u.__val;                      \
})
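
/*
 * Illustrative sketch (not in the original header): the consumer matching
 * the hypothetical publish() above. If the LDAR observes ready == 1, the
 * payload store is guaranteed to be observed as well:
 *
 *    static int consume(struct hypothetical_msg *m, int *out)
 *    {
 *        if (!smp_load_acquire(&m->ready))     // LDAR
 *            return -EAGAIN;
 *        *out = m->payload;                    // ordered after the acquire
 *        return 0;
 *    }
 */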

#define smp_cond_load_relaxed(ptr, cond_expr)               \
({                                  \
    typeof(ptr) __PTR = (ptr);                  \
    __unqual_scalar_typeof(*ptr) VAL;               \
    for (;;) {                          \
        VAL = READ_ONCE(*__PTR);                \
        if (cond_expr)                      \
            break;                      \
        __cmpwait_relaxed(__PTR, VAL);              \
    }                               \
    (typeof(*ptr))VAL;                      \
})

#define smp_cond_load_acquire(ptr, cond_expr)               \
({                                  \
    typeof(ptr) __PTR = (ptr);                  \
    __unqual_scalar_typeof(*ptr) VAL;               \
    for (;;) {                          \
        VAL = smp_load_acquire(__PTR);              \
        if (cond_expr)                      \
            break;                      \
        __cmpwait_relaxed(__PTR, VAL);              \
    }                               \
    (typeof(*ptr))VAL;                      \
})
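
/*
 * Illustrative sketch (not in the original header): both helpers poll via
 * __cmpwait_relaxed(), which parks the CPU in WFE on the exclusive monitor
 * rather than spinning hot. A hypothetical blocking wait on the message
 * slot from the earlier examples:
 *
 *    static int wait_for_msg(struct hypothetical_msg *m)
 *    {
 *        smp_cond_load_acquire(&m->ready, VAL != 0);
 *        return m->payload;    // ordered after the acquire load
 *    }
 */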

#include <asm-generic/barrier.h>

#endif  /* __ASSEMBLY__ */

#endif  /* __ASM_BARRIER_H */