/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
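/*
 * Illustrative sketch, not part of the original header: what a direct
 * membar_safe() caller expands to.  For a hypothetical membar_safe("#Sync")
 * the emitted sequence is
 *
 *	ba,pt	%xcc, 1f
 *	 membar	#Sync
 *	1:
 *
 * i.e. an always-taken, predicted-taken branch whose delay slot holds the
 * membar, which is exactly the shape the erratum workaround above requires.
 */
#if 0	/* example only, not compiled */
static inline void example_full_sync(void)	/* hypothetical helper */
{
	membar_safe("#Sync");
}
#endif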

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")
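/*
 * Illustrative sketch, not part of the original header: under TSO only
 * store-then-load reordering can happen in hardware, so mb() emits a real
 * membar #StoreLoad while rmb()/wmb() only need to stop the compiler from
 * reordering.  The flag/data variables below are hypothetical.
 */
#if 0	/* example only, not compiled */
static inline int example_store_then_load(int *flag, int *data)
{
	WRITE_ONCE(*flag, 1);		/* store ...                          */
	mb();				/* ... must be ordered before ...     */
	return READ_ONCE(*data);	/* ... this load (Dekker-style test)  */
}
#endif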

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
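/*
 * Illustrative sketch, not part of the original header: the message-passing
 * pattern these helpers exist for, written against the smp_store_release()/
 * smp_load_acquire() wrappers that asm-generic/barrier.h builds from the
 * __smp_* macros above.  Because sparc64 runs TSO, a compiler barrier() is
 * all the macros themselves need.  The data/ready variables are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_data;
static int example_ready;

static void example_producer(int val)
{
	example_data = val;
	/* make example_data visible before the flag */
	smp_store_release(&example_ready, 1);
}

static int example_consumer(void)
{
	/* a non-zero flag guarantees example_data above is visible */
	if (smp_load_acquire(&example_ready))
		return example_data;
	return -1;
}
#endif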

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
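/*
 * Illustrative sketch, not part of the original header: the usual shape of
 * smp_mb__before_atomic()/smp_mb__after_atomic() callers, which pair the
 * hooks with a non-value-returning atomic op.  On sparc64 the hooks reduce
 * to barrier() as defined above.  The object layout is hypothetical.
 */
#if 0	/* example only, not compiled */
struct example_obj {
	int		dead;
	atomic_t	refcount;
};

static void example_mark_dead(struct example_obj *obj)
{
	obj->dead = 1;
	smp_mb__before_atomic();	/* order the store above before the dec */
	atomic_dec(&obj->refcount);
}
#endif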

#include <asm-generic/barrier.h>

#endif /* !(__SPARC64_BARRIER_H) */