/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copied from the kernel sources:
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")

#if defined(__powerpc64__)
#define smp_lwsync()	__asm__ __volatile__ ("lwsync" : : : "memory")

#define smp_store_release(p, v)			\
do {						\
	smp_lwsync();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	smp_lwsync();				\
	___p1;					\
})
#endif /* defined(__powerpc64__) */
#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */
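
A minimal usage sketch of the release/acquire pair defined above. It shows the classic message-passing pattern these macros exist for: the producer's lwsync orders the payload store before the flag store, and the consumer's lwsync orders the flag load before the payload load. This is an illustration, not part of the header: the names payload, ready, producer and consumer are hypothetical, and it assumes a powerpc64 build where this header is reachable via the tools include path and READ_ONCE()/WRITE_ONCE() are provided by the tools tree's linux/compiler.h.

/* Hypothetical sketch: publish a value from one thread to another
 * using smp_store_release()/smp_load_acquire() from this header. */
#include <pthread.h>
#include <stdio.h>
#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() (tools tree) */
#include <asm/barrier.h>	/* this header, via the tools include path */

static int payload;	/* data being handed off (hypothetical name) */
static int ready;	/* publication flag (hypothetical name) */

static void *producer(void *arg)
{
	payload = 42;			/* plain store */
	smp_store_release(&ready, 1);	/* lwsync: payload visible before ready */
	return NULL;
}

static void *consumer(void *arg)
{
	while (!smp_load_acquire(&ready))	/* lwsync: ready read before payload */
		;				/* spin until published */
	printf("%d\n", payload);		/* observes 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Note that lwsync suffices here because release/acquire never needs to order a store against a later load; that stronger store-load ordering is what the full sync in mb() above provides.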