/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifdef CONFIG_ISA_ARCV2

/*
 * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
 * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
 *
 * Explicit barrier provided by DMB instruction
 *  - Operand supports fine grained load/store/load+store semantics
 *  - Ensures that selected memory operation issued before it will complete
 *    before any subsequent memory operation of same type
 *  - DMB guarantees SMP as well as local barrier semantics
 *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
 *    UP: barrier(), SMP: smp_*mb == *mb)
 *  - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not
 *    needed in the general case. Plus it only provides full barrier.
 */

#define mb()	asm volatile("dmb 3\n" : : : "memory")
#define rmb()	asm volatile("dmb 1\n" : : : "memory")
#define wmb()	asm volatile("dmb 2\n" : : : "memory")

#else

/*
 * ARCompact based cores (ARC700) only have SYNC instruction which is super
 * heavy weight as it flushes the pipeline as well.
 * There are no real SMP implementations of such cores.
 */

#define mb()	asm volatile("sync\n" : : : "memory")

#endif

#include <asm-generic/barrier.h>

#endif
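
To make the wmb()/rmb() pairing described in the comments concrete, below is a minimal, hypothetical producer/consumer sketch. The producer(), consumer(), shared_data, and shared_flag names are invented for illustration; real kernel code would normally use the smp_wmb()/smp_rmb() variants, which asm-generic/barrier.h maps onto these *mb() primitives on SMP builds (and down to barrier() on UP).

#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */

static int shared_data;
static int shared_flag;

/* Producer: the payload store must become visible before the flag store. */
static void producer(void)
{
	WRITE_ONCE(shared_data, 42);
	wmb();			/* "dmb 2" on ARCv2: orders store vs. store */
	WRITE_ONCE(shared_flag, 1);
}

/* Consumer: the flag load must be ordered before the payload load. */
static int consumer(void)
{
	while (!READ_ONCE(shared_flag))
		;		/* spin; READ_ONCE() keeps the load inside the loop */
	rmb();			/* "dmb 1" on ARCv2: orders load vs. load */
	return READ_ONCE(shared_data);	/* now guaranteed to observe 42 */
}

Without the wmb(), the HS38 store buffering the header comment mentions could make shared_flag visible before shared_data; without the matching rmb(), the consumer's loads could likewise complete out of order, so both sides of the pairing are required.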