/* Extracted from the Linux kernel source tree: include/linux/cache.h */
0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef __LINUX_CACHE_H
0003 #define __LINUX_CACHE_H
0004 
0005 #include <uapi/linux/kernel.h>
0006 #include <asm/cache.h>
0007 
0008 #ifndef L1_CACHE_ALIGN
0009 #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
0010 #endif
0011 
0012 #ifndef SMP_CACHE_BYTES
0013 #define SMP_CACHE_BYTES L1_CACHE_BYTES
0014 #endif
0015 
0016 /*
0017  * __read_mostly is used to keep rarely changing variables out of frequently
0018  * updated cachelines. Its use should be reserved for data that is used
0019  * frequently in hot paths. Performance traces can help decide when to use
0020  * this. You want __read_mostly data to be tightly packed, so that in the
0021  * best case multiple frequently read variables for a hot path will be next
0022  * to each other in order to reduce the number of cachelines needed to
0023  * execute a critical path. We should be mindful and selective of its use.
0024  * ie: if you're going to use it please supply a *good* justification in your
0025  * commit log
0026  */
0027 #ifndef __read_mostly
0028 #define __read_mostly
0029 #endif
0030 
0031 /*
0032  * __ro_after_init is used to mark things that are read-only after init (i.e.
0033  * after mark_rodata_ro() has been called). These are effectively read-only,
0034  * but may get written to during init, so can't live in .rodata (via "const").
0035  */
0036 #ifndef __ro_after_init
0037 #define __ro_after_init __section(".data..ro_after_init")
0038 #endif
0039 
0040 #ifndef ____cacheline_aligned
0041 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
0042 #endif
0043 
0044 #ifndef ____cacheline_aligned_in_smp
0045 #ifdef CONFIG_SMP
0046 #define ____cacheline_aligned_in_smp ____cacheline_aligned
0047 #else
0048 #define ____cacheline_aligned_in_smp
0049 #endif /* CONFIG_SMP */
0050 #endif
0051 
0052 #ifndef __cacheline_aligned
0053 #define __cacheline_aligned                 \
0054   __attribute__((__aligned__(SMP_CACHE_BYTES),          \
0055          __section__(".data..cacheline_aligned")))
0056 #endif /* __cacheline_aligned */
0057 
0058 #ifndef __cacheline_aligned_in_smp
0059 #ifdef CONFIG_SMP
0060 #define __cacheline_aligned_in_smp __cacheline_aligned
0061 #else
0062 #define __cacheline_aligned_in_smp
0063 #endif /* CONFIG_SMP */
0064 #endif
0065 
0066 /*
0067  * The maximum alignment needed for some critical structures
0068  * These could be inter-node cacheline sizes/L3 cacheline
0069  * size etc.  Define this in asm/cache.h for your arch
0070  */
0071 #ifndef INTERNODE_CACHE_SHIFT
0072 #define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
0073 #endif
0074 
0075 #if !defined(____cacheline_internodealigned_in_smp)
0076 #if defined(CONFIG_SMP)
0077 #define ____cacheline_internodealigned_in_smp \
0078     __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
0079 #else
0080 #define ____cacheline_internodealigned_in_smp
0081 #endif
0082 #endif
0083 
0084 #ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
0085 #define cache_line_size()   L1_CACHE_BYTES
0086 #endif
0087 
0088 #endif /* __LINUX_CACHE_H */