/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 * MM Cache Management
 * ===================
 *
 * arch/arm64/mm/cache.S implements these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive; start
 * addresses should be rounded down, end addresses up.
 *
 * See Documentation/core-api/cachetlb.rst for more information. Please note
 * that the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 * VIPT I-cache.
 *
 * All functions below apply to the interval [start, end)
 *	- start  - virtual start address (inclusive)
 *	- end    - virtual end address (exclusive)
 *
 *	caches_clean_inval_pou(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache region
 *		to the Point of Unification.
 *
 *	caches_clean_inval_user_pou(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache region
 *		to the Point of Unification.
 *		Use only if the region might access user memory.
 *
 *	icache_inval_pou(start, end)
 *
 *		Invalidate I-cache region to the Point of Unification.
 *
 *	dcache_clean_inval_poc(start, end)
 *
 *		Clean and invalidate D-cache region to the Point of Coherency.
 *
 *	dcache_inval_poc(start, end)
 *
 *		Invalidate D-cache region to the Point of Coherency.
 *
 *	dcache_clean_poc(start, end)
 *
 *		Clean D-cache region to the Point of Coherency.
 *
 *	dcache_clean_pop(start, end)
 *
 *		Clean D-cache region to the Point of Persistence.
 *
 *	dcache_clean_pou(start, end)
 *
 *		Clean D-cache region to the Point of Unification.
 */
extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
extern void icache_inval_pou(unsigned long start, unsigned long end);
extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
extern void dcache_inval_poc(unsigned long start, unsigned long end);
extern void dcache_clean_poc(unsigned long start, unsigned long end);
extern void dcache_clean_pop(unsigned long start, unsigned long end);
extern void dcache_clean_pou(unsigned long start, unsigned long end);
extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
extern void sync_icache_aliases(unsigned long start, unsigned long end);

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	caches_clean_inval_pou(start, end);

	/*
	 * IPI all online CPUs so that they undergo a context synchronization
	 * event and are forced to refetch the new instructions.
	 */

	/*
	 * KGDB performs cache maintenance with interrupts disabled, so we
	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
	 * just means that KGDB will elide the maintenance altogether! As it
	 * turns out, KGDB uses IPIs to round up the secondary CPUs during
	 * the patching operation, so we don't need extra IPIs here anyway.
	 * In which case, add a KGDB-specific bodge and return early.
	 */
	if (in_dbg_master())
		return;

	kick_all_cpus_sync();
}
#define flush_icache_range flush_icache_range
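/*
 * Editor's usage sketch, not part of the upstream header: a caller that
 * has just written new instructions into a kernel buffer would make them
 * visible to the instruction stream roughly as below. The function name
 * and parameters are hypothetical; the block is kept under #if 0 so the
 * header still compiles unchanged.
 */
#if 0
static void example_sync_new_insns(void *insns, size_t size)
{
	unsigned long start = (unsigned long)insns;
	unsigned long end = start + size;

	/*
	 * Clean the D-cache and invalidate the I-cache for [start, end)
	 * to the Point of Unification, then IPI the other CPUs so they
	 * refetch the new instructions.
	 */
	flush_icache_range(start, end);
}
#endif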
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_to_user_page copy_to_user_page

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static __always_inline void icache_inval_all_pou(void)
{
	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		return;

	asm("ic	ialluis");
	dsb(ish);
}

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */
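/*
 * Editor's usage sketch, not part of the upstream header: bracketing a
 * transfer with a non-coherent device using the PoC helpers declared
 * above. The function name, parameters and the DMA step are hypothetical;
 * the block is kept under #if 0 so the file still compiles unchanged.
 */
#if 0
static void example_noncoherent_transfer(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf;
	unsigned long end = start + size;

	/* Push CPU writes out to the Point of Coherency before the device reads. */
	dcache_clean_poc(start, end);

	/* ... the device reads from and then writes into buf ... */

	/* Discard stale lines so subsequent CPU reads see the device's writes. */
	dcache_inval_poc(start, end);
}
#endif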