0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/fs.h>
0010 #include <linux/fcntl.h>
0011 #include <linux/kernel.h>
0012 #include <linux/linkage.h>
0013 #include <linux/export.h>
0014 #include <linux/sched.h>
0015 #include <linux/syscalls.h>
0016 #include <linux/mm.h>
0017 #include <linux/highmem.h>
0018 #include <linux/pagemap.h>
0019
0020 #include <asm/cacheflush.h>
0021 #include <asm/processor.h>
0022 #include <asm/cpu.h>
0023 #include <asm/cpu-features.h>
0024 #include <asm/setup.h>
0025 #include <asm/pgtable.h>
0026
0027
/*
 * Cache maintenance entry points.  These are function pointers rather
 * than direct calls because the implementation differs per CPU cache
 * variant; each pointer is filled in by the variant init routine
 * (r3k/r4k/octeon_cache_init) selected in cpu_cache_init() below.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* Per-page data-cache flush helpers; local_* variant flushes only the
 * calling CPU's cache. */
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations for non-coherent platforms, likewise installed by
 * the variant init routine. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif
0067
0068
0069
0070
0071
/*
 * cacheflush(2): make an instruction range written through the data
 * cache visible to instruction fetch (e.g. after generating code).
 *
 * @addr:  start of user virtual range
 * @bytes: length of range; zero is a successful no-op
 * @cache: cache selector — currently ignored; the whole range always
 *         gets icache treatment via __flush_icache_user_range()
 *
 * Returns 0 on success, -EFAULT if the range is not valid user memory.
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
0084
/*
 * Write back the data cache for @page.  If the page belongs to a file
 * mapping that nobody has mapped yet, the flush can be deferred: just
 * mark the page dirty in the dcache and let a later mapper deal with it.
 * Otherwise flush immediately through a kernel virtual address, taking a
 * temporary atomic mapping for highmem pages.
 */
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long kaddr;

	if (mapping && !mapping_mapped(mapping)) {
		/* Defer: remember the page needs flushing before use. */
		SetPageDcacheDirty(page);
		return;
	}

	if (PageHighMem(page)) {
		/* No permanent kernel mapping — create a transient one. */
		kaddr = (unsigned long)kmap_atomic(page);
		flush_data_cache_page(kaddr);
		kunmap_atomic((void *)kaddr);
	} else {
		flush_data_cache_page((unsigned long)page_address(page));
	}
}

EXPORT_SYMBOL(__flush_dcache_page);
0112
/*
 * Flush an anonymous page that is (or was) mapped at user address
 * @vmaddr.  Only needed when the kernel and user virtual addresses can
 * alias in a virtually indexed cache.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long kvaddr = (unsigned long) page_address(page);

	if (!pages_do_alias(kvaddr, vmaddr))
		return;

	if (page_mapcount(page) && !Page_dcache_dirty(page)) {
		/* Page is live in user space and not marked dirty in the
		 * dcache: flush through a mapping congruent with the user
		 * address so the right cache lines are hit. */
		void *coherent;

		coherent = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)coherent);
		kunmap_coherent();
	} else {
		flush_data_cache_page(kvaddr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
0130
/*
 * Called when a PTE is about to become (or has become) visible: if the
 * backing page was written through a kernel mapping while unmapped (it
 * carries the dcache-dirty flag set by __flush_dcache_page), flush it
 * now so the new user mapping at @address sees coherent data.
 */
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	/* Executable mappings need icache coherency too, unless the CPU
	 * fills the icache from the dcache by itself. */
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		/* Highmem pages have no permanent kernel mapping; take a
		 * transient atomic one for the flush. */
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		/* Flush when needed for execution, or when the kernel and
		 * user addresses may collide in a virtually indexed cache. */
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			kunmap_atomic((void *)addr);

		/* Deferred flush has now been performed (or proven
		 * unnecessary for this mapping). */
		ClearPageDcacheDirty(page);
	}
}
0156
/* Default cacheability attribute ORed into every page protection below;
 * set by platform/cache initialisation code before paging starts. */
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

/* Combine the default cache attribute with protection bits (p). */
#define PM(p) __pgprot(_page_cachable_default | (p))

/* Architecture page-protection table; filled by setup_protection_map().
 * DECLARE_VM_GET_PAGE_PROT generates vm_get_page_prot() reading this
 * table — presumably indexed by VM_* flag combinations; see pgtable.h. */
static pgprot_t protection_map[16] __ro_after_init;
DECLARE_VM_GET_PAGE_PROT
0164
/*
 * Populate protection_map[].  Indices 0-7 are private mappings, 8-15
 * shared (NOTE(review): index encoding assumed to be the usual
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combination — confirm against
 * DECLARE_VM_GET_PAGE_PROT).  Observe that no private entry (0-7) sets
 * _PAGE_WRITE even when nominally writable: the first write must fault
 * so copy-on-write can run.  Only shared writable entries (10, 11, 14,
 * 15) map the page writable outright.
 */
static inline void setup_protection_map(void)
{
	protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4] = PM(_PAGE_PRESENT);
	protection_map[5] = PM(_PAGE_PRESENT);
	protection_map[6] = PM(_PAGE_PRESENT);
	protection_map[7] = PM(_PAGE_PRESENT);

	protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				_PAGE_NO_READ);
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PM(_PAGE_PRESENT);
	protection_map[13] = PM(_PAGE_PRESENT);
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
0186
0187 #undef PM
0188
0189 void cpu_cache_init(void)
0190 {
0191 if (cpu_has_3k_cache) {
0192 extern void __weak r3k_cache_init(void);
0193
0194 r3k_cache_init();
0195 }
0196 if (cpu_has_4k_cache) {
0197 extern void __weak r4k_cache_init(void);
0198
0199 r4k_cache_init();
0200 }
0201
0202 if (cpu_has_octeon_cache) {
0203 extern void __weak octeon_cache_init(void);
0204
0205 octeon_cache_init();
0206 }
0207
0208 setup_protection_map();
0209 }