/*
 * MIPS cache flushing interfaces.
 *
 * Most operations below are function pointers rather than direct calls,
 * so each CPU family can install its own implementation at boot.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cpu-features.h>

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(vma, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(page) flushes (writes back and invalidates) a page
 *    for the data cache
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 */
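
/*
 * PG_arch_1 is reused as a "dcache dirty" flag: it marks pages whose data
 * may still be sitting unwritten in the D-cache, which lets
 * flush_dcache_page() defer the actual writeback until coherence matters.
 */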
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

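/*
 * On virtually indexed D-caches a page can alias in the cache, so it must
 * be flushed immediately.  Otherwise the flush is deferred by marking the
 * page dirty, unless the I-cache is filled from the D-cache
 * (cpu_has_ic_fills_f_dc), in which case nothing needs to be done at all.
 */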
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_page(page);
	else if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

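/*
 * Anonymous pages may be mapped at a user address that aliases with the
 * kernel mapping, so they need an explicit flush on aliasing D-caches.
 */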
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

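/*
 * I-cache coherence for newly mapped pages is covered by the deferred
 * PG_dcache_dirty handling above, so this can be a no-op.
 */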
static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}

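/*
 * The I-cache range flushes come in kernel- and user-address flavours,
 * each with a local (current CPU only) and an SMP-wide variant.  A typical
 * use after writing instructions, e.g. in a JIT (sketch):
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */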
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
	unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
	unsigned long end);

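/*
 * vmap/vunmap ranges only need cache maintenance when the D-cache can
 * alias; the hooks below take no range argument, so in that case the
 * whole cache is flushed.
 */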
extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

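/*
 * copy_{to,from}_user_page() keep the caches coherent when the kernel
 * reads or writes a user page through a kernel mapping (e.g. ptrace).
 */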
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

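/* Flush the entire I-cache, or a single page of the D-cache by address. */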
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void *addr);
extern void (*flush_data_cache_page)(unsigned long addr);

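/* Run a kernel function from uncached space, e.g. for cache probing. */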
unsigned long run_uncached(void *func);

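/*
 * kmap_coherent() maps a page at a kernel virtual address that does not
 * alias with its user-space mapping, so accesses hit the same cache lines
 * the user sees.  A typical use (sketch):
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 */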
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1

extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

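/*
 * There is no cheaper invalidate-only primitive here, so invalidation
 * shares the writeback-and-invalidate implementation above.
 */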
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

#endif /* _ASM_CACHEFLUSH_H */