/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

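/*
 * abiv1 CPUs have a virtually indexed data cache that can alias, so
 * most helpers below fall back to a whole-cache write-back + invalidate
 * instead of per-line maintenance.  flush_dcache_page() is implemented
 * out of line.
 */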
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_cache_mm(mm) dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn) cache_wbinv_all()
#define flush_cache_dup_mm(mm) cache_wbinv_all()

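/*
 * Serialize flush_dcache_page() against concurrent page-cache updates
 * by taking the mapping's XArray lock.
 */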
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)

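/*
 * Virtually mapped (vmap/vmalloc) ranges may alias the linear mapping,
 * so both helpers simply write back and invalidate the whole D-cache.
 */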
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

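/*
 * An anonymous page may also be reached through a kernel alias (e.g.
 * via get_user_pages()), so flush before the kernel touches it.
 */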
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}

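/*
 * flush_cache_range() cannot rely on cache_wbinv_range() when
 * vma->vm_mm is not the current mm, so its out-of-line implementation
 * falls back to a full write-back + invalidate.
 */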
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end);
#define flush_cache_vmap(start, end) cache_wbinv_all()
#define flush_cache_vunmap(start, end) cache_wbinv_all()

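/*
 * I-cache maintenance: flush_icache_page() is a no-op here, while the
 * range helpers write back and invalidate the given virtual range so
 * that newly written instructions become visible to the I-cache.
 */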
#define flush_icache_page(vma, page) do {} while (0)
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
#define flush_icache_deferred(mm) do {} while (0)

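/*
 * Used by access_process_vm() and ptrace: copying *from* a user page
 * needs no extra maintenance, while copying *to* one (e.g. inserting a
 * breakpoint) must flush so the target task sees the new instructions.
 */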
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */