

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H


#include <linux/mm.h>




0012
0013 #define flush_cache_all() do { } while (0)
0014 #define flush_cache_mm(mm) do { } while (0)
0015 #define flush_cache_dup_mm(mm) do { } while (0)
0016 #define flush_cache_range(vma, start, end) do { } while (0)
0017 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
0018
0019 #define PG_dcache_clean PG_arch_1
0020
0021 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
0022 static inline void flush_dcache_page(struct page *page)
0023 {
0024 if (test_bit(PG_dcache_clean, &page->flags))
0025 clear_bit(PG_dcache_clean, &page->flags);
0026 }
0027
0028 #define flush_dcache_mmap_lock(mapping) do { } while (0)
0029 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
0030 #define flush_icache_page(vma, page) do { } while (0)
0031
0032 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
0033
0034 void flush_icache_mm_range(struct mm_struct *mm,
0035 unsigned long start, unsigned long end);
0036 void flush_icache_deferred(struct mm_struct *mm);
0037
0038 #define flush_cache_vmap(start, end) do { } while (0)
0039 #define flush_cache_vunmap(start, end) do { } while (0)
0040
0041 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
0042 do { \
0043 memcpy(dst, src, len); \
0044 if (vma->vm_flags & VM_EXEC) { \
0045 dcache_wb_range((unsigned long)dst, \
0046 (unsigned long)dst + len); \
0047 flush_icache_mm_range(current->mm, \
0048 (unsigned long)dst, \
0049 (unsigned long)dst + len); \
0050 } \
0051 } while (0)
0052 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
0053 memcpy(dst, src, len)

#endif /* __ABI_CSKY_CACHEFLUSH_H */