/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 *  - __flush_icache_user_range(start, end) flushes a range of user instructions
 */

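/*
 * Usage sketch (illustrative, not part of this header): generic mm code
 * flushes a user page's cache lines before it tears down or rewrites the
 * mapping, along the lines of the kernel's rmap code.  The function name
 * below is hypothetical.
 */
#if 0
static void unmap_one_page_example(struct vm_area_struct *vma,
    unsigned long address, struct page *page)
{
    /* Write back/invalidate the user's view before the PTE goes away. */
    flush_cache_page(vma, address, page_to_pfn(page));
    /* ... clear the PTE and flush the TLB ... */
}
#endif
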
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty         PG_arch_1

#define Page_dcache_dirty(page)     \
    test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)    \
    set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)  \
    clear_bit(PG_dcache_dirty, &(page)->flags)

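/*
 * The flag enables lazy flushing: flush_dcache_page() below may only mark
 * the page dirty, and the actual flush happens when the page is next
 * mapped into user space.  A simplified sketch of that consumer side,
 * loosely modelled on the MIPS __update_cache() helper (details may differ):
 */
#if 0
static void update_cache_example(struct page *page)
{
    if (Page_dcache_dirty(page)) {
        /* Write back the deferred dirty lines now. */
        flush_data_cache_page((unsigned long)page_address(page));
        ClearPageDcacheDirty(page);
    }
}
#endif
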
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)  do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
    unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma,
    unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
    if (cpu_has_dc_aliases)
        __flush_dcache_page(page);
    else if (!cpu_has_ic_fills_f_dc)
        SetPageDcacheDirty(page);
}
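
/*
 * Usage sketch (illustrative): after the kernel writes to a page cache
 * page through its kernel mapping, it calls flush_dcache_page() so that
 * a later user-space mapping sees the new data.  The helper below is a
 * hypothetical example, not a real caller.
 */
#if 0
static void fill_pagecache_page_example(struct page *page,
    const void *src, size_t len)
{
    memcpy(page_address(page), src, len);   /* kernel-side write */
    flush_dcache_page(page);                /* make it visible to user maps */
}
#endif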

#define flush_dcache_mmap_lock(mapping)     do { } while (0)
#define flush_dcache_mmap_unlock(mapping)   do { } while (0)

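/*
 * flush_anon_page() is invoked (via the generic ARCH_HAS_FLUSH_ANON_PAGE
 * hook) when the kernel is about to access an anonymous user page through
 * its kernel mapping, e.g. from get_user_pages(); only aliasing data
 * caches need to do any work here.
 */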
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
    struct page *page, unsigned long vmaddr)
{
    if (cpu_has_dc_aliases && PageAnon(page))
        __flush_anon_page(page, vmaddr);
}

static inline void flush_icache_page(struct vm_area_struct *vma,
    struct page *page)
{
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
                     unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
                           unsigned long end);

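/*
 * Usage sketch (illustrative): anything that writes instructions at run
 * time (module loader, kprobes, ftrace) must push them past the dcache
 * and invalidate the icache before executing them.  The function name
 * below is hypothetical.
 */
#if 0
static void patch_text_example(u32 *insn_addr, u32 new_insn)
{
    *insn_addr = new_insn;
    /* Write back the dcache line and invalidate the icache range. */
    flush_icache_range((unsigned long)insn_addr,
                       (unsigned long)insn_addr + sizeof(new_insn));
}
#endif
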
extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
    if (cpu_has_dc_aliases)
        __flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
    if (cpu_has_dc_aliases)
        __flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
    struct page *page, unsigned long vaddr, void *dst, const void *src,
    unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
    struct page *page, unsigned long vaddr, void *dst, const void *src,
    unsigned long len);

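/*
 * Usage sketch (illustrative): these helpers exist for ptrace-style
 * access to another process's memory (see access_process_vm()); they
 * keep the target's cache view coherent around the copy.  The function
 * name below is hypothetical and assumes a lowmem page.
 */
#if 0
static void poke_user_word_example(struct vm_area_struct *vma,
    struct page *page, unsigned long vaddr, u32 val)
{
    void *dst = page_address(page) + (vaddr & ~PAGE_MASK);

    copy_to_user_page(vma, page, vaddr, dst, &val, sizeof(val));
}
#endif
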
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
    kunmap_coherent();
}

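/*
 * kmap_coherent() maps a page at a kernel virtual address whose cache
 * colour matches the given user address, so accesses through the mapping
 * cannot create dcache aliases.  A simplified sketch, loosely modelled on
 * the MIPS copy_user_highpage() implementation:
 */
#if 0
static void read_user_page_example(struct page *page, unsigned long uaddr,
    void *dst)
{
    void *vfrom = kmap_coherent(page, uaddr);

    memcpy(dst, vfrom, PAGE_SIZE);  /* alias-safe read of the page */
    kunmap_coherent();
}
#endif
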
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
    if (cpu_has_dc_aliases)
        __flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
    if (cpu_has_dc_aliases)
        __flush_kernel_vmap_range((unsigned long) vaddr, size);
}

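/*
 * Usage sketch (illustrative): callers doing I/O on vmap()/vmalloc()
 * memory bracket the I/O with these helpers so the vmap alias and the
 * underlying physical pages stay coherent.  The function name below is
 * hypothetical.
 */
#if 0
static void read_into_vmap_buffer_example(void *buf, int size)
{
    flush_kernel_vmap_range(buf, size);      /* write back dirty alias lines */
    /* ... perform the device read into buf ... */
    invalidate_kernel_vmap_range(buf, size); /* drop now-stale alias lines */
}
#endif
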
#endif /* _ASM_CACHEFLUSH_H */