// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

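/*
 * Use the arch-private page flag PG_arch_1 to record that a page's
 * contents are coherent with the d-cache, so a flush can be deferred
 * until the page is actually mapped (see update_mmu_cache() below).
 */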
#define PG_dcache_clean		PG_arch_1

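/*
 * Called when the kernel has dirtied (or is about to read) a page cache
 * page that may also be mapped into userspace.  No per-page flush
 * primitive is used here, so the whole d-cache is written back and
 * invalidated.  For file pages with no current user mappings the flush
 * is deferred instead: PG_dcache_clean is cleared and update_mmu_cache()
 * flushes when the page is next mapped.
 */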
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);

	if (mapping && !page_mapcount(page)) {
		/* Not mapped by any user process yet: defer the flush. */
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		dcache_wbinv_all();
		if (mapping)
			/* A file page may be executed: drop stale insns. */
			icache_inv_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

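/*
 * Called after a PTE has been installed for @addr.  If a deferred flush
 * is pending for the page (PG_dcache_clean clear), write back and
 * invalidate the d-cache now; for executable mappings of file pages,
 * also invalidate the i-cache so stale instructions are not fetched.
 */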
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	/* Perform the flush deferred by flush_dcache_page(), if any. */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		dcache_wbinv_all();

	if (page_mapping_file(page)) {
		if (vma->vm_flags & VM_EXEC)
			icache_inv_all();
	}
}

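/*
 * Flush caches for the user range [start, end) of @vma before the
 * mapping is torn down or changed.  Only whole-cache operations are
 * used here, so @start and @end are ignored.
 */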
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	dcache_wbinv_all();

	if (vma->vm_flags & VM_EXEC)
		icache_inv_all();
}