/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 *  vineetg: May 2011: for a non-aliasing VIPT D-cache the following can be NOPs
 *   -flush_cache_dup_mm (fork)
 *   -likewise for flush_cache_mm (exit/execve)
 *   -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
 *
 *  vineetg: April 2008
 *   -Added a critical cache-line flush to copy_to_user_page(), whose absence
 *     was causing gdbserver to not set up breakpoints consistently
 */

#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/shmparam.h>

/*
 * Semantically we need this because the icache doesn't snoop the dcache/DMA.
 * However, ARC cache flush requires paddr as well as vaddr, and the latter is
 * not available in the flush_icache_page() API. So we no-op it here and do
 * the equivalent work in update_mmu_cache().
 */
#define flush_icache_page(vma, page)

void flush_cache_all(void);

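/*
 * flush_icache_range() operates on a kernel virtual range; the __xxx helpers
 * below take a (paddr, vaddr) pair, since ARC cache maintenance needs the
 * physical address as well as the virtual address used to index the cache
 * (see the flush_icache_page() comment above).
 */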
void flush_icache_range(unsigned long kstart, unsigned long kend);
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);

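/*
 * Advertise to generic code that this arch provides a real flush_dcache_page()
 * (deferred/tracked via the PG_dc_clean bit further below) instead of the
 * default no-op.
 */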
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

void flush_dcache_page(struct page *page);

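/*
 * Streaming DMA helpers (used by the arch DMA mapping code):
 * wback = write dirty lines back to memory (before a device reads the buffer),
 * inv = discard lines (after a device has written the buffer),
 * wback_inv = both, for bidirectional buffers.
 */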
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
void dma_cache_wback(phys_addr_t start, unsigned long sz);

#define flush_dcache_mmap_lock(mapping)     do { } while (0)
#define flush_dcache_mmap_unlock(mapping)   do { } while (0)

/* TBD: optimize this */
#define flush_cache_vmap(start, end)        flush_cache_all()
#define flush_cache_vunmap(start, end)      flush_cache_all()

#define flush_cache_dup_mm(mm)          /* called on fork (VIVT only) */

#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING

#define flush_cache_mm(mm)          /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */

#else   /* VIPT aliasing dcache */

/* To clear out stale userspace mappings */
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
    unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
    unsigned long user_addr, unsigned long page);

/*
 * To make sure that userspace mapping is flushed to memory before
 * get_user_pages() uses a kernel mapping to access the page
 */
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
    struct page *page, unsigned long u_vaddr);

#endif  /* CONFIG_ARC_CACHE_VIPT_ALIASING */

/*
 * A new pagecache page has PG_arch_1 clear, i.e. it is considered dcache
 * dirty by default. This works around some PIO-based drivers which don't
 * call flush_dcache_page() to record that they dirtied the dcache.
 */
#define PG_dc_clean PG_arch_1

#define CACHE_COLORS_NUM    4
#define CACHE_COLORS_MSK    (CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr)   (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
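/*
 * Illustrative example, assuming 8 KiB pages (PAGE_SHIFT == 13):
 *	CACHE_COLOR(0x2000) == 1
 *	CACHE_COLOR(0xa000) == 1	-> congruent (same cache sets)
 *	CACHE_COLOR(0x4000) == 2	-> not congruent vs. the above
 * i.e. two virtual mappings of the same physical page can only alias in the
 * D$ if their addresses differ in bits [PAGE_SHIFT+1:PAGE_SHIFT].
 */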

/*
 * Simple wrapper over config option
 * Bootup code ensures that hardware matches kernel configuration
 */
static inline int cache_is_vipt_aliasing(void)
{
    return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}

/*
 * checks if two addresses (after page aligning) index into same cache set
 */
#define addr_not_cache_congruent(addr1, addr2)              \
({                                  \
    cache_is_vipt_aliasing() ?                  \
        (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0;     \
})
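/*
 * Illustrative use (a sketch with hypothetical kvaddr/u_vaddr/paddr names):
 * when the kernel touches a user-visible page via its own kernel alias, an
 * explicit writeback of that alias is only needed if the two mappings are
 * not congruent, e.g.:
 *
 *	if (addr_not_cache_congruent(kvaddr, u_vaddr))
 *		__flush_dcache_page(paddr, kvaddr);
 */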

#define copy_to_user_page(vma, page, vaddr, dst, src, len)      \
do {                                    \
    memcpy(dst, src, len);                      \
    if (vma->vm_flags & VM_EXEC)                    \
        __sync_icache_dcache((unsigned long)(dst), vaddr, len); \
} while (0)

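/*
 * copy_{to,from}_user_page() are used when the kernel accesses another task's
 * page via a kernel mapping (e.g. ptrace/access_process_vm, as when gdbserver
 * plants breakpoints - see the April 2008 note at the top of this file).
 * For the "to" direction on an executable VMA, the D$ lines are written back
 * and the I$ invalidated, since the I$ doesn't snoop the D$.
 */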
#define copy_from_user_page(vma, page, vaddr, dst, src, len)        \
    memcpy(dst, src, len);

#endif