// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

void sync_icache_aliases(unsigned long start, unsigned long end)
{
    if (icache_is_aliasing()) {
        dcache_clean_pou(start, end);
        icache_inval_all_pou();
    } else {
        /*
         * Don't issue kick_all_cpus_sync() after I-cache invalidation
         * for user mappings.
         */
        caches_clean_inval_pou(start, end);
    }
}

static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
                unsigned long end)
{
    if (vma->vm_flags & VM_EXEC)
        sync_icache_aliases(start, end);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user space"
 * model to handle this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
               unsigned long uaddr, void *dst, const void *src,
               unsigned long len)
{
    memcpy(dst, src, len);
    flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}

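/*
 * Illustrative sketch, not part of the original file: the typical
 * ptrace-style caller pattern for copy_to_user_page(), mirroring the
 * generic remote-access path in mm/memory.c.  The helper name
 * poke_remote_page() and its exact arguments are hypothetical.
 */
static void poke_remote_page(struct vm_area_struct *vma, struct page *page,
                unsigned long uaddr, const void *buf, unsigned long len)
{
    void *kaddr = kmap_local_page(page);

    /*
     * copy_to_user_page() performs the memcpy() and, for VM_EXEC
     * mappings, resyncs the I-cache aliases via flush_ptrace_access().
     */
    copy_to_user_page(vma, page, uaddr, kaddr + offset_in_page(uaddr),
              buf, len);
    kunmap_local(kaddr);
    set_page_dirty_lock(page);
}
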
void __sync_icache_dcache(pte_t pte)
{
    struct page *page = pte_page(pte);

    /*
     * HugeTLB pages are always fully mapped, so setting only the head
     * page's PG_dcache_clean flag is enough.
     */
    if (PageHuge(page))
        page = compound_head(page);

    if (!test_bit(PG_dcache_clean, &page->flags)) {
        sync_icache_aliases((unsigned long)page_address(page),
                    (unsigned long)page_address(page) +
                        page_size(page));
        set_bit(PG_dcache_clean, &page->flags);
    }
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);

/*
 * This function is called when a page has been modified by the kernel. Mark
 * it as dirty for later flushing when mapped in user space (if executable,
 * see __sync_icache_dcache).
 */
void flush_dcache_page(struct page *page)
{
    /*
     * HugeTLB pages are always fully mapped and only the head page has
     * PG_dcache_clean set (see the comments in __sync_icache_dcache()).
     */
    if (PageHuge(page))
        page = compound_head(page);

    if (test_bit(PG_dcache_clean, &page->flags))
        clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

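/*
 * Illustrative sketch, not part of the original file: how PG_dcache_clean
 * ties flush_dcache_page() and __sync_icache_dcache() together.  A kernel
 * writer dirties the page and clears the flag here; the actual cache
 * maintenance is deferred to __sync_icache_dcache() when the page is
 * later mapped with an executable PTE.  The helper name fill_user_page()
 * is hypothetical.
 */
static void fill_user_page(struct page *page, const void *src, size_t len)
{
    void *kaddr = kmap_local_page(page);

    memcpy(kaddr, src, len);
    kunmap_local(kaddr);
    flush_dcache_page(page);    /* clears PG_dcache_clean; flush deferred */
}
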
/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(caches_clean_inval_pou);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
    /* Ensure order against any prior non-cacheable writes */
    dmb(osh);
    dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

void arch_invalidate_pmem(void *addr, size_t size)
{
    dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
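
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver helper that updates a record in a persistent-memory mapping
 * and then cleans the written cache lines to the point of persistence
 * via arch_wb_cache_pmem().  The helper name pmem_update_record() is
 * made up for illustration.
 */
static void pmem_update_record(void *pmem_dst, const void *src, size_t len)
{
    memcpy(pmem_dst, src, len);
    arch_wb_cache_pmem(pmem_dst, len);
}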