// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

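/*
 * arm_heavy_mb() provides the "heavy" barrier used by mb() on platforms
 * that select CONFIG_ARM_HEAVY_MB: it synchronises the outer (L2) cache
 * if one is present and then invokes an optional SoC-specific hook.
 * Platform code that needs extra work on a full barrier can install the
 * hook at init time, e.g. (hypothetical callback name):
 *
 *     soc_mb = my_soc_barrier;
 */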
#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
    if (outer_cache.sync)
        outer_cache.sync();
#endif
    if (soc_mb)
        soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

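/*
 * On an aliasing VIPT D-cache the same physical page can be cached under
 * several virtual "colours".  flush_pfn_alias() handles one user alias by
 * temporarily mapping the page at the kernel's flush window
 * (FLUSH_ALIAS_START) with the same cache colour as the user address, then
 * cleaning and invalidating that window: the mcrr to c14 is the ARMv6
 * clean+invalidate D-cache range operation, and the mcr to c7, c10, 4
 * drains the write buffer.
 */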
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
    unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
    const int zero = 0;

    set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

    asm(    "mcrr   p15, 0, %1, %0, c14\n"
    "   mcr p15, 0, %2, c7, c10, 4"
        :
        : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
        : "cc");
}

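/*
 * Same idea for the I-cache: map the page at its coloured alias window and
 * run flush_icache_range() over just the affected bytes so that
 * instruction fetches see the data that was written through a
 * differently-coloured mapping.
 */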
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
    unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
    unsigned long offset = vaddr & (PAGE_SIZE - 1);
    unsigned long to;

    set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
    to = va + offset;
    flush_icache_range(to, to + len);
}

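/*
 * flush_cache_mm() flushes all cached data belonging to a whole user
 * address space.  VIVT caches tag lines by virtual address and must go
 * through the mm-wide VIVT helper; an aliasing VIPT D-cache is simply
 * cleaned and invalidated in its entirety (mcr c7, c14, 0) followed by a
 * write buffer drain; a non-aliasing VIPT cache needs nothing at all.
 */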
void flush_cache_mm(struct mm_struct *mm)
{
    if (cache_is_vivt()) {
        vivt_flush_cache_mm(mm);
        return;
    }

    if (cache_is_vipt_aliasing()) {
        asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
        "   mcr p15, 0, %0, c7, c10, 4"
            :
            : "r" (0)
            : "cc");
    }
}

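/*
 * flush_cache_range() covers a span of user addresses within one VMA,
 * typically before the mappings are changed or torn down.  The VIVT and
 * aliasing-VIPT handling mirrors flush_cache_mm(); executable ranges
 * additionally get the I-cache invalidated so stale instructions cannot
 * remain cached.
 */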
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
    if (cache_is_vivt()) {
        vivt_flush_cache_range(vma, start, end);
        return;
    }

    if (cache_is_vipt_aliasing()) {
        asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
        "   mcr p15, 0, %0, c7, c10, 4"
            :
            : "r" (0)
            : "cc");
    }

    if (vma->vm_flags & VM_EXEC)
        __flush_icache_all();
}

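/*
 * flush_cache_page() deals with a single user page.  On an aliasing VIPT
 * cache only the one affected colour needs flushing, which is exactly what
 * flush_pfn_alias() does; a VIVT ASID-tagged I-cache must also be
 * invalidated for executable mappings.
 */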
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
    if (cache_is_vivt()) {
        vivt_flush_cache_page(vma, user_addr, pfn);
        return;
    }

    if (cache_is_vipt_aliasing()) {
        flush_pfn_alias(pfn, user_addr);
        __flush_icache_all();
    }

    if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
        __flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)      do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)   do { } while (0)
#endif

#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
    __flush_icache_all();
}

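/*
 * Common helper for the ptrace/uprobes paths below: make data written
 * through the kernel mapping (kaddr) visible to the user mapping (uaddr),
 * including the I-cache when the target VMA is executable.
 * FLAG_PA_CORE_IN_MM means the current CPU is running the target mm;
 * FLAG_PA_IS_EXEC means the VMA is executable.  Where cache maintenance is
 * not broadcast in hardware, the I-cache flush is propagated to the other
 * CPUs via smp_call_function().
 */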
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
               unsigned long len, unsigned int flags)
{
    if (cache_is_vivt()) {
        if (flags & FLAG_PA_CORE_IN_MM) {
            unsigned long addr = (unsigned long)kaddr;
            __cpuc_coherent_kern_range(addr, addr + len);
        }
        return;
    }

    if (cache_is_vipt_aliasing()) {
        flush_pfn_alias(page_to_pfn(page), uaddr);
        __flush_icache_all();
        return;
    }

    /* VIPT non-aliasing D-cache */
    if (flags & FLAG_PA_IS_EXEC) {
        unsigned long addr = (unsigned long)kaddr;
        if (icache_is_vipt_aliasing())
            flush_icache_alias(page_to_pfn(page), uaddr, len);
        else
            __cpuc_coherent_kern_range(addr, addr + len);
        if (cache_ops_need_broadcast())
            smp_call_function(flush_ptrace_access_other,
                      NULL, 1);
    }
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
             unsigned long uaddr, void *kaddr, unsigned long len)
{
    unsigned int flags = 0;
    if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
        flags |= FLAG_PA_CORE_IN_MM;
    if (vma->vm_flags & VM_EXEC)
        flags |= FLAG_PA_IS_EXEC;
    __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

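/*
 * Used by uprobes when an instruction is copied into its execute-out-of-
 * line (XOL) slot: the slot lives in the current mm and is executable, so
 * both flags are set unconditionally.
 */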
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                 void *kaddr, unsigned long len)
{
    unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

    __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
               unsigned long uaddr, void *dst, const void *src,
               unsigned long len)
{
#ifdef CONFIG_SMP
    preempt_disable();
#endif
    memcpy(dst, src, len);
    flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
    preempt_enable();
#endif
}

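/*
 * __flush_dcache_page() writes back the kernel-side view of a page.
 * Highmem pages (possibly compound) are mapped and flushed one sub-page at
 * a time; on aliasing caches kmap_high_get() reuses an existing permanent
 * kmap if there is one, and if there is none the page presumably has no
 * dirty kernel alias to worry about.
 */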
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
    /*
     * Writeback any data associated with the kernel mapping of this
     * page.  This ensures that data in the physical page is mutually
     * coherent with the kernel's mapping.
     */
    if (!PageHighMem(page)) {
        __cpuc_flush_dcache_area(page_address(page), page_size(page));
    } else {
        unsigned long i;
        if (cache_is_vipt_nonaliasing()) {
            for (i = 0; i < compound_nr(page); i++) {
                void *addr = kmap_atomic(page + i);
                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                kunmap_atomic(addr);
            }
        } else {
            for (i = 0; i < compound_nr(page); i++) {
                void *addr = kmap_high_get(page + i);
                if (addr) {
                    __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                    kunmap_high(page + i);
                }
            }
        }
    }

    /*
     * If this is a page cache page, and we have an aliasing VIPT cache,
     * we only need to do one flush - which would be at the relevant
     * userspace colour, which is congruent with page->index.
     */
    if (mapping && cache_is_vipt_aliasing())
        flush_pfn_alias(page_to_pfn(page),
                page->index << PAGE_SHIFT);
}

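/*
 * __flush_dcache_aliases() walks every user mapping of a page-cache page
 * via the address_space interval tree and flushes those belonging to the
 * current mm, making user-visible aliases coherent with the kernel copy
 * that __flush_dcache_page() has just written back.
 */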
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
    struct mm_struct *mm = current->active_mm;
    struct vm_area_struct *mpnt;
    pgoff_t pgoff;

    /*
     * There are possible user space mappings of this page:
     * - VIVT cache: we need to also write back and invalidate all user
     *   data in the current VM view associated with this page.
     * - aliasing VIPT: we only need to find one mapping of this page.
     */
    pgoff = page->index;

    flush_dcache_mmap_lock(mapping);
    vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
        unsigned long offset;

        /*
         * If this VMA is not in our MM, we can ignore it.
         */
        if (mpnt->vm_mm != mm)
            continue;
        if (!(mpnt->vm_flags & VM_MAYSHARE))
            continue;
        offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
        flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
    }
    flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
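/*
 * Called when a new user PTE is installed (see set_pte_at()): lazily write
 * back the D-cache, using the PG_dcache_clean bit to avoid redundant work,
 * and invalidate the I-cache for executable mappings so the CPU cannot
 * execute stale instructions.
 */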
void __sync_icache_dcache(pte_t pteval)
{
    unsigned long pfn;
    struct page *page;
    struct address_space *mapping;

    if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
        /* only flush non-aliasing VIPT caches for exec mappings */
        return;
    pfn = pte_pfn(pteval);
    if (!pfn_valid(pfn))
        return;

    page = pfn_to_page(pfn);
    if (cache_is_vipt_aliasing())
        mapping = page_mapping_file(page);
    else
        mapping = NULL;

    if (!test_and_set_bit(PG_dcache_clean, &page->flags))
        __flush_dcache_page(mapping, page);

    if (pte_exec(pteval))
        __flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
    struct address_space *mapping;

    /*
     * The zero page is never written to, so never has any dirty
     * cache lines, and therefore never needs to be flushed.
     */
    if (page == ZERO_PAGE(0))
        return;

    if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
        if (test_bit(PG_dcache_clean, &page->flags))
            clear_bit(PG_dcache_clean, &page->flags);
        return;
    }

    mapping = page_mapping_file(page);

    if (!cache_ops_need_broadcast() &&
        mapping && !page_mapcount(page))
        clear_bit(PG_dcache_clean, &page->flags);
    else {
        __flush_dcache_page(mapping, page);
        if (mapping && cache_is_vivt())
            __flush_dcache_aliases(mapping, page);
        else if (mapping)
            __flush_icache_all();
        set_bit(PG_dcache_clean, &page->flags);
    }
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
    unsigned long pfn;

    /* VIPT non-aliasing caches need do nothing */
    if (cache_is_vipt_nonaliasing())
        return;

    /*
     * Write back and invalidate userspace mapping.
     */
    pfn = page_to_pfn(page);
    if (cache_is_vivt()) {
        flush_cache_page(vma, vmaddr, pfn);
    } else {
        /*
         * For aliasing VIPT, we can flush an alias of the
         * userspace address only.
         */
        flush_pfn_alias(pfn, vmaddr);
        __flush_icache_all();
    }

    /*
     * Invalidate kernel mapping.  No data should be contained
     * in this mapping of the page.  FIXME: this is overkill
     * since we actually ask for a write-back and invalidate.
     */
    __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}