// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

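/*
 * Memory type forced onto user-space PTEs that alias the same page.
 * It starts out as "uncacheable but write-buffered";
 * check_writebuffer_bugs() at the bottom of this file downgrades it to
 * L_PTE_MT_UNCACHED if the write buffer turns out not to be coherent
 * for such aliases.
 */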
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
    unsigned long pfn, pte_t *ptep)
{
    pte_t entry = *ptep;
    int ret;

    /*
     * If this page is present, it's actually being shared.
     */
    ret = pte_present(entry);

    /*
     * If this page isn't present, or is already set up to
     * fault (i.e. is old), we can safely ignore any issues.
     */
    if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
        flush_cache_page(vma, address, pfn);
        outer_flush_range((pfn << PAGE_SHIFT),
                  (pfn << PAGE_SHIFT) + PAGE_SIZE);
        pte_val(entry) &= ~L_PTE_MT_MASK;
        pte_val(entry) |= shared_pte_mask;
        set_pte_at(vma->vm_mm, address, ptep, entry);
        flush_tlb_page(vma, address);
    }

    return ret;
}
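
/*
 * Illustration of the rewrite above (values chosen for the example only):
 * a present PTE whose memory-type field is, say, L_PTE_MT_WRITEBACK ends
 * up with that field replaced by shared_pte_mask, i.e. effectively
 *
 *     pte_val(entry) = (pte_val(entry) & ~L_PTE_MT_MASK) | shared_pte_mask;
 *
 * so this alias of the page bypasses the VIVT data cache from now on
 * (by default it may still use the write buffer).  The return value is
 * non-zero whenever the PTE was present, which lets make_coherent()
 * below count how many live aliases it touched.
 */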

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
    /*
     * Use nested version here to indicate that we are already
     * holding one similar spinlock.
     */
    spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
    spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
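
/*
 * With split PTE locks each page-table page has its own spinlock, so the
 * second table's lock really is a different lock and may be taken while
 * the faulting PTE's lock is held; SINGLE_DEPTH_NESTING merely tells
 * lockdep that this one-level nesting of same-class locks is intentional.
 * Without split locks both "locks" are mm->page_table_lock, which the
 * caller already holds, so the helpers compile away to nothing.
 */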

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
    unsigned long pfn)
{
    spinlock_t *ptl;
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    int ret;

    pgd = pgd_offset(vma->vm_mm, address);
    if (pgd_none_or_clear_bad(pgd))
        return 0;

    p4d = p4d_offset(pgd, address);
    if (p4d_none_or_clear_bad(p4d))
        return 0;

    pud = pud_offset(p4d, address);
    if (pud_none_or_clear_bad(pud))
        return 0;

    pmd = pmd_offset(pud, address);
    if (pmd_none_or_clear_bad(pmd))
        return 0;

    /*
     * This is called while another page table is mapped, so we
     * must use the nested version.  This also means we need to
     * open-code the spin-locking.
     */
    ptl = pte_lockptr(vma->vm_mm, pmd);
    pte = pte_offset_map(pmd, address);
    do_pte_lock(ptl);

    ret = do_adjust_pte(vma, address, pfn, pte);

    do_pte_unlock(ptl);
    pte_unmap(pte);

    return ret;
}
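
/*
 * Note on the walk above: if any level (pgd/p4d/pud/pmd) is missing or
 * bad, the address simply is not mapped in that VMA, so there is no
 * alias to fix up and 0 ("nothing adjusted") is returned.
 */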

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
    unsigned long addr, pte_t *ptep, unsigned long pfn)
{
    struct mm_struct *mm = vma->vm_mm;
    struct vm_area_struct *mpnt;
    unsigned long offset;
    pgoff_t pgoff;
    int aliases = 0;

    pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

    /*
     * If we have any shared mappings that are in the same mm
     * space, then we need to handle them specially to maintain
     * cache coherency.
     */
    flush_dcache_mmap_lock(mapping);
    vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
        /*
         * If this VMA is not in our MM, we can ignore it.
         * Note that we intentionally mask out the VMA
         * that we are fixing up.
         */
        if (mpnt->vm_mm != mm || mpnt == vma)
            continue;
        if (!(mpnt->vm_flags & VM_MAYSHARE))
            continue;
        offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
        aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
    }
    flush_dcache_mmap_unlock(mapping);
    if (aliases)
        do_adjust_pte(vma, addr, pfn, ptep);
}
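
/*
 * Overall flow of the VIVT alias fix-up implemented in this file
 * (sketch only; see the functions themselves for the details):
 *
 *   update_mmu_cache(vma, addr, ptep)
 *       make_coherent(mapping, vma, addr, ptep, pfn)
 *           adjust_pte() -> do_adjust_pte()      for every other VM_MAYSHARE
 *                                                mapping of the page in this mm
 *           do_adjust_pte(vma, addr, pfn, ptep)  for the faulting mapping
 *                                                itself, but only if another
 *                                                live alias was adjusted
 */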

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
    pte_t *ptep)
{
    unsigned long pfn = pte_pfn(*ptep);
    struct address_space *mapping;
    struct page *page;

    if (!pfn_valid(pfn))
        return;

    /*
     * The zero page is never written to, so never has any dirty
     * cache lines, and therefore never needs to be flushed.
     */
    page = pfn_to_page(pfn);
    if (page == ZERO_PAGE(0))
        return;

    mapping = page_mapping_file(page);
    if (!test_and_set_bit(PG_dcache_clean, &page->flags))
        __flush_dcache_page(mapping, page);
    if (mapping) {
        if (cache_is_vivt())
            make_coherent(mapping, vma, addr, ptep, pfn);
        else if (vma->vm_flags & VM_EXEC)
            __flush_icache_all();
    }
}
#endif  /* __LINUX_ARM_ARCH__ < 6 */
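
/*
 * The block that ends at the #endif above is built only for kernels
 * targeting pre-ARMv6 CPUs.  Even within it, the make_coherent() fix-up
 * runs only when the data cache is actually VIVT; other caches just get
 * a global I-cache flush for executable mappings.
 */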

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
    register unsigned long zero = 0, one = 1, val;

    local_irq_disable();
    mb();
    *p1 = one;
    mb();
    *p2 = zero;
    mb();
    val = *p1;
    mb();
    local_irq_enable();
    return val != zero;
}
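
/*
 * How the test above works: p1 and p2 are two bufferable (uncached,
 * write-buffered) virtual aliases of the same physical word, set up by
 * check_writebuffer_bugs() below.  In effect:
 *
 *     *p1 = 1;         store through the first alias
 *     *p2 = 0;         store through the second alias, same location
 *     val = *p1;       should observe the 0 written via p2
 *
 * A non-zero return therefore means the write through p2 was not seen
 * by the read through p1, i.e. the write buffer does not keep aliased
 * mappings coherent, and the caller enables the work-around.
 */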

void __init check_writebuffer_bugs(void)
{
    struct page *page;
    const char *reason;
    unsigned long v = 1;

    pr_info("CPU: Testing write buffer coherency: ");

    page = alloc_page(GFP_KERNEL);
    if (page) {
        unsigned long *p1, *p2;
        pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
                    L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

        p1 = vmap(&page, 1, VM_IOREMAP, prot);
        p2 = vmap(&page, 1, VM_IOREMAP, prot);

        if (p1 && p2) {
            v = check_writebuffer(p1, p2);
            reason = "enabling work-around";
        } else {
            reason = "unable to map memory\n";
        }

        vunmap(p1);
        vunmap(p2);
        put_page(page);
    } else {
        reason = "unable to grab page\n";
    }

    if (v) {
        pr_cont("failed, %s\n", reason);
        shared_pte_mask = L_PTE_MT_UNCACHED;
    } else {
        pr_cont("ok\n");
    }
}