// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

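/*
 * Walk the current task's page tables for the user address @_addr and,
 * if the destination page is already present, young, writable and dirty,
 * return 1 with the page table lock held so the caller can write to it
 * with a plain memcpy().  *ptep is set to the mapped pte (or NULL for a
 * THP/HugeTLB mapping, where only the mm-wide page_table_lock is taken)
 * and *ptlp to the lock to release.  A return of 0 means the caller must
 * fault the page in first and try again.
 */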
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
    unsigned long addr = (unsigned long)_addr;
    pgd_t *pgd;
    p4d_t *p4d;
    pmd_t *pmd;
    pte_t *pte;
    pud_t *pud;
    spinlock_t *ptl;

    pgd = pgd_offset(current->mm, addr);
    if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
        return 0;

    p4d = p4d_offset(pgd, addr);
    if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
        return 0;

    pud = pud_offset(p4d, addr);
    if (unlikely(pud_none(*pud) || pud_bad(*pud)))
        return 0;

    pmd = pmd_offset(pud, addr);
    if (unlikely(pmd_none(*pmd)))
        return 0;

    /*
     * A pmd can be bad if it refers to a HugeTLB or THP page.
     *
     * Both THP and HugeTLB pages have the same pmd layout
     * and should not be manipulated by the pte functions.
     *
     * Lock the page table for the destination and check
     * to see that it's still huge and whether or not we will
     * need to fault on write.
     */
    if (unlikely(pmd_thp_or_huge(*pmd))) {
        ptl = &current->mm->page_table_lock;
        spin_lock(ptl);
        if (unlikely(!pmd_thp_or_huge(*pmd)
            || pmd_hugewillfault(*pmd))) {
            spin_unlock(ptl);
            return 0;
        }

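        /*
         * Report the huge mapping back with a NULL pte: the caller then
         * knows that only the mm-wide page_table_lock is held and drops
         * it with spin_unlock() instead of pte_unmap_unlock().
         */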
        *ptep = NULL;
        *ptlp = ptl;
        return 1;
    }

    if (unlikely(pmd_bad(*pmd)))
        return 0;

    pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
    if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
        !pte_write(*pte) || !pte_dirty(*pte))) {
        pte_unmap_unlock(pte, ptl);
        return 0;
    }

    *ptep = pte;
    *ptlp = ptl;

    return 1;
}

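/*
 * Copy to user space by pinning the destination page tables and doing a
 * plain memcpy() up to each page boundary, rather than going through the
 * standard __copy_to_user_std() routine.  This only pays off for larger
 * copies; arm_copy_to_user() below applies the size threshold.
 */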
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
    unsigned long ua_flags;
    int atomic;

    /* the mmap lock is taken only if not in an atomic context */
    atomic = faulthandler_disabled();

    if (!atomic)
        mmap_read_lock(current->mm);
    while (n) {
        pte_t *pte;
        spinlock_t *ptl;
        int tocopy;

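        /*
         * If the destination page cannot be pinned (not present, or not
         * yet writable/dirty), drop the mmap lock (when held) and write
         * one byte with __put_user() to fault it in, then retry.  If
         * even __put_user() fails the address is genuinely bad, so give
         * up and return the number of bytes left to copy.
         */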
        while (!pin_page_for_write(to, &pte, &ptl)) {
            if (!atomic)
                mmap_read_unlock(current->mm);
            if (__put_user(0, (char __user *)to))
                goto out;
            if (!atomic)
                mmap_read_lock(current->mm);
        }

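        /* copy no further than the end of the current page */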
        tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
        if (tocopy > n)
            tocopy = n;

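        /*
         * Open up kernel access to the user page around the plain
         * memcpy(); the destination mapping has been validated and is
         * locked by pin_page_for_write() above.
         */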
        ua_flags = uaccess_save_and_enable();
        memcpy((void *)to, from, tocopy);
        uaccess_restore(ua_flags);
        to += tocopy;
        from += tocopy;
        n -= tocopy;

        if (pte)
            pte_unmap_unlock(pte, ptl);
        else
            spin_unlock(ptl);
    }
    if (!atomic)
        mmap_read_unlock(current->mm);

out:
    return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
    /*
     * This test is kept out of the main function above so the overhead
     * for small copies stays low, avoiding a large register dump on the
     * stack just to reload them right away.  With frame pointer disabled,
     * tail call optimization kicks in as well, making this test almost
     * invisible.
     */
    if (n < 64) {
        unsigned long ua_flags = uaccess_save_and_enable();
        n = __copy_to_user_std(to, from, n);
        uaccess_restore(ua_flags);
    } else {
        n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
                      from, n);
    }
    return n;
}

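/*
 * Zero user space with the same page-pinning approach as
 * __copy_to_user_memcpy(), using memset() instead of memcpy().  Unlike
 * the copy path above, the mmap lock is always taken here.
 */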
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
    unsigned long ua_flags;

    mmap_read_lock(current->mm);
    while (n) {
        pte_t *pte;
        spinlock_t *ptl;
        int tocopy;

        while (!pin_page_for_write(addr, &pte, &ptl)) {
            mmap_read_unlock(current->mm);
            if (__put_user(0, (char __user *)addr))
                goto out;
            mmap_read_lock(current->mm);
        }

        tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
        if (tocopy > n)
            tocopy = n;

        ua_flags = uaccess_save_and_enable();
        memset((void *)addr, 0, tocopy);
        uaccess_restore(ua_flags);
        addr += tocopy;
        n -= tocopy;

        if (pte)
            pte_unmap_unlock(pte, ptl);
        else
            spin_unlock(ptl);
    }
    mmap_read_unlock(current->mm);

out:
    return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
    /* See the rationale for this in arm_copy_to_user() above. */
    if (n < 64) {
        unsigned long ua_flags = uaccess_save_and_enable();
        n = __clear_user_std(addr, n);
        uaccess_restore(ua_flags);
    } else {
        n = __clear_user_memset(addr, n);
    }
    return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined threshold
 * would imply some (small but real) overhead, and so far measurements
 * on the targets concerned haven't shown a worthwhile variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
    struct page *src_page, *dst_page;
    void *user_ptr, *kernel_ptr;
    unsigned long long t0, t1, t2;
    int size, ret;

    ret = -ENOMEM;
    src_page = alloc_page(GFP_KERNEL);
    if (!src_page)
        goto no_src;
    dst_page = alloc_page(GFP_KERNEL);
    if (!dst_page)
        goto no_dst;
    kernel_ptr = page_address(src_page);
    user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
    if (!user_ptr)
        goto no_vmap;

    /* warm up the src page dcache */
    ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

    for (size = PAGE_SIZE; size >= 4; size /= 2) {
        t0 = sched_clock();
        ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
        t1 = sched_clock();
        ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
        t2 = sched_clock();
        printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
    }

    for (size = PAGE_SIZE; size >= 4; size /= 2) {
        t0 = sched_clock();
        ret |= __clear_user_memset(user_ptr, size);
        t1 = sched_clock();
        ret |= __clear_user_std(user_ptr, size);
        t2 = sched_clock();
        printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
    }

    if (ret)
        ret = -EFAULT;

    vunmap(user_ptr);
no_vmap:
    put_page(dst_page);
no_dst:
    put_page(src_page);
no_src:
    return ret;
}

subsys_initcall(test_size_threshold);

#endif