// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
    long __d0;
    might_fault();
    /* no memory constraint: the asm writes no memory gcc knows about */
    stac();
    asm volatile(
        "   testq  %[size8],%[size8]\n"
        "   jz     4f\n"
        "   .align 16\n"
        "0: movq $0,(%[dst])\n"
        "   addq   $8,%[dst]\n"
        "   decl %%ecx ; jnz   0b\n"
        "4: movq  %[size1],%%rcx\n"
        "   testl %%ecx,%%ecx\n"
        "   jz     2f\n"
        "1: movb   $0,(%[dst])\n"
        "   incq   %[dst]\n"
        "   decl %%ecx ; jnz  1b\n"
        "2:\n"

        _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN8, %[size1])
        _ASM_EXTABLE_UA(1b, 2b)

        : [size8] "=&c"(size), [dst] "=&D" (__d0)
        : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
    clac();
    return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
    if (access_ok(to, n))
        return __clear_user(to, n);
    return n;
}
EXPORT_SYMBOL(clear_user);
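
/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical caller zeroing a user-supplied buffer.  clear_user()
 * returns the number of bytes that could NOT be zeroed, so any non-zero
 * result maps naturally onto -EFAULT.  The helper name and signature
 * below are invented for illustration; assumes <linux/errno.h>.
 */
static inline int example_zero_user_buf(void __user *buf, unsigned long len)
{
    if (clear_user(buf, len))   /* bytes left un-zeroed, 0 on success */
        return -EFAULT;
    return 0;
}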

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:   virtual start address
 * @size:   number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
    u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
    unsigned long clflush_mask = x86_clflush_size - 1;
    void *vend = addr + size;
    void *p;

    for (p = (void *)((unsigned long)addr & ~clflush_mask);
         p < vend; p += x86_clflush_size)
        clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
    clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
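
/*
 * Example (hypothetical, for illustration only): committing a small
 * record to persistent memory with plain cached stores, then writing
 * the dirty lines back with arch_wb_cache_pmem().  Because
 * clean_cache_range() rounds the start address down to a cache-line
 * boundary and steps in x86_clflush_size increments, an unaligned
 * (addr, size) pair still covers every touched line.  The destination,
 * record layout, and helper name are assumptions, not kernel API.
 */
static void example_commit_record(void *pmem_dst, const void *rec, size_t len)
{
    memcpy(pmem_dst, rec, len);         /* ordinary cached stores */
    arch_wb_cache_pmem(pmem_dst, len);  /* CLWB each affected line */
    wmb();  /* callers typically fence before declaring persistence */
}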

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
    unsigned long flushed, dest = (unsigned long) dst;
    long rc = __copy_user_nocache(dst, src, size, 0);

    /*
     * __copy_user_nocache() uses non-temporal stores for the bulk
     * of the transfer, but we need to manually flush if the
     * transfer is unaligned. A cached memory copy is used when
     * destination or size is not naturally aligned. That is:
     *   - Require 8-byte alignment when size is 8 bytes or larger.
     *   - Require 4-byte alignment when size is 4 bytes.
     */
    if (size < 8) {
        if (!IS_ALIGNED(dest, 4) || size != 4)
            clean_cache_range(dst, size);
    } else {
        if (!IS_ALIGNED(dest, 8)) {
            dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
            clean_cache_range(dst, 1);
        }

        flushed = dest - (unsigned long) dst;
        if (size > flushed && !IS_ALIGNED(size - flushed, 8))
            clean_cache_range(dst + size - 1, 1);
    }

    return rc;
}
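
/*
 * Example (a sketch under assumptions, not the kernel's actual call
 * site): a DAX-style write path copying user data straight into
 * persistent memory.  As with copy_from_user(), the return value of
 * __copy_user_flushcache() is taken here to be the number of bytes
 * left uncopied; the head/tail flushes above ensure the parts written
 * with cached stores also reach the persistence domain.  The function
 * name and error mapping are invented for illustration, and the caller
 * is assumed to be in a context where user access may fault.
 */
static long example_pmem_write(void *pmem_dst, const void __user *ubuf,
        unsigned int len)
{
    long uncopied = __copy_user_flushcache(pmem_dst, ubuf, len);

    wmb();  /* order the non-temporal stores before returning */
    return uncopied ? -EFAULT : 0;
}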

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
    unsigned long dest = (unsigned long) _dst;
    unsigned long source = (unsigned long) _src;

    /* cache copy and flush to align dest */
    if (!IS_ALIGNED(dest, 8)) {
        size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

        memcpy((void *) dest, (void *) source, len);
        clean_cache_range((void *) dest, len);
        dest += len;
        source += len;
        size -= len;
        if (!size)
            return;
    }

    /* 4x8 movnti loop */
    while (size >= 32) {
        asm("movq    (%0), %%r8\n"
            "movq   8(%0), %%r9\n"
            "movq  16(%0), %%r10\n"
            "movq  24(%0), %%r11\n"
            "movnti  %%r8,   (%1)\n"
            "movnti  %%r9,  8(%1)\n"
            "movnti %%r10, 16(%1)\n"
            "movnti %%r11, 24(%1)\n"
            :: "r" (source), "r" (dest)
            : "memory", "r8", "r9", "r10", "r11");
        dest += 32;
        source += 32;
        size -= 32;
    }

    /* 1x8 movnti loop */
    while (size >= 8) {
        asm("movq    (%0), %%r8\n"
            "movnti  %%r8,   (%1)\n"
            :: "r" (source), "r" (dest)
            : "memory", "r8");
        dest += 8;
        source += 8;
        size -= 8;
    }

    /* 1x4 movnti loop */
    while (size >= 4) {
        asm("movl    (%0), %%r8d\n"
            "movnti  %%r8d,   (%1)\n"
            :: "r" (source), "r" (dest)
            : "memory", "r8");
        dest += 4;
        source += 4;
        size -= 4;
    }

    /* cache copy for remaining bytes */
    if (size) {
        memcpy((void *) dest, (void *) source, size);
        clean_cache_range((void *) dest, size);
    }
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
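
/*
 * Example (illustrative only): appending to a hypothetical pmem log
 * with __memcpy_flushcache().  The copy strategy follows from the
 * destination alignment: a cached-and-flushed head up to the next
 * 8-byte boundary, then 32-byte and 8-byte movnti runs, then a flushed
 * cached tail, so the caller only needs a single store fence
 * afterwards.  The struct and names below are assumptions, not kernel
 * API.
 */
struct example_pmem_log {
    void    *base;  /* start of the pmem region */
    size_t  tail;   /* current append offset */
};

static void example_log_append(struct example_pmem_log *log,
        const void *data, size_t len)
{
    __memcpy_flushcache(log->base + log->tail, data, len);
    wmb();  /* order the appended bytes before publishing the new tail */
    log->tail += len;
}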

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
        size_t len)
{
    char *from = kmap_atomic(page);

    memcpy_flushcache(to, from + offset, len);
    kunmap_atomic(from);
}
#endif