/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
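
/*
 * Annotation (not in the original header): all three variants share a
 * memcpy-like signature and return the number of bytes that could NOT
 * be copied, so 0 means full success. The unrolled copy is the
 * baseline; the "string" variants depend on fast REP MOVS support.
 */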

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
    unsigned ret;

    /*
     * If the CPU has the ERMS feature, use copy_user_enhanced_fast_string.
     * Otherwise, if the CPU has the rep_good feature, use
     * copy_user_generic_string.
     * Otherwise, fall back to copy_user_generic_unrolled.
     */
    alternative_call_2(copy_user_generic_unrolled,
             copy_user_generic_string,
             X86_FEATURE_REP_GOOD,
             copy_user_enhanced_fast_string,
             X86_FEATURE_ERMS,
             ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                     "=d" (len)),
             "1" (to), "2" (from), "3" (len)
             : "memory", "rcx", "r8", "r9", "r10", "r11");
    return ret;
}
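
/*
 * Annotation (not in the original header): in the alternative_call_2()
 * above, "=a"/"=D"/"=S"/"=d" pin ret/to/from/len to RAX/RDI/RSI/RDX,
 * and the "1"/"2"/"3" inputs tie the arguments to those same output
 * registers. That matches the System V calling convention for a
 * memcpy-like function, so whichever implementation gets patched in
 * can be called directly with no register shuffling; the clobber list
 * names the scratch registers those implementations may use.
 */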

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
    return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
    return copy_user_generic((__force void *)dst, src, size);
}
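
/*
 * Hedged usage sketch (not part of this header; struct and function
 * names are hypothetical): callers normally reach the raw_* helpers
 * above through copy_from_user()/copy_to_user(), which add the
 * access_ok() range check and likewise return the number of bytes
 * left uncopied.
 */
#if 0
static long demo_read_args(struct demo_args *kbuf, const void __user *uarg)
{
    /* a non-zero return means some bytes faulted: report -EFAULT */
    if (copy_from_user(kbuf, uarg, sizeof(*kbuf)))
        return -EFAULT;
    return 0;
}
#endif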

extern long __copy_user_nocache(void *dst, const void __user *src,
                unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
               size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                  unsigned size)
{
    kasan_check_write(dst, size);
    return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
    kasan_check_write(dst, size);
    return __copy_user_flushcache(dst, src, size);
}
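
/*
 * Hedged usage sketch (not part of this header; names are
 * hypothetical): the _nocache/_flushcache variants use non-temporal
 * stores and/or cache flushes so the copied data is not left dirty in
 * the CPU caches, which persistent-memory (pmem) write paths rely on
 * for durability.
 */
#if 0
static int demo_pmem_write(void *pmem_dst, const void __user *ubuf,
               unsigned size)
{
    /* non-zero return: some bytes could not be copied in */
    if (__copy_from_user_flushcache(pmem_dst, ubuf, size))
        return -EFAULT;
    return 0;
}
#endif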

#endif /* _ASM_X86_UACCESS_64_H */