/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()   \
    WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)                   \
({                                  \
    WARN_ON_IN_IRQ();                       \
    likely(__access_ok(addr, size));                \
})
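
/*
 * Usage sketch (illustrative, not part of this header): a hypothetical
 * helper that validates a user pointer once with access_ok() and then
 * uses a raw "__"-prefixed accessor, which skips the check itself:
 *
 *	static int read_checked(const int __user *uptr, int *out)
 *	{
 *		if (!access_ok(uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		return __get_user(*out, uptr);
 *	}
 */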

#include <asm-generic/access_ok.h>

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()    \
({                  \
    stac();             \
    barrier_nospec();       \
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(        \
    __typefits(x,char,          \
      __typefits(x,short,           \
        __typefits(x,int,           \
          __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
    __builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)

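/*
 * Worked example (added for illustration): with the usual x86 type
 * sizes, the chain picks the first unsigned type big enough for x:
 *
 *	__inttype(u8  v)  -> unsigned char
 *	__inttype(u16 v)  -> unsigned short
 *	__inttype(u32 v)  -> unsigned int
 *	__inttype(u64 v)  -> unsigned long on 64-bit kernels, and the
 *			     0ULL fallback (unsigned long long) on
 *			     32-bit, where sizeof(long) == 4.
 */
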
/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)                  \
({                                  \
    int __ret_gu;                           \
    register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);        \
    __chk_user_ptr(ptr);                        \
    asm volatile("call __" #fn "_%P4"               \
             : "=a" (__ret_gu), "=r" (__val_gu),        \
            ASM_CALL_CONSTRAINT             \
             : "0" (ptr), "i" (sizeof(*(ptr))));        \
    (x) = (__force __typeof__(*(ptr))) __val_gu;            \
    __builtin_expect(__ret_gu, 0);                  \
})

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
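
/*
 * Usage sketch (hypothetical caller): fetch one value; get_user()
 * performs the access_ok() check itself, so no separate check is
 * needed:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */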

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
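
/*
 * Usage sketch (hypothetical): one access_ok() over a whole user array
 * amortizes the check, so each element can use the cheaper
 * __get_user():
 *
 *	u32 tmp, sum = 0;
 *	size_t i;
 *
 *	if (!access_ok(arr, n * sizeof(*arr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(tmp, arr + i))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */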


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)         \
    asm_volatile_goto("\n"                  \
             "1:    movl %%eax,0(%1)\n"     \
             "2:    movl %%edx,4(%1)\n"     \
             _ASM_EXTABLE_UA(1b, %l2)           \
             _ASM_EXTABLE_UA(2b, %l2)           \
             : : "A" (x), "r" (addr)            \
             : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
    __put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)                  \
({                                  \
    int __ret_pu;                           \
    void __user *__ptr_pu;                      \
    register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);       \
    __chk_user_ptr(ptr);                        \
    __ptr_pu = (ptr);                       \
    __val_pu = (x);                         \
    asm volatile("call __" #fn "_%P[size]"              \
             : "=c" (__ret_pu),                 \
            ASM_CALL_CONSTRAINT             \
             : "0" (__ptr_pu),                  \
               "r" (__val_pu),                  \
               [size] "i" (sizeof(*(ptr)))          \
             :"ebx");                       \
    __builtin_expect(__ret_pu, 0);                  \
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
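
/*
 * Usage sketch (hypothetical caller): write one value; put_user()
 * checks the pointer with access_ok() itself:
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */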

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
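
/*
 * Usage sketch (hypothetical): like __get_user(), this relies on the
 * caller having already done the access_ok() check:
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__put_user(val, uptr))
 *		return -EFAULT;
 */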

#define __put_user_size(x, ptr, size, label)                \
do {                                    \
    __chk_user_ptr(ptr);                        \
    switch (size) {                         \
    case 1:                             \
        __put_user_goto(x, ptr, "b", "iq", label);      \
        break;                          \
    case 2:                             \
        __put_user_goto(x, ptr, "w", "ir", label);      \
        break;                          \
    case 4:                             \
        __put_user_goto(x, ptr, "l", "ir", label);      \
        break;                          \
    case 8:                             \
        __put_user_goto_u64(x, ptr, label);         \
        break;                          \
    default:                            \
        __put_user_bad();                   \
    }                               \
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {              \
    unsigned int __gu_low, __gu_high;               \
    const unsigned int __user *__gu_ptr;                \
    __gu_ptr = (const void __user *)(ptr);              \
    __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);       \
    __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);    \
    (x) = ((unsigned long long)__gu_high << 32) | __gu_low;     \
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)               \
    __get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)                \
do {                                    \
    __chk_user_ptr(ptr);                        \
    switch (size) {                         \
    case 1: {                           \
        unsigned char x_u8__;                   \
        __get_user_asm(x_u8__, ptr, "b", "=q", label);      \
        (x) = x_u8__;                       \
        break;                          \
    }                               \
    case 2:                             \
        __get_user_asm(x, ptr, "w", "=r", label);       \
        break;                          \
    case 4:                             \
        __get_user_asm(x, ptr, "l", "=r", label);       \
        break;                          \
    case 8:                             \
        __get_user_asm_u64(x, ptr, label);          \
        break;                          \
    default:                            \
        (x) = __get_user_bad();                 \
    }                               \
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)            \
    asm_volatile_goto("\n"                      \
             "1:    mov"itype" %[umem],%[output]\n"     \
             _ASM_EXTABLE_UA(1b, %l2)               \
             : [output] ltype(x)                \
             : [umem] "m" (__m(addr))               \
             : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)              \
({                                  \
    __typeof__(ptr) __ptr = (ptr);                  \
    asm volatile("\n"                       \
             "1:    movl %[lowbits],%%eax\n"        \
             "2:    movl %[highbits],%%edx\n"       \
             "3:\n"                     \
             _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG | \
                       EX_FLAG_CLEAR_AX_DX,     \
                       %[errout])           \
             _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG | \
                       EX_FLAG_CLEAR_AX_DX,     \
                       %[errout])           \
             : [errout] "=r" (retval),              \
               [output] "=&A"(x)                \
             : [lowbits] "m" (__m(__ptr)),          \
               [highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
               "0" (retval));                   \
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
     __get_user_asm(x, ptr, retval, "q")
#endif

#define __get_user_size(x, ptr, size, retval)               \
do {                                    \
    unsigned char x_u8__;                       \
                                    \
    retval = 0;                         \
    __chk_user_ptr(ptr);                        \
    switch (size) {                         \
    case 1:                             \
        __get_user_asm(x_u8__, ptr, retval, "b");       \
        (x) = x_u8__;                       \
        break;                          \
    case 2:                             \
        __get_user_asm(x, ptr, retval, "w");            \
        break;                          \
    case 4:                             \
        __get_user_asm(x, ptr, retval, "l");            \
        break;                          \
    case 8:                             \
        __get_user_asm_u64(x, ptr, retval);         \
        break;                          \
    default:                            \
        (x) = __get_user_bad();                 \
    }                               \
} while (0)

#define __get_user_asm(x, addr, err, itype)             \
    asm volatile("\n"                       \
             "1:    mov"itype" %[umem],%[output]\n"     \
             "2:\n"                     \
             _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
                       EX_FLAG_CLEAR_AX,        \
                       %[errout])           \
             : [errout] "=r" (err),             \
               [output] "=a" (x)                \
             : [umem] "m" (__m(addr)),              \
               "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)  ({ \
    bool success;                           \
    __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);      \
    __typeof__(*(_ptr)) __old = *_old;              \
    __typeof__(*(_ptr)) __new = (_new);             \
    asm_volatile_goto("\n"                      \
             "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
             _ASM_EXTABLE_UA(1b, %l[label])         \
             : CC_OUT(z) (success),             \
               [ptr] "+m" (*_ptr),              \
               [old] "+a" (__old)               \
             : [new] ltype (__new)              \
             : "memory"                     \
             : label);                      \
    if (unlikely(!success))                     \
        *_old = __old;                      \
    likely(success);                    })

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)  ({  \
    bool success;                           \
    __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);      \
    __typeof__(*(_ptr)) __old = *_old;              \
    __typeof__(*(_ptr)) __new = (_new);             \
    asm_volatile_goto("\n"                      \
             "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"     \
             _ASM_EXTABLE_UA(1b, %l[label])         \
             : CC_OUT(z) (success),             \
               "+A" (__old),                    \
               [ptr] "+m" (*_ptr)               \
             : "b" ((u32)__new),                \
               "c" ((u32)((u64)__new >> 32))            \
             : "memory"                     \
             : label);                      \
    if (unlikely(!success))                     \
        *_old = __old;                      \
    likely(success);                    })
#endif // CONFIG_X86_32
#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)  ({ \
    int __err = 0;                          \
    bool success;                           \
    __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);      \
    __typeof__(*(_ptr)) __old = *_old;              \
    __typeof__(*(_ptr)) __new = (_new);             \
    asm volatile("\n"                       \
             "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
             CC_SET(z)                      \
             "2:\n"                     \
             _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,  \
                       %[errout])           \
             : CC_OUT(z) (success),             \
               [errout] "+r" (__err),               \
               [ptr] "+m" (*_ptr),              \
               [old] "+a" (__old)               \
             : [new] ltype (__new)              \
             : "memory");                   \
    if (unlikely(__err))                        \
        goto label;                     \
    if (unlikely(!success))                     \
        *_old = __old;                      \
    likely(success);                    })

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)  ({  \
    int __result;                           \
    __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);      \
    __typeof__(*(_ptr)) __old = *_old;              \
    __typeof__(*(_ptr)) __new = (_new);             \
    asm volatile("\n"                       \
             "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"     \
             "mov $0, %%ecx\n\t"                \
             "setz %%cl\n"                  \
             "2:\n"                     \
             _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
             : [result]"=c" (__result),             \
               "+A" (__old),                    \
               [ptr] "+m" (*_ptr)               \
             : "b" ((u32)__new),                \
               "c" ((u32)((u64)__new >> 32))            \
             : "memory", "cc");                 \
    if (unlikely(__result < 0))                 \
        goto label;                     \
    if (unlikely(!__result))                    \
        *_old = __old;                      \
    likely(__result);                   })
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)           \
    asm_volatile_goto("\n"                      \
        "1: mov"itype" %0,%1\n"             \
        _ASM_EXTABLE_UA(1b, %l2)                \
        : : ltype(x), "m" (__m(addr))               \
        : : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
    int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
    if (unlikely(!access_ok(ptr,len)))
        return 0;
    __uaccess_begin_nospec();
    return 1;
}
#define user_access_begin(a,b)  user_access_begin(a,b)
#define user_access_end()   __uaccess_end()

#define user_access_save()  smap_save()
#define user_access_restore(x)  smap_restore(x)

#define unsafe_put_user(x, ptr, label)  \
    __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)                  \
do {                                        \
    __inttype(*(ptr)) __gu_val;                     \
    __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);        \
    (x) = (__force __typeof__(*(ptr)))__gu_val;             \
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)                  \
do {                                        \
    int __gu_err;                               \
    __inttype(*(ptr)) __gu_val;                     \
    __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);     \
    (x) = (__force __typeof__(*(ptr)))__gu_val;             \
    if (unlikely(__gu_err)) goto err_label;                 \
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
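
/*
 * Usage sketch (hypothetical): the unsafe accessors rely on the caller
 * opening an access section with user_access_begin() (which also does
 * the access_ok() check) and routing faults to a label that closes it:
 *
 *	int val;
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, efault);
 *	unsafe_put_user(val + 1, uptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */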

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)        \
    __try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({          \
    bool __ret;                             \
    __chk_user_ptr(_ptr);                           \
    switch (sizeof(*(_ptr))) {                      \
    case 1: __ret = __try_cmpxchg_user_asm("b", "q",            \
                           (__force u8 *)(_ptr), (_oldp),   \
                           (_nval), _label);        \
        break;                              \
    case 2: __ret = __try_cmpxchg_user_asm("w", "r",            \
                           (__force u16 *)(_ptr), (_oldp),  \
                           (_nval), _label);        \
        break;                              \
    case 4: __ret = __try_cmpxchg_user_asm("l", "r",            \
                           (__force u32 *)(_ptr), (_oldp),  \
                           (_nval), _label);        \
        break;                              \
    case 8: __ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
                         (_nval), _label);      \
        break;                              \
    default: __try_cmpxchg_user_wrong_size();               \
    }                                   \
    __ret;                      })

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)  ({      \
    int __ret = -EFAULT;                        \
    __uaccess_begin_nospec();                   \
    __ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);   \
_label:                                 \
    __uaccess_end();                        \
    __ret;                              \
                            })
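
/*
 * Usage sketch (hypothetical): the caller passes a fresh label name,
 * which the macro itself defines, so it only needs to be unique within
 * the enclosing function:
 *
 *	u32 expected = old_val;
 *	int ret;
 *
 *	ret = __try_cmpxchg_user(uptr, &expected, new_val, cmpx_fault);
 *
 * ret == 0: swapped; ret == 1: *uptr differed and 'expected' now holds
 * the current value; ret == -EFAULT: the access faulted.
 */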

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)                \
    while (len >= sizeof(type)) {                       \
        unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
        dst += sizeof(type);                        \
        src += sizeof(type);                        \
        len -= sizeof(type);                        \
    }

#define unsafe_copy_to_user(_dst,_src,_len,label)           \
do {                                    \
    char __user *__ucu_dst = (_dst);                \
    const char *__ucu_src = (_src);                 \
    size_t __ucu_len = (_len);                  \
    unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);  \
    unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);  \
    unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);  \
    unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);   \
} while (0)
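
/*
 * Usage sketch (hypothetical): like the other unsafe accessors, this
 * expects an open user_access_begin() section and a fault label:
 *
 *	if (!user_access_begin(udst, len))
 *		return -EFAULT;
 *	unsafe_copy_to_user(udst, ksrc, len, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */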

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)         \
    __get_user_size(*((type *)(dst)), (__force type __user *)(src), \
            sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)         \
do {                                    \
    int __kr_err;                           \
                                    \
    __get_user_size(*((type *)(dst)), (__force type __user *)(src), \
            sizeof(type), __kr_err);            \
    if (unlikely(__kr_err))                     \
        goto err_label;                     \
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)         \
    __put_user_size(*((type *)(src)), (__force type __user *)(dst), \
            sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */