0001
0002
0003
0004
0005 #ifndef _ASMARM_UACCESS_H
0006 #define _ASMARM_UACCESS_H
0007
0008
0009
0010
0011 #include <linux/string.h>
0012 #include <asm/memory.h>
0013 #include <asm/domain.h>
0014 #include <asm/unaligned.h>
0015 #include <asm/unified.h>
0016 #include <asm/compiler.h>
0017
0018 #include <asm/extable.h>
0019
0020
0021
0022
0023
0024
0025
/*
 * Temporarily grant the kernel access to userspace memory.
 *
 * With CONFIG_CPU_SW_DOMAIN_PAN, userspace is normally mapped in a CPU
 * domain set to "no access" so that stray kernel dereferences of user
 * pointers fault.  This helper switches the USER domain to CLIENT mode
 * (permission-checked access) and returns the previous domain register
 * value so the caller can restore it with uaccess_restore().
 *
 * Without software PAN this is a no-op and returns 0.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}
0040
/*
 * Close the uaccess window opened by uaccess_save_and_enable().
 *
 * @flags: the value returned by the matching uaccess_save_and_enable()
 *         call (the saved domain access register contents).
 */
static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
0048
0049
0050
0051
0052
0053 extern int __get_user_bad(void);
0054 extern int __put_user_bad(void);
0055
0056 #ifdef CONFIG_MMU
0057
0058
0059
0060
0061
/*
 * An integer type wide enough to hold (x): unsigned long if the value
 * fits, otherwise unsigned long long.  Used to declare the register
 * variable that receives the result of the get_user helpers.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
0064
0065
0066
0067
0068
/*
 * Sanitise a uaccess pointer: the returned pointer equals @ptr when the
 * whole range [ptr, ptr + size) lies below TASK_SIZE, and is forced to
 * NULL otherwise.  Combined with the csdb() speculation barrier this
 * prevents a mispredicted bounds check from speculatively dereferencing
 * a kernel address (Spectre variant 1 mitigation).
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	/* tmp = (TASK_SIZE - 1) - ptr; carry clear (lo) if ptr > TASK_SIZE-1 */
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	/* if ptr was in range: tmp = bytes available; check tmp >= size */
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	/* any borrow above means the range escapes TASK_SIZE: NULL it */
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (TASK_SIZE)
	: "cc");

	/* Ensure the NULLing is not bypassed speculatively */
	csdb();
	return safe_ptr;
}
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
/*
 * Out-of-line get_user helpers, implemented in assembler (presumably
 * arch/arm/lib/getuser.S -- TODO confirm).  ABI per the inline asm
 * below: pointer in r0, address limit in r1; value returned in r2
 * (r2/r3 for 64-bit), error code in r0.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

/*
 * Registers clobbered by each helper, keyed by access size.  The
 * 2-byte helper additionally clobbers ip when CONFIG_CPU_USE_DOMAINS
 * is enabled (NOTE(review): inferred from the clobber list alone --
 * verify against the assembler implementation).
 */
#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

/*
 * Call the out-of-line helper for a __s-byte load.  The __asmeq()
 * checks assert at build time that the compiler honoured the explicit
 * register bindings (r0 = pointer in / error out, r1 = limit,
 * r2 = value out) required by the helper ABI.
 */
#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
0130
0131
/*
 * Narrowing a 64-bit user load into a 32-bit destination: on big-endian
 * the significant (low) word is the second one in memory, so a
 * dedicated 8-byte helper is needed; little-endian can simply read the
 * first word with the ordinary 4-byte helper.
 */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)		\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * Widening a 1/2/4-byte user load into a 64-bit destination register
 * pair: on big-endian the result must land in the proper (least
 * significant) half of r2/r3, which the __get_user_64t_* helpers take
 * care of; little-endian needs no special handling.
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)		        \
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif
0155
0156
/*
 * Fetch *(p) into x with full address-range checking performed by the
 * out-of-line helpers (limit passed in r1).  Evaluates to 0 on success
 * or -EFAULT on failure; on failure x is zeroed by the helper.
 *
 * The dispatch is on the *pointee* size; the 64t/32t variants handle
 * the cases where sizeof(x) differs from sizeof(*(p)).
 */
#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		int __tmp_e;						\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		} \
		/* copy the error code out of r0 before it can be clobbered \
		 * by the restore / assignment code below */		\
		__tmp_e = __e;						\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__tmp_e;						\
	})
0198
/*
 * get_user(x, p): fetch the value at user pointer p into x, with full
 * range checking.  Returns 0 on success, -EFAULT on fault (x zeroed).
 * May sleep, hence might_fault().
 */
#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
0204
/*
 * Out-of-line put_user helpers (assembler; presumably
 * arch/arm/lib/putuser.S -- TODO confirm).  ABI per the inline asm
 * below: pointer in r0, value in r2 (r2/r3 for 64-bit), limit in r1;
 * error code returned in r0.
 */
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

/*
 * Store __pu_val to user pointer __ptr with range checking done by the
 * out-of-line __put_user_<__s> helper; 0 or -EFAULT lands in __err.
 */
#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})
0226
0227 #else
0228
/*
 * noMMU: there is no separate user address space to validate, so the
 * checking variants simply alias the unchecked ones.
 */
#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck
0231
0232 #endif
0233
0234 #include <asm-generic/access_ok.h>
0235
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the
 * non-verifying accessors, because we need to add verification of the
 * address space there anyway.  Force these to use the standard
 * get_user() version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
0244 #else
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
/*
 * __get_user(x, ptr): fetch *(ptr) into x WITHOUT range checking the
 * pointer -- the caller must have validated it (e.g. via access_ok()).
 * Evaluates to 0 on success or the fault error code.
 */
#define __get_user(x, ptr)						\
	({								\
		long __gu_err = 0;					\
		__get_user_err((x), (ptr), __gu_err, TUSER());		\
		__gu_err;						\
	})
0261
/*
 * Worker for __get_user(): opens the uaccess window, dispatches on the
 * pointee size to the appropriate inline-asm loader, then restores the
 * window and assigns the (possibly zeroed-on-fault) value to x.
 * __t selects the instruction suffix (TUSER() for userspace, "" for
 * kernel addresses in the nofault helpers below).
 */
#define __get_user_err(x, ptr, err, __t)				\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break;	\
	/* undefined extern: invalid sizes fail at link time */		\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)
0279 #endif
0280
/*
 * Single-instruction user load with exception-table fixup.  Label 1 is
 * the faultable load; the __ex_table entry redirects a fault at 1 to
 * the fixup at 3, which sets err = -EFAULT, zeroes the destination and
 * resumes at 2.  On success err is left untouched.
 */
#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
0298
#define __get_user_asm_byte(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrb" __t)

#if __LINUX_ARM_ARCH__ >= 6

/* ARMv6+ can load a halfword directly (ldrh handles user alignment) */
#define __get_user_asm_half(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrh" __t)

#else

/*
 * Pre-ARMv6: assemble the halfword from two byte loads, combining
 * them according to the CPU endianness.  Note err accumulates across
 * both loads.
 */
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif

#define __get_user_asm_word(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldr" __t)
0331
/*
 * Common dispatcher for put_user()/__put_user(): opens the uaccess
 * window and hands off to __fn (__put_user_check or __put_user_nocheck)
 * based on the pointee size.  x is evaluated exactly once, before the
 * window is opened.
 */
#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)
0348
/*
 * put_user(x, ptr): store x at user pointer ptr with full range
 * checking.  Returns 0 on success, -EFAULT on fault.  May sleep.
 */
#define put_user(x, ptr)						\
	({								\
		int __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
		__pu_err;						\
	})
0355
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space, so just use the checking
 * put_user() for the "unchecked" variant too.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
/*
 * __put_user(x, ptr): store without range checking -- the caller must
 * have validated the pointer (e.g. via access_ok()).
 */
#define __put_user(x, ptr)						\
	({								\
		long __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
		__pu_err;						\
	})

/* Route a size-N unchecked store to the matching inline-asm writer */
#define __put_user_nocheck(x, __pu_ptr, __err, __size)		\
	do {							\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */
0383
/*
 * Single-instruction user store with exception-table fixup: a fault at
 * label 1 branches to the fixup at 3, which sets err = -EFAULT and
 * resumes at 2.  Unlike the load variant there is no value to zero.
 */
#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
0400
#define __put_user_asm_byte(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strb" __t)

#if __LINUX_ARM_ARCH__ >= 6

/* ARMv6+ can store a halfword directly */
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strh" __t)

#else

/*
 * Pre-ARMv6: split the halfword into two byte stores, ordered per the
 * CPU endianness.  err accumulates across both stores.
 */
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "str" __t)
0431
/*
 * Operand modifiers for the 64-bit store below: %R = high half, %Q =
 * low half of a 64-bit operand.  Select which half is stored first so
 * the in-memory byte order matches the CPU endianness.
 */
#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif
0439
/*
 * 64-bit user store as two word stores, each with its own exception
 * table entry (a fault at either 1 or 2 lands in the fixup at 4).
 * ARM mode uses post-increment addressing, Thumb uses an immediate
 * offset for the second word; either way __pu_addr may be modified,
 * hence the "+r" constraint.
 */
#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
	__asm__ __volatile__(					\
 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"  ) \
 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"      ) \
 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"      ) \
 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"  ) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")
0460
/*
 * Non-faulting read of a kernel address: uses the same extable-backed
 * loaders as __get_user_err but with an empty instruction suffix (no
 * TUSER) and no uaccess window, since src is a kernel pointer.  On any
 * fault control transfers to err_label.  A 64-bit read is done as two
 * word reads, stopping after the first faulting word; note dst is
 * written (possibly with a partially-read value) before the error
 * branch is taken.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (src);					\
	unsigned long __src = (unsigned long)(__pk_ptr);		\
	type __val;							\
	int __err = 0;							\
	switch (sizeof(type)) {						\
	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
	case 8: {							\
		/* copy word-by-word; memory order is endian-neutral */	\
		u32 *__v32 = (u32*)&__val;				\
		__get_user_asm_word(__v32[0], __src, __err, "");	\
		if (__err)						\
			break;						\
		__get_user_asm_word(__v32[1], __src+4, __err, "");	\
		break;							\
	}								\
	default: __err = __get_user_bad(); break;			\
	}								\
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))		\
		put_unaligned(__val, (type *)(dst));			\
	else								\
		*(type *)(dst) = __val; /* fallback: dst assumed aligned */ \
	if (__err)							\
		goto err_label;						\
} while (0)
0488
/*
 * Non-faulting write to a kernel address: mirror of
 * __get_kernel_nofault using the extable-backed store macros with an
 * empty instruction suffix and no uaccess window.  The source value is
 * read with get_unaligned() when the platform allows, otherwise src is
 * assumed suitably aligned.  On any fault control goes to err_label.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (dst);					\
	unsigned long __dst = (unsigned long)__pk_ptr;			\
	int __err = 0;							\
	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)	\
		     ? get_unaligned((type *)(src))			\
		     : *(type *)(src);					\
	switch (sizeof(type)) {						\
	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
	default: __err = __put_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
} while (0)
0507
0508 #ifdef CONFIG_MMU
0509 extern unsigned long __must_check
0510 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
0511
0512 static inline unsigned long __must_check
0513 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
0514 {
0515 unsigned int __ua_flags;
0516
0517 __ua_flags = uaccess_save_and_enable();
0518 n = arm_copy_from_user(to, from, n);
0519 uaccess_restore(__ua_flags);
0520 return n;
0521 }
0522
extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

/*
 * Copy @n bytes from kernel buffer @from to userspace @to.  Returns
 * the number of bytes NOT copied (0 on full success).
 *
 * With CONFIG_UACCESS_WITH_MEMCPY the uaccess enable/restore is not
 * done here -- presumably the memcpy-based arm_copy_to_user manages it
 * internally (NOTE(review): confirm against its implementation).
 */
static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}
0541
0542 extern unsigned long __must_check
0543 arm_clear_user(void __user *addr, unsigned long n);
0544 extern unsigned long __must_check
0545 __clear_user_std(void __user *addr, unsigned long n);
0546
0547 static inline unsigned long __must_check
0548 __clear_user(void __user *addr, unsigned long n)
0549 {
0550 unsigned int __ua_flags = uaccess_save_and_enable();
0551 n = arm_clear_user(addr, n);
0552 uaccess_restore(__ua_flags);
0553 return n;
0554 }
0555
0556 #else
/*
 * noMMU fallbacks: user and kernel share one address space, so the
 * copies are plain memcpy/memset and can never partially fail.
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
0570 #endif
0571 #define INLINE_COPY_TO_USER
0572 #define INLINE_COPY_FROM_USER
0573
0574 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
0575 {
0576 if (access_ok(to, n))
0577 n = __clear_user(to, n);
0578 return n;
0579 }
0580
0581
0582 extern long strncpy_from_user(char *dest, const char __user *src, long count);
0583
0584 extern __must_check long strnlen_user(const char __user *str, long n);
0585
0586 #endif