#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

#ifdef CONFIG_32BIT

#define __UA_LIMIT	0x80000000UL
#define TASK_SIZE_MAX	KSEG0

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit
#define TASK_SIZE_MAX	XKSSEG

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

#include <asm-generic/access_ok.h>

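/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This macro may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */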
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
									\
	might_fault();							\
	access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT;	\
})

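/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This macro may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */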
#define get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
									\
	might_fault();							\
	access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) :		\
				       ((x) = 0, -EFAULT);		\
})

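/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This macro may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * macro.
 *
 * Returns zero on success, or -EFAULT on error.
 */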
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__pu_ptr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(__pu_ptr);					\
	switch (sizeof(*__pu_ptr)) {					\
	case 1:								\
		__put_data_asm(user_sb, __pu_ptr);			\
		break;							\
	case 2:								\
		__put_data_asm(user_sh, __pu_ptr);			\
		break;							\
	case 4:								\
		__put_data_asm(user_sw, __pu_ptr);			\
		break;							\
	case 8:								\
		__PUT_DW(user_sd, __pu_ptr);				\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__pu_err;							\
})

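/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This macro may sleep if pagefaults are
 *	    enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * macro.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */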
#define __get_user(x, ptr)						\
({									\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	int __gu_err = 0;						\
									\
	__chk_user_ptr(__gu_ptr);					\
	switch (sizeof(*__gu_ptr)) {					\
	case 1:								\
		__get_data_asm((x), user_lb, __gu_ptr);			\
		break;							\
	case 2:								\
		__get_data_asm((x), user_lh, __gu_ptr);			\
		break;							\
	case 4:								\
		__get_data_asm((x), user_lw, __gu_ptr);			\
		break;							\
	case 8:								\
		__GET_DW((x), user_ld, __gu_ptr);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	__gu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"			\n"		\
	"2:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"3:	li	%0, %4				\n"		\
	"	move	%1, $0				\n"		\
	"	j	2b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	"__UA_ADDR "\t1b, 3b			\n"		\
	"	.previous				\n"		\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
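/*
 * Get a 64-bit quantity with two 32-bit loads on a 32-bit kernel; both
 * loads are covered by the exception table.
 */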
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"			\n"		\
	"2:	" insn("%D1", "4(%3)")"			\n"		\
	"3:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"4:	li	%0, %4				\n"		\
	"	move	%1, $0				\n"		\
	"	move	%D1, $0				\n"		\
	"	j	3b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	" __UA_ADDR "	1b, 4b			\n"		\
	"	" __UA_ADDR "	2b, 4b			\n"		\
	"	.previous				\n"		\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gu_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__get_data_asm(*(type *)(dst), kernel_lb,		\
			       (__force type *)(src));			\
		break;							\
	case 2:								\
		__get_data_asm(*(type *)(dst), kernel_lh,		\
			       (__force type *)(src));			\
		break;							\
	case 4:								\
		__get_data_asm(*(type *)(dst), kernel_lw,		\
			       (__force type *)(src));			\
		break;							\
	case 8:								\
		__GET_DW(*(type *)(dst), kernel_ld,			\
			 (__force type *)(src));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__gu_err))						\
		goto err_label;						\
} while (0)

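/*
 * Store a 64-bit quantity: a single store on 64-bit kernels, two 32-bit
 * stores on 32-bit kernels.
 */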
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"3:	li	%0, %4				\n"		\
	"	j	2b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	" __UA_ADDR "	1b, 3b			\n"		\
	"	.previous				\n"		\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"			\n"		\
	"3:						\n"		\
	"	.insn					\n"		\
	"	.section .fixup,\"ax\"			\n"		\
	"4:	li	%0, %4				\n"		\
	"	j	3b				\n"		\
	"	.previous				\n"		\
	"	.section __ex_table,\"a\"		\n"		\
	"	" __UA_ADDR "	1b, 4b			\n"		\
	"	" __UA_ADDR "	2b, 4b			\n"		\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type __pu_val;							\
	int __pu_err = 0;						\
									\
	__pu_val = *(__force type *)(src);				\
	switch (sizeof(type)) {						\
	case 1:								\
		__put_data_asm(kernel_sb, (type *)(dst));		\
		break;							\
	case 2:								\
		__put_data_asm(kernel_sh, (type *)(dst));		\
		break;							\
	case 4:								\
		__put_data_asm(kernel_sw, (type *)(dst));		\
		break;							\
	case 8:								\
		__PUT_DW(kernel_sd, (type *)(dst));			\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	if (unlikely(__pu_err))						\
		goto err_label;						\
} while (0)

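/*
 * The out-of-line helpers may end up out of range of the jal instruction
 * when this header is built into a module, so call them through a register
 * (la/jalr) in that case.
 */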
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

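/*
 * The assembler copy helpers use a non-standard calling convention: the
 * destination, source and length are passed in $4, $5 and $6, and the
 * number of bytes left uncopied is handed back in $6.
 */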
extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	register void *__cu_to_r __asm__("$4");
	register const void __user *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = to;
	__cu_from_r = from;
	__cu_len_r = n;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		__MODULE_JAL(__raw_copy_from_user)
		".set\tnoat\n\t"
		__UA_ADDU "\t$1, %1, %2\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	register void __user *__cu_to_r __asm__("$4");
	register const void *__cu_from_r __asm__("$5");
	register long __cu_len_r __asm__("$6");

	__cu_to_r = (to);
	__cu_from_r = (from);
	__cu_len_r = (n);

	__asm__ __volatile__(
		__MODULE_JAL(__raw_copy_to_user)
		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
		:
		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
		  DADDI_SCRATCH, "memory");

	return __cu_len_r;
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

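/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check the specified
 * block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */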
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
	/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: bzero_clobbers);

	return res;
}

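/*
 * clear_user: - Zero a block of memory in user space.
 * @addr: Destination address, in user space.
 * @n:	  Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */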
#define clear_user(addr, n)						\
({									\
	void __user *__cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
									\
	if (__cl_size && access_ok(__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
									\
	__cl_size;							\
})

extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
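/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *	    least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */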
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (!access_ok(__from, __len))
		return -EFAULT;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

extern long __strnlen_user_asm(const char __user *s, long n);
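/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */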
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	if (!access_ok(s, 1))
		return 0;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}

#endif /* _ASM_UACCESS_H */