0001
0002 #ifndef _ARCH_POWERPC_UACCESS_H
0003 #define _ARCH_POWERPC_UACCESS_H
0004
0005 #include <asm/processor.h>
0006 #include <asm/page.h>
0007 #include <asm/extable.h>
0008 #include <asm/kup.h>
0009
#ifdef __powerpc64__
/* Upper bound used by access_ok(): the 64-bit user address-space limit. */
#define TASK_SIZE_MAX TASK_SIZE_USER64
#endif
0014
0015 #include <asm-generic/access_ok.h>
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
/*
 * __put_user(): store @x at user address @ptr without an access_ok() check.
 *
 * Evaluates to 0 on success or -EFAULT if the store faults.  The inner
 * do/while(0) with a local label exists so that both the success path and
 * the fault path (reached via the exception table set up by
 * __put_user_size_goto()) run prevent_write_to_user(), i.e. user access
 * (KUAP) is always closed again.
 */
#define __put_user(x, ptr)						\
({									\
	long __pu_err;							\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x);		\
	__typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr));		\
									\
	might_fault();							\
	do {								\
		__label__ __pu_failed;					\
									\
		allow_write_to_user(__pu_addr, __pu_size);		\
		__put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed);	\
		prevent_write_to_user(__pu_addr, __pu_size);		\
		__pu_err = 0;						\
		break;							\
									\
__pu_failed:								\
		prevent_write_to_user(__pu_addr, __pu_size);		\
		__pu_err = -EFAULT;					\
	} while (0);							\
									\
	__pu_err;							\
})
0060
/*
 * put_user(): checked store to user space.  Validates the range with
 * access_ok() first; returns -EFAULT without touching memory when the
 * range is bad, otherwise behaves like __put_user().  @ptr is evaluated
 * exactly once.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_pu_addr = (ptr);			\
									\
	access_ok(_pu_addr, sizeof(*(ptr))) ?				\
		 __put_user(x, _pu_addr) : -EFAULT;			\
})
0068
0069
0070
0071
0072
0073
/*
 * Single-instruction user store using asm goto: @op is the store mnemonic
 * (stb/sth/stw/std).  %0 is the value, %1 the user memory operand ("m<>"
 * allows update-form addressing, %U1/%X1 emit the u/x mnemonic suffixes).
 * EX_TABLE registers insn 1: so a fault branches to @label instead of
 * oopsing.  No KUAP handling here — callers must have opened user access.
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1: " op "%U1%X1 %0,%1 # put_user\n"		\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m<>" (*addr)			\
		:						\
		: label)
0082
#ifdef __powerpc64__
/* 64-bit: one doubleword store covers the 8-byte case. */
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
/*
 * 32-bit: an 8-byte value takes two word stores.  %L0/%L1 select the
 * second register of the pair / the high word of the memory operand.
 * Either store can fault, so both instructions get an exception-table
 * entry branching to @label.
 */
#define __put_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1: stw%X1 %0, %1\n"				\
		"2: stw%X1 %L0, %L1\n"				\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */
0098
/*
 * Dispatch a user store by @size (compile-time constant 1/2/4/8) to the
 * matching store instruction; jumps to @label on fault.  A non-constant
 * or unsupported size is a build error.
 */
#define __put_user_size_goto(x, ptr, size, label)		\
do {								\
	__typeof__(*(ptr)) __user *__pus_addr = (ptr);		\
								\
	switch (size) {						\
	case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, __pus_addr, label); break;		\
	default: BUILD_BUG();					\
	}							\
} while (0)
0111
0112
0113
0114
0115
/*
 * Copy one 16-byte quantity from user @uaddr to kernel @kaddr in a single
 * VMX lvx/stvx pair (clobbers vr0).  lvx/stvx ignore the low address bits,
 * so both pointers must be 16-byte aligned; the caller must also have VMX
 * enabled and user access open.  On a fault at the user load (insn 1:),
 * the .fixup stub sets @err to -EFAULT and resumes at 2:.
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1: lvx 0,0,%1 # get user\n"		\
		" stvx 0,0,%2 # put kernel\n"		\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%3\n"				\
		" b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)			\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
0131
0132 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
0133
/*
 * Single-instruction user load as asm goto with an output operand
 * (requires CONFIG_CC_HAS_ASM_GOTO_OUTPUT).  @op is the load mnemonic;
 * on a fault, control transfers to @label and %0 is left unspecified —
 * callers zero the destination on the failure path.
 */
#define __get_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1: "op"%U1%X1 %0, %1 # get_user\n"		\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: "m<>" (*addr)					\
		:						\
		: label)
0142
#ifdef __powerpc64__
/* 64-bit: one doubleword load covers the 8-byte case. */
#define __get_user_asm2_goto(x, addr, label)			\
	__get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
/*
 * 32-bit: 8 bytes take two word loads into a register pair (%L0 is the
 * second register).  "=&r" (early-clobber) keeps the destination pair
 * from overlapping the address register between the two loads.
 */
#define __get_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1: lwz%X1 %0, %1\n"				\
		"2: lwz%X1 %L0, %L1\n"				\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		: "=&r" (x)					\
		: "m" (*addr)					\
		:						\
		: label)
#endif /* __powerpc64__ */
0158
/*
 * Dispatch a user load by @size to the matching load instruction; jumps
 * to @label on fault.  BUILD_BUG_ON rejects a destination narrower than
 * the access, which would silently truncate.
 */
#define __get_user_size_goto(x, ptr, size, label)				\
do {										\
	BUILD_BUG_ON(size > sizeof(x));						\
	switch (size) {								\
	case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break;	\
	case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break;	\
	case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break;	\
	case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break;	\
	default: x = 0; BUILD_BUG();						\
	}									\
} while (0)
0170
/*
 * Error-code flavour built on the goto flavour: @retval becomes 0 on
 * success or -EFAULT on fault (with @x zeroed).  The break exits the
 * enclosing do/while so the success path skips the __gus_failed label.
 */
#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	__label__ __gus_failed;					\
								\
	__get_user_size_goto(x, ptr, size, __gus_failed);	\
	retval = 0;						\
	break;							\
__gus_failed:							\
	x = 0;							\
	retval = -EFAULT;					\
} while (0)
0182
0183 #else
0184
/*
 * Fallback user load for compilers without asm-goto output operands.
 * On a fault at insn 1:, the .fixup stub stores -EFAULT in @err, zeroes
 * the destination register, and resumes at 2:.  "0" (err) ties the input
 * to output %0 so a pre-set @err is preserved on success.
 */
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1: "op"%U2%X2 %1, %2 # get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3: li %0,%3\n"				\
		" li %1,0\n"				\
		" b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "m<>" (*addr), "i" (-EFAULT), "0" (err))
0197
#ifdef __powerpc64__
/* 64-bit: one doubleword load covers the 8-byte case. */
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
/*
 * 32-bit fallback: two word loads into a register pair; the fixup zeroes
 * both halves ("%1+1" is the second register of the pair) and sets @err
 * to -EFAULT.  "=&r" keeps the pair from aliasing the address operand.
 */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1: lwz%X2 %1, %2\n"			\
		"2: lwz%X2 %L1, %L2\n"			\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4: li %0,%3\n"				\
		" li %1,0\n"				\
		" li %1+1,0\n"				\
		" b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
0218
/*
 * Fallback dispatch by @size for the non-asm-goto path: @retval is 0 on
 * success or -EFAULT (set by the asm fixup) on fault.  Unsupported sizes
 * are a build error; a too-narrow destination trips BUILD_BUG_ON.
 */
#define __get_user_size_allowed(x, ptr, size, retval)				\
do {										\
	retval = 0;								\
	BUILD_BUG_ON(size > sizeof(x));						\
	switch (size) {								\
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break;		\
	default: x = 0; BUILD_BUG();						\
	}									\
} while (0)
0231
/*
 * Goto flavour synthesized from the error-code flavour for compilers
 * without asm-goto output operands: branch to @label on any failure.
 */
#define __get_user_size_goto(x, ptr, size, label)		\
do {								\
	long __gus_retval;					\
								\
	__get_user_size_allowed(x, ptr, size, __gus_retval);	\
	if (__gus_retval)					\
		goto label;					\
} while (0)
0240
0241 #endif
0242
0243
0244
0245
0246
/*
 * Widest temporary type needed to hold *@x: unsigned long long when the
 * object is wider than a long (8-byte accesses on 32-bit), otherwise
 * unsigned long.  __get_user() reads into such a full-width temporary
 * before converting to the destination type.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
0249
/*
 * __get_user(): load *@ptr into @x without an access_ok() check.
 * Evaluates to 0 on success or -EFAULT on fault (in which case @x is
 * zeroed).  Opens user read access (KUAP) around the load and always
 * closes it again — __get_user_size_allowed() never jumps out.
 */
#define __get_user(x, ptr)					\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr));	\
								\
	might_fault();					\
	allow_read_from_user(__gu_addr, __gu_size);		\
	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	prevent_read_from_user(__gu_addr, __gu_size);		\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})
0265
/*
 * get_user(): checked load from user space.  On a failed access_ok()
 * the destination is zeroed and -EFAULT returned without touching user
 * memory; otherwise behaves like __get_user().  @ptr is evaluated once.
 */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_gu_addr = (ptr);			\
									\
	access_ok(_gu_addr, sizeof(*(ptr))) ?				\
		 __get_user(x, _gu_addr) :				\
		 ((x) = (__force __typeof__(*(ptr)))0, -EFAULT);	\
})
0274
0275
0276
/*
 * Low-level bulk copy primitive (implemented out of line, in assembly).
 * Returns the number of bytes NOT copied (0 on success).  Callers are
 * responsible for access_ok() and for opening/closing user access (KUAP).
 */
extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);
0279
#ifdef __powerpc64__
/*
 * Copy @n bytes between two user-space buffers (64-bit only).
 * Returns the number of bytes that could not be copied (0 on success).
 * User access (KUAP) is opened for both ranges and closed again around
 * the low-level copy.
 */
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	allow_read_write_user(to, from, n);
	left = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);

	return left;
}
#endif
0292
0293 static inline unsigned long raw_copy_from_user(void *to,
0294 const void __user *from, unsigned long n)
0295 {
0296 unsigned long ret;
0297
0298 allow_read_from_user(from, n);
0299 ret = __copy_tofrom_user((__force void __user *)to, from, n);
0300 prevent_read_from_user(from, n);
0301 return ret;
0302 }
0303
0304 static inline unsigned long
0305 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
0306 {
0307 unsigned long ret;
0308
0309 allow_write_to_user(to, n);
0310 ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
0311 prevent_write_to_user(to, n);
0312 return ret;
0313 }
0314
0315 unsigned long __arch_clear_user(void __user *addr, unsigned long size);
0316
0317 static inline unsigned long __clear_user(void __user *addr, unsigned long size)
0318 {
0319 unsigned long ret;
0320
0321 might_fault();
0322 allow_write_to_user(addr, size);
0323 ret = __arch_clear_user(addr, size);
0324 prevent_write_to_user(addr, size);
0325 return ret;
0326 }
0327
0328 static inline unsigned long clear_user(void __user *addr, unsigned long size)
0329 {
0330 return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
0331 }
0332
/* Out-of-line user string helpers; standard kernel uaccess semantics. */
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
0335
#ifdef CONFIG_ARCH_HAS_COPY_MC
/*
 * Machine-check-safe copy: returns the number of bytes left uncopied
 * when the copy is aborted (0 on full success).
 */
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

/* Kernel-to-kernel machine-check-safe copy: thin wrapper over the generic one. */
static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel
0346
0347 static inline unsigned long __must_check
0348 copy_mc_to_user(void __user *to, const void *from, unsigned long n)
0349 {
0350 if (check_copy_size(from, n, true)) {
0351 if (access_ok(to, n)) {
0352 allow_write_to_user(to, n);
0353 n = copy_mc_generic((void *)to, from, n);
0354 prevent_write_to_user(to, n);
0355 }
0356 }
0357
0358 return n;
0359 }
0360 #endif
0361
/* Cache-flushing copy helpers (out of line); see their definitions for details. */
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len);
0366
0367 static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
0368 {
0369 if (unlikely(!access_ok(ptr, len)))
0370 return false;
0371
0372 might_fault();
0373
0374 allow_read_write_user((void __user *)ptr, ptr, len);
0375 return true;
0376 }
0377 #define user_access_begin user_access_begin
0378 #define user_access_end prevent_current_access_user
0379 #define user_access_save prevent_user_access_return
0380 #define user_access_restore restore_user_access
0381
0382 static __must_check inline bool
0383 user_read_access_begin(const void __user *ptr, size_t len)
0384 {
0385 if (unlikely(!access_ok(ptr, len)))
0386 return false;
0387
0388 might_fault();
0389
0390 allow_read_from_user(ptr, len);
0391 return true;
0392 }
0393 #define user_read_access_begin user_read_access_begin
0394 #define user_read_access_end prevent_current_read_from_user
0395
0396 static __must_check inline bool
0397 user_write_access_begin(const void __user *ptr, size_t len)
0398 {
0399 if (unlikely(!access_ok(ptr, len)))
0400 return false;
0401
0402 might_fault();
0403
0404 allow_write_to_user((void __user *)ptr, len);
0405 return true;
0406 }
0407 #define user_write_access_begin user_write_access_begin
0408 #define user_write_access_end prevent_current_write_to_user
0409
/*
 * Load *@p into @x, branching to label @e on fault.  "unsafe": no
 * access_ok() and no KUAP handling — only valid between
 * user_access_begin() and user_access_end().  Reads via a full-width
 * __long_type() temporary before converting to the destination type.
 */
#define unsafe_get_user(x, p, e) do {					\
	__long_type(*(p)) __gu_val;					\
	__typeof__(*(p)) __user *__gu_addr = (p);			\
									\
	__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e);	\
	(x) = (__typeof__(*(p)))__gu_val;				\
} while (0)
0417
/*
 * Store @x to *@p, branching to label @e on fault.  Same "unsafe"
 * contract as unsafe_get_user(): caller holds a user-access window.
 */
#define unsafe_put_user(x, p, e) \
	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
0420
/*
 * Copy @l bytes from user @s to kernel @d inside an open user-access
 * window, branching to label @e on any fault.  Copies in 8-byte chunks,
 * then mops up a 4-, 2- and 1-byte tail as the low bits of @l dictate.
 * No alignment handling: the chunked accesses assume the CPU tolerates
 * the resulting (possibly unaligned) loads.
 */
#define unsafe_copy_from_user(d, s, l, e) \
do {									\
	u8 *_dst = (u8 *)(d);						\
	const u8 __user *_src = (const u8 __user *)(s);			\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))	\
		unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e);	\
	if (_len & 4) {							\
		unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e);	\
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e);	\
		_i += 2;						\
	}								\
	if (_len & 1)							\
		unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e);	\
} while (0)
0441
/*
 * Mirror of unsafe_copy_from_user(): copy @l bytes from kernel @s to
 * user @d inside an open user-access window, branching to label @e on
 * fault.  8-byte chunks first, then 4/2/1-byte tail stores.
 */
#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))	\
		unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e);	\
	if (_len & 4) {							\
		unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);	\
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);	\
		_i += 2;						\
	}								\
	if (_len & 1)							\
		unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);	\
} while (0)
0462
/*
 * Kernel-to-kernel accessors with fault handling: read/write a @type at
 * a kernel address, branching to @err_label on fault.  They reuse the
 * user-access asm machinery, force-casting the kernel pointer to
 * __user; no KUAP open/close is involved for these.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size_goto(*((type *)(dst)),				\
		(__force type __user *)(src), sizeof(type), err_label)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),				\
		(__force type __user *)(dst), sizeof(type), err_label)
0470
0471 #endif