0001
0002
0003
0004
0005
0006
0007
0008 #ifndef _ASM_UACCESS_H
0009 #define _ASM_UACCESS_H
0010
0011 #include <linux/compiler.h>
0012 #include <linux/string.h>
0013
0014 #include <asm/processor.h>
0015 #include <asm-generic/access_ok.h>
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
/*
 * put_user(x, ptr) - store value @x at user-space address @ptr.
 *
 * @ptr is captured once into __pu_addr so a side-effecting argument is
 * evaluated a single time (the remaining uses are inside sizeof()/
 * __typeof__() and __chk_user_ptr(), which do not evaluate it at runtime).
 * Range-checks the address via __put_user_check()/__access_ok() before
 * storing.  Evaluates to 0 on success or -EFAULT on a bad address.
 */
#define put_user(x, ptr) ({ \
void __user *__pu_addr = (ptr); \
__chk_user_ptr(ptr); \
__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})
0031
/*
 * get_user(x, ptr) - fetch a value from user-space address @ptr into @x.
 *
 * @ptr is captured once into __gu_addr to avoid double evaluation.
 * Range-checks the address via __get_user_check()/__access_ok() before
 * loading; on failure @x is set to 0.  Evaluates to 0 on success or
 * -EFAULT on a bad address.
 */
#define get_user(x, ptr) ({ \
const void __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})
0037
0038
0039
0040
0041
0042
/*
 * __put_user()/__get_user() - same as put_user()/get_user() but WITHOUT
 * the __access_ok() range check; the caller must have validated the
 * address already.  They expand to the *_nocheck variants below.
 */
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
0047
/*
 * Dummy wide type used only to build the "m" operand in the inline asm
 * below: casting the user pointer to (struct __large_struct __user *)
 * and dereferencing it tells the compiler a large region at that address
 * may be read/written, so it cannot cache it across the asm.  No object
 * of this type is ever actually accessed as a whole.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))
0050
/*
 * __put_user_check(x, addr, size) - range-checked user store.
 *
 * Verifies the destination with __access_ok() first, then dispatches on
 * the compile-time @size to the matching SPARC store instruction
 * (b = byte, h = halfword, "" = word, d = doubleword).  A @size that is
 * not 1/2/4/8 calls the deliberately undefined __put_user_bad() so the
 * build fails at link time.  Result: 0 on success, -EFAULT on a bad
 * address or faulting store.
 */
#define __put_user_check(x, addr, size) ({ \
register int __pu_ret; \
if (__access_ok(addr, size)) { \
switch (size) { \
case 1: \
__put_user_asm(x, b, addr, __pu_ret); \
break; \
case 2: \
__put_user_asm(x, h, addr, __pu_ret); \
break; \
case 4: \
__put_user_asm(x, , addr, __pu_ret); \
break; \
case 8: \
__put_user_asm(x, d, addr, __pu_ret); \
break; \
default: \
__pu_ret = __put_user_bad(); \
break; \
} \
} else { \
__pu_ret = -EFAULT; \
} \
__pu_ret; \
})
0076
/*
 * __put_user_nocheck(x, addr, size) - user store with NO address range
 * check (the caller guarantees @addr is a valid user pointer).  Same
 * size dispatch as __put_user_check(); a faulting store still returns
 * -EFAULT via the exception-table fixup in __put_user_asm().
 */
#define __put_user_nocheck(x, addr, size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(x, b, addr, __pu_ret); break; \
case 2: __put_user_asm(x, h, addr, __pu_ret); break; \
case 4: __put_user_asm(x, , addr, __pu_ret); break; \
case 8: __put_user_asm(x, d, addr, __pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} \
__pu_ret; \
})
0088
/*
 * __put_user_asm(x, size, addr, ret) - single SPARC "st<size>" to user
 * space with an exception-table fixup.
 *
 * Label 1 is the (possibly faulting) store; on success execution falls
 * through to "clr %0" (ret = 0) and label 2.  The __ex_table entry
 * ".word 1b, 3b" makes the fault handler resume at label 3 in .fixup,
 * which branches back to label 2 with "mov %3, %0" in the branch delay
 * slot, i.e. ret = -EFAULT.  The "m"(*__m(addr)) operand ties the asm
 * to the target memory so the compiler orders accesses correctly.
 */
#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
"/* Put user asm, inline. */\n" \
"1:\t" "st"#size " %1, %2\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"b 2b\n\t" \
" mov %3, %0\n\t" \
".previous\n\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\t" \
".previous\n\n\t" \
: "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
"i" (-EFAULT))

/* Never defined; referencing it breaks the link on an invalid size. */
int __put_user_bad(void);
0109
/*
 * __get_user_check(x, addr, size, type) - range-checked user load.
 *
 * Verifies @addr with __access_ok(), then dispatches on the
 * compile-time @size to the matching SPARC load (ub/uh = zero-extending
 * byte/halfword, "" = word, d = doubleword).  On any failure path
 * (bad range, faulting load, unsupported size) __gu_val is zeroed so
 * @x is never left holding uninitialized data; @x is always assigned,
 * cast through @type with __force to drop the __user qualifier.
 * Result: 0 on success, -EFAULT on failure.
 */
#define __get_user_check(x, addr, size, type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
if (__access_ok(addr, size)) { \
switch (size) { \
case 1: \
__get_user_asm(__gu_val, ub, addr, __gu_ret); \
break; \
case 2: \
__get_user_asm(__gu_val, uh, addr, __gu_ret); \
break; \
case 4: \
__get_user_asm(__gu_val, , addr, __gu_ret); \
break; \
case 8: \
__get_user_asm(__gu_val, d, addr, __gu_ret); \
break; \
default: \
__gu_val = 0; \
__gu_ret = __get_user_bad(); \
break; \
} \
} else { \
__gu_val = 0; \
__gu_ret = -EFAULT; \
} \
x = (__force type) __gu_val; \
__gu_ret; \
})
0139
/*
 * __get_user_nocheck(x, addr, size, type) - user load with NO address
 * range check (caller guarantees @addr is valid).  Same size dispatch
 * and zero-on-failure behaviour as __get_user_check(); a faulting load
 * still yields -EFAULT via the fixup in __get_user_asm().
 */
#define __get_user_nocheck(x, addr, size, type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \
case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \
default: \
__gu_val = 0; \
__gu_ret = __get_user_bad(); \
break; \
} \
x = (__force type) __gu_val; \
__gu_ret; \
})
0156
/*
 * __get_user_asm(x, size, addr, ret) - single SPARC "ld<size>" from user
 * space with an exception-table fixup.
 *
 * Label 1 is the (possibly faulting) load into %1 (= x); on success the
 * fall-through "clr %0" sets ret = 0.  On a fault the __ex_table entry
 * ".word 1b, 3b" redirects to label 3 in .fixup, which clears the
 * destination ("clr %1") and branches back to label 2 with
 * "mov %3, %0" in the delay slot, i.e. ret = -EFAULT, x = 0.
 */
#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
"/* Get user asm, inline. */\n" \
"1:\t" "ld"#size " %2, %1\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"clr %1\n\t" \
"b 2b\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
"i" (-EFAULT))

/* Never defined; referencing it breaks the link on an invalid size. */
int __get_user_bad(void);

/*
 * Arch copy routine (defined in assembly elsewhere).  Returns the
 * number of bytes NOT copied (0 on full success).
 */
unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
0180
0181 static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
0182 {
0183 return __copy_user(to, (__force void __user *) from, n);
0184 }
0185
0186 static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
0187 {
0188 return __copy_user((__force void __user *) to, from, n);
0189 }
0190
/*
 * Tell the generic uaccess code that raw_copy_{from,to}_user() above
 * are inline, so linux/uaccess.h emits inline copy_{from,to}_user()
 * wrappers instead of out-of-line ones.
 */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
0193
/*
 * Zero @size bytes of user memory at @addr WITHOUT an address range
 * check.  Implemented as a hand-written call to __bzero: @addr goes in
 * %o0 and @size in %o1 (note the call's delay slot sets %o0), and the
 * not-cleared byte count comes back in %o0.  The clobber list names
 * every register the SPARC calling convention lets the callee scribble
 * on (%o0-%o5, %o7, the global registers, and the condition codes).
 * Returns the number of bytes that could NOT be cleared (0 on success).
 */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
unsigned long ret;

__asm__ __volatile__ (
"mov %2, %%o1\n"
"call __bzero\n\t"
" mov %1, %%o0\n\t"
"mov %%o0, %0\n"
: "=r" (ret) : "r" (addr), "r" (size) :
"o0", "o1", "o2", "o3", "o4", "o5", "o7",
"g1", "g2", "g3", "g4", "g5", "g7", "cc");

return ret;
}
0209
0210 static inline unsigned long clear_user(void __user *addr, unsigned long n)
0211 {
0212 if (n && __access_ok(addr, n))
0213 return __clear_user(addr, n);
0214 else
0215 return n;
0216 }
0217
0218 __must_check long strnlen_user(const char __user *str, long n);
0219
0220 #endif