/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

#include <asm/processor.h>
#include <asm-generic/access_ok.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 */
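
/*
 * Concretely, the user accessors below use the "lda"/"sta"
 * alternate-space instruction forms with %asi, while the
 * *_kernel_nofault helpers use plain "ld"/"st" against the kernel's
 * own mapping.
 */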

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	/* Arbitrary size: reject ranges where addr + size wraps around. */
	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}
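
/*
 * For non-constant sizes the wrap test above is what rejects ranges
 * that overflow the address space.  For example, addr = 0xfffffffffffffff8
 * with size = 0x10 gives addr + size = 0x8, which is smaller than size,
 * so the range is rejected even though the final "addr > limit" test on
 * its own would have accepted it.
 */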

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
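
/*
 * Illustrative use only (the caller and "uptr" below are hypothetical,
 * not part of this header): both macros evaluate to 0 on success and
 * -EFAULT on a faulting access, with the loaded value returned through
 * the first argument:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */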

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_kernel_nofault(dst, src, type, label)			\
do {									\
	type *addr = (type __force *)(dst);				\
	type data = *(type *)src;					\
	register int __pu_ret;						\
	switch (sizeof(type)) {						\
	case 1: __put_kernel_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_kernel_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_kernel_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_kernel_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	if (__pu_ret)							\
		goto label;						\
} while (0)

#define __put_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put kernel asm, inline. */\n"			\
	"1:\t"	"st"#size " %1, [%2]\n\t"				\
		"clr    %0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align 4\n"						\
	"3:\n\t"							\
		"sethi  %%hi(2b), %0\n\t"				\
		"jmpl   %0 + %%lo(2b), %%g0\n\t"			\
		" mov   %3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align 4\n\t"						\
		".word  1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;						\
	switch (size) {							\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	__pu_ret;							\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr    %0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align 4\n"						\
	"3:\n\t"							\
		"sethi  %%hi(2b), %0\n\t"				\
		"jmpl   %0 + %%lo(2b), %%g0\n\t"			\
		" mov   %3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align 4\n\t"						\
		".word  1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_kernel_nofault(dst, src, type, label)			\
do {									\
	type *addr = (type __force *)(src);				\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (sizeof(type)) {						\
		case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break;	\
		case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break;	\
		case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break;	\
		case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break;	\
		default:						\
			__gu_val = 0;					\
			__gu_ret = __get_user_bad();			\
			break;						\
	}								\
	if (__gu_ret)							\
		goto label;						\
	*(type *)dst = (__force type) __gu_val;				\
} while (0)
#define __get_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get kernel asm, inline. */\n"			\
	"1:\t"	"ld"#size " [%2], %1\n\t"				\
		"clr    %0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align 4\n"						\
	"3:\n\t"							\
		"sethi  %%hi(2b), %0\n\t"				\
		"clr    %1\n\t"						\
		"jmpl   %0 + %%lo(2b), %%g0\n\t"			\
		" mov   %3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align 4\n\t"						\
		".word  1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

#define __get_user_nocheck(data, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
		case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
		case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
		case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	\
		case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	\
		default:						\
			__gu_val = 0;					\
			__gu_ret = __get_user_bad();			\
			break;						\
	}								\
	data = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr    %0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align 4\n"						\
	"3:\n\t"							\
		"sethi  %%hi(2b), %0\n\t"				\
		"clr    %1\n\t"						\
		"jmpl   %0 + %%lo(2b), %%g0\n\t"			\
		" mov   %3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align 4\n\t"						\
		".word  1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);
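
/*
 * The *_kernel_nofault helpers above branch to a caller-supplied label
 * instead of returning an error code.  A rough sketch of a caller
 * (illustrative only; "dst", "src" and the "efault" label are
 * hypothetical):
 *
 *	long v;
 *
 *	__get_kernel_nofault(&v, src, long, efault);
 *	__put_kernel_nofault(dst, &v, long, efault);
 *	return 0;
 * efault:
 *	return -EFAULT;
 */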

unsigned long __must_check raw_copy_from_user(void *to,
					      const void __user *from,
					      unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					    const void *from,
					    unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
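
/*
 * Like the generic copy_{from,to}_user() wrappers built on top of them,
 * the raw copy routines return the number of bytes that could not be
 * copied, so zero means success.  An illustrative (hypothetical) caller:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */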

unsigned long __must_check raw_copy_in_user(void __user *to,
					    const void __user *from,
					    unsigned long size);

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */