/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <asm-generic/access_ok.h>
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful not to
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
  __get_user_check((x), (ptr), sizeof(*(ptr)))
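
/*
 * Usage sketch (illustrative only, not part of this header): a typical
 * single-value round trip, e.g. from an ioctl handler.  Both macros
 * return 0 on success and -EFAULT on a faulting access:
 *
 *	static int example_bump(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val++;
 *		return put_user(val, uptr);
 *	}
 */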

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
  __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
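
/*
 * Usage sketch (illustrative only): check the whole user range once
 * with access_ok() and then use the unchecked accessors for the
 * individual members:
 *
 *	struct pair { int a, b; };
 *
 *	static int example_read_pair(struct pair __user *u, int *a, int *b)
 *	{
 *		if (!access_ok(u, sizeof(*u)))
 *			return -EFAULT;
 *		if (__get_user(*a, &u->a) || __get_user(*b, &u->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */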

/*
 * The "lda res,cont-label(err)" emitted by EXC() below is magic to get
 * the assembler to encode the bits we need for resolving the
 * exception.  See the more extensive comments with
 * fixup_inline_exception for more information.
 */
#define EXC(label,cont,res,err)             \
    ".section __ex_table,\"a\"\n"           \
    "   .long "#label"-.\n"         \
    "   lda "#res","#cont"-"#label"("#err")\n"  \
    ".previous\n"
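
/*
 * For reference, EXC(1b,2b,%0,%1) as used by __get_user_64 below
 * expands (modulo whitespace) to:
 *
 *	.section __ex_table,"a"
 *	.long 1b-.
 *	lda %0,2b-1b(%1)
 *	.previous
 *
 * i.e. a PC-relative pointer to the potentially faulting instruction,
 * followed by an "lda" that is never executed: its encoding packs the
 * destination register, the error register and the distance to the
 * continuation label into a single word for the fixup code to decode.
 */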

extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)            \
({                              \
    long __gu_err = 0;                  \
    unsigned long __gu_val;                 \
    __chk_user_ptr(ptr);                    \
    switch (size) {                     \
      case 1: __get_user_8(ptr); break;         \
      case 2: __get_user_16(ptr); break;            \
      case 4: __get_user_32(ptr); break;            \
      case 8: __get_user_64(ptr); break;            \
      default: __get_user_unknown(); break;         \
    }                           \
    (x) = (__force __typeof__(*(ptr))) __gu_val;        \
    __gu_err;                       \
})

#define __get_user_check(x, ptr, size)              \
({                              \
    long __gu_err = -EFAULT;                \
    unsigned long __gu_val = 0;             \
    const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
    if (__access_ok(__gu_addr, size)) {         \
        __gu_err = 0;                   \
        switch (size) {                 \
          case 1: __get_user_8(__gu_addr); break;   \
          case 2: __get_user_16(__gu_addr); break;  \
          case 4: __get_user_32(__gu_addr); break;  \
          case 8: __get_user_64(__gu_addr); break;  \
          default: __get_user_unknown(); break;     \
        }                       \
    }                           \
    (x) = (__force __typeof__(*(ptr))) __gu_val;        \
    __gu_err;                       \
})

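/*
 * Note: __m() below casts a user pointer to a pointer to an oversized
 * dummy structure, so the "m" constraints in the asm statements that
 * follow describe a large memory area rather than a single scalar.
 * (A common inline-asm idiom: it keeps gcc from assuming that only
 * sizeof(*addr) bytes are touched and optimizing accordingly.)
 */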
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)             \
    __asm__("1: ldq %0,%2\n"            \
    "2:\n"                      \
    EXC(1b,2b,%0,%1)                \
        : "=r"(__gu_val), "=r"(__gu_err)    \
        : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)             \
    __asm__("1: ldl %0,%2\n"            \
    "2:\n"                      \
    EXC(1b,2b,%0,%1)                \
        : "=r"(__gu_val), "=r"(__gu_err)    \
        : "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)             \
    __asm__("1: ldwu %0,%2\n"           \
    "2:\n"                      \
    EXC(1b,2b,%0,%1)                \
        : "=r"(__gu_val), "=r"(__gu_err)    \
        : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)              \
    __asm__("1: ldbu %0,%2\n"           \
    "2:\n"                      \
    EXC(1b,2b,%0,%1)                \
        : "=r"(__gu_val), "=r"(__gu_err)    \
        : "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */
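
/*
 * Worked example (illustrative), for addr == 0x1007: the two ldq_u's
 * load the aligned quadwords at 0x1000 and 0x1008 (ldq_u ignores the
 * low three address bits), extwl/extwh shift the low and high pieces
 * of the 16-bit value into place based on the byte offset in addr,
 * and the final "or" merges them.  When the word does not cross a
 * quadword boundary, extwh yields zero and the merge is a no-op.
 */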

#define __get_user_16(addr)                     \
{                                   \
    long __gu_tmp;                          \
    __asm__("1: ldq_u %0,0(%3)\n"                   \
    "2: ldq_u %1,1(%3)\n"                   \
    "   extwl %0,%3,%0\n"                   \
    "   extwh %1,%3,%1\n"                   \
    "   or %0,%1,%0\n"                      \
    "3:\n"                              \
    EXC(1b,3b,%0,%2)                        \
    EXC(2b,3b,%0,%2)                        \
        : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)  \
        : "r"(addr), "2"(__gu_err));                \
}

#define __get_user_8(addr)                      \
    __asm__("1: ldq_u %0,0(%2)\n"                   \
    "   extbl %0,%2,%0\n"                   \
    "2:\n"                              \
    EXC(1b,2b,%0,%1)                        \
        : "=&r"(__gu_val), "=r"(__gu_err)           \
        : "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)            \
({                              \
    long __pu_err = 0;                  \
    __chk_user_ptr(ptr);                    \
    switch (size) {                     \
      case 1: __put_user_8(x, ptr); break;          \
      case 2: __put_user_16(x, ptr); break;         \
      case 4: __put_user_32(x, ptr); break;         \
      case 8: __put_user_64(x, ptr); break;         \
      default: __put_user_unknown(); break;         \
    }                           \
    __pu_err;                       \
})

#define __put_user_check(x, ptr, size)              \
({                              \
    long __pu_err = -EFAULT;                \
    __typeof__(*(ptr)) __user *__pu_addr = (ptr);       \
    if (__access_ok(__pu_addr, size)) {         \
        __pu_err = 0;                   \
        switch (size) {                 \
          case 1: __put_user_8(x, __pu_addr); break;    \
          case 2: __put_user_16(x, __pu_addr); break;   \
          case 4: __put_user_32(x, __pu_addr); break;   \
          case 8: __put_user_64(x, __pu_addr); break;   \
          default: __put_user_unknown(); break;     \
        }                       \
    }                           \
    __pu_err;                       \
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x, addr)                  \
__asm__ __volatile__("1: stq %r2,%1\n"              \
    "2:\n"                          \
    EXC(1b,2b,$31,%0)                   \
        : "=r"(__pu_err)                \
        : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)                  \
__asm__ __volatile__("1: stl %r2,%1\n"              \
    "2:\n"                          \
    EXC(1b,2b,$31,%0)                   \
        : "=r"(__pu_err)                \
        : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)                  \
__asm__ __volatile__("1: stw %r2,%1\n"              \
    "2:\n"                          \
    EXC(1b,2b,$31,%0)                   \
        : "=r"(__pu_err)                \
        : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)                   \
__asm__ __volatile__("1: stb %r2,%1\n"              \
    "2:\n"                          \
    EXC(1b,2b,$31,%0)                   \
        : "=r"(__pu_err)                \
        : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */
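
/*
 * Sketch of the sequence below (read-modify-write of up to two
 * quadwords): ldq_u fetches the quadwords that may contain the word,
 * inswh/inswl position the new value at the right byte offset,
 * mskwh/mskwl clear the target bytes in the old contents, "or" merges
 * old and new, and stq_u writes the results back.  Note that the
 * highest-addressed quadword is loaded and stored first, so a fault
 * at the end of a page-crossing word is taken before the lower
 * quadword has been modified.
 */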

#define __put_user_16(x, addr)                  \
{                               \
    long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;    \
    __asm__ __volatile__(                   \
    "1: ldq_u %2,1(%5)\n"               \
    "2: ldq_u %1,0(%5)\n"               \
    "   inswh %6,%5,%4\n"               \
    "   inswl %6,%5,%3\n"               \
    "   mskwh %2,%5,%2\n"               \
    "   mskwl %1,%5,%1\n"               \
    "   or %2,%4,%2\n"                  \
    "   or %1,%3,%1\n"                  \
    "3: stq_u %2,1(%5)\n"               \
    "4: stq_u %1,0(%5)\n"               \
    "5:\n"                          \
    EXC(1b,5b,$31,%0)                   \
    EXC(2b,5b,$31,%0)                   \
    EXC(3b,5b,$31,%0)                   \
    EXC(4b,5b,$31,%0)                   \
        : "=r"(__pu_err), "=&r"(__pu_tmp1),         \
          "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),       \
          "=&r"(__pu_tmp4)              \
        : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)                   \
{                               \
    long __pu_tmp1, __pu_tmp2;              \
    __asm__ __volatile__(                   \
    "1: ldq_u %1,0(%4)\n"               \
    "   insbl %3,%4,%2\n"               \
    "   mskbl %1,%4,%1\n"               \
    "   or %1,%2,%1\n"                  \
    "2: stq_u %1,0(%4)\n"               \
    "3:\n"                          \
    EXC(1b,3b,$31,%0)                   \
    EXC(2b,3b,$31,%0)                   \
        : "=r"(__pu_err),               \
          "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)        \
        : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
    return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
    return __copy_user((__force void *)to, from, len);
}
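
/*
 * Note (illustrative): the raw routines above do no access_ok()
 * checking of their own; the generic copy_from_user()/copy_to_user()
 * wrappers in <linux/uaccess.h> perform that check and then call down
 * into them:
 *
 *	static int example_fetch(const void __user *ubuf)
 *	{
 *		char buf[64];
 *
 *		if (copy_from_user(buf, ubuf, sizeof(buf)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */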

extern long __clear_user(void __user *to, long len);

static inline long
clear_user(void __user *to, long len)
{
    if (__access_ok(to, len))
        len = __clear_user(to, len);
    return len;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
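
/*
 * Usage sketch (illustrative only): copy a NUL-terminated string from
 * user space into a fixed-size kernel buffer.  strncpy_from_user()
 * returns the string length (excluding the NUL), count if the buffer
 * filled up before a NUL was found, or -EFAULT on a faulting access:
 *
 *	static long example_get_name(const char __user *uname)
 *	{
 *		char name[32];
 *		long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *		if (len < 0)
 *			return len;
 *		if (len == sizeof(name))
 *			return -ENAMETOOLONG;
 *		return 0;
 *	}
 */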

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */