/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
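
/*
 * Illustrative example (not part of this header) of the calling convention
 * described above: the copy helpers return the number of bytes that could
 * NOT be copied, so callers normally turn any non-zero result into -EFAULT.
 * The struct and variable names here are invented.
 *
 *      struct foo kbuf;
 *
 *      if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *        return -EFAULT;     // short copy; the tail of kbuf was zeroed
 *
 *      if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *        return -EFAULT;     // some bytes did not reach userspace
 */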

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
    instrument_copy_from_user(to, from, n);
    check_object_size(to, n, false);
    return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
    might_fault();
    if (should_fail_usercopy())
        return n;
    instrument_copy_from_user(to, from, n);
    check_object_size(to, n, false);
    return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
    if (should_fail_usercopy())
        return n;
    instrument_copy_to_user(to, from, n);
    check_object_size(from, n, true);
    return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
    might_fault();
    if (should_fail_usercopy())
        return n;
    instrument_copy_to_user(to, from, n);
    check_object_size(from, n, true);
    return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
    unsigned long res = n;
    might_fault();
    if (!should_fail_usercopy() && likely(access_ok(from, n))) {
        instrument_copy_from_user(to, from, n);
        res = raw_copy_from_user(to, from, n);
    }
    if (unlikely(res))
        memset(to + (n - res), 0, res);
    return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
    might_fault();
    if (should_fail_usercopy())
        return n;
    if (access_ok(to, n)) {
        instrument_copy_to_user(to, from, n);
        n = raw_copy_to_user(to, from, n);
    }
    return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
    if (check_copy_size(to, n, false))
        n = _copy_from_user(to, from, n);
    return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
    if (check_copy_size(from, n, true))
        n = _copy_to_user(to, from, n);
    return n;
}

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
    memcpy(dst, src, cnt);
    return 0;
}
#endif
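
/*
 * A minimal usage sketch (names invented): like the usercopy helpers,
 * copy_mc_to_kernel() returns the number of bytes not copied, so a
 * non-zero result means the source read failed part way through:
 *
 *      unsigned long rem;
 *
 *      rem = copy_mc_to_kernel(dst, src, len);
 *      if (rem)
 *        return -EFAULT;     // only len - rem bytes were copied
 */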

static __always_inline void pagefault_disabled_inc(void)
{
    current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
    current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
    pagefault_disabled_inc();
    /*
     * make sure to have issued the store before a pagefault
     * can hit.
     */
    barrier();
}

static inline void pagefault_enable(void)
{
    /*
     * make sure to issue those last loads/stores before enabling
     * the pagefault handler again.
     */
    barrier();
    pagefault_disabled_dec();
}
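
/*
 * A minimal sketch of the usual pattern (names invented): with page faults
 * disabled, the *_inatomic copy variants either succeed against resident
 * pages or fail fast instead of sleeping, so the caller can fall back to a
 * regular, sleeping copy outside the atomic section:
 *
 *      unsigned long ret;
 *
 *      pagefault_disable();
 *      ret = __copy_from_user_inatomic(dst, usrc, len);
 *      pagefault_enable();
 *      if (ret)
 *        // fall back to a sleepable copy_from_user() here
 */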

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
    return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler won't actually be
 * disabled; in_atomic() will report different values based on
 * !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *              granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
    return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                  unsigned long n)
{
    return __copy_from_user_inatomic(to, from, n);
}

#endif      /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
              size_t usize)
{
    size_t size = min(ksize, usize);
    size_t rest = max(ksize, usize) - size;

    /* Deal with trailing bytes. */
    if (usize < ksize) {
        memset(dst + size, 0, rest);
    } else if (usize > ksize) {
        int ret = check_zeroed_user(src + size, rest);
        if (ret <= 0)
            return ret ?: -E2BIG;
    }
    /* Copy the interoperable parts of the struct. */
    if (copy_from_user(dst, src, size))
        return -EFAULT;
    return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
        size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
        long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
        long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
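
/*
 * Illustrative use of the _nofault variants (names invented): they are
 * intended for contexts that must not take a fault or sleep, and return a
 * negative error instead of faulting on a bad address:
 *
 *      unsigned long word;
 *
 *      if (copy_from_kernel_nofault(&word, maybe_bad_kptr, sizeof(word)))
 *        return -EFAULT;     // address was not safely readable
 */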

#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label) \
do {                            \
    type __user *p = (type __force __user *)(src);  \
    type data;                  \
    if (__get_user(data, p))            \
        goto label;             \
    *(type *)dst = data;                \
} while (0)

#define __put_kernel_nofault(dst, src, type, label) \
do {                            \
    type __user *p = (type __force __user *)(dst);  \
    type data = *(type *)src;           \
    if (__put_user(data, p))            \
        goto label;             \
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({             \
    const typeof(val) *__gk_ptr = (ptr);            \
    copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
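
/*
 * Example use (illustrative; the variable names are invented):
 *
 *      unsigned long val;
 *
 *      if (get_kernel_nofault(val, addr))
 *        return -EFAULT;
 */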

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
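
/*
 * Typical (illustrative) shape of a user_access_begin() section; the label
 * and variable names are invented.  Note that the section must be closed
 * on both the success and the failure path:
 *
 *      if (!user_write_access_begin(uptr, sizeof(*uptr)))
 *        return -EFAULT;
 *      unsafe_put_user(val, uptr, efault);
 *      user_write_access_end();
 *      return 0;
 *   efault:
 *      user_write_access_end();
 *      return -EFAULT;
 */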

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
                   bool to_user, unsigned long offset,
                   unsigned long len);
#endif

#endif      /* __LINUX_UACCESS_H__ */