0001
0002 #ifndef __LINUX_UACCESS_H__
0003 #define __LINUX_UACCESS_H__
0004
0005 #include <linux/fault-inject-usercopy.h>
0006 #include <linux/instrumented.h>
0007 #include <linux/minmax.h>
0008 #include <linux/sched.h>
0009 #include <linux/thread_info.h>
0010
0011 #include <asm/uaccess.h>
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
/*
 * __copy_from_user_inatomic() - copy from user space without sleeping.
 *
 * Caller must have already validated @from (e.g. with access_ok()) and be
 * in a context where the copy must not fault-and-sleep.  Instruments the
 * access for sanitizers and hardens against bad destination object sizes
 * before doing the raw copy.
 *
 * Returns the number of bytes that could NOT be copied; 0 on full success.
 * NOTE(review): unlike __copy_to_user_inatomic() below, this variant does
 * not call should_fail_usercopy() — presumably intentional upstream, but
 * worth confirming.
 */
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
0065
/*
 * __copy_from_user() - copy from user space, may sleep to fault in pages.
 *
 * Caller is responsible for the access_ok() check on @from.  Returns the
 * number of bytes that could NOT be copied; 0 on full success.
 */
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* May fault and sleep: must not be used from atomic context. */
	might_fault();
	/* Fault injection hook: pretend nothing was copied. */
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
/*
 * __copy_to_user_inatomic() - copy to user space without sleeping.
 *
 * Caller must have already validated @to (e.g. with access_ok()) and be in
 * a context where the copy must not fault-and-sleep.  Returns the number
 * of bytes that could NOT be copied; 0 on full success.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	/* Fault injection hook: pretend nothing was copied. */
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
0099
/*
 * __copy_to_user() - copy to user space, may sleep to fault in pages.
 *
 * Caller is responsible for the access_ok() check on @to.  Returns the
 * number of bytes that could NOT be copied; 0 on full success.
 */
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/* May fault and sleep: must not be used from atomic context. */
	might_fault();
	/* Fault injection hook: pretend nothing was copied. */
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
0110
0111 #ifdef INLINE_COPY_FROM_USER
0112 static inline __must_check unsigned long
0113 _copy_from_user(void *to, const void __user *from, unsigned long n)
0114 {
0115 unsigned long res = n;
0116 might_fault();
0117 if (!should_fail_usercopy() && likely(access_ok(from, n))) {
0118 instrument_copy_from_user(to, from, n);
0119 res = raw_copy_from_user(to, from, n);
0120 }
0121 if (unlikely(res))
0122 memset(to + (n - res), 0, res);
0123 return res;
0124 }
0125 #else
0126 extern __must_check unsigned long
0127 _copy_from_user(void *, const void __user *, unsigned long);
0128 #endif
0129
0130 #ifdef INLINE_COPY_TO_USER
0131 static inline __must_check unsigned long
0132 _copy_to_user(void __user *to, const void *from, unsigned long n)
0133 {
0134 might_fault();
0135 if (should_fail_usercopy())
0136 return n;
0137 if (access_ok(to, n)) {
0138 instrument_copy_to_user(to, from, n);
0139 n = raw_copy_to_user(to, from, n);
0140 }
0141 return n;
0142 }
0143 #else
0144 extern __must_check unsigned long
0145 _copy_to_user(void __user *, const void *, unsigned long);
0146 #endif
0147
0148 static __always_inline unsigned long __must_check
0149 copy_from_user(void *to, const void __user *from, unsigned long n)
0150 {
0151 if (check_copy_size(to, n, false))
0152 n = _copy_from_user(to, from, n);
0153 return n;
0154 }
0155
0156 static __always_inline unsigned long __must_check
0157 copy_to_user(void __user *to, const void *from, unsigned long n)
0158 {
0159 if (check_copy_size(from, n, true))
0160 n = _copy_to_user(to, from, n);
0161 return n;
0162 }
0163
0164 #ifndef copy_mc_to_kernel
0165
0166
0167
0168
/*
 * copy_mc_to_kernel() - fallback for architectures without a
 * machine-check-aware kernel copy: a plain memcpy that reports zero
 * bytes remaining.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
0175 #endif
0176
/* Bump the per-task pagefault-disable depth counter. */
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}
0181
/* Drop the per-task pagefault-disable depth counter. */
static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}
0186
0187
0188
0189
0190
0191
0192
0193
/*
 * pagefault_disable() - enter a region in which page faults must not
 * sleep.  Calls nest: the per-task counter tracks the depth.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Compiler barrier: keep accesses inside the critical region from
	 * being reordered before the counter increment.
	 */
	barrier();
}
0203
/*
 * pagefault_enable() - leave a pagefault_disable() region; mirror image
 * of pagefault_disable() with the barrier before the decrement.
 */
static inline void pagefault_enable(void)
{
	/*
	 * Compiler barrier: keep accesses inside the critical region from
	 * being reordered after the counter decrement.
	 */
	barrier();
	pagefault_disabled_dec();
}
0213
0214
0215
0216
/* True while the current task is inside a pagefault_disable() region. */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
/*
 * faulthandler_disabled() - true when the fault handler must not sleep:
 * either pagefaults are explicitly disabled or we are in atomic context.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
0233
0234 #ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
/*
 * Stub for !CONFIG_ARCH_HAS_SUBPAGE_FAULTS: no sub-page permission
 * granularity, so report the whole range as writeable (0 = success).
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}
0253
0254 #endif
0255
0256 #ifndef ARCH_HAS_NOCACHE_UACCESS
0257
/*
 * Fallback for !ARCH_HAS_NOCACHE_UACCESS: no cache-bypassing primitive
 * available, so a plain atomic-context copy is used instead.
 */
static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}
0264
0265 #endif
0266
0267 extern __must_check int check_zeroed_user(const void __user *from, size_t size);
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316 static __always_inline __must_check int
0317 copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
0318 size_t usize)
0319 {
0320 size_t size = min(ksize, usize);
0321 size_t rest = max(ksize, usize) - size;
0322
0323
0324 if (usize < ksize) {
0325 memset(dst + size, 0, rest);
0326 } else if (usize > ksize) {
0327 int ret = check_zeroed_user(src + size, rest);
0328 if (ret <= 0)
0329 return ret ?: -E2BIG;
0330 }
0331
0332 if (copy_from_user(dst, src, size))
0333 return -EFAULT;
0334 return 0;
0335 }
0336
/* Gate for copy_from_kernel_nofault(); defined in mm (see maccess.c). */
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

/*
 * Non-faulting accessors: per the "_nofault" naming these are expected to
 * return an error rather than take a sleeping page fault — confirm against
 * their definitions in mm/maccess.c.
 */
long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
0352
#ifndef __get_kernel_nofault
/*
 * Generic fallbacks: access a kernel address through the user accessors
 * by force-casting it to a __user pointer.  On any fault, jump to @label.
 */
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif
0371
0372
0373
0374
0375
0376
0377
0378
/*
 * get_kernel_nofault() - read a kernel variable without faulting.
 * The typeof() temporary type-checks @ptr against &(val); the expression
 * evaluates to the result of copy_from_kernel_nofault().
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
0383
#ifndef user_access_begin
/*
 * Fallbacks for architectures without explicit user-access windows
 * (e.g. x86 STAC/CLAC): "begin" is just the access_ok() check, "end" is
 * a no-op, and each unsafe_* op wraps the checked accessor, jumping to
 * the error label on failure.
 */
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
/* No hardware user-access state to save/restore on these architectures. */
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
0403
0404 #ifdef CONFIG_HARDENED_USERCOPY
0405 void __noreturn usercopy_abort(const char *name, const char *detail,
0406 bool to_user, unsigned long offset,
0407 unsigned long len);
0408 #endif
0409
0410 #endif