/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

static inline int __access_ok(const void __user *ptr, unsigned long size);

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline int access_ok(const void __user *addr, unsigned long size)
{
    /*
     * Asynchronous I/O running in a kernel thread does not have the
     * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
     * the user address before checking.
     */
    if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
        (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
        addr = untagged_addr(addr);

    return likely(__access_ok(addr, size));
}
#define access_ok access_ok

#include <asm-generic/access_ok.h>

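/*
 * A minimal usage sketch: a caller typically validates the whole user
 * range once before doing a sequence of raw accesses. `uaddr` and `size`
 * are hypothetical caller-supplied values.
 *
 *     if (!access_ok(uaddr, size))
 *         return -EFAULT;
 */
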
/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
    unsigned long flags, ttbr;

    local_irq_save(flags);
    ttbr = read_sysreg(ttbr1_el1);
    ttbr &= ~TTBR_ASID_MASK;
    /* reserved_pg_dir placed before swapper_pg_dir */
    write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
    isb();
    /* Set reserved ASID */
    write_sysreg(ttbr, ttbr1_el1);
    isb();
    local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
    unsigned long flags, ttbr0, ttbr1;

    /*
     * Disable interrupts to avoid preemption between reading the 'ttbr0'
     * variable and the MSR. A context switch could trigger an ASID
     * roll-over and an update of 'ttbr0'.
     */
    local_irq_save(flags);
    ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

    /* Restore active ASID */
    ttbr1 = read_sysreg(ttbr1_el1);
    ttbr1 &= ~TTBR_ASID_MASK;       /* safety measure */
    ttbr1 |= ttbr0 & TTBR_ASID_MASK;
    write_sysreg(ttbr1, ttbr1_el1);
    isb();

    /* Restore user page table */
    write_sysreg(ttbr0, ttbr0_el1);
    isb();
    local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
    if (!system_uses_ttbr0_pan())
        return false;
    __uaccess_ttbr0_disable();
    return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
    if (!system_uses_ttbr0_pan())
        return false;
    __uaccess_ttbr0_enable();
    return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
    return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
    return false;
}
#endif
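
/*
 * A minimal sketch of how the helpers above are paired (see
 * raw_copy_from_user() further down for the real thing): while user
 * access is disabled, TTBR0_EL1 points at the empty reserved_pg_dir, so
 * a stray dereference of a user address faults instead of succeeding.
 *
 *     uaccess_ttbr0_enable();
 *     ret = __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
 *     uaccess_ttbr0_disable();
 */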

static inline void __uaccess_disable_hw_pan(void)
{
    asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
            CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
    asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
            CONFIG_ARM64_PAN));
}

/*
 * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
 * affects EL0 and TCF affects EL1 irrespective of which TTBR is
 * used.
 * The kernel usually accesses TTBR0-mapped (user) memory with
 * LDTR/STTR instructions when UAO is available, so those act as
 * EL0 accesses and are checked against TCF0.
 * However, the futex.h code uses exclusives, which execute at EL1;
 * these can cause a tag check fault even if the user disables TCF0.
 *
 * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
 * and reset it in uaccess_disable(). The Tag Check Override (TCO) bit
 * temporarily disables tag checking, preventing the issue.
 */
static inline void __uaccess_disable_tco(void)
{
    asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

static inline void __uaccess_enable_tco(void)
{
    asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

/*
 * These functions disable tag checking only in MTE async (or
 * asymmetric) mode, since sync mode raises exceptions synchronously
 * and the nofault and load_unaligned_zeropad paths can handle those.
 */
static inline void __uaccess_disable_tco_async(void)
{
    if (system_uses_mte_async_or_asymm_mode())
        __uaccess_disable_tco();
}

static inline void __uaccess_enable_tco_async(void)
{
    if (system_uses_mte_async_or_asymm_mode())
        __uaccess_enable_tco();
}

static inline void uaccess_disable_privileged(void)
{
    __uaccess_disable_tco();

    if (uaccess_ttbr0_disable())
        return;

    __uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
    __uaccess_enable_tco();

    if (uaccess_ttbr0_enable())
        return;

    __uaccess_disable_hw_pan();
}

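/*
 * A minimal usage sketch, matching how the futex.h exclusives mentioned
 * above are handled: privileged (non-LDTR/STTR) accesses to user memory
 * are bracketed by this pair so that PAN and MTE tag checks are both
 * taken care of.
 *
 *     uaccess_enable_privileged();
 *     ... LDXR/STXR sequence on a user address ...
 *     uaccess_disable_privileged();
 */
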
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
 * user address. In case the pointer is tagged (has the top byte set), untag
 * the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
    void __user *safe_ptr;

    asm volatile(
    "   bics    xzr, %3, %2\n"
    "   csel    %0, %1, xzr, eq\n"
    : "=&r" (safe_ptr)
    : "r" (ptr), "r" (TASK_SIZE_MAX - 1),
      "r" (untagged_addr(ptr))
    : "cc");

    csdb();
    return safe_ptr;
}

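/*
 * In the sequence above, `bics xzr, %3, %2` ANDs the untagged pointer
 * with ~(TASK_SIZE_MAX - 1) and sets the Z flag only when no bits above
 * the user address range are set; `csel` then yields the original
 * pointer on success and NULL (xzr) otherwise. The trailing csdb()
 * keeps a mispredicted comparison from being used to speculatively
 * dereference an out-of-range pointer.
 */
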
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(load, reg, x, addr, err, type)            \
    asm volatile(                           \
    "1: " load "    " reg "1, [%2]\n"           \
    "2:\n"                              \
    _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)      \
    : "+r" (err), "=&r" (x)                     \
    : "r" (addr))

#define __raw_get_mem(ldr, x, ptr, err, type)                   \
do {                                        \
    unsigned long __gu_val;                         \
    switch (sizeof(*(ptr))) {                       \
    case 1:                                 \
        __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type); \
        break;                              \
    case 2:                                 \
        __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type); \
        break;                              \
    case 4:                                 \
        __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type);     \
        break;                              \
    case 8:                                 \
        __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type);     \
        break;                              \
    default:                                \
        BUILD_BUG();                            \
    }                                   \
    (x) = (__force __typeof__(*(ptr)))__gu_val;             \
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_get_user(x, ptr, err)                 \
do {                                    \
    __typeof__(*(ptr)) __user *__rgu_ptr = (ptr);           \
    __typeof__(x) __rgu_val;                    \
    __chk_user_ptr(ptr);                        \
                                    \
    uaccess_ttbr0_enable();                     \
    __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U);        \
    uaccess_ttbr0_disable();                    \
                                    \
    (x) = __rgu_val;                        \
} while (0)

#define __get_user_error(x, ptr, err)                   \
do {                                    \
    __typeof__(*(ptr)) __user *__p = (ptr);             \
    might_fault();                          \
    if (access_ok(__p, sizeof(*__p))) {             \
        __p = uaccess_mask_ptr(__p);                \
        __raw_get_user((x), __p, (err));            \
    } else {                            \
        (x) = (__force __typeof__(x))0; (err) = -EFAULT;    \
    }                               \
} while (0)

#define __get_user(x, ptr)                      \
({                                  \
    int __gu_err = 0;                       \
    __get_user_error((x), (ptr), __gu_err);             \
    __gu_err;                           \
})

#define get_user    __get_user

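/*
 * A minimal usage sketch: get_user() returns 0 on success; on a faulting
 * or out-of-range address it returns -EFAULT and zeroes the destination.
 * `uptr` is a hypothetical user pointer.
 *
 *     int val;
 *
 *     if (get_user(val, uptr))
 *         return -EFAULT;
 */
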
/*
 * We must not call into the scheduler between __uaccess_enable_tco_async() and
 * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __get_kernel_nofault(dst, src, type, err_label)         \
do {                                    \
    __typeof__(dst) __gkn_dst = (dst);              \
    __typeof__(src) __gkn_src = (src);              \
    int __gkn_err = 0;                      \
                                    \
    __uaccess_enable_tco_async();                   \
    __raw_get_mem("ldr", *((type *)(__gkn_dst)),            \
              (__force type *)(__gkn_src), __gkn_err, K);   \
    __uaccess_disable_tco_async();                  \
                                    \
    if (unlikely(__gkn_err))                    \
        goto err_label;                     \
} while (0)

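/*
 * A minimal usage sketch (callers normally reach this via
 * copy_from_kernel_nofault()): read a value from a kernel address that
 * may fault, branching to a local error label instead of oopsing.
 * `kptr` and the label are hypothetical.
 *
 *     long val;
 *
 *     __get_kernel_nofault(&val, kptr, long, fault);
 *     return val;
 * fault:
 *     return -EFAULT;
 */
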
#define __put_mem_asm(store, reg, x, addr, err, type)           \
    asm volatile(                           \
    "1: " store "   " reg "1, [%2]\n"           \
    "2:\n"                              \
    _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0)            \
    : "+r" (err)                            \
    : "r" (x), "r" (addr))

#define __raw_put_mem(str, x, ptr, err, type)                   \
do {                                        \
    __typeof__(*(ptr)) __pu_val = (x);                  \
    switch (sizeof(*(ptr))) {                       \
    case 1:                                 \
        __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \
        break;                              \
    case 2:                                 \
        __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \
        break;                              \
    case 4:                                 \
        __put_mem_asm(str, "%w", __pu_val, (ptr), (err), type);     \
        break;                              \
    case 8:                                 \
        __put_mem_asm(str, "%x", __pu_val, (ptr), (err), type);     \
        break;                              \
    default:                                \
        BUILD_BUG();                            \
    }                                   \
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_put_user(x, ptr, err)                 \
do {                                    \
    __typeof__(*(ptr)) __user *__rpu_ptr = (ptr);           \
    __typeof__(*(ptr)) __rpu_val = (x);             \
    __chk_user_ptr(__rpu_ptr);                  \
                                    \
    uaccess_ttbr0_enable();                     \
    __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U);        \
    uaccess_ttbr0_disable();                    \
} while (0)

#define __put_user_error(x, ptr, err)                   \
do {                                    \
    __typeof__(*(ptr)) __user *__p = (ptr);             \
    might_fault();                          \
    if (access_ok(__p, sizeof(*__p))) {             \
        __p = uaccess_mask_ptr(__p);                \
        __raw_put_user((x), __p, (err));            \
    } else {                            \
        (err) = -EFAULT;                    \
    }                               \
} while (0)

#define __put_user(x, ptr)                      \
({                                  \
    int __pu_err = 0;                       \
    __put_user_error((x), (ptr), __pu_err);             \
    __pu_err;                           \
})

#define put_user    __put_user

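/*
 * A minimal usage sketch, mirroring get_user() above: put_user() returns
 * 0 on success and -EFAULT otherwise. `uptr` is a hypothetical user
 * pointer.
 *
 *     if (put_user(val, uptr))
 *         return -EFAULT;
 */
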
/*
 * We must not call into the scheduler between __uaccess_enable_tco_async() and
 * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __put_kernel_nofault(dst, src, type, err_label)         \
do {                                    \
    __typeof__(dst) __pkn_dst = (dst);              \
    __typeof__(src) __pkn_src = (src);              \
    int __pkn_err = 0;                      \
                                    \
    __uaccess_enable_tco_async();                   \
    __raw_put_mem("str", *((type *)(__pkn_src)),            \
              (__force type *)(__pkn_dst), __pkn_err, K);   \
    __uaccess_disable_tco_async();                  \
                                    \
    if (unlikely(__pkn_err))                    \
        goto err_label;                     \
} while (0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)                 \
({                                  \
    unsigned long __acfu_ret;                   \
    uaccess_ttbr0_enable();                     \
    __acfu_ret = __arch_copy_from_user((to),            \
                      __uaccess_mask_ptr(from), (n));   \
    uaccess_ttbr0_disable();                    \
    __acfu_ret;                         \
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)                   \
({                                  \
    unsigned long __actu_ret;                   \
    uaccess_ttbr0_enable();                     \
    __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),    \
                    (from), (n));           \
    uaccess_ttbr0_disable();                    \
    __actu_ret;                         \
})

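/*
 * Note the convention, with a minimal sketch of the usual call path: the
 * raw copy routines return the number of bytes *not* copied, so zero
 * means success. Most callers go through copy_from_user()/copy_to_user(),
 * which add the access_ok() check on top of these helpers.
 *
 *     if (copy_from_user(kbuf, ubuf, len))
 *         return -EFAULT;
 */
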
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
    if (access_ok(to, n)) {
        uaccess_ttbr0_enable();
        n = __arch_clear_user(__uaccess_mask_ptr(to), n);
        uaccess_ttbr0_disable();
    }
    return n;
}
#define clear_user  __clear_user

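/*
 * A minimal usage sketch: clear_user() zeroes a user buffer and, like
 * the copy routines, returns the number of bytes it could not clear.
 *
 *     if (clear_user(ubuf, len))
 *         return -EFAULT;
 */
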
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

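/*
 * A minimal usage sketch: strncpy_from_user() returns the length of the
 * copied string (excluding the trailing NUL), `count` if the source did
 * not fit (in which case `dest` is not NUL-terminated), or -EFAULT on a
 * faulting address. `uname` is a hypothetical user pointer.
 *
 *     char name[32];
 *     long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *     if (len < 0)
 *         return len;
 */
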
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
    kasan_check_write(dst, size);
    return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
static inline size_t probe_subpage_writeable(const char __user *uaddr,
                         size_t size)
{
    if (!system_supports_mte())
        return 0;
    return mte_probe_user_range(uaddr, size);
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
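
/*
 * A minimal usage sketch: after faulting in a user range at page
 * granularity, a caller can use this to detect MTE tag-check faults at
 * sub-page granularity and treat a non-zero return as an unwritable
 * range.
 *
 *     if (probe_subpage_writeable(uaddr, size))
 *         return -EFAULT;
 */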

#endif /* __ASM_UACCESS_H */