// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks whether a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *  NOT_STACK: not at all on the stack
 *  GOOD_FRAME: fully within a valid stack frame
 *  GOOD_STACK: within the current stack (when exact frame-checking is not possible)
 *  BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
    const void * const stack = task_stack_page(current);
    const void * const stackend = stack + THREAD_SIZE;
    int ret;

    /* Object is not on the stack at all. */
    if (obj + len <= stack || stackend <= obj)
        return NOT_STACK;

    /*
     * Reject: object partially overlaps the stack (passing the
     * check above means at least one end is within the stack,
     * so if this check fails, the other end is outside the stack).
     */
    if (obj < stack || stackend < obj + len)
        return BAD_STACK;

    /* Check if object is safely within a valid frame. */
    ret = arch_within_stack_frames(stack, stackend, obj, len);
    if (ret)
        return ret;

    /* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
    if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
        if ((void *)current_stack_pointer < obj + len)
            return BAD_STACK;
    } else {
        if (obj < (void *)current_stack_pointer)
            return BAD_STACK;
    }
#endif

    return GOOD_STACK;
}
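
/*
 * Illustrative sketch (not part of the original file): the kind of buggy
 * copy the stack checks above are meant to catch. The helper name, its
 * parameters, and the <linux/uaccess.h> dependency are assumptions made
 * purely for the example. If 'len' exceeds sizeof(buf), the copy spills
 * past the local buffer, and hardened usercopy rejects it (BAD_STACK or
 * an arch frame-check failure) instead of leaking adjacent stack memory.
 */
#if 0	/* example only, never compiled */
static long example_stack_overread(void __user *uptr, unsigned long len)
{
    char buf[16];   /* lives on the current task's stack */

    memset(buf, 0, sizeof(buf));
    /* With len > sizeof(buf), copy_to_user() aborts via usercopy_abort(). */
    if (copy_to_user(uptr, buf, len))
        return -EFAULT;
    return 0;
}
#endif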

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
                   bool to_user, unsigned long offset,
                   unsigned long len)
{
    pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
         to_user ? "exposure" : "overwrite",
         to_user ? "from" : "to",
         name ? : "unknown?!",
         detail ? " '" : "", detail ? : "", detail ? "'" : "",
         offset, len);

    /*
     * For greater effect, it would be nice to do do_group_exit(),
     * but BUG() actually hooks all the lock-breaking and per-arch
     * Oops code, so that is used here instead.
     */
    BUG();
}
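
/*
 * Illustrative sketch (not part of the original file): how a cache
 * whitelist, as described in the comment above, might be set up. The
 * structure, field names, and cache name are assumptions made for the
 * example; the API is kmem_cache_create_usercopy(). Only the 'request'
 * member of objects from this cache may then be copied to/from
 * userspace; copies touching other fields trip __check_heap_object().
 */
#if 0	/* example only, never compiled */
struct example_obj {
    spinlock_t lock;
    char request[64];   /* the only user-accessible window */
    void *private_data;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
    example_cachep = kmem_cache_create_usercopy("example_cache",
                sizeof(struct example_obj), 0, SLAB_HWCACHE_ALIGN,
                offsetof(struct example_obj, request),
                sizeof_field(struct example_obj, request),
                NULL);
    return example_cachep ? 0 : -ENOMEM;
}
#endif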

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
             unsigned long low, unsigned long high)
{
    const unsigned long check_low = ptr;
    unsigned long check_high = check_low + n;

    /* Does not overlap if entirely above or entirely below. */
    if (check_low >= high || check_high <= low)
        return false;

    return true;
}
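
/*
 * Worked example (illustrative, not part of the original file): with both
 * ranges half-open, [0x1000, 0x1010) and [0x100c, 0x1100) overlap because
 * 0x100c lies below 0x1010, while [0x1000, 0x1010) and [0x1010, 0x1100)
 * do not overlap, since check_high <= low holds at 0x1010.
 */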

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
                        unsigned long n, bool to_user)
{
    unsigned long textlow = (unsigned long)_stext;
    unsigned long texthigh = (unsigned long)_etext;
    unsigned long textlow_linear, texthigh_linear;

    if (overlaps(ptr, n, textlow, texthigh))
        usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

    /*
     * Some architectures have virtual memory mappings with a secondary
     * mapping of the kernel text, i.e. there is more than one virtual
     * kernel address that points to the kernel image. This usually
     * happens when there is a separate linear physical memory mapping,
     * in which case __pa() is not simply the reverse of __va(). This
     * can be detected and checked:
     */
    textlow_linear = (unsigned long)lm_alias(textlow);
    /* No different mapping: we're done. */
    if (textlow_linear == textlow)
        return;

    /* Check the secondary mapping... */
    texthigh_linear = (unsigned long)lm_alias(texthigh);
    if (overlaps(ptr, n, textlow_linear, texthigh_linear))
        usercopy_abort("linear kernel text", NULL, to_user,
                   ptr - textlow_linear, n);
}
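
/*
 * Illustrative note (not part of the original file): on arm64, for
 * example, the kernel image is mapped separately from the linear map, so
 * lm_alias(_stext) differs from _stext and the secondary range above gets
 * checked as well; see the lm_alias() definition in <linux/mm.h>.
 */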

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
                       bool to_user)
{
    /* Reject if object wraps past end of memory. */
    if (ptr + (n - 1) < ptr)
        usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

    /* Reject if NULL or ZERO-allocation. */
    if (ZERO_OR_NULL_PTR(ptr))
        usercopy_abort("null address", NULL, to_user, ptr, n);
}
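
/*
 * Worked example of the wrap check (illustrative, 64-bit values assumed):
 * with ptr = 0xffffffffffffff00 and n = 0x200, ptr + (n - 1) wraps around
 * to 0xff, which is less than ptr, so the copy is rejected before any of
 * the later range comparisons can be fooled by the overflow.
 */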

static inline void check_heap_object(const void *ptr, unsigned long n,
                     bool to_user)
{
    unsigned long addr = (unsigned long)ptr;
    unsigned long offset;
    struct folio *folio;

    if (is_kmap_addr(ptr)) {
        offset = offset_in_page(ptr);
        if (n > PAGE_SIZE - offset)
            usercopy_abort("kmap", NULL, to_user, offset, n);
        return;
    }

    if (is_vmalloc_addr(ptr)) {
        struct vmap_area *area = find_vmap_area(addr);

        if (!area)
            usercopy_abort("vmalloc", "no area", to_user, 0, n);

        if (n > area->va_end - addr) {
            offset = addr - area->va_start;
            usercopy_abort("vmalloc", NULL, to_user, offset, n);
        }
        return;
    }

    if (!virt_addr_valid(ptr))
        return;

    folio = virt_to_folio(ptr);

    if (folio_test_slab(folio)) {
        /* Check slab allocator for flags and size. */
        __check_heap_object(ptr, n, folio_slab(folio), to_user);
    } else if (folio_test_large(folio)) {
        offset = ptr - folio_address(folio);
        if (n > folio_size(folio) - offset)
            usercopy_abort("page alloc", NULL, to_user, offset, n);
    }
}
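
/*
 * Illustrative sketch (not part of the original file): a heap copy the
 * checks above would reject. The helper name and the <linux/uaccess.h>
 * dependency are assumptions made for the example. Asking to copy 64
 * bytes out of a 32-byte slab allocation exceeds the object size, so
 * __check_heap_object() aborts the copy instead of exposing neighboring
 * heap memory.
 */
#if 0	/* example only, never compiled */
static long example_heap_overread(void __user *uptr)
{
    char *buf = kmalloc(32, GFP_KERNEL);
    long ret = 0;

    if (!buf)
        return -ENOMEM;
    /* 64 bytes from a 32-byte object: hardened usercopy aborts here. */
    if (copy_to_user(uptr, buf, 64))
        ret = -EFAULT;
    kfree(buf);
    return ret;
}
#endif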

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
    if (static_branch_unlikely(&bypass_usercopy_checks))
        return;

    /* Skip all tests if size is zero. */
    if (!n)
        return;

    /* Check for invalid addresses. */
    check_bogus_address((const unsigned long)ptr, n, to_user);

    /* Check for bad stack object. */
    switch (check_stack_object(ptr, n)) {
    case NOT_STACK:
        /* Object is not touching the current process stack. */
        break;
    case GOOD_FRAME:
    case GOOD_STACK:
        /*
         * Object is either in the correct frame (when it
         * is possible to check) or just generally on the
         * process stack (when frame checking not available).
         */
        return;
    default:
        usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
            IS_ENABLED(CONFIG_STACK_GROWSUP) ?
                ptr - (void *)current_stack_pointer :
                (void *)current_stack_pointer - ptr,
#else
            0,
#endif
            n);
    }

    /* Check for bad heap object. */
    check_heap_object(ptr, n, to_user);

    /* Check for object in kernel to avoid text exposure. */
    check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
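
/*
 * Sketch of how callers reach this function (paraphrased, not part of the
 * original file): the uaccess helpers call the check_object_size() wrapper
 * from <linux/thread_info.h>, which is roughly:
 *
 *   static __always_inline void
 *   check_object_size(const void *ptr, unsigned long n, bool to_user)
 *   {
 *       if (!__builtin_constant_p(n))
 *           __check_object_size(ptr, n, to_user);
 *   }
 *
 * so only copies whose size is not a compile-time constant pay for the
 * runtime checks above.
 */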

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
    if (strtobool(str, &enable_checks))
        pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
            str);
    return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
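
/*
 * For example, booting with "hardened_usercopy=off" on the kernel command
 * line sets enable_checks to false, so the late initcall below flips
 * bypass_usercopy_checks and the runtime checks are skipped entirely.
 */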

static int __init set_hardened_usercopy(void)
{
    if (enable_checks == false)
        static_branch_enable(&bypass_usercopy_checks);
    return 1;
}

late_initcall(set_hardened_usercopy);