// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when can't frame-check exactly)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}
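
/*
 * Illustrative sketch (not part of the source): for a buffer declared
 * on the current task's stack,
 *
 *	char buf[16];
 *	check_stack_object(buf, sizeof(buf));	-> GOOD_FRAME or GOOD_STACK
 *	check_stack_object(buf, THREAD_SIZE);	-> BAD_STACK (crosses a bound)
 *
 * while a kmalloc()ed pointer returns NOT_STACK. Whether GOOD_FRAME or
 * only GOOD_STACK is reachable depends on the architecture providing a
 * real arch_within_stack_frames() implementation.
 */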

/*
 * If this function is reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip past any of these checks. For non-stack buffers, normal operation
 * should not be able to trip any of these checks either (but maybe bad
 * usercopy whitelisting is in place). Therefore, this function should
 * never be called.
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
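
/*
 * For reference, a hypothetical report (the cache name and numbers are
 * made up for illustration) produced by the format string above:
 *
 *	usercopy: Kernel memory exposure attempt detected from SLUB object
 *	'vm_area_struct' (offset 40, size 64)!
 *
 * The to_user case reads "exposure ... from"; an overwrite reads
 * "overwrite attempt detected to ...".
 */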

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
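
/*
 * Worked example of the half-open semantics: with low=0x1008 and
 * high=0x2000, overlaps(0x1000, 0x10, ...) is true (the ranges share
 * [0x1008,0x1010)), while overlaps(0x1000, 0x8, ...) is false, since
 * [0x1000,0x1008) merely touches low at 0x1008 and never enters the
 * [low,high) range.
 */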

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. It is usually
	 * when there is a separate linear physical memory mapping, in that
	 * __pa() is not just the reverse of __va(). This can be detected
	 * and checked against the linear mapping.
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}
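
/*
 * Note: on architectures such as arm64, the kernel image executes from
 * a dedicated mapping while lm_alias() (i.e. __va(__pa_symbol(x)))
 * resolves to the linear-map alias of the same text, so both ranges
 * must be rejected; where the two coincide, the early return above
 * skips the second check entirely.
 */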
0148
0149 static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
0150 bool to_user)
0151 {
0152
0153 if (ptr + (n - 1) < ptr)
0154 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
0155
0156
0157 if (ZERO_OR_NULL_PTR(ptr))
0158 usercopy_abort("null address", NULL, to_user, ptr, n);
0159 }
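
/*
 * Example of the wrap check: on a 64-bit machine, ptr = ULONG_MAX - 2
 * with n = 8 gives ptr + (n - 1) == 4 after overflow, which is < ptr,
 * so a copy that would wrap through the top of the address space is
 * rejected before any range checks are attempted.
 */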

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long offset;
	struct folio *folio;

	/* kmap mappings cover a single page; reject copies that cross it. */
	if (is_kmap_addr(ptr)) {
		offset = offset_in_page(ptr);
		if (n > PAGE_SIZE - offset)
			usercopy_abort("kmap", NULL, to_user, offset, n);
		return;
	}

	/* For vmalloc, bound the copy by the containing area's extent. */
	if (is_vmalloc_addr(ptr)) {
		struct vmap_area *area = find_vmap_area(addr);

		if (!area)
			usercopy_abort("vmalloc", "no area", to_user, 0, n);

		if (n > area->va_end - addr) {
			offset = addr - area->va_start;
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		}
		return;
	}

	if (!virt_addr_valid(ptr))
		return;

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else if (folio_test_large(folio)) {
		/* Reject copies crossing the end of a compound allocation. */
		offset = ptr - folio_address(folio);
		if (n > folio_size(folio) - offset)
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	}
}
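
/*
 * Example of the kmap bound above: with PAGE_SIZE == 4096, copying
 * n = 200 bytes starting at offset 4000 into a kmap'd page exceeds the
 * 96 bytes remaining in that page and aborts. kmap mappings are only
 * ever one page, so crossing the boundary is never legitimate.
 */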

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bare stack
 * - fully contained by declared stack object (when available)
 * - fully within SLAB object (when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			       IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				       ptr - (void *)current_stack_pointer :
				       (void *)current_stack_pointer - ptr,
#else
			       0,
#endif
			       n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
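
/*
 * Call-path sketch (simplified from include/linux/thread_info.h): the
 * uaccess helpers funnel through check_object_size(), which only calls
 * the out-of-line check when the copy size is not a compile-time
 * constant:
 *
 *	static __always_inline void check_object_size(const void *ptr,
 *						      unsigned long n,
 *						      bool to_user)
 *	{
 *		if (!__builtin_constant_p(n))
 *			__check_object_size(ptr, n, to_user);
 *	}
 */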

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (strtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
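
/*
 * Usage: booting with "hardened_usercopy=off" flips enable_checks to
 * false, so that set_hardened_usercopy() below enables the bypass
 * static branch and removes the runtime cost of these checks.
 */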

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);