// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * right tests.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of the stack allocation
	 * so that we don't step on future stack users regardless of
	 * stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

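	/*
	 * vm_mmap() returns either the userspace address of the new
	 * mapping or a negative errno cast to unsigned long, so any
	 * return value at or above TASK_SIZE indicates failure.
	 */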
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * Writing to memory beyond the end of the stack cannot
		 * be done safely, so only test copy_from_user() against
		 * the bad frame case.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_slab_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

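	/*
	 * Offset 16 bytes into the allocation so that the "bad" copies
	 * below, which use the full allocation size, run 16 bytes past
	 * the end of the object and should trip the slab size check.
	 */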
	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for copies into and out of only the whitelisted window of
 * a slab object, as declared by kmem_cache_create_usercopy().
 */
static void do_usercopy_slab_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the middle of
	 * the object.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			     PROT_READ | PROT_WRITE | PROT_EXEC,
			     MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy() */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

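	/*
	 * The "bad" copies below start one byte before the whitelisted
	 * window: hardened usercopy should reject any copy that is not
	 * entirely contained within the window.
	 */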
	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
	do_usercopy_slab_size(true);
}

static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
	do_usercopy_slab_size(false);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
	do_usercopy_slab_whitelist(true);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
	do_usercopy_slab_whitelist(false);
}

static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

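/*
 * Note: there is no USERCOPY_STACK_BEYOND_FROM counterpart;
 * do_usercopy_stack() skips the copy_from_user() checks when
 * bad_frame is false, since writing beyond the stack cannot be
 * done safely.
 */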
static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

static void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

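	/*
	 * function_nocfi() strips any CFI jump-table indirection, so the
	 * copy below reads from the actual kernel text of vm_mmap()
	 * rather than from a jump-table entry.
	 */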
	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap),
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This expects "kaddr" to point to the final PAGE_SIZE region of an
 * allocation, so the "bad" PAGE_SIZE copy (starting halfway into that
 * page) spans beyond the end of the allocation. Only copy_to_user() is
 * exercised here: a copy_from_user() overflow would corrupt kernel
 * memory.
 */
static void do_usercopy_page_span(const char *name, void *kaddr)
{
	unsigned long uaddr;

	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (uaddr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	/* Initialize contents. */
	memset(kaddr, 0xAA, PAGE_SIZE);

	/* Start copying halfway into the page. */
	kaddr += PAGE_SIZE / 2;

	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr,
			 unconst + (PAGE_SIZE / 2))) {
		pr_err("copy_to_user() failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
		goto free_user;
	}

	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(uaddr, PAGE_SIZE);
}

static void lkdtm_USERCOPY_VMALLOC(void)
{
	void *addr;

	addr = vmalloc(PAGE_SIZE);
	if (!addr) {
		pr_err("vmalloc() failed!?\n");
		return;
	}
	do_usercopy_page_span("vmalloc", addr);
	vfree(addr);
}

static void lkdtm_USERCOPY_FOLIO(void)
{
	struct folio *folio;
	void *addr;

	/*
	 * Allocate an order-1 (two page) folio and start the copy in
	 * its second page, so the page-spanning "bad" copy in
	 * do_usercopy_page_span() runs past the end of the folio.
	 */
	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
	if (!folio) {
		pr_err("folio_alloc() failed!?\n");
		return;
	}
	addr = folio_address(folio);
	if (addr)
		do_usercopy_page_span("folio", addr + PAGE_SIZE);
	else
		pr_err("folio_address() failed?!\n");
	folio_put(folio);
}

void __init lkdtm_usercopy_init(void)
{
	/*
	 * Prepare a cache with a whitelisted usercopy window covering
	 * the cache_size / 16 bytes starting at offset cache_size / 4
	 * in each object; do_usercopy_slab_whitelist() copies relative
	 * to this window.
	 */
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
	CRASHTYPE(USERCOPY_STACK_BEYOND),
	CRASHTYPE(USERCOPY_VMALLOC),
	CRASHTYPE(USERCOPY_FOLIO),
	CRASHTYPE(USERCOPY_KERNEL),
};

struct crashtype_category usercopy_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};