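// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, bad exiting, bad
 * data corruptions, ...) and tests for correct behavior (i.e. BUG on
 * sanity check failures).
 */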
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

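/*
 * Make sure our attempts to over run the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */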
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

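/*
 * Make sure the compiler does not optimize this function or its stack
 * frame away:
 * - the function is marked noinline
 * - the stack variables are marked volatile
 * - the buffer is written (memset()) and read (buf[...] passed as arg)
 * - the function may have external effects (memzero_explicit())
 * - no tail recursion is possible
 */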
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];
	volatile int ret;

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	if (!remaining)
		ret = 0;
	else
		ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
	memzero_explicit((void *)buf, sizeof(buf));
	return ret;
}

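/* If the depth is negative, use the default, otherwise keep the parameter. */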
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

static void lkdtm_PANIC(void)
{
	panic("dumptest");
}

static void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

static void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

static void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

static void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

static void lkdtm_LOOP(void)
{
	for (;;)
		;
}

static void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

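/* This should trip the stack canary, not corrupt the return address. */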
static noinline void lkdtm_CORRUPT_STACK(void)
{
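	/* Use default char array length that triggers stack protection. */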
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

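/* Same as above but will only get a canary with -fstack-protector-strong */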
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

static void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}

static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

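	/* Scan the first 16 stack words for the current and init_task canaries. */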
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
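		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
		 */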
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}

		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
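			/* Reset the test. */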
			stack_canary_pid = 0;
		}
	}
}

static void lkdtm_REPORT_STACK_CANARY(void)
{
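	/* Use default char array length that triggers stack protection. */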
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}

static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

static void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_SPINLOCKUP(void)
{
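	/* Must be called twice to trigger. */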
	spin_lock(&lock_me_up);
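	/* Let sparse know we intended to exit holding the lock. */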
	__release(&lock_me_up);
}

static void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;

static void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

static void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}

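/* Intentionally using old-style flex array definition of 1 byte. */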
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

static void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
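	/* For both, touch all bytes in the actual member size. */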
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
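	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */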
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

static void lkdtm_CORRUPT_LIST_ADD(void)
{
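	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */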
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

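	/*
	 * Adding to the list performs these steps:
	 *	entry->next = head->next
	 *	entry->prev = head
	 *	head->next->prev = entry
	 *	head->next = entry
	 */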
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
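	/*
	 * Redirect the head's "next" pointer into the target array, so
	 * the "head->next->prev = entry" write in list_add() lands in
	 * "target" instead of a real list node.
	 */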
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

static void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

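	/* Point the entry's "next" at the target array so list_del() writes there. */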
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

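/* Test that VMAP_STACK is actually allocating with a leading guard page */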
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

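/* Test that VMAP_STACK is actually allocating with a trailing guard page */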
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}

static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

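	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */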
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
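		/* mov %rdi, %cr4 */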
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
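		/* mov %rdi, %rax; mov %rax, %cr4 */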
		if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
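	/*
	 * Trigger #DF by setting the stack limit to zero.  This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */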
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

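	/*
	 * Put our zero-limit segment in SS and then trigger a fault.  The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window.  This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */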
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
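		/* Reset the keys of the current task. */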
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");

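	/*
	 * The PAC is a hash of the input keys, the return address, and the
	 * stack pointer. Because the PAC has relatively few bits, a single
	 * corrupted return could collide with a valid signature, so iterate
	 * several times to make a spurious pass unlikely.
	 */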
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(BUG),
	CRASHTYPE(WARNING),
	CRASHTYPE(WARNING_MESSAGE),
	CRASHTYPE(EXCEPTION),
	CRASHTYPE(LOOP),
	CRASHTYPE(EXHAUST_STACK),
	CRASHTYPE(CORRUPT_STACK),
	CRASHTYPE(CORRUPT_STACK_STRONG),
	CRASHTYPE(REPORT_STACK),
	CRASHTYPE(REPORT_STACK_CANARY),
	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
	CRASHTYPE(SOFTLOCKUP),
	CRASHTYPE(HARDLOCKUP),
	CRASHTYPE(SPINLOCKUP),
	CRASHTYPE(HUNG_TASK),
	CRASHTYPE(OVERFLOW_SIGNED),
	CRASHTYPE(OVERFLOW_UNSIGNED),
	CRASHTYPE(ARRAY_BOUNDS),
	CRASHTYPE(CORRUPT_LIST_ADD),
	CRASHTYPE(CORRUPT_LIST_DEL),
	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
	CRASHTYPE(UNSET_SMEP),
	CRASHTYPE(DOUBLE_FAULT),
	CRASHTYPE(CORRUPT_PAC),
};

struct crashtype_category bugs_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};