0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/debug_locks.h>
0009 #include <linux/delay.h>
0010 #include <linux/jiffies.h>
0011 #include <linux/kallsyms.h>
0012 #include <linux/kernel.h>
0013 #include <linux/lockdep.h>
0014 #include <linux/preempt.h>
0015 #include <linux/printk.h>
0016 #include <linux/sched.h>
0017 #include <linux/spinlock.h>
0018 #include <linux/stacktrace.h>
0019
0020 #include "kcsan.h"
0021 #include "encoding.h"
0022
0023
0024
0025
/* Max. number of stack entries captured per report. */
#define NUM_STACK_ENTRIES 64

/* Common access info; describes one side of a reported race. */
struct access_info {
	const volatile void *ptr;	/* address of the access */
	size_t size;			/* size of the access in bytes */
	int access_type;		/* KCSAN_ACCESS_* flags */
	int task_pid;			/* pid, or -1 if not in task context (see get_thread_desc()) */
	int cpu_id;			/* CPU the access executed on */
	unsigned long ip;		/* ip of scoped access, 0 otherwise (see prepare_access_info()) */
};
0037
0038
0039
0040
0041
/*
 * Per-watchpoint info about the "other" (previously set up) side of a race:
 * filled in by prepare_report_producer() and consumed (then invalidated via
 * release_report()) by prepare_report_consumer().
 */
struct other_info {
	struct access_info ai;	/* ai.size != 0 denotes a valid (in-use) entry */
	unsigned long stack_entries[NUM_STACK_ENTRIES];
	int num_stack_entries;

	/*
	 * The task that set up this other_info; only set while that task
	 * blocks awaiting consumption (CONFIG_KCSAN_VERBOSE only, see
	 * set_other_info_task_blocking()), so the consumer can print its held
	 * locks and IRQ trace. NULL otherwise.
	 */
	struct task_struct *task;
};
0066
0067
0068
0069
0070
0071 static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
0072
0073
0074
0075
/* Information about a previously reported race, used for rate limiting. */
struct report_time {
	/* The last time this race was reported (jiffies); 0 = never used. */
	unsigned long time;

	/*
	 * The distinguishing frames of the two racing threads; for races at an
	 * unknown origin one of the frames may be 0 (see rate_limit_report()
	 * callers in print_report()).
	 */
	unsigned long frame1;
	unsigned long frame2;
};

/*
 * Number of rate-limiting entries: one per CONFIG_KCSAN_REPORT_ONCE_IN_MS,
 * capped so the statically-sized array fits within one page.
 */
#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
#define REPORT_TIMES_SIZE \
	(CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ? \
	 REPORT_TIMES_MAX : \
	 CONFIG_KCSAN_REPORT_ONCE_IN_MS)
static struct report_time report_times[REPORT_TIMES_SIZE];
0106
0107
0108
0109
0110
0111
0112 static DEFINE_RAW_SPINLOCK(report_lock);
0113
0114
0115
0116
0117
/*
 * Returns true if this race (identified by the unordered pair of frames) was
 * already reported within the last CONFIG_KCSAN_REPORT_ONCE_IN_MS; otherwise
 * records it in report_times[] and returns false.
 */
static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
{
	struct report_time *use_entry = &report_times[0];
	unsigned long invalid_before;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);

	/* Rate limiting disabled entirely. */
	if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
		return false;

	invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);

	/* Check if a matching race report is still valid. */
	for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
		struct report_time *rt = &report_times[i];

		/*
		 * We must always have an entry to store this report in, since
		 * report_times[] cannot be resized; track the oldest entry
		 * seen so far, which is the one we will overwrite below.
		 */
		if (time_before(rt->time, use_entry->time))
			use_entry = rt;

		/*
		 * time == 0 means this entry (and all following ones, since
		 * entries are filled front to back) was never used; no need to
		 * scan further.
		 */
		if (rt->time == 0)
			break;

		/* Skip entries older than the rate-limit window. */
		if (time_before(rt->time, invalid_before))
			continue;

		/* Recently reported; compare the frame pair in both orders. */
		if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
		    (rt->frame1 == frame2 && rt->frame2 == frame1))
			return true;
	}

	/* Not rate limited: record this report in the oldest entry. */
	use_entry->time = jiffies;
	use_entry->frame1 = frame1;
	use_entry->frame2 = frame2;
	return false;
}
0166
0167
0168
0169
/*
 * Returns true if the report for the access whose top stack frame is
 * @top_frame should be filtered out (not printed).
 */
static bool
skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
{
	/* Callers never pass KCSAN_VALUE_CHANGE_FALSE (they filter it earlier). */
	WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);

	/*
	 * With CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY, suppress reports where
	 * no value change was observed (KCSAN_VALUE_CHANGE_MAYBE) -- unless
	 * the top frame looks RCU-related (rcu_/_rcu/_srcu in the symbol
	 * name). NOTE(review): the RCU exemption rationale is inferred from
	 * the substring checks below; presumably RCU primitives' effects may
	 * not manifest as plain value changes -- confirm against KCSAN docs.
	 */
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
	    value_change == KCSAN_VALUE_CHANGE_MAYBE) {
		/* Symbolize the top frame and inspect its name. */
		char buf[64];
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);

		if (!strnstr(buf, "rcu_", len) &&
		    !strnstr(buf, "_rcu", len) &&
		    !strnstr(buf, "_srcu", len))
			return true;
	}

	/* Finally, consult the debugfs-configured report filter. */
	return kcsan_skip_report_debugfs(top_frame);
}
0212
/* Return a human-readable description of the access for the report. */
static const char *get_access_type(int type)
{
	if (type & KCSAN_ACCESS_ASSERT) {
		/*
		 * For assertions, the WRITE bit widens the claim from "no
		 * writes" to "no accesses" (per the strings below).
		 */
		if (type & KCSAN_ACCESS_SCOPED) {
			if (type & KCSAN_ACCESS_WRITE)
				return "assert no accesses (reordered)";
			else
				return "assert no writes (reordered)";
		} else {
			if (type & KCSAN_ACCESS_WRITE)
				return "assert no accesses";
			else
				return "assert no writes";
		}
	}

	/* Exact flag combinations only; anything else is a bug. */
	switch (type) {
	case 0:
		return "read";
	case KCSAN_ACCESS_ATOMIC:
		return "read (marked)";
	case KCSAN_ACCESS_WRITE:
		return "write";
	case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "write (marked)";
	case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
		return "read-write";
	case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "read-write (marked)";
	case KCSAN_ACCESS_SCOPED:
		return "read (reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
		return "read (marked, reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
		return "write (reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "write (marked, reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
		return "read-write (reordered)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "read-write (marked, reordered)";
	default:
		BUG();	/* unexpected flag combination */
	}
}
0258
0259 static const char *get_bug_type(int type)
0260 {
0261 return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
0262 }
0263
0264
/*
 * Describe the executing context: "interrupt" if @task_id is -1, otherwise
 * "task <pid>". Returns a static buffer; callers are serialized by
 * report_lock.
 */
static const char *get_thread_desc(int task_id)
{
	static char buf[32];	/* "task " + any 32-bit pid fits */

	if (task_id == -1)
		return "interrupt";

	snprintf(buf, sizeof(buf), "task %i", task_id);
	return buf;
}
0275
0276
/*
 * Return the number of entries to skip at the top of @stack_entries so the
 * printed trace starts at the instrumented access, not inside the
 * instrumentation or KCSAN runtime itself.
 */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
{
	char buf[64];
	char *cur;
	int len, skip;

	for (skip = 0; skip < num_entries; ++skip) {
		/* Symbolize the frame to inspect the function name. */
		len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);

		/* Compiler-inserted instrumentation calls: skip. */
		if (strnstr(buf, "tsan_", len) ||
		    strnstr(buf, "_once_size", len))
			continue;

		cur = strnstr(buf, "kcsan_", len);
		if (cur) {
			cur += strlen("kcsan_");
			if (!str_has_prefix(cur, "test"))
				continue;	/* KCSAN runtime function: skip. */
			/* kcsan_test*: the KCSAN test itself -- do not skip. */
		}

		/* First frame of interest found; skip everything above it. */
		break;
	}

	return skip;
}
0308
0309
0310
0311
0312
0313
/*
 * Skips to the first stack entry belonging to the same function as @ip, then
 * replaces that entry with @ip (storing the replaced value in *@replaced).
 * Returns the number of entries to skip; falls back to get_stack_skipnr() if
 * no matching frame can be found.
 */
static int
replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip,
		    unsigned long *replaced)
{
	unsigned long symbolsize, offset;
	unsigned long target_func;
	int skip;

	if (kallsyms_lookup_size_offset(ip, &symbolsize, &offset))
		target_func = ip - offset;	/* start address of ip's function */
	else
		goto fallback;

	for (skip = 0; skip < num_entries; ++skip) {
		unsigned long func = stack_entries[skip];

		if (!kallsyms_lookup_size_offset(func, &symbolsize, &offset))
			goto fallback;
		func -= offset;	/* normalize to function start address */

		if (func == target_func) {
			*replaced = stack_entries[skip];
			stack_entries[skip] = ip;
			return skip;
		}
	}

fallback:
	/* Should not happen; the resulting stack trace may be misleading. */
	WARN_ONCE(1, "Cannot find frame for %pS in stack trace", (void *)ip);
	return get_stack_skipnr(stack_entries, num_entries);
}
0346
/*
 * Sanitize the captured stack trace for reporting: if @ip is non-zero (a
 * reordered/scoped access), splice @ip into the matching frame via
 * replace_stack_entry(); otherwise just compute how many instrumentation
 * frames to skip. Returns the number of entries to skip.
 */
static int
sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip,
		       unsigned long *replaced)
{
	if (!ip)
		return get_stack_skipnr(stack_entries, num_entries);

	return replace_stack_entry(stack_entries, num_entries, ip, replaced);
}
0354
0355
/*
 * Compare the symbolized names of @addr1 and @addr2 (bounded comparison, up
 * to the symbolization buffer size); used to order frames consistently.
 */
static int sym_strcmp(void *addr1, void *addr2)
{
	char name1[64];
	char name2[64];

	snprintf(name1, sizeof(name1), "%pS", addr1);
	snprintf(name2, sizeof(name2), "%pS", addr2);

	return strncmp(name1, name2, sizeof(name1));
}
0366
/*
 * Print the stack trace; if @reordered_to is non-zero, also print where the
 * access was reordered to (the original top frame was replaced by
 * replace_stack_entry()).
 */
static void
print_stack_trace(unsigned long stack_entries[], int num_entries, unsigned long reordered_to)
{
	stack_trace_print(stack_entries, num_entries, 0);
	if (reordered_to)
		pr_err(" |\n +-> reordered to: %pS\n", (void *)reordered_to);
}
0374
/*
 * Verbose reporting (CONFIG_KCSAN_VERBOSE): print @task's held locks and IRQ
 * trace events. No-op if @task is NULL (e.g. the other side timed out, see
 * set_other_info_task_blocking()).
 */
static void print_verbose_info(struct task_struct *task)
{
	if (!task)
		return;

	/* Restore IRQ state trace before printing it. */
	kcsan_restore_irqtrace(task);

	pr_err("\n");
	debug_show_held_locks(task);
	print_irqtrace_events(task);
}
0387
/*
 * Print a KCSAN race report. If @other_info is non-NULL, print a two-threads
 * report; otherwise report a race at an unknown origin. @old/@new/@mask
 * describe the observed value change (only shown for sizes <= 8 bytes).
 */
static void print_report(enum kcsan_value_change value_change,
			 const struct access_info *ai,
			 struct other_info *other_info,
			 u64 old, u64 new, u64 mask)
{
	unsigned long reordered_to = 0;
	unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
	int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
	int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip, &reordered_to);
	unsigned long this_frame = stack_entries[skipnr];
	unsigned long other_reordered_to = 0;
	unsigned long other_frame = 0;
	int other_skipnr = 0;

	/*
	 * Apply all filters before printing anything. This side's filtering
	 * is unconditional (VALUE_CHANGE_TRUE): value_change only describes
	 * the other thread's observation, filtered below.
	 */
	if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
		return;

	if (other_info) {
		other_skipnr = sanitize_stack_entries(other_info->stack_entries,
						      other_info->num_stack_entries,
						      other_info->ai.ip, &other_reordered_to);
		other_frame = other_info->stack_entries[other_skipnr];

		/* @value_change is only known for the other thread. */
		if (skip_report(value_change, other_frame))
			return;
	}

	/* Suppress duplicate reports within the rate-limit window. */
	if (rate_limit_report(this_frame, other_frame))
		return;

	/* Print report header. */
	pr_err("==================================================================\n");
	if (other_info) {
		int cmp;

		/*
		 * Order the two functions lexicographically so the bug title
		 * is the same regardless of which thread reports first; %ps
		 * (no offset) keeps the title short.
		 */
		cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
		pr_err("BUG: KCSAN: %s in %ps / %ps\n",
		       get_bug_type(ai->access_type | other_info->ai.access_type),
		       (void *)(cmp < 0 ? other_frame : this_frame),
		       (void *)(cmp < 0 ? this_frame : other_frame));
	} else {
		pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
		       (void *)this_frame);
	}

	pr_err("\n");

	/* Print information about both racing accesses. */
	if (other_info) {
		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(other_info->ai.access_type), other_info->ai.ptr,
		       other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
		       other_info->ai.cpu_id);

		/* The other thread's stack trace, captured by the producer. */
		print_stack_trace(other_info->stack_entries + other_skipnr,
				  other_info->num_stack_entries - other_skipnr,
				  other_reordered_to);
		if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
			print_verbose_info(other_info->task);

		pr_err("\n");
		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(ai->access_type), ai->ptr, ai->size,
		       get_thread_desc(ai->task_pid), ai->cpu_id);
	} else {
		pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(ai->access_type), ai->ptr, ai->size,
		       get_thread_desc(ai->task_pid), ai->cpu_id);
	}

	print_stack_trace(stack_entries + skipnr, num_stack_entries - skipnr, reordered_to);
	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
		print_verbose_info(current);

	/* Print the observed value change, if any (fits in u64 only). */
	if (ai->size <= 8) {
		int hex_len = ai->size * 2;	/* hex digits for this size */
		u64 diff = old ^ new;

		if (mask)
			diff &= mask;	/* only report bits covered by the mask */
		if (diff) {
			pr_err("\n");
			pr_err("value changed: 0x%0*llx -> 0x%0*llx\n",
			       hex_len, old, hex_len, new);
			if (mask) {
				pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
				       hex_len, diff, hex_len, mask);
			}
		}
	}

	/* Print report footer. */
	pr_err("\n");
	pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
	dump_stack_print_info(KERN_DEFAULT);
	pr_err("==================================================================\n");

	if (panic_on_warn)
		panic("panic_on_warn set ...\n");
}
0498
/*
 * Mark @other_info as consumed and release report_lock. A size of 0 denotes
 * an invalid/free slot (prepare_report_consumer() waits on ai.size, and
 * prepare_report_producer() WARNs if the slot is still in use).
 */
static void release_report(unsigned long *flags, struct other_info *other_info)
{
	/* ai.size == 0 signals "slot free" to producer and consumer. */
	other_info->ai.size = 0;
	raw_spin_unlock_irqrestore(&report_lock, *flags);
}
0508
0509
0510
0511
0512
0513
0514
/*
 * Set @other_info->task and block until @other_info is consumed (or a
 * timeout expires), so the consumer can print this task's held locks and IRQ
 * trace (CONFIG_KCSAN_VERBOSE).
 *
 * Precondition: report_lock held. Postcondition: report_lock held.
 */
static void set_other_info_task_blocking(unsigned long *flags,
					 const struct access_info *ai,
					 struct other_info *other_info)
{
	/*
	 * We may be instrumenting code where current's state is already
	 * something other than TASK_RUNNING; remember it so we can restore
	 * the right state at the end.
	 */
	const bool is_running = task_is_running(current);
	/*
	 * Bounded wait: without a timeout we could deadlock, e.g. if the
	 * consumer never runs. Wait approximately the worst-case delay of
	 * the reporting thread (max of the task/interrupt watchpoint delays).
	 */
	int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);

	other_info->task = current;
	do {
		if (is_running) {
			/*
			 * Mark the task as sleeping so lockdep prints its
			 * held locks (it prints nothing for running tasks).
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
		}
		/* Drop the lock so the consumer can make progress. */
		raw_spin_unlock_irqrestore(&report_lock, *flags);
		/*
		 * Busy-wait with udelay(); we cannot schedule() since we
		 * cannot reliably determine whether sleeping is permitted in
		 * this context.
		 */
		udelay(1);
		raw_spin_lock_irqsave(&report_lock, *flags);
		if (timeout-- < 0) {
			/*
			 * Timed out: reset task to NULL, since the consumer
			 * may still consume @other_info later; it will simply
			 * print no verbose info for this task.
			 */
			other_info->task = NULL;
			break;
		}
		/*
		 * Loop while @other_info is still ours and unconsumed: valid
		 * (size != 0), same pointer, and task still set to current.
		 */
	} while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&
		 other_info->task == current);
	if (is_running)
		set_current_state(TASK_RUNNING);
}
0571
0572
/* Populate @other_info for the consumer; the slot must currently be free. */
static void prepare_report_producer(unsigned long *flags,
				    const struct access_info *ai,
				    struct other_info *other_info)
{
	raw_spin_lock_irqsave(&report_lock, *flags);

	/*
	 * The slot must be free (ai.size == 0, see release_report()).
	 * NOTE(review): slots are indexed per watchpoint (see
	 * kcsan_report_set_info()), which presumably prevents concurrent use
	 * of the same slot -- confirm against the watchpoint life-cycle in
	 * the KCSAN core.
	 */
	WARN_ON(other_info->ai.size);

	/* Publishing ai (size != 0) makes the slot valid for the consumer. */
	other_info->ai = *ai;
	other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2);

	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
		set_other_info_task_blocking(flags, ai, other_info);

	raw_spin_unlock_irqrestore(&report_lock, *flags);
}
0602
0603
/*
 * Await the producer filling @other_info, then validate that the accesses
 * truly overlap. Returns true with report_lock HELD on success; on failure
 * releases the slot and the lock and returns false.
 */
static bool prepare_report_consumer(unsigned long *flags,
				    const struct access_info *ai,
				    struct other_info *other_info)
{
	raw_spin_lock_irqsave(&report_lock, *flags);
	/* Spin (dropping the lock each iteration) until the slot is valid. */
	while (!other_info->ai.size) {
		raw_spin_unlock_irqrestore(&report_lock, *flags);
		cpu_relax();
		raw_spin_lock_irqsave(&report_lock, *flags);
	}

	/* The encoded (masked) addresses must always match -- WARN if not. */
	if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,
				     (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))
		goto discard;

	if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,
			     (unsigned long)ai->ptr, ai->size)) {
		/*
		 * The encoded addresses matched but the actual accesses do
		 * not overlap: a false positive due to the lossy watchpoint
		 * encoding. Count and discard.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
		goto discard;
	}

	return true;

discard:
	release_report(flags, other_info);
	return false;
}
0637
0638 static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
0639 int access_type, unsigned long ip)
0640 {
0641 return (struct access_info) {
0642 .ptr = ptr,
0643 .size = size,
0644 .access_type = access_type,
0645 .task_pid = in_task() ? task_pid_nr(current) : -1,
0646 .cpu_id = raw_smp_processor_id(),
0647
0648 .ip = (access_type & KCSAN_ACCESS_SCOPED) ? ip : 0,
0649 };
0650 }
0651
0652 void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
0653 unsigned long ip, int watchpoint_idx)
0654 {
0655 const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
0656 unsigned long flags;
0657
0658 kcsan_disable_current();
0659 lockdep_off();
0660
0661 prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]);
0662
0663 lockdep_on();
0664 kcsan_enable_current();
0665 }
0666
/*
 * Consumer side of a race report: pair this access with the other_info set
 * up by kcsan_report_set_info() for the same watchpoint, and print the
 * two-threads report (subject to value-change filtering).
 */
void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
			       unsigned long ip, enum kcsan_value_change value_change,
			       int watchpoint_idx, u64 old, u64 new, u64 mask)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
	struct other_info *other_info = &other_infos[watchpoint_idx];
	unsigned long flags = 0;

	kcsan_disable_current();
	/*
	 * Because we may generate reports while in scheduler code, the use of
	 * printk() could deadlock; until all printing code called from
	 * print_report() is scheduler-safe, accept the risk and just get the
	 * message out. Also disable lockdep here to hide such warnings,
	 * without disabling lockdep for the rest of the kernel.
	 */
	lockdep_off();

	/* On success, report_lock is held until release_report(). */
	if (!prepare_report_consumer(&flags, &ai, other_info))
		goto out;
	/*
	 * Never report for KCSAN_VALUE_CHANGE_FALSE; MAYBE may be filtered
	 * further in print_report() (CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY).
	 */
	if (value_change != KCSAN_VALUE_CHANGE_FALSE)
		print_report(value_change, &ai, other_info, old, new, mask);

	release_report(&flags, other_info);
out:
	lockdep_on();
	kcsan_enable_current();
}
0700
/*
 * Report a race where only this side's access is known (the racing write
 * was never observed directly, only its effect); always treated as a value
 * change (KCSAN_VALUE_CHANGE_TRUE).
 */
void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
				 unsigned long ip, u64 old, u64 new, u64 mask)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
	unsigned long flags;

	kcsan_disable_current();
	lockdep_off();	/* See kcsan_report_known_origin(). */

	raw_spin_lock_irqsave(&report_lock, flags);
	print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);
	raw_spin_unlock_irqrestore(&report_lock, flags);

	lockdep_on();
	kcsan_enable_current();
}