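// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN core runtime: watchpoint-based data-race detection.
 */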
#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "encoding.h"
#include "kcsan.h"
#include "permissive.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

#ifdef CONFIG_KCSAN_WEAK_MEMORY
static bool kcsan_weak_memory = true;
module_param_named(weak_memory, kcsan_weak_memory, bool, 0644);
#else
#define kcsan_weak_memory false
#endif

bool kcsan_enabled;
/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.scoped_accesses	= {LIST_POISON1, NULL},
};
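/*
 * Helper macros to index into adjacent slots, starting from left-most slot.
 */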
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
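/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */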
#define SLOT_IDX_FAST(slot, i) (slot + i)
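/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflowing into adjacent slots.
 */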
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						      size_t size,
						      bool expect_write,
						      long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check relevant access info and matching address. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}
static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}
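/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */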
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}
static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}
static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct kcsan_scoped_access *scoped_access;

	if (ctx->disable_scoped)
		return;

	ctx->disable_scoped++;
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
		check_access(scoped_access->ptr, scoped_access->size,
			     scoped_access->type, scoped_access->ip);
	}
	ctx->disable_scoped--;
}
/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
static __always_inline bool
should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) atomic_next
	 * should still be decremented for each atomic access.
	 */
	if (is_atomic(ctx, ptr, size, type))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow-path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}
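/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */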
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
	u32 state = this_cpu_read(kcsan_rand_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(kcsan_rand_state, state);

	return state % ep_ro;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
		(IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
			 kcsan_prandom_u32_max(kcsan_skip_watch) :
			 0);
	this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
{
	return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
}
/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
			       kcsan_prandom_u32_max(delay >> skew_delay_order) :
			       0;
	udelay(delay);
}
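/*
 * Reads the instrumented memory for value change detection; value change
 * indicates race caused by this access.
 */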
static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
{
	switch (size) {
	case 1:  return READ_ONCE(*(const u8 *)ptr);
	case 2:  return READ_ONCE(*(const u16 *)ptr);
	case 4:  return READ_ONCE(*(const u32 *)ptr);
	case 8:  return READ_ONCE(*(const u64 *)ptr);
	default: return 0; /* Ignore; we do not diff the values. */
	}
}
void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

static __always_inline int get_kcsan_stack_depth(void)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	return current->kcsan_stack_depth;
#else
	BUILD_BUG();
	return 0;
#endif
}

static __always_inline void add_kcsan_stack_depth(int val)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	current->kcsan_stack_depth += val;
#else
	BUILD_BUG();
#endif
}

static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	return ctx->disable_scoped ? NULL : &ctx->reorder_access;
#else
	return NULL;
#endif
}

static __always_inline bool
find_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
		    int type, unsigned long ip)
{
	struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

	if (!reorder_access)
		return false;

	/*
	 * Note: If accesses are repeated while reorder_access is identical,
	 * never matches the first time, but matches the second time.
	 */
	return reorder_access->ptr == ptr && reorder_access->size == size &&
	       reorder_access->type == type && reorder_access->ip == ip;
}
static inline void
set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
		   int type, unsigned long ip)
{
	struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

	if (!reorder_access || !kcsan_weak_memory)
		return;

	/*
	 * To avoid nested interrupts or scheduler (which share kcsan_ctx)
	 * reading an inconsistent reorder_access, ensure that the below has
	 * exclusive access to reorder_access by disallowing concurrent use.
	 */
	ctx->disable_scoped++;
	barrier();
	reorder_access->ptr		= ptr;
	reorder_access->size		= size;
	reorder_access->type		= type | KCSAN_ACCESS_SCOPED;
	reorder_access->ip		= ip;
	reorder_access->stack_depth	= get_kcsan_stack_depth();
	barrier();
	ctx->disable_scoped--;
}
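/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions; the slow-paths (ending in
 * kcsan_found_watchpoint() and kcsan_setup_watchpoint()) are non-inlinable and
 * non-instrumentable functions.
 */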
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    unsigned long ip,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long flags;
	bool consumed;

	/*
	 * We know a watchpoint exists. Let us try to keep the race-window
	 * between here and finally consuming the watchpoint below as small as
	 * possible -- avoid unneccessarily complex code until consumed.
	 */

	if (!kcsan_is_enabled(ctx))
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 *
	 * reorder_access is never created from an access with access_mask set.
	 */
	if (ctx->access_mask && !find_reorder_access(ctx, ptr, size, type, ip))
		return;

	/*
	 * Never report races on to-be-ignored addresses, unless the access is
	 * an assertion (which must always be reported).
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		return;

	/*
	 * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
	 * avoid erroneously triggering reports if the context is disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* Save (and disable) any active user-access state before reporting. */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if (is_assert)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}
static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	u64 old, new, diff;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	bool interrupt_watcher = kcsan_interrupt_watcher;
	unsigned long ua_flags = user_access_save();
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long access_mask = ctx->access_mask;
	unsigned long irq_flags = 0;
	bool is_reorder_access;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled(ctx))
		goto out;

	/*
	 * Check to-ignore addresses after kcsan_is_enabled(), as we may access
	 * memory that is not yet initialized during early boot.
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * The local CPU cannot observe reordering of its own accesses, and
	 * therefore we need to take care of 2 cases to avoid false positives:
	 *
	 *	1. Races of the reordered access with interrupts. To avoid, if
	 *	   the current access is reorder_access, disable interrupts.
	 *	2. Avoid races of scoped accesses from nested interrupts (below).
	 */
	is_reorder_access = find_reorder_access(ctx, ptr, size, type, ip);
	if (is_reorder_access)
		interrupt_watcher = false;
	/*
	 * Avoid races of scoped accesses from nested interrupts (or the
	 * scheduler, which shares this kcsan_ctx).
	 */
	ctx->disable_scoped++;

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	old = is_reorder_access ? 0 : read_instrumented_memory(ptr, size);

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	delay_access(type);

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	if (!is_reorder_access) {
		new = read_instrumented_memory(ptr, size);
	} else {
		/*
		 * Reordered accesses cannot be used for value change detection,
		 * because the memory location may no longer be accessible and
		 * could result in a fault.
		 */
		new = 0;
		access_mask = 0;
	}

	diff = old ^ new;
	if (access_mask)
		diff &= access_mask;

	/*
	 * Check if we observed a value change.
	 *
	 * Also check if the data race should be ignored (the rules depend on
	 * non-zero diff); if it is to be ignored, the below rules for
	 * KCSAN_VALUE_CHANGE_MAYBE apply.
	 */
	if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report_known_origin(ptr, size, type, ip,
					  value_change, watchpoint - watchpoints,
					  old, new, access_mask);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
			kcsan_report_unknown_origin(ptr, size, type, ip,
						    old, new, access_mask);
		}
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

out_unlock:
	if (!interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
	ctx->disable_scoped--;

	/*
	 * Only non-assert accesses without an access_mask may be checked for
	 * reordering with later accesses (see set_reorder_access()).
	 */
	if (!access_mask && !is_assert)
		set_reorder_access(ctx, ptr, size, type, ip);
out:
	user_access_restore(ua_flags);
}
static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

again:
	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size,
				     !(type & KCSAN_ACCESS_WRITE),
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */
	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx();

		if (unlikely(should_watch(ctx, ptr, size, type))) {
			kcsan_setup_watchpoint(ptr, size, type, ip);
			return;
		}

		if (!(type & KCSAN_ACCESS_SCOPED)) {
			struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

			if (reorder_access) {
				/*
				 * reorder_access check: simulates reordering of
				 * the access after subsequent operations.
				 */
				ptr = reorder_access->ptr;
				type = reorder_access->type;
				ip = reorder_access->ip;
				/*
				 * Upon a nested interrupt, this context's
				 * reorder_access can be modified (shared ctx).
				 * We know that upon return, reorder_access is
				 * always invalidated by setting size to 0 via
				 * __tsan_func_exit(). Therefore we must read
				 * and check size after the other fields.
				 */
				barrier();
				size = READ_ONCE(reorder_access->size);
				if (size)
					goto again;
			}
		}

		/*
		 * Only check scoped accesses if the list is in use (non-NULL
		 * prev), to avoid the slow-path function call otherwise.
		 */
		if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}
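/* === Public interface ===================================================== */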
void __init kcsan_init(void)
{
	int cpu;

	BUG_ON(!in_task());

	for_each_possible_cpu(cpu)
		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

	/*
	 * We are in the init task, and no other tasks should have been
	 * created yet: enabling KCSAN here cannot race with other accesses.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}

	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) ||
	    IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) ||
	    IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) ||
	    IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n");
	} else {
		pr_info("strict mode configured\n");
	}
}
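/* === Exported interface =================================================== */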
void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced; fix up
		 * the counter, as otherwise KCSAN would stay disabled for the
		 * current context forever.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);
void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * comments.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced; fix
		 * up the counter, as otherwise all subsequent accesses would
		 * incorrectly be considered atomic.
		 */
		kcsan_nestable_atomic_begin();
		kcsan_disable_current();
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	check_access(ptr, size, type, _RET_IP_);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;
	sa->ip = _RET_IP_;

	if (!ctx->scoped_accesses.prev) /* Lazily initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
		 * checks there to guard against incorrect use.
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	check_access(sa->ptr, sa->size, sa->type, sa->ip);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type, _RET_IP_);
}
EXPORT_SYMBOL(__kcsan_check_access);
/*
 * Memory barrier instrumentation: if the barrier orders the access currently
 * tracked in reorder_access before later operations, invalidate
 * reorder_access by setting its size to 0.
 */
#define DEFINE_MEMORY_BARRIER(name, order_before_cond)				\
	void __kcsan_##name(void)						\
	{									\
		struct kcsan_scoped_access *sa = get_reorder_access(get_ctx());	\
		if (!sa)							\
			return;							\
		if (order_before_cond)						\
			sa->size = 0;						\
	}									\
	EXPORT_SYMBOL(__kcsan_##name)

DEFINE_MEMORY_BARRIER(mb, true);
DEFINE_MEMORY_BARRIER(wmb, sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(rmb, !(sa->type & KCSAN_ACCESS_WRITE) || (sa->type & KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(release, true);
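/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that, not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * accesses for all versions.
 */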
#define DEFINE_TSAN_READ_WRITE(size)						\
	void __tsan_read##size(void *ptr);					\
	void __tsan_read##size(void *ptr)					\
	{									\
		check_access(ptr, size, 0, _RET_IP_);				\
	}									\
	EXPORT_SYMBOL(__tsan_read##size);					\
	void __tsan_unaligned_read##size(void *ptr)				\
		__alias(__tsan_read##size);					\
	EXPORT_SYMBOL(__tsan_unaligned_read##size);				\
	void __tsan_write##size(void *ptr);					\
	void __tsan_write##size(void *ptr)					\
	{									\
		check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);		\
	}									\
	EXPORT_SYMBOL(__tsan_write##size);					\
	void __tsan_unaligned_write##size(void *ptr)				\
		__alias(__tsan_write##size);					\
	EXPORT_SYMBOL(__tsan_unaligned_write##size);				\
	void __tsan_read_write##size(void *ptr);				\
	void __tsan_read_write##size(void *ptr)					\
	{									\
		check_access(ptr, size,						\
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE,	\
			     _RET_IP_);						\
	}									\
	EXPORT_SYMBOL(__tsan_read_write##size);					\
	void __tsan_unaligned_read_write##size(void *ptr)			\
		__alias(__tsan_read_write##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_write_range);
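/*
 * Use of explicit volatile is generally disallowed in the kernel; however,
 * volatile is still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 *
 * We only consider volatile accesses atomic if they are aligned and no larger
 * than sizeof(long long), matching the assumptions of READ_ONCE()/WRITE_ONCE().
 */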
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)					\
	void __tsan_volatile_read##size(void *ptr);				\
	void __tsan_volatile_read##size(void *ptr)				\
	{									\
		const bool is_atomic = size <= sizeof(long long) &&		\
				       IS_ALIGNED((unsigned long)ptr, size);	\
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)	\
			return;							\
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0,	\
			     _RET_IP_);						\
	}									\
	EXPORT_SYMBOL(__tsan_volatile_read##size);				\
	void __tsan_unaligned_volatile_read##size(void *ptr)			\
		__alias(__tsan_volatile_read##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);			\
	void __tsan_volatile_write##size(void *ptr);				\
	void __tsan_volatile_write##size(void *ptr)				\
	{									\
		const bool is_atomic = size <= sizeof(long long) &&		\
				       IS_ALIGNED((unsigned long)ptr, size);	\
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)	\
			return;							\
		check_access(ptr, size,						\
			     KCSAN_ACCESS_WRITE |				\
				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0),	\
			     _RET_IP_);						\
	}									\
	EXPORT_SYMBOL(__tsan_volatile_write##size);				\
	void __tsan_unaligned_volatile_write##size(void *ptr)			\
		__alias(__tsan_volatile_write##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);
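/*
 * Function entry/exit instrumentation (with CONFIG_KCSAN_WEAK_MEMORY):
 * maintain the current kernel stack depth. On exit, if a reorder_access was
 * set up at this or a deeper stack depth, it can no longer be reordered past
 * this function's return: check it once more and then invalidate it.
 */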
void __tsan_func_entry(void *call_pc);
noinline void __tsan_func_entry(void *call_pc)
{
	if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		return;

	add_kcsan_stack_depth(1);
}
EXPORT_SYMBOL(__tsan_func_entry);

void __tsan_func_exit(void);
noinline void __tsan_func_exit(void)
{
	struct kcsan_scoped_access *reorder_access;

	if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		return;

	reorder_access = get_reorder_access(get_ctx());
	if (!reorder_access)
		goto out;

	if (get_kcsan_stack_depth() <= reorder_access->stack_depth) {
		/*
		 * Access check to catch cases where the access is "reordered"
		 * to the end of the function: it can now no longer be
		 * reordered past the function return, so check it once more
		 * and then invalidate the reorder_access.
		 */
		check_access(reorder_access->ptr, reorder_access->size,
			     reorder_access->type, reorder_access->ip);
		reorder_access->size = 0;
		reorder_access->stack_depth = INT_MIN;
	}
out:
	add_kcsan_stack_depth(-1);
}
EXPORT_SYMBOL(__tsan_func_exit);

void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);
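/*
 * Instrumentation of compiler builtin atomics: each builtin is checked as a
 * marked (atomic) access. Memory orders that imply release semantics also act
 * as a release for weak memory modeling, modeled via __kcsan_release() in
 * kcsan_atomic_builtin_memorder() below.
 */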
static __always_inline void kcsan_atomic_builtin_memorder(int memorder)
{
	if (memorder == __ATOMIC_RELEASE ||
	    memorder == __ATOMIC_SEQ_CST ||
	    memorder == __ATOMIC_ACQ_REL)
		__kcsan_release();
}

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)					\
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);	\
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)	\
	{									\
		kcsan_atomic_builtin_memorder(memorder);			\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
		}								\
		return __atomic_load_n(ptr, memorder);				\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_load);				\
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
	{									\
		kcsan_atomic_builtin_memorder(memorder);			\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
		}								\
		__atomic_store_n(ptr, v, memorder);				\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)				\
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
	{									\
		kcsan_atomic_builtin_memorder(memorder);			\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);	\
		}								\
		return __atomic_##op##suffix(ptr, v, memorder);			\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
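/*
 * Note: compare-exchange is always instrumented as a compound read-write
 * access, even though a failing compare-exchange only performs a read.
 */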
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)			\
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo); \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo) \
	{									\
		kcsan_atomic_builtin_memorder(mo);				\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);	\
		}								\
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)					\
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo); \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo) \
	{									\
		kcsan_atomic_builtin_memorder(mo);				\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);	\
		}								\
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);	\
		return exp;							\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits)						\
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);					\
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );				\
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);				\
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);				\
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);
void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	kcsan_atomic_builtin_memorder(memorder);
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);
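/*
 * In instrumented files, the kernel's barrier macros are mapped to
 * __atomic_signal_fence() with special __KCSAN_BARRIER_TO_SIGNAL_FENCE_*
 * memory-order values, which are interpreted here to model the corresponding
 * barrier for weak memory modeling; any other memory order is a no-op.
 */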
void __tsan_atomic_signal_fence(int memorder);
noinline void __tsan_atomic_signal_fence(int memorder)
{
	switch (memorder) {
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb:
		__kcsan_mb();
		break;
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb:
		__kcsan_wmb();
		break;
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb:
		__kcsan_rmb();
		break;
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_release:
		__kcsan_release();
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(__tsan_atomic_signal_fence);