// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN reporting.
 *
 * Copyright (C) 2019, Google LLC.
 */

#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>

#include "kcsan.h"
#include "encoding.h"

/*
 * Max. number of stack entries to show in the report.
 */
#define NUM_STACK_ENTRIES 64

/* Common access info. */
struct access_info {
    const volatile void *ptr;
    size_t          size;
    int         access_type;
    int         task_pid;
    int         cpu_id;
    unsigned long       ip;
};

/*
 * Other thread info: communicated from other racing thread to thread that set
 * up the watchpoint, which then prints the complete report atomically.
 */
struct other_info {
    struct access_info  ai;
    unsigned long       stack_entries[NUM_STACK_ENTRIES];
    int         num_stack_entries;

    /*
     * Optionally pass @current. Typically we do not need to pass @current
     * via @other_info since just @task_pid is sufficient. Passing @current
     * has additional overhead.
     *
     * To safely pass @current, we must either use get_task_struct/
     * put_task_struct, or stall the thread that populated @other_info.
     *
     * We cannot rely on get_task_struct/put_task_struct in case
     * release_report() races with a task being released, and would have to
     * free it in release_report(). This may result in deadlock if we want
     * to use KCSAN on the allocators.
     *
     * Since we also want to reliably print held locks for
     * CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
     * that populated @other_info until it has been consumed.
     */
    struct task_struct  *task;
};

/*
 * To never block any producers of struct other_info, we need as many elements
 * as we have watchpoints (upper bound on concurrent races to report).
 */
static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];

/*
 * Information about reported races; used to rate limit reporting.
 */
struct report_time {
    /*
     * The last time the race was reported.
     */
    unsigned long time;

    /*
     * The frames of the 2 threads; if only 1 thread is known, one frame
     * will be 0.
     */
    unsigned long frame1;
    unsigned long frame2;
};

/*
 * Since we also want to be able to debug allocators with KCSAN, to avoid
 * deadlock, report_times cannot be dynamically resized with krealloc in
 * rate_limit_report.
 *
 * Therefore, we use a fixed-size array, which at most will occupy a page. This
 * still adequately rate limits reports, assuming that a) number of unique data
 * races is not excessive, and b) occurrence of unique races within the
 * same time window is limited.
 */
#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
#define REPORT_TIMES_SIZE                                                      \
    (CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ?                   \
         REPORT_TIMES_MAX :                                            \
         CONFIG_KCSAN_REPORT_ONCE_IN_MS)
static struct report_time report_times[REPORT_TIMES_SIZE];

/*
 * Spinlock serializing report generation, and access to @other_infos. Although
 * it could make sense to have a finer-grained locking story for @other_infos,
 * report generation needs to be serialized either way, so not much is gained.
 */
static DEFINE_RAW_SPINLOCK(report_lock);

/*
 * Checks if the race identified by thread frames frame1 and frame2 has
 * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
 */
static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
{
    struct report_time *use_entry = &report_times[0];
    unsigned long invalid_before;
    int i;

    BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);

    if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
        return false;

    invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);

    /* Check if a matching race report exists. */
    for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
        struct report_time *rt = &report_times[i];

        /*
         * Must always select an entry for use to store info as we
         * cannot resize report_times; at the end of the scan, use_entry
         * will be the oldest entry, which ideally also happened before
         * KCSAN_REPORT_ONCE_IN_MS ago.
         */
        if (time_before(rt->time, use_entry->time))
            use_entry = rt;

        /*
         * Initially, no need to check any further as this entry as well
         * as following entries have never been used.
         */
        if (rt->time == 0)
            break;

        /* Check if entry expired. */
        if (time_before(rt->time, invalid_before))
            continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */

        /* Reported recently, check if race matches. */
        if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
            (rt->frame1 == frame2 && rt->frame2 == frame1))
            return true;
    }

    use_entry->time = jiffies;
    use_entry->frame1 = frame1;
    use_entry->frame2 = frame2;
    return false;
}

/*
 * Special rules to skip reporting.
 */
static bool
skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
{
    /* Should never get here if value_change==FALSE. */
    WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);

    /*
     * The first call to skip_report always has value_change==TRUE, since we
     * cannot know the value written by an instrumented access. For the 2nd
     * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
     *
     * 1. read watchpoint, conflicting write (value_change==TRUE): report;
     * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
     * 3. write watchpoint, conflicting write (value_change==TRUE): report;
     * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
     * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
     * 6. write watchpoint, conflicting read (value_change==TRUE): report;
     *
     * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
     * data races where the write may have rewritten the same value; case 6
     * is possible either if the size is larger than what we check value
     * changes for or the access type is KCSAN_ACCESS_ASSERT.
     */
    if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
        value_change == KCSAN_VALUE_CHANGE_MAYBE) {
        /*
         * The access is a write, but the data value did not change.
         *
         * We opt out of this filter for certain functions at the request of
         * maintainers.
         */
        char buf[64];
        int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);

        if (!strnstr(buf, "rcu_", len) &&
            !strnstr(buf, "_rcu", len) &&
            !strnstr(buf, "_srcu", len))
            return true;
    }

    return kcsan_skip_report_debugfs(top_frame);
}

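/* Return a human-readable description of @type for use in the report text. */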
static const char *get_access_type(int type)
{
    if (type & KCSAN_ACCESS_ASSERT) {
        if (type & KCSAN_ACCESS_SCOPED) {
            if (type & KCSAN_ACCESS_WRITE)
                return "assert no accesses (reordered)";
            else
                return "assert no writes (reordered)";
        } else {
            if (type & KCSAN_ACCESS_WRITE)
                return "assert no accesses";
            else
                return "assert no writes";
        }
    }

    switch (type) {
    case 0:
        return "read";
    case KCSAN_ACCESS_ATOMIC:
        return "read (marked)";
    case KCSAN_ACCESS_WRITE:
        return "write";
    case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
        return "write (marked)";
    case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
        return "read-write";
    case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
        return "read-write (marked)";
    case KCSAN_ACCESS_SCOPED:
        return "read (reordered)";
    case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
        return "read (marked, reordered)";
    case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
        return "write (reordered)";
    case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
        return "write (marked, reordered)";
    case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
        return "read-write (reordered)";
    case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
        return "read-write (marked, reordered)";
    default:
        BUG();
    }
}

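/*
 * Return the report title prefix: "assert: race" for ASSERT accesses,
 * "data-race" otherwise.
 */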
static const char *get_bug_type(int type)
{
    return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
}

/* Return thread description: in task or interrupt. */
static const char *get_thread_desc(int task_id)
{
    if (task_id != -1) {
        static char buf[32]; /* safe: protected by report_lock */

        snprintf(buf, sizeof(buf), "task %i", task_id);
        return buf;
    }
    return "interrupt";
}

/* Helper to skip KCSAN-related functions in stack-trace. */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
{
    char buf[64];
    char *cur;
    int len, skip;

    for (skip = 0; skip < num_entries; ++skip) {
        len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);

        /* Never show tsan_* or {read,write}_once_size. */
        if (strnstr(buf, "tsan_", len) ||
            strnstr(buf, "_once_size", len))
            continue;

        cur = strnstr(buf, "kcsan_", len);
        if (cur) {
            cur += strlen("kcsan_");
            if (!str_has_prefix(cur, "test"))
                continue; /* KCSAN runtime function. */
            /* KCSAN related test. */
        }

        /*
         * No match for runtime functions -- @skip entries to skip to
         * get to first frame of interest.
         */
        break;
    }

    return skip;
}

/*
 * Skips to the first entry that matches the function of @ip, and then replaces
 * that entry with @ip, returning the entries to skip with @replaced containing
 * the replaced entry.
 */
static int
replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip,
            unsigned long *replaced)
{
    unsigned long symbolsize, offset;
    unsigned long target_func;
    int skip;

    if (kallsyms_lookup_size_offset(ip, &symbolsize, &offset))
        target_func = ip - offset;
    else
        goto fallback;

    for (skip = 0; skip < num_entries; ++skip) {
        unsigned long func = stack_entries[skip];

        if (!kallsyms_lookup_size_offset(func, &symbolsize, &offset))
            goto fallback;
        func -= offset;

        if (func == target_func) {
            *replaced = stack_entries[skip];
            stack_entries[skip] = ip;
            return skip;
        }
    }

fallback:
    /* Should not happen; the resulting stack trace is likely misleading. */
    WARN_ONCE(1, "Cannot find frame for %pS in stack trace", (void *)ip);
    return get_stack_skipnr(stack_entries, num_entries);
}

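/*
 * Prepare a saved stack trace for printing: for scoped accesses (@ip != 0),
 * replace the matching entry with @ip via replace_stack_entry(); otherwise
 * only skip over KCSAN-internal frames via get_stack_skipnr(). Returns the
 * number of entries to skip.
 */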
static int
sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip,
               unsigned long *replaced)
{
    return ip ? replace_stack_entry(stack_entries, num_entries, ip, replaced) :
              get_stack_skipnr(stack_entries, num_entries);
}

/* Compares symbolized strings of addr1 and addr2. */
static int sym_strcmp(void *addr1, void *addr2)
{
    char buf1[64];
    char buf2[64];

    snprintf(buf1, sizeof(buf1), "%pS", addr1);
    snprintf(buf2, sizeof(buf2), "%pS", addr2);

    return strncmp(buf1, buf2, sizeof(buf1));
}

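/*
 * Print a stack trace; if @reordered_to is non-zero, also print the location
 * the access may have been reordered to.
 */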
static void
print_stack_trace(unsigned long stack_entries[], int num_entries, unsigned long reordered_to)
{
    stack_trace_print(stack_entries, num_entries, 0);
    if (reordered_to)
        pr_err("  |\n  +-> reordered to: %pS\n", (void *)reordered_to);
}

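/*
 * For CONFIG_KCSAN_VERBOSE reports: print the held locks and IRQ trace events
 * of @task.
 */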
static void print_verbose_info(struct task_struct *task)
{
    if (!task)
        return;

    /* Restore IRQ state trace for printing. */
    kcsan_restore_irqtrace(task);

    pr_err("\n");
    debug_show_held_locks(task);
    print_irqtrace_events(task);
}

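/*
 * Generate and print the full report for the access described by @ai and, if
 * available, the racing access in @other_info. The output follows roughly the
 * shape below (illustrative only; foo_read/foo_write are made-up names):
 *
 *   ==================================================================
 *   BUG: KCSAN: data-race in foo_read / foo_write
 *
 *   write to 0x... of 4 bytes by task 123 on cpu 1:
 *    <stack trace of the other thread>
 *
 *   read to 0x... of 4 bytes by task 456 on cpu 0:
 *    <stack trace of this thread>
 *
 *   value changed: 0x00000001 -> 0x00000002
 *
 *   Reported by Kernel Concurrency Sanitizer on:
 *   <system and version info>
 *   ==================================================================
 */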
static void print_report(enum kcsan_value_change value_change,
             const struct access_info *ai,
             struct other_info *other_info,
             u64 old, u64 new, u64 mask)
{
    unsigned long reordered_to = 0;
    unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
    int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
    int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip, &reordered_to);
    unsigned long this_frame = stack_entries[skipnr];
    unsigned long other_reordered_to = 0;
    unsigned long other_frame = 0;
    int other_skipnr = 0; /* silence uninit warnings */

    /*
     * Must check report filter rules before starting to print.
     */
    if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
        return;

    if (other_info) {
        other_skipnr = sanitize_stack_entries(other_info->stack_entries,
                              other_info->num_stack_entries,
                              other_info->ai.ip, &other_reordered_to);
        other_frame = other_info->stack_entries[other_skipnr];

        /* @value_change is only known for the other thread */
        if (skip_report(value_change, other_frame))
            return;
    }

    if (rate_limit_report(this_frame, other_frame))
        return;

    /* Print report header. */
    pr_err("==================================================================\n");
    if (other_info) {
        int cmp;

        /*
         * Order functions lexicographically for consistent bug titles.
         * Do not print offset of functions to keep title short.
         */
        cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
        pr_err("BUG: KCSAN: %s in %ps / %ps\n",
               get_bug_type(ai->access_type | other_info->ai.access_type),
               (void *)(cmp < 0 ? other_frame : this_frame),
               (void *)(cmp < 0 ? this_frame : other_frame));
    } else {
        pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
               (void *)this_frame);
    }

    pr_err("\n");

    /* Print information about the racing accesses. */
    if (other_info) {
        pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
               get_access_type(other_info->ai.access_type), other_info->ai.ptr,
               other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
               other_info->ai.cpu_id);

        /* Print the other thread's stack trace. */
        print_stack_trace(other_info->stack_entries + other_skipnr,
                  other_info->num_stack_entries - other_skipnr,
                  other_reordered_to);
        if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
            print_verbose_info(other_info->task);

        pr_err("\n");
        pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
               get_access_type(ai->access_type), ai->ptr, ai->size,
               get_thread_desc(ai->task_pid), ai->cpu_id);
    } else {
        pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
               get_access_type(ai->access_type), ai->ptr, ai->size,
               get_thread_desc(ai->task_pid), ai->cpu_id);
    }
    /* Print stack trace of this thread. */
    print_stack_trace(stack_entries + skipnr, num_stack_entries - skipnr, reordered_to);
    if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
        print_verbose_info(current);

    /* Print observed value change. */
    if (ai->size <= 8) {
        int hex_len = ai->size * 2;
        u64 diff = old ^ new;

        if (mask)
            diff &= mask;
        if (diff) {
            pr_err("\n");
            pr_err("value changed: 0x%0*llx -> 0x%0*llx\n",
                   hex_len, old, hex_len, new);
            if (mask) {
                pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
                       hex_len, diff, hex_len, mask);
            }
        }
    }

    /* Print report footer. */
    pr_err("\n");
    pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
    dump_stack_print_info(KERN_DEFAULT);
    pr_err("==================================================================\n");

    if (panic_on_warn)
        panic("panic_on_warn set ...\n");
}

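/* Mark @other_info as consumed (size == 0) and release the report lock. */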
static void release_report(unsigned long *flags, struct other_info *other_info)
{
    /*
     * Use size to denote valid/invalid, since KCSAN entirely ignores
     * 0-sized accesses.
     */
    other_info->ai.size = 0;
    raw_spin_unlock_irqrestore(&report_lock, *flags);
}

/*
 * Sets @other_info->task and awaits consumption of @other_info.
 *
 * Precondition: report_lock is held.
 * Postcondition: report_lock is held.
 */
static void set_other_info_task_blocking(unsigned long *flags,
                     const struct access_info *ai,
                     struct other_info *other_info)
{
    /*
     * We may be instrumenting a code-path where current->state is already
     * something other than TASK_RUNNING.
     */
    const bool is_running = task_is_running(current);
    /*
     * To avoid deadlock in case we are in an interrupt here and this is a
     * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
     * timeout to ensure this works in all contexts.
     *
     * Await approximately the worst case delay of the reporting thread (if
     * we are not interrupted).
     */
    int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);

    other_info->task = current;
    do {
        if (is_running) {
            /*
             * Let lockdep know the real task is sleeping, to print
             * the held locks (recall we turned lockdep off, so
             * locking/unlocking @report_lock won't be recorded).
             */
            set_current_state(TASK_UNINTERRUPTIBLE);
        }
        raw_spin_unlock_irqrestore(&report_lock, *flags);
        /*
         * We cannot call schedule() since we also cannot reliably
         * determine if sleeping here is permitted -- see in_atomic().
         */

        udelay(1);
        raw_spin_lock_irqsave(&report_lock, *flags);
        if (timeout-- < 0) {
            /*
             * Abort. Reset @other_info->task to NULL, since it
             * appears the other thread is still going to consume
             * it. It will result in no verbose info printed for
             * this task.
             */
            other_info->task = NULL;
            break;
        }
        /*
         * If invalid, or if @ptr or @current no longer matches, then @other_info
         * has been consumed and we may continue. If not, retry.
         */
    } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&
         other_info->task == current);
    if (is_running)
        set_current_state(TASK_RUNNING);
}

/* Populate @other_info; requires that the provided @other_info is not in use. */
static void prepare_report_producer(unsigned long *flags,
                    const struct access_info *ai,
                    struct other_info *other_info)
{
    raw_spin_lock_irqsave(&report_lock, *flags);

    /*
     * The same @other_infos entry cannot be used concurrently, because
     * there is a one-to-one mapping to watchpoint slots (@watchpoints in
     * core.c), and a watchpoint is only released for reuse after reporting
     * is done by the consumer of @other_info. Therefore, it is impossible
     * for another concurrent prepare_report_producer() to set the same
     * @other_info, and we are guaranteed exclusivity for the @other_infos
     * entry pointed to by @other_info.
     *
     * To check this property holds, size should never be non-zero here,
     * because every consumer of struct other_info resets size to 0 in
     * release_report().
     */
    WARN_ON(other_info->ai.size);

    other_info->ai = *ai;
    other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2);

    if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
        set_other_info_task_blocking(flags, ai, other_info);

    raw_spin_unlock_irqrestore(&report_lock, *flags);
}

/* Awaits producer to fill @other_info and then returns. */
static bool prepare_report_consumer(unsigned long *flags,
                    const struct access_info *ai,
                    struct other_info *other_info)
{

    raw_spin_lock_irqsave(&report_lock, *flags);
    while (!other_info->ai.size) { /* Await valid @other_info. */
        raw_spin_unlock_irqrestore(&report_lock, *flags);
        cpu_relax();
        raw_spin_lock_irqsave(&report_lock, *flags);
    }

    /* Should always have a matching access based on watchpoint encoding. */
    if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,
                     (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))
        goto discard;

    if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,
                 (unsigned long)ai->ptr, ai->size)) {
        /*
         * If the actual accesses do not match, this was a false
         * positive due to watchpoint encoding.
         */
        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
        goto discard;
    }

    return true;

discard:
    release_report(flags, other_info);
    return false;
}

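/*
 * Fill in a struct access_info for the given access, recording the current
 * task/interrupt context and CPU.
 */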
static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
                          int access_type, unsigned long ip)
{
    return (struct access_info) {
        .ptr        = ptr,
        .size       = size,
        .access_type    = access_type,
        .task_pid   = in_task() ? task_pid_nr(current) : -1,
        .cpu_id     = raw_smp_processor_id(),
        /* Only replace stack entry with @ip if scoped access. */
        .ip     = (access_type & KCSAN_ACCESS_SCOPED) ? ip : 0,
    };
}

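/*
 * Called from the racing thread that encountered an existing watchpoint:
 * publish its access information via @other_infos[watchpoint_idx], to be
 * consumed by the thread that set up the watchpoint when it prints the report.
 */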
void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
               unsigned long ip, int watchpoint_idx)
{
    const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
    unsigned long flags;

    kcsan_disable_current();
    lockdep_off(); /* See kcsan_report_known_origin(). */

    prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]);

    lockdep_on();
    kcsan_enable_current();
}

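/*
 * Called by the thread that set up the watchpoint: consume the racing access
 * from @other_infos[watchpoint_idx] and print the report, unless @value_change
 * is KCSAN_VALUE_CHANGE_FALSE.
 */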
void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
                   unsigned long ip, enum kcsan_value_change value_change,
                   int watchpoint_idx, u64 old, u64 new, u64 mask)
{
    const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
    struct other_info *other_info = &other_infos[watchpoint_idx];
    unsigned long flags = 0;

    kcsan_disable_current();
    /*
     * Because we may generate reports when we're in scheduler code, the use
     * of printk() could deadlock. Until such time that all printing code
     * called in print_report() is scheduler-safe, accept the risk, and just
     * get our message out. As such, also disable lockdep to hide the
     * warning, and avoid disabling lockdep for the rest of the kernel.
     */
    lockdep_off();

    if (!prepare_report_consumer(&flags, &ai, other_info))
        goto out;
    /*
     * Never report if value_change is FALSE, only when it is
     * either TRUE or MAYBE. In case of MAYBE, further filtering may
     * be done once we know the full stack trace in print_report().
     */
    if (value_change != KCSAN_VALUE_CHANGE_FALSE)
        print_report(value_change, &ai, other_info, old, new, mask);

    release_report(&flags, other_info);
out:
    lockdep_on();
    kcsan_enable_current();
}

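/*
 * Report a race whose other party was never caught by a watchpoint (only a
 * value change was observed): the report describes just the current access,
 * at "unknown origin".
 */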
void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
                 unsigned long ip, u64 old, u64 new, u64 mask)
{
    const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
    unsigned long flags;

    kcsan_disable_current();
    lockdep_off(); /* See kcsan_report_known_origin(). */

    raw_spin_lock_irqsave(&report_lock, flags);
    print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);
    raw_spin_unlock_irqrestore(&report_lock, flags);

    lockdep_on();
    kcsan_enable_current();
}