![]() |
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
 * data structures to set up runtime. See kcsan-checks.h for explicit checks and
 * modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H

#include <linux/kcsan-checks.h>
#include <linux/types.h>

#ifdef CONFIG_KCSAN

/*
 * Context for each thread of execution: for tasks, this is stored in
 * task_struct, and interrupts access internal per-CPU storage.
 */
struct kcsan_ctx {
	int disable_count;		/* disable counter */
	int disable_scoped;		/* disable scoped access counter */
	int atomic_next;		/* number of following atomic ops */

	/*
	 * We distinguish between: (a) nestable atomic regions that may contain
	 * other nestable regions; and (b) flat atomic regions that do not keep
	 * track of nesting. Both (a) and (b) are entirely independent of each
	 * other, and a flat region may be started in a nestable region or
	 * vice-versa.
	 *
	 * This is required because, for example, in the annotations for
	 * seqlocks, we declare seqlock writer critical sections as (a) nestable
	 * atomic regions, but reader critical sections as (b) flat atomic
	 * regions, but have encountered cases where seqlock reader critical
	 * sections are contained within writer critical sections (the opposite
	 * may be possible, too).
	 *
	 * To support these cases, we independently track the depth of nesting
	 * for (a), and whether the leaf level is flat for (b).
	 */
	int atomic_nest_count;		/* nesting depth of (a) nestable atomic regions */
	bool in_flat_atomic;		/* whether the leaf level is a (b) flat atomic region */

	/*
	 * Access mask for all accesses if non-zero.
	 */
	unsigned long access_mask;

	/* List of scoped accesses; likely to be empty. */
	struct list_head scoped_accesses;

#ifdef CONFIG_KCSAN_WEAK_MEMORY
	/*
	 * Scoped access for modeling access reordering to detect missing memory
	 * barriers; only keep 1 to keep fast-path complexity manageable.
	 */
	struct kcsan_scoped_access reorder_access;
#endif
};

/**
 * kcsan_init - initialize KCSAN runtime
 */
void kcsan_init(void);

#else /* CONFIG_KCSAN */

/* Stub so callers need not be conditional on CONFIG_KCSAN themselves. */
static inline void kcsan_init(void) { }

#endif /* CONFIG_KCSAN */

#endif /* _LINUX_KCSAN_H */
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |