/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator.
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};
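
/*
 * Illustrative expansion (not compiled): assuming lockdep_states.h lists
 * HARDIRQ and SOFTIRQ, as in mainline, the x-macro enum above becomes:
 *
 *	LOCK_USED_IN_HARDIRQ,		//  0
 *	LOCK_USED_IN_HARDIRQ_READ,	//  1
 *	LOCK_ENABLED_HARDIRQ,		//  2
 *	LOCK_ENABLED_HARDIRQ_READ,	//  3
 *	LOCK_USED_IN_SOFTIRQ,		//  4
 *	LOCK_USED_IN_SOFTIRQ_READ,	//  5
 *	LOCK_ENABLED_SOFTIRQ,		//  6
 *	LOCK_ENABLED_SOFTIRQ_READ,	//  7
 *	LOCK_USED,			//  8
 *	LOCK_USED_READ,			//  9
 *	LOCK_USAGE_STATES,		// 10
 */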

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

/*
 * A usage bit encodes, from the low bits up: the read variant (bit 0),
 * the direction, i.e. ENABLED vs. USED_IN (bit 1), and the IRQ state
 * in the remaining bits:
 */
#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
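
/*
 * Example decoding (illustrative), matching the masks above:
 *
 *	int read  = bit & LOCK_USAGE_READ_MASK;		// _READ variant?
 *	int dir   = bit & LOCK_USAGE_DIR_MASK;		// ENABLED (set) vs. USED_IN
 *	int state = bit & LOCK_USAGE_STATE_MASK;	// which IRQ state
 *
 * e.g. LOCK_ENABLED_HARDIRQ_READ (== 3) has both the read and the dir
 * bit set, with state bits 0 (HARDIRQ).
 */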

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};
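
/*
 * Illustration (not compiled): each LOCKF_* constant is the matching
 * usage bit turned into a mask. Assuming the HARDIRQ/SOFTIRQ expansion
 * shown above:
 *
 *	LOCKF_USED_IN_HARDIRQ	== 1 << LOCK_USED_IN_HARDIRQ	== 0x001
 *	LOCKF_ENABLED_HARDIRQ	== 1 << LOCK_ENABLED_HARDIRQ	== 0x004
 *	LOCKF_USED_IN_SOFTIRQ	== 1 << LOCK_USED_IN_SOFTIRQ	== 0x010
 *	LOCKF_USED		== 1 << LOCK_USED		== 0x100
 */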

/*
 * Aggregate masks over all IRQ states listed in lockdep_states.h,
 * built by OR-ing the per-state bits together:
 */
#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL	(LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL	(LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ	(LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ	(LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
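
/*
 * Illustration (not compiled): with HARDIRQ and SOFTIRQ as the only
 * states, the aggregates above work out to:
 *
 *	LOCKF_ENABLED_IRQ == LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ
 *	LOCKF_USED_IN_IRQ == LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ
 *
 * LOCKF_IRQ then covers every ENABLED/USED_IN bit of the non-read
 * variants, and LOCKF_IRQ_READ the same for the _READ variants.
 */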

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could go over this limit and cause system
 * boot-up problems. So, reduce the static allocations for lockdep's
 * data structures so that everything fits within that limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow it by adding
 * every to-be-taken lock to all currently held lock's own dependency
 * table (if it's not there yet), and we check it for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)

#define MAX_LOCKDEP_CHAINS_BITS	CONFIG_LOCKDEP_CHAINS_BITS

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	(1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
#define STACK_TRACE_HASH_SIZE	(1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
#endif
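
/*
 * Worked sizes (illustrative, assuming the usual Kconfig defaults of
 * CONFIG_LOCKDEP_BITS=15, CONFIG_LOCKDEP_CHAINS_BITS=16,
 * CONFIG_LOCKDEP_STACK_TRACE_BITS=19 and
 * CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14):
 *
 *	MAX_LOCKDEP_ENTRIES	== 32768
 *	MAX_LOCKDEP_CHAINS	== 65536	(derived below)
 *	MAX_STACK_TRACE_ENTRIES	== 524288
 *	STACK_TRACE_HASH_SIZE	== 16384
 */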

/*
 * Bit definitions for lock_chain.irq_context:
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
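
/*
 * Typical use (illustrative, following the /proc/lockdep output code):
 * render the usage characters of a class into a stack buffer and print:
 *
 *	char usage[LOCK_USAGE_CHARS];
 *
 *	get_usage_chars(class, usage);
 *	seq_printf(m, " %s", usage);
 */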

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
extern unsigned long lock_classes_in_use[];

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)				\
	this_cpu_inc(lockdep_stats.ptr);

/*
 * The debug_atomic_inc()/dec() variants may only be used with IRQs
 * disabled, since they use the non-IRQ-safe __this_cpu ops; the WARN
 * enforces that:
 */
#define debug_atomic_inc(ptr)			{	\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_inc(lockdep_stats.ptr);		\
}

#define debug_atomic_dec(ptr)			{	\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_dec(lockdep_stats.ptr);		\
}

#define debug_atomic_read(ptr) ({				\
	struct lockdep_stats *__cpu_lockdep_stats;		\
	unsigned long long __total = 0;				\
	int __cpu;						\
	for_each_possible_cpu(__cpu) {				\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu); \
		__total += __cpu_lockdep_stats->ptr;		\
	}							\
	__total;						\
})
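
/*
 * Example (illustrative): summing one counter over all possible CPUs,
 * as the lockdep /proc code does for its statistics output:
 *
 *	unsigned long long hits   = debug_atomic_read(chain_lookup_hits);
 *	unsigned long long misses = debug_atomic_read(chain_lookup_misses);
 */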

/*
 * Per-class operation counters, indexed by the class's slot in
 * lock_classes[]:
 */
static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif