/*
 * kernel/locking/lockdep.c - runtime locking correctness validator
 *
 * Maps every lock dependency as it occurs in a live kernel and warns
 * about lock inversions, circular dependencies and hardirq/softirq
 * safe/unsafe locking bugs, even when the current scenario does not
 * actually deadlock at this point.
 */
0029 #define DISABLE_BRANCH_PROFILING
0030 #include <linux/mutex.h>
0031 #include <linux/sched.h>
0032 #include <linux/sched/clock.h>
0033 #include <linux/sched/task.h>
0034 #include <linux/sched/mm.h>
0035 #include <linux/delay.h>
0036 #include <linux/module.h>
0037 #include <linux/proc_fs.h>
0038 #include <linux/seq_file.h>
0039 #include <linux/spinlock.h>
0040 #include <linux/kallsyms.h>
0041 #include <linux/interrupt.h>
0042 #include <linux/stacktrace.h>
0043 #include <linux/debug_locks.h>
0044 #include <linux/irqflags.h>
0045 #include <linux/utsname.h>
0046 #include <linux/hash.h>
0047 #include <linux/ftrace.h>
0048 #include <linux/stringify.h>
0049 #include <linux/bitmap.h>
0050 #include <linux/bitops.h>
0051 #include <linux/gfp.h>
0052 #include <linux/random.h>
0053 #include <linux/jhash.h>
0054 #include <linux/nmi.h>
0055 #include <linux/rcupdate.h>
0056 #include <linux/kprobes.h>
0057 #include <linux/lockdep.h>
0058
0059 #include <asm/sections.h>
0060
0061 #include "lockdep_internals.h"
0062
0063 #include <trace/events/lock.h>
0064
0065 #ifdef CONFIG_PROVE_LOCKING
0066 static int prove_locking = 1;
0067 module_param(prove_locking, int, 0644);
0068 #else
0069 #define prove_locking 0
0070 #endif
0071
0072 #ifdef CONFIG_LOCK_STAT
0073 static int lock_stat = 1;
0074 module_param(lock_stat, int, 0644);
0075 #else
0076 #define lock_stat 0
0077 #endif
0078
0079 #ifdef CONFIG_SYSCTL
0080 static struct ctl_table kern_lockdep_table[] = {
0081 #ifdef CONFIG_PROVE_LOCKING
0082 {
0083 .procname = "prove_locking",
0084 .data = &prove_locking,
0085 .maxlen = sizeof(int),
0086 .mode = 0644,
0087 .proc_handler = proc_dointvec,
0088 },
0089 #endif
0090 #ifdef CONFIG_LOCK_STAT
0091 {
0092 .procname = "lock_stat",
0093 .data = &lock_stat,
0094 .maxlen = sizeof(int),
0095 .mode = 0644,
0096 .proc_handler = proc_dointvec,
0097 },
0098 #endif
0099 { }
0100 };
0101
0102 static __init int kernel_lockdep_sysctls_init(void)
0103 {
0104 register_sysctl_init("kernel", kern_lockdep_table);
0105 return 0;
0106 }
0107 late_initcall(kernel_lockdep_sysctls_init);
0108 #endif
0109
0110 DEFINE_PER_CPU(unsigned int, lockdep_recursion);
0111 EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
0112
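/*
 * lockdep is considered enabled only while debug_locks is still on and
 * neither the per-CPU nor the per-task recursion counter is set.
 */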
0113 static __always_inline bool lockdep_enabled(void)
0114 {
0115 if (!debug_locks)
0116 return false;
0117
0118 if (this_cpu_read(lockdep_recursion))
0119 return false;
0120
0121 if (current->lockdep_recursion)
0122 return false;
0123
0124 return true;
0125 }
/*
 * lockdep_lock: serializes all lockdep graph and hash-table mutations.
 * It is a raw arch_spinlock_t taken with lockdep recursion suppressed,
 * and __owner records the task currently holding it.
 */
0135 static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
0136 static struct task_struct *__owner;
0137
0138 static inline void lockdep_lock(void)
0139 {
0140 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
0141
0142 __this_cpu_inc(lockdep_recursion);
0143 arch_spin_lock(&__lock);
0144 __owner = current;
0145 }
0146
0147 static inline void lockdep_unlock(void)
0148 {
0149 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
0150
0151 if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
0152 return;
0153
0154 __owner = NULL;
0155 arch_spin_unlock(&__lock);
0156 __this_cpu_dec(lockdep_recursion);
0157 }
0158
0159 static inline bool lockdep_assert_locked(void)
0160 {
0161 return DEBUG_LOCKS_WARN_ON(__owner != current);
0162 }
0163
0164 static struct task_struct *lockdep_selftest_task_struct;
0165
0166
0167 static int graph_lock(void)
0168 {
0169 lockdep_lock();
/*
 * If another CPU already detected a bug and cleared debug_locks while
 * it dumps its report, leave the graph untouched and back off with the
 * lock released.
 */
0176 if (!debug_locks) {
0177 lockdep_unlock();
0178 return 0;
0179 }
0180 return 1;
0181 }
0182
0183 static inline void graph_unlock(void)
0184 {
0185 lockdep_unlock();
0186 }
0187
/*
 * Turn lock debugging off and release the graph lock. The return value
 * of debug_locks_off() tells the caller whether it won the right to
 * print a report.
 */
0192 static inline int debug_locks_off_graph_unlock(void)
0193 {
0194 int ret = debug_locks_off();
0195
0196 lockdep_unlock();
0197
0198 return ret;
0199 }
0200
0201 unsigned long nr_list_entries;
0202 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
0203 static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
0204
/*
 * Hash table of dynamically registered lock_class_key objects, plus the
 * global lock_class storage and its in-use bitmap. All of this state is
 * protected by the lockdep graph lock.
 */
0211 #define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
0212 #define KEYHASH_SIZE (1UL << KEYHASH_BITS)
0213 static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
0214 unsigned long nr_lock_classes;
0215 unsigned long nr_zapped_classes;
0216 unsigned long max_lock_class_idx;
0217 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
0218 DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
0219
0220 static inline struct lock_class *hlock_class(struct held_lock *hlock)
0221 {
0222 unsigned int class_idx = hlock->class_idx;
0223
0224
0225 barrier();
0226
0227 if (!test_bit(class_idx, lock_classes_in_use)) {
0228
0229
0230
0231 DEBUG_LOCKS_WARN_ON(1);
0232 return NULL;
0233 }
0234
0235
0236
0237
0238
0239 return lock_classes + class_idx;
0240 }
0241
0242 #ifdef CONFIG_LOCK_STAT
0243 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
0244
0245 static inline u64 lockstat_clock(void)
0246 {
0247 return local_clock();
0248 }
0249
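/*
 * Record @ip in the first free slot of @points[] (or find the slot that
 * already holds it) and return its index; LOCKSTAT_POINTS means the
 * table is full.
 */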
0250 static int lock_point(unsigned long points[], unsigned long ip)
0251 {
0252 int i;
0253
0254 for (i = 0; i < LOCKSTAT_POINTS; i++) {
0255 if (points[i] == 0) {
0256 points[i] = ip;
0257 break;
0258 }
0259 if (points[i] == ip)
0260 break;
0261 }
0262
0263 return i;
0264 }
0265
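/*
 * Fold one measured interval into a lock_time accumulator, updating
 * min, max, total and the sample count.
 */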
0266 static void lock_time_inc(struct lock_time *lt, u64 time)
0267 {
0268 if (time > lt->max)
0269 lt->max = time;
0270
0271 if (time < lt->min || !lt->nr)
0272 lt->min = time;
0273
0274 lt->total += time;
0275 lt->nr++;
0276 }
0277
0278 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
0279 {
0280 if (!src->nr)
0281 return;
0282
0283 if (src->max > dst->max)
0284 dst->max = src->max;
0285
0286 if (src->min < dst->min || !dst->nr)
0287 dst->min = src->min;
0288
0289 dst->total += src->total;
0290 dst->nr += src->nr;
0291 }
0292
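/*
 * Sum up the per-CPU statistics of @class into a single
 * lock_class_stats snapshot for reporting.
 */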
0293 struct lock_class_stats lock_stats(struct lock_class *class)
0294 {
0295 struct lock_class_stats stats;
0296 int cpu, i;
0297
0298 memset(&stats, 0, sizeof(struct lock_class_stats));
0299 for_each_possible_cpu(cpu) {
0300 struct lock_class_stats *pcs =
0301 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
0302
0303 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
0304 stats.contention_point[i] += pcs->contention_point[i];
0305
0306 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
0307 stats.contending_point[i] += pcs->contending_point[i];
0308
0309 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
0310 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
0311
0312 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
0313 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
0314
0315 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
0316 stats.bounces[i] += pcs->bounces[i];
0317 }
0318
0319 return stats;
0320 }
0321
0322 void clear_lock_stats(struct lock_class *class)
0323 {
0324 int cpu;
0325
0326 for_each_possible_cpu(cpu) {
0327 struct lock_class_stats *cpu_stats =
0328 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
0329
0330 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
0331 }
0332 memset(class->contention_point, 0, sizeof(class->contention_point));
0333 memset(class->contending_point, 0, sizeof(class->contending_point));
0334 }
0335
0336 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
0337 {
0338 return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
0339 }
0340
0341 static void lock_release_holdtime(struct held_lock *hlock)
0342 {
0343 struct lock_class_stats *stats;
0344 u64 holdtime;
0345
0346 if (!lock_stat)
0347 return;
0348
0349 holdtime = lockstat_clock() - hlock->holdtime_stamp;
0350
0351 stats = get_lock_stats(hlock_class(hlock));
0352 if (hlock->read)
0353 lock_time_inc(&stats->read_holdtime, holdtime);
0354 else
0355 lock_time_inc(&stats->write_holdtime, holdtime);
0356 }
0357 #else
0358 static inline void lock_release_holdtime(struct held_lock *hlock)
0359 {
0360 }
0361 #endif
0362
/*
 * We keep a global list of all lock classes currently in use
 * (all_lock_classes) and a list of the lock_classes[] elements that are
 * still free (free_lock_classes), both linked through ->lock_entry and
 * protected by the graph lock.
 */
0369 static LIST_HEAD(all_lock_classes);
0370 static LIST_HEAD(free_lock_classes);
0371
/*
 * A struct pending_free tracks data structures that were zapped but may
 * still be referenced by RCU readers: the zapped classes themselves and
 * a bitmap of the lock chains being freed.
 */
0378 struct pending_free {
0379 struct list_head zapped;
0380 DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
0381 };
0382
/*
 * delayed_free schedules the actual freeing after an RCU grace period.
 * The two pending_free instances are used in flip-flop fashion: @index
 * selects the one that new zap requests go into, while @scheduled tells
 * whether an RCU callback is already outstanding for the other.
 */
0394 static struct delayed_free {
0395 struct rcu_head rcu_head;
0396 int index;
0397 int scheduled;
0398 struct pending_free pf[2];
0399 } delayed_free;
0400
/*
 * The lock classes are hashed by key for fast lookup:
 */
0404 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
0405 #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
0406 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
0407 #define classhashentry(key) (classhash_table + __classhashfn((key)))
0408
0409 static struct hlist_head classhash_table[CLASSHASH_SIZE];
0410
/*
 * The lock dependency chains are hashed as well, to cache their
 * existence:
 */
0415 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
0416 #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
0417 #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
0418 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
0419
0420 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
0421
/*
 * A held lock is identified inside a chain by its class index plus its
 * read state, packed into 16 bits:
 */
0425 static inline u16 hlock_id(struct held_lock *hlock)
0426 {
0427 BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);
0428
0429 return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
0430 }
0431
0432 static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
0433 {
0434 return hlock_id & (MAX_LOCKDEP_KEYS - 1);
0435 }
0436
/*
 * The chain key is built up incrementally: every held lock id is mixed
 * into the running 64-bit hash of the chain so far.
 */
0443 static inline u64 iterate_chain_key(u64 key, u32 idx)
0444 {
0445 u32 k0 = key, k1 = key >> 32;
0446
0447 __jhash_mix(idx, k0, k1);
0448
0449 return k0 | (u64)k1 << 32;
0450 }
0451
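/*
 * Reset the lockdep state of a freshly created task.
 */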
0452 void lockdep_init_task(struct task_struct *task)
0453 {
0454 task->lockdep_depth = 0;
0455 task->curr_chain_key = INITIAL_CHAIN_KEY;
0456 task->lockdep_recursion = 0;
0457 }
0458
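/*
 * Per-CPU recursion counter management: lockdep must not recurse into
 * itself while it is processing a lockdep event.
 */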
0459 static __always_inline void lockdep_recursion_inc(void)
0460 {
0461 __this_cpu_inc(lockdep_recursion);
0462 }
0463
0464 static __always_inline void lockdep_recursion_finish(void)
0465 {
0466 if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
0467 __this_cpu_write(lockdep_recursion, 0);
0468 }
0469
0470 void lockdep_set_selftest_task(struct task_struct *task)
0471 {
0472 lockdep_selftest_task_struct = task;
0473 }
0474
0475
0476
0477
0478
0479 #define VERBOSE 0
0480 #define VERY_VERBOSE 0
0481
0482 #if VERBOSE
0483 # define HARDIRQ_VERBOSE 1
0484 # define SOFTIRQ_VERBOSE 1
0485 #else
0486 # define HARDIRQ_VERBOSE 0
0487 # define SOFTIRQ_VERBOSE 0
0488 #endif
0489
0490 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
0491
0492
0493
0494 static int class_filter(struct lock_class *class)
0495 {
0496 #if 0
0497
0498 if (class->name_version == 1 &&
0499 !strcmp(class->name, "lockname"))
0500 return 1;
0501 if (class->name_version == 1 &&
0502 !strcmp(class->name, "&struct->lockfield"))
0503 return 1;
0504 #endif
0505
0506 return 0;
0507 }
0508 #endif
0509
0510 static int verbose(struct lock_class *class)
0511 {
0512 #if VERBOSE
0513 return class_filter(class);
0514 #endif
0515 return 0;
0516 }
0517
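/*
 * One-time message printed when lockdep runs out of a statically sized
 * resource and has to turn itself off.
 */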
0518 static void print_lockdep_off(const char *bug_msg)
0519 {
0520 printk(KERN_DEBUG "%s\n", bug_msg);
0521 printk(KERN_DEBUG "turning off the locking correctness validator.\n");
0522 #ifdef CONFIG_LOCK_STAT
0523 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
0524 #endif
0525 }
0526
0527 unsigned long nr_stack_trace_entries;
0528
0529 #ifdef CONFIG_PROVE_LOCKING
/*
 * Stack traces are stored once in the static stack_trace[] array and
 * shared between all users; struct lock_trace carries the hash used to
 * deduplicate identical traces.
 */
0537 struct lock_trace {
0538 struct hlist_node hash_entry;
0539 u32 hash;
0540 u32 nr_entries;
0541 unsigned long entries[] __aligned(sizeof(unsigned long));
0542 };
0543 #define LOCK_TRACE_SIZE_IN_LONGS \
0544 (sizeof(struct lock_trace) / sizeof(unsigned long))
0545
0546
0547
0548 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
0549 static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
0550
0551 static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
0552 {
0553 return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
0554 memcmp(t1->entries, t2->entries,
0555 t1->nr_entries * sizeof(t1->entries[0])) == 0;
0556 }
0557
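/*
 * Save the current stack trace into the static stack_trace[] pool,
 * deduplicating identical traces via a jhash-indexed hash table.
 * Returns the (possibly shared) trace, or NULL once the pool is
 * exhausted and lockdep has been turned off.
 */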
0558 static struct lock_trace *save_trace(void)
0559 {
0560 struct lock_trace *trace, *t2;
0561 struct hlist_head *hash_head;
0562 u32 hash;
0563 int max_entries;
0564
0565 BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
0566 BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
0567
0568 trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
0569 max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
0570 LOCK_TRACE_SIZE_IN_LONGS;
0571
0572 if (max_entries <= 0) {
0573 if (!debug_locks_off_graph_unlock())
0574 return NULL;
0575
0576 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
0577 dump_stack();
0578
0579 return NULL;
0580 }
0581 trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
0582
0583 hash = jhash(trace->entries, trace->nr_entries *
0584 sizeof(trace->entries[0]), 0);
0585 trace->hash = hash;
0586 hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
0587 hlist_for_each_entry(t2, hash_head, hash_entry) {
0588 if (traces_identical(trace, t2))
0589 return t2;
0590 }
0591 nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
0592 hlist_add_head(&trace->hash_entry, hash_head);
0593
0594 return trace;
0595 }
0596
0597
0598 u64 lockdep_stack_trace_count(void)
0599 {
0600 struct lock_trace *trace;
0601 u64 c = 0;
0602 int i;
0603
0604 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
0605 hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
0606 c++;
0607 }
0608 }
0609
0610 return c;
0611 }
0612
0613
0614 u64 lockdep_stack_hash_count(void)
0615 {
0616 u64 c = 0;
0617 int i;
0618
0619 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
0620 if (!hlist_empty(&stack_trace_hash[i]))
0621 c++;
0622
0623 return c;
0624 }
0625 #endif
0626
0627 unsigned int nr_hardirq_chains;
0628 unsigned int nr_softirq_chains;
0629 unsigned int nr_process_chains;
0630 unsigned int max_lockdep_depth;
0631
0632 #ifdef CONFIG_DEBUG_LOCKDEP
0633
0634
0635
0636 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
0637 #endif
0638
0639 #ifdef CONFIG_PROVE_LOCKING
0640
0641
0642
0643
0644 #define __USAGE(__STATE) \
0645 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
0646 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
0647 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
0648 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
0649
0650 static const char *usage_str[] =
0651 {
0652 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
0653 #include "lockdep_states.h"
0654 #undef LOCKDEP_STATE
0655 [LOCK_USED] = "INITIAL USE",
0656 [LOCK_USED_READ] = "INITIAL READ USE",
0657
0658 [LOCK_USAGE_STATES] = "IN-NMI",
0659 };
0660 #endif
0661
0662 const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
0663 {
0664 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
0665 }
0666
0667 static inline unsigned long lock_flag(enum lock_usage_bit bit)
0668 {
0669 return 1UL << bit;
0670 }
0671
0672 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
0673 {
0674
0675
0676
0677
0678 char c = '.';
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
0689 c = '+';
0690 if (class->usage_mask & lock_flag(bit))
0691 c = '?';
0692 } else if (class->usage_mask & lock_flag(bit))
0693 c = '-';
0694
0695 return c;
0696 }
0697
0698 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
0699 {
0700 int i = 0;
0701
0702 #define LOCKDEP_STATE(__STATE) \
0703 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
0704 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
0705 #include "lockdep_states.h"
0706 #undef LOCKDEP_STATE
0707
0708 usage[i] = '\0';
0709 }
0710
0711 static void __print_lock_name(struct lock_class *class)
0712 {
0713 char str[KSYM_NAME_LEN];
0714 const char *name;
0715
0716 name = class->name;
0717 if (!name) {
0718 name = __get_key_name(class->key, str);
0719 printk(KERN_CONT "%s", name);
0720 } else {
0721 printk(KERN_CONT "%s", name);
0722 if (class->name_version > 1)
0723 printk(KERN_CONT "#%d", class->name_version);
0724 if (class->subclass)
0725 printk(KERN_CONT "/%d", class->subclass);
0726 }
0727 }
0728
0729 static void print_lock_name(struct lock_class *class)
0730 {
0731 char usage[LOCK_USAGE_CHARS];
0732
0733 get_usage_chars(class, usage);
0734
0735 printk(KERN_CONT " (");
0736 __print_lock_name(class);
0737 printk(KERN_CONT "){%s}-{%d:%d}", usage,
0738 class->wait_type_outer ?: class->wait_type_inner,
0739 class->wait_type_inner);
0740 }
0741
0742 static void print_lockdep_cache(struct lockdep_map *lock)
0743 {
0744 const char *name;
0745 char str[KSYM_NAME_LEN];
0746
0747 name = lock->name;
0748 if (!name)
0749 name = __get_key_name(lock->key->subkeys, str);
0750
0751 printk(KERN_CONT "%s", name);
0752 }
0753
0754 static void print_lock(struct held_lock *hlock)
0755 {
/*
 * This can be called locklessly (e.g. from debug_show_all_locks()), so
 * the held lock may be released concurrently: take one snapshot of the
 * class and treat a vanished class as "<RELEASED>".
 */
0766 struct lock_class *lock = hlock_class(hlock);
0767
0768 if (!lock) {
0769 printk(KERN_CONT "<RELEASED>\n");
0770 return;
0771 }
0772
0773 printk(KERN_CONT "%px", hlock->instance);
0774 print_lock_name(lock);
0775 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
0776 }
0777
0778 static void lockdep_print_held_locks(struct task_struct *p)
0779 {
0780 int i, depth = READ_ONCE(p->lockdep_depth);
0781
0782 if (!depth)
0783 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
0784 else
0785 printk("%d lock%s held by %s/%d:\n", depth,
0786 depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
0787
0788
0789
0790
0791 if (p != current && task_is_running(p))
0792 return;
0793 for (i = 0; i < depth; i++) {
0794 printk(" #%d: ", i);
0795 print_lock(p->held_locks + i);
0796 }
0797 }
0798
0799 static void print_kernel_ident(void)
0800 {
0801 printk("%s %.*s %s\n", init_utsname()->release,
0802 (int)strcspn(init_utsname()->version, " "),
0803 init_utsname()->version,
0804 print_tainted());
0805 }
0806
0807 static int very_verbose(struct lock_class *class)
0808 {
0809 #if VERY_VERBOSE
0810 return class_filter(class);
0811 #endif
0812 return 0;
0813 }
0814
0815
0816
0817
0818 #ifdef __KERNEL__
0819
0820
0821
0822
0823
0824 #ifndef arch_is_kernel_initmem_freed
0825 static int arch_is_kernel_initmem_freed(unsigned long addr)
0826 {
0827 if (system_state < SYSTEM_FREEING_INITMEM)
0828 return 0;
0829
0830 return init_section_contains((void *)addr, 1);
0831 }
0832 #endif
0833
0834 static int static_obj(const void *obj)
0835 {
0836 unsigned long start = (unsigned long) &_stext,
0837 end = (unsigned long) &_end,
0838 addr = (unsigned long) obj;
0839
0840 if (arch_is_kernel_initmem_freed(addr))
0841 return 0;
0842
0843
0844
0845
0846 if ((addr >= start) && (addr < end))
0847 return 1;
0848
0849
0850
0851
0852 if (is_kernel_percpu_address(addr))
0853 return 1;
0854
0855
0856
0857
0858 return is_module_address(addr) || is_module_percpu_address(addr);
0859 }
0860 #endif
0861
/*
 * Lock class names are not required to be unique. Compute a
 * name_version so that classes with the same name can still be told
 * apart in reports.
 */
0867 static int count_matching_names(struct lock_class *new_class)
0868 {
0869 struct lock_class *class;
0870 int count = 0;
0871
0872 if (!new_class->name)
0873 return 0;
0874
0875 list_for_each_entry(class, &all_lock_classes, lock_entry) {
0876 if (new_class->key - new_class->subclass == class->key)
0877 return class->name_version;
0878 if (class->name && !strcmp(class->name, new_class->name))
0879 count = max(count, class->name_version);
0880 }
0881
0882 return count + 1;
0883 }
0884
0885
0886 static noinstr struct lock_class *
0887 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
0888 {
0889 struct lockdep_subclass_key *key;
0890 struct hlist_head *hash_head;
0891 struct lock_class *class;
0892
0893 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
0894 instrumentation_begin();
0895 debug_locks_off();
0896 printk(KERN_ERR
0897 "BUG: looking up invalid subclass: %u\n", subclass);
0898 printk(KERN_ERR
0899 "turning off the locking correctness validator.\n");
0900 dump_stack();
0901 instrumentation_end();
0902 return NULL;
0903 }
0904
0905
0906
0907
0908
0909 if (unlikely(!lock->key))
0910 return NULL;
0911
0912
0913
0914
0915
0916
0917
0918 BUILD_BUG_ON(sizeof(struct lock_class_key) >
0919 sizeof(struct lockdep_map));
0920
0921 key = lock->key->subkeys + subclass;
0922
0923 hash_head = classhashentry(key);
0924
0925
0926
0927
0928 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
0929 return NULL;
0930
0931 hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
0932 if (class->key == key) {
0933
0934
0935
0936
0937 WARN_ON_ONCE(class->name != lock->name &&
0938 lock->key != &__lockdep_no_validate__);
0939 return class;
0940 }
0941 }
0942
0943 return NULL;
0944 }
0945
/*
 * Static locks do not have their class-keys yet: for them the lock
 * address itself (or its canonical per-cpu address) is used as the key.
 * Dynamically allocated locks must be initialized with an explicit key.
 */
0951 static bool assign_lock_key(struct lockdep_map *lock)
0952 {
0953 unsigned long can_addr, addr = (unsigned long)lock;
0954
0955 #ifdef __KERNEL__
0956
0957
0958
0959
0960
0961
0962
0963 BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
0964 #endif
0965
0966 if (__is_kernel_percpu_address(addr, &can_addr))
0967 lock->key = (void *)can_addr;
0968 else if (__is_module_percpu_address(addr, &can_addr))
0969 lock->key = (void *)can_addr;
0970 else if (static_obj(lock))
0971 lock->key = (void *)lock;
0972 else {
0973
0974 debug_locks_off();
0975 pr_err("INFO: trying to register non-static key.\n");
0976 pr_err("The code is fine but needs lockdep annotation, or maybe\n");
0977 pr_err("you didn't initialize this object before use?\n");
0978 pr_err("turning off the locking correctness validator.\n");
0979 dump_stack();
0980 return false;
0981 }
0982
0983 return true;
0984 }
0985
0986 #ifdef CONFIG_DEBUG_LOCKDEP
0987
0988
0989 static bool in_list(struct list_head *e, struct list_head *h)
0990 {
0991 struct list_head *f;
0992
0993 list_for_each(f, h) {
0994 if (e == f)
0995 return true;
0996 }
0997
0998 return false;
0999 }
1000
1001
1002
1003
1004
1005 static bool in_any_class_list(struct list_head *e)
1006 {
1007 struct lock_class *class;
1008 int i;
1009
1010 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1011 class = &lock_classes[i];
1012 if (in_list(e, &class->locks_after) ||
1013 in_list(e, &class->locks_before))
1014 return true;
1015 }
1016 return false;
1017 }
1018
1019 static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
1020 {
1021 struct lock_list *e;
1022
1023 list_for_each_entry(e, h, entry) {
1024 if (e->links_to != c) {
1025 printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
1026 c->name ? : "(?)",
1027 (unsigned long)(e - list_entries),
1028 e->links_to && e->links_to->name ?
1029 e->links_to->name : "(?)",
1030 e->class && e->class->name ? e->class->name :
1031 "(?)");
1032 return false;
1033 }
1034 }
1035 return true;
1036 }
1037
1038 #ifdef CONFIG_PROVE_LOCKING
1039 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1040 #endif
1041
1042 static bool check_lock_chain_key(struct lock_chain *chain)
1043 {
1044 #ifdef CONFIG_PROVE_LOCKING
1045 u64 chain_key = INITIAL_CHAIN_KEY;
1046 int i;
1047
1048 for (i = chain->base; i < chain->base + chain->depth; i++)
1049 chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
1050
1051
1052
1053
1054 if (chain->chain_key != chain_key) {
1055 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
1056 (unsigned long long)(chain - lock_chains),
1057 (unsigned long long)chain->chain_key,
1058 (unsigned long long)chain_key);
1059 return false;
1060 }
1061 #endif
1062 return true;
1063 }
1064
1065 static bool in_any_zapped_class_list(struct lock_class *class)
1066 {
1067 struct pending_free *pf;
1068 int i;
1069
1070 for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
1071 if (in_list(&class->lock_entry, &pf->zapped))
1072 return true;
1073 }
1074
1075 return false;
1076 }
1077
1078 static bool __check_data_structures(void)
1079 {
1080 struct lock_class *class;
1081 struct lock_chain *chain;
1082 struct hlist_head *head;
1083 struct lock_list *e;
1084 int i;
1085
1086
1087 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1088 class = &lock_classes[i];
1089 if (!in_list(&class->lock_entry, &all_lock_classes) &&
1090 !in_list(&class->lock_entry, &free_lock_classes) &&
1091 !in_any_zapped_class_list(class)) {
1092 printk(KERN_INFO "class %px/%s is not in any class list\n",
1093 class, class->name ? : "(?)");
1094 return false;
1095 }
1096 }
1097
1098
1099 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1100 class = &lock_classes[i];
1101 if (!class_lock_list_valid(class, &class->locks_before))
1102 return false;
1103 if (!class_lock_list_valid(class, &class->locks_after))
1104 return false;
1105 }
1106
1107
1108 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1109 head = chainhash_table + i;
1110 hlist_for_each_entry_rcu(chain, head, entry) {
1111 if (!check_lock_chain_key(chain))
1112 return false;
1113 }
1114 }
1115
1116
1117
1118
1119
1120 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1121 e = list_entries + i;
1122 if (!in_any_class_list(&e->entry)) {
1123 printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1124 (unsigned int)(e - list_entries),
1125 e->class->name ? : "(?)",
1126 e->links_to->name ? : "(?)");
1127 return false;
1128 }
1129 }
1130
1131
1132
1133
1134
1135 for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1136 e = list_entries + i;
1137 if (in_any_class_list(&e->entry)) {
1138 printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1139 (unsigned int)(e - list_entries),
1140 e->class && e->class->name ? e->class->name :
1141 "(?)",
1142 e->links_to && e->links_to->name ?
1143 e->links_to->name : "(?)");
1144 return false;
1145 }
1146 }
1147
1148 return true;
1149 }
1150
1151 int check_consistency = 0;
1152 module_param(check_consistency, int, 0644);
1153
1154 static void check_data_structures(void)
1155 {
1156 static bool once = false;
1157
1158 if (check_consistency && !once) {
1159 if (!__check_data_structures()) {
1160 once = true;
1161 WARN_ON(once);
1162 }
1163 }
1164 }
1165
1166 #else
1167
1168 static inline void check_data_structures(void) { }
1169
1170 #endif
1171
1172 static void init_chain_block_buckets(void);
1173
/*
 * Initialize the per-class lists, the pending-free state and the chain
 * block buckets. Safe to call repeatedly; the work is only done once.
 */
1178 static void init_data_structures_once(void)
1179 {
1180 static bool __read_mostly ds_initialized, rcu_head_initialized;
1181 int i;
1182
1183 if (likely(rcu_head_initialized))
1184 return;
1185
1186 if (system_state >= SYSTEM_SCHEDULING) {
1187 init_rcu_head(&delayed_free.rcu_head);
1188 rcu_head_initialized = true;
1189 }
1190
1191 if (ds_initialized)
1192 return;
1193
1194 ds_initialized = true;
1195
1196 INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1197 INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1198
1199 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1200 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1201 INIT_LIST_HEAD(&lock_classes[i].locks_after);
1202 INIT_LIST_HEAD(&lock_classes[i].locks_before);
1203 }
1204 init_chain_block_buckets();
1205 }
1206
1207 static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1208 {
1209 unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1210
1211 return lock_keys_hash + hash;
1212 }
1213
1214
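/*
 * Register a dynamically allocated key as valid for use with lockdep by
 * adding it to the lock_keys_hash table.
 */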
1215 void lockdep_register_key(struct lock_class_key *key)
1216 {
1217 struct hlist_head *hash_head;
1218 struct lock_class_key *k;
1219 unsigned long flags;
1220
1221 if (WARN_ON_ONCE(static_obj(key)))
1222 return;
1223 hash_head = keyhashentry(key);
1224
1225 raw_local_irq_save(flags);
1226 if (!graph_lock())
1227 goto restore_irqs;
1228 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1229 if (WARN_ON_ONCE(k == key))
1230 goto out_unlock;
1231 }
1232 hlist_add_head_rcu(&key->hash_entry, hash_head);
1233 out_unlock:
1234 graph_unlock();
1235 restore_irqs:
1236 raw_local_irq_restore(flags);
1237 }
1238 EXPORT_SYMBOL_GPL(lockdep_register_key);
1239
1240
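/*
 * Check whether @key has been registered through lockdep_register_key().
 * Such keys live in lock_keys_hash rather than in static kernel data.
 */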
1241 static bool is_dynamic_key(const struct lock_class_key *key)
1242 {
1243 struct hlist_head *hash_head;
1244 struct lock_class_key *k;
1245 bool found = false;
1246
1247 if (WARN_ON_ONCE(static_obj(key)))
1248 return false;
1249
1250
1251
1252
1253
1254
1255 if (!debug_locks)
1256 return true;
1257
1258 hash_head = keyhashentry(key);
1259
1260 rcu_read_lock();
1261 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1262 if (k == key) {
1263 found = true;
1264 break;
1265 }
1266 }
1267 rcu_read_unlock();
1268
1269 return found;
1270 }
1271
/*
 * Register a lock's class in the hash-table, if the class is not
 * present yet. Otherwise return the existing class. Called with
 * interrupts disabled.
 */
1277 static struct lock_class *
1278 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1279 {
1280 struct lockdep_subclass_key *key;
1281 struct hlist_head *hash_head;
1282 struct lock_class *class;
1283 int idx;
1284
1285 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1286
1287 class = look_up_lock_class(lock, subclass);
1288 if (likely(class))
1289 goto out_set_class_cache;
1290
1291 if (!lock->key) {
1292 if (!assign_lock_key(lock))
1293 return NULL;
1294 } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1295 return NULL;
1296 }
1297
1298 key = lock->key->subkeys + subclass;
1299 hash_head = classhashentry(key);
1300
1301 if (!graph_lock()) {
1302 return NULL;
1303 }
1304
1305
1306
1307
1308 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1309 if (class->key == key)
1310 goto out_unlock_set;
1311 }
1312
1313 init_data_structures_once();
1314
1315
1316 class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1317 lock_entry);
1318 if (!class) {
1319 if (!debug_locks_off_graph_unlock()) {
1320 return NULL;
1321 }
1322
1323 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1324 dump_stack();
1325 return NULL;
1326 }
1327 nr_lock_classes++;
1328 __set_bit(class - lock_classes, lock_classes_in_use);
1329 debug_atomic_inc(nr_unused_locks);
1330 class->key = key;
1331 class->name = lock->name;
1332 class->subclass = subclass;
1333 WARN_ON_ONCE(!list_empty(&class->locks_before));
1334 WARN_ON_ONCE(!list_empty(&class->locks_after));
1335 class->name_version = count_matching_names(class);
1336 class->wait_type_inner = lock->wait_type_inner;
1337 class->wait_type_outer = lock->wait_type_outer;
1338 class->lock_type = lock->lock_type;
1339
1340
1341
1342
1343 hlist_add_head_rcu(&class->hash_entry, hash_head);
1344
1345
1346
1347
1348 list_move_tail(&class->lock_entry, &all_lock_classes);
1349 idx = class - lock_classes;
1350 if (idx > max_lock_class_idx)
1351 max_lock_class_idx = idx;
1352
1353 if (verbose(class)) {
1354 graph_unlock();
1355
1356 printk("\nnew class %px: %s", class->key, class->name);
1357 if (class->name_version > 1)
1358 printk(KERN_CONT "#%d", class->name_version);
1359 printk(KERN_CONT "\n");
1360 dump_stack();
1361
1362 if (!graph_lock()) {
1363 return NULL;
1364 }
1365 }
1366 out_unlock_set:
1367 graph_unlock();
1368
1369 out_set_class_cache:
1370 if (!subclass || force)
1371 lock->class_cache[0] = class;
1372 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1373 lock->class_cache[subclass] = class;
1374
1375
1376
1377
1378
1379 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1380 return NULL;
1381
1382 return class;
1383 }
1384
1385 #ifdef CONFIG_PROVE_LOCKING
1386
1387
1388
1389
1390 static struct lock_list *alloc_list_entry(void)
1391 {
1392 int idx = find_first_zero_bit(list_entries_in_use,
1393 ARRAY_SIZE(list_entries));
1394
1395 if (idx >= ARRAY_SIZE(list_entries)) {
1396 if (!debug_locks_off_graph_unlock())
1397 return NULL;
1398
1399 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1400 dump_stack();
1401 return NULL;
1402 }
1403 nr_list_entries++;
1404 __set_bit(idx, list_entries_in_use);
1405 return list_entries + idx;
1406 }
1407
1408
1409
1410
1411 static int add_lock_to_list(struct lock_class *this,
1412 struct lock_class *links_to, struct list_head *head,
1413 u16 distance, u8 dep,
1414 const struct lock_trace *trace)
1415 {
1416 struct lock_list *entry;
1417
1418
1419
1420
1421 entry = alloc_list_entry();
1422 if (!entry)
1423 return 0;
1424
1425 entry->class = this;
1426 entry->links_to = links_to;
1427 entry->dep = dep;
1428 entry->distance = distance;
1429 entry->trace = trace;
1430
1431
1432
1433
1434
1435 list_add_tail_rcu(&entry->entry, head);
1436
1437 return 1;
1438 }
1439
1440
1441
1442
1443 #define MAX_CIRCULAR_QUEUE_SIZE (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
1444 #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
1445
/*
 * The circular queue is the work list for the breadth-first search of
 * the dependency graph. Its size is a power of two so that the front
 * and rear indices can simply be masked with CQ_MASK when they wrap.
 */
1456 struct circular_queue {
1457 struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
1458 unsigned int front, rear;
1459 };
1460
1461 static struct circular_queue lock_cq;
1462
1463 unsigned int max_bfs_queue_depth;
1464
1465 static unsigned int lockdep_dependency_gen_id;
1466
1467 static inline void __cq_init(struct circular_queue *cq)
1468 {
1469 cq->front = cq->rear = 0;
1470 lockdep_dependency_gen_id++;
1471 }
1472
1473 static inline int __cq_empty(struct circular_queue *cq)
1474 {
1475 return (cq->front == cq->rear);
1476 }
1477
1478 static inline int __cq_full(struct circular_queue *cq)
1479 {
1480 return ((cq->rear + 1) & CQ_MASK) == cq->front;
1481 }
1482
1483 static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
1484 {
1485 if (__cq_full(cq))
1486 return -1;
1487
1488 cq->element[cq->rear] = elem;
1489 cq->rear = (cq->rear + 1) & CQ_MASK;
1490 return 0;
1491 }
1492
1493
1494
1495
1496
1497 static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
1498 {
1499 struct lock_list * lock;
1500
1501 if (__cq_empty(cq))
1502 return NULL;
1503
1504 lock = cq->element[cq->front];
1505 cq->front = (cq->front + 1) & CQ_MASK;
1506
1507 return lock;
1508 }
1509
1510 static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
1511 {
1512 return (cq->rear - cq->front) & CQ_MASK;
1513 }
1514
1515 static inline void mark_lock_accessed(struct lock_list *lock)
1516 {
1517 lock->class->dep_gen_id = lockdep_dependency_gen_id;
1518 }
1519
1520 static inline void visit_lock_entry(struct lock_list *lock,
1521 struct lock_list *parent)
1522 {
1523 lock->parent = parent;
1524 }
1525
1526 static inline unsigned long lock_accessed(struct lock_list *lock)
1527 {
1528 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1529 }
1530
1531 static inline struct lock_list *get_lock_parent(struct lock_list *child)
1532 {
1533 return child->parent;
1534 }
1535
1536 static inline int get_lock_depth(struct lock_list *child)
1537 {
1538 int depth = 0;
1539 struct lock_list *parent;
1540
1541 while ((parent = get_lock_parent(child))) {
1542 child = parent;
1543 depth++;
1544 }
1545 return depth;
1546 }
1547
1548
1549
1550
1551
1552
1553
1554
1555 static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
1556 {
1557 void *lock_class = lock->class;
1558
1559 return lock_class + offset;
1560 }
1561
/*
 * Return values of a BFS walk over the dependency graph:
 *
 *   BFS_EINVALIDNODE: a node on the path has no associated lock class
 *   BFS_EQUEUEFULL:   the circular queue overflowed
 *   BFS_RMATCH:       an entry matching the search condition was found
 *   BFS_RNOMATCH:     the reachable sub-graph was scanned without a match
 */
1577 enum bfs_result {
1578 BFS_EINVALIDNODE = -2,
1579 BFS_EQUEUEFULL = -1,
1580 BFS_RMATCH = 0,
1581 BFS_RNOMATCH = 1,
1582 };
1583
1584
1585
1586
1587 static inline bool bfs_error(enum bfs_result res)
1588 {
1589 return res < 0;
1590 }
1591
/*
 * Each dependency edge records which kinds of acquisitions created it,
 * as a small bitmask. The first letter describes the earlier lock
 * (S: shared/read, E: exclusive/write), the second the later one
 * (R: recursive read, N: non-recursive):
 *
 *   DEP_SR: shared -> recursive-read    DEP_ER: exclusive -> recursive-read
 *   DEP_SN: shared -> non-recursive     DEP_EN: exclusive -> non-recursive
 */
1607 #define DEP_SR_BIT (0 + (0 << 1))
1608 #define DEP_ER_BIT (1 + (0 << 1))
1609 #define DEP_SN_BIT (0 + (1 << 1))
1610 #define DEP_EN_BIT (1 + (1 << 1))
1611
1612 #define DEP_SR_MASK (1U << (DEP_SR_BIT))
1613 #define DEP_ER_MASK (1U << (DEP_ER_BIT))
1614 #define DEP_SN_MASK (1U << (DEP_SN_BIT))
1615 #define DEP_EN_MASK (1U << (DEP_EN_BIT))
1616
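/*
 * Compute the DEP_*_BIT index for the dependency created by acquiring
 * @next while holding @prev: bit 0 is set when @prev is held exclusively
 * (not a read), bit 1 when @next is not a recursive read.
 */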
1617 static inline unsigned int
1618 __calc_dep_bit(struct held_lock *prev, struct held_lock *next)
1619 {
1620 return (prev->read == 0) + ((next->read != 2) << 1);
1621 }
1622
1623 static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
1624 {
1625 return 1U << __calc_dep_bit(prev, next);
1626 }
1627
1628
1629
1630
1631
1632 static inline unsigned int
1633 __calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
1634 {
1635 return (next->read != 2) + ((prev->read == 0) << 1);
1636 }
1637
1638 static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
1639 {
1640 return 1U << __calc_dep_bitb(prev, next);
1641 }
1642
1643
1644
1645
1646
1647 static inline void __bfs_init_root(struct lock_list *lock,
1648 struct lock_class *class)
1649 {
1650 lock->class = class;
1651 lock->parent = NULL;
1652 lock->only_xr = 0;
1653 }
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663 static inline void bfs_init_root(struct lock_list *lock,
1664 struct held_lock *hlock)
1665 {
1666 __bfs_init_root(lock, hlock_class(hlock));
1667 lock->only_xr = (hlock->read == 2);
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677 static inline void bfs_init_rootb(struct lock_list *lock,
1678 struct held_lock *hlock)
1679 {
1680 __bfs_init_root(lock, hlock_class(hlock));
1681 lock->only_xr = (hlock->read != 0);
1682 }
1683
1684 static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
1685 {
1686 if (!lock || !lock->parent)
1687 return NULL;
1688
1689 return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
1690 &lock->entry, struct lock_list, entry);
1691 }
1692
/*
 * __bfs() - breadth-first search over the dependency (sub)graph rooted
 * at @source_entry, walking either the locks_after or locks_before
 * lists depending on @offset.
 *
 * @match decides whether an entry terminates the search (BFS_RMATCH,
 * with the entry returned through @target_entry), while @skip allows
 * pruning entries without visiting their children. Already-visited
 * classes are detected via their dep_gen_id, and the only_xr/dep
 * bookkeeping filters out dependency paths that are made irrelevant by
 * recursive-read locks.
 *
 * Must be called with the graph lock held.
 */
1720 static enum bfs_result __bfs(struct lock_list *source_entry,
1721 void *data,
1722 bool (*match)(struct lock_list *entry, void *data),
1723 bool (*skip)(struct lock_list *entry, void *data),
1724 struct lock_list **target_entry,
1725 int offset)
1726 {
1727 struct circular_queue *cq = &lock_cq;
1728 struct lock_list *lock = NULL;
1729 struct lock_list *entry;
1730 struct list_head *head;
1731 unsigned int cq_depth;
1732 bool first;
1733
1734 lockdep_assert_locked();
1735
1736 __cq_init(cq);
1737 __cq_enqueue(cq, source_entry);
1738
1739 while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
1740 if (!lock->class)
1741 return BFS_EINVALIDNODE;
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752 if (lock_accessed(lock))
1753 continue;
1754 else
1755 mark_lock_accessed(lock);
1756
1757
1758
1759
1760
1761 if (lock->parent) {
1762 u8 dep = lock->dep;
1763 bool prev_only_xr = lock->parent->only_xr;
1764
1765
1766
1767
1768
1769
1770 if (prev_only_xr)
1771 dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
1772
1773
1774 if (!dep)
1775 continue;
1776
1777
1778 lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
1779 }
1780
1781
1782
1783
1784
1785
1786
1787 if (skip && skip(lock, data))
1788 continue;
1789
1790 if (match(lock, data)) {
1791 *target_entry = lock;
1792 return BFS_RMATCH;
1793 }
1794
1795
1796
1797
1798
1799
1800 first = true;
1801 head = get_dep_list(lock, offset);
1802 list_for_each_entry_rcu(entry, head, entry) {
1803 visit_lock_entry(entry, lock);
1804
1805
1806
1807
1808
1809
1810
1811 if (!first)
1812 continue;
1813
1814 first = false;
1815
1816 if (__cq_enqueue(cq, entry))
1817 return BFS_EQUEUEFULL;
1818
1819 cq_depth = __cq_get_elem_count(cq);
1820 if (max_bfs_queue_depth < cq_depth)
1821 max_bfs_queue_depth = cq_depth;
1822 }
1823 }
1824
1825 return BFS_RNOMATCH;
1826 }
1827
1828 static inline enum bfs_result
1829 __bfs_forwards(struct lock_list *src_entry,
1830 void *data,
1831 bool (*match)(struct lock_list *entry, void *data),
1832 bool (*skip)(struct lock_list *entry, void *data),
1833 struct lock_list **target_entry)
1834 {
1835 return __bfs(src_entry, data, match, skip, target_entry,
1836 offsetof(struct lock_class, locks_after));
1837
1838 }
1839
1840 static inline enum bfs_result
1841 __bfs_backwards(struct lock_list *src_entry,
1842 void *data,
1843 bool (*match)(struct lock_list *entry, void *data),
1844 bool (*skip)(struct lock_list *entry, void *data),
1845 struct lock_list **target_entry)
1846 {
1847 return __bfs(src_entry, data, match, skip, target_entry,
1848 offsetof(struct lock_class, locks_before));
1849
1850 }
1851
1852 static void print_lock_trace(const struct lock_trace *trace,
1853 unsigned int spaces)
1854 {
1855 stack_trace_print(trace->entries, trace->nr_entries, spaces);
1856 }
1857
1858
1859
1860
1861
1862 static noinline void
1863 print_circular_bug_entry(struct lock_list *target, int depth)
1864 {
1865 if (debug_locks_silent)
1866 return;
1867 printk("\n-> #%u", depth);
1868 print_lock_name(target->class);
1869 printk(KERN_CONT ":\n");
1870 print_lock_trace(target->trace, 6);
1871 }
1872
1873 static void
1874 print_circular_lock_scenario(struct held_lock *src,
1875 struct held_lock *tgt,
1876 struct lock_list *prt)
1877 {
1878 struct lock_class *source = hlock_class(src);
1879 struct lock_class *target = hlock_class(tgt);
1880 struct lock_class *parent = prt->class;
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895 if (parent != source) {
1896 printk("Chain exists of:\n ");
1897 __print_lock_name(source);
1898 printk(KERN_CONT " --> ");
1899 __print_lock_name(parent);
1900 printk(KERN_CONT " --> ");
1901 __print_lock_name(target);
1902 printk(KERN_CONT "\n\n");
1903 }
1904
1905 printk(" Possible unsafe locking scenario:\n\n");
1906 printk(" CPU0 CPU1\n");
1907 printk(" ---- ----\n");
1908 printk(" lock(");
1909 __print_lock_name(target);
1910 printk(KERN_CONT ");\n");
1911 printk(" lock(");
1912 __print_lock_name(parent);
1913 printk(KERN_CONT ");\n");
1914 printk(" lock(");
1915 __print_lock_name(target);
1916 printk(KERN_CONT ");\n");
1917 printk(" lock(");
1918 __print_lock_name(source);
1919 printk(KERN_CONT ");\n");
1920 printk("\n *** DEADLOCK ***\n\n");
1921 }
1922
1923
1924
1925
1926
1927 static noinline void
1928 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1929 struct held_lock *check_src,
1930 struct held_lock *check_tgt)
1931 {
1932 struct task_struct *curr = current;
1933
1934 if (debug_locks_silent)
1935 return;
1936
1937 pr_warn("\n");
1938 pr_warn("======================================================\n");
1939 pr_warn("WARNING: possible circular locking dependency detected\n");
1940 print_kernel_ident();
1941 pr_warn("------------------------------------------------------\n");
1942 pr_warn("%s/%d is trying to acquire lock:\n",
1943 curr->comm, task_pid_nr(curr));
1944 print_lock(check_src);
1945
1946 pr_warn("\nbut task is already holding lock:\n");
1947
1948 print_lock(check_tgt);
1949 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1950 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1951
1952 print_circular_bug_entry(entry, depth);
1953 }
1954
/*
 * hlock_equal(): match function for BFS walks that look for a class
 * "equal to" @hlock. An entry matches if it is the same class and the
 * way it is reached still covers how @hlock is acquired: a recursive
 * read @hlock matches anything, otherwise the path must not consist of
 * -(*R)-> dependencies only.
 */
1981 static inline bool hlock_equal(struct lock_list *entry, void *data)
1982 {
1983 struct held_lock *hlock = (struct held_lock *)data;
1984
1985 return hlock_class(hlock) == entry->class &&
1986 (hlock->read == 2 ||
1987 !entry->only_xr);
1988 }
1989
/*
 * hlock_conflict(): match function used by the circular-dependency
 * check. An entry conflicts with @hlock if it is the same class and the
 * combination is strong enough to form a cycle: a write acquisition of
 * @hlock conflicts with any path, a read acquisition only with paths
 * that are not made up of -(*R)-> dependencies alone.
 */
2008 static inline bool hlock_conflict(struct lock_list *entry, void *data)
2009 {
2010 struct held_lock *hlock = (struct held_lock *)data;
2011
2012 return hlock_class(hlock) == entry->class &&
2013 (hlock->read == 0 ||
2014 !entry->only_xr);
2015 }
2016
2017 static noinline void print_circular_bug(struct lock_list *this,
2018 struct lock_list *target,
2019 struct held_lock *check_src,
2020 struct held_lock *check_tgt)
2021 {
2022 struct task_struct *curr = current;
2023 struct lock_list *parent;
2024 struct lock_list *first_parent;
2025 int depth;
2026
2027 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2028 return;
2029
2030 this->trace = save_trace();
2031 if (!this->trace)
2032 return;
2033
2034 depth = get_lock_depth(target);
2035
2036 print_circular_bug_header(target, depth, check_src, check_tgt);
2037
2038 parent = get_lock_parent(target);
2039 first_parent = parent;
2040
2041 while (parent) {
2042 print_circular_bug_entry(parent, --depth);
2043 parent = get_lock_parent(parent);
2044 }
2045
2046 printk("\nother info that might help us debug this:\n\n");
2047 print_circular_lock_scenario(check_src, check_tgt,
2048 first_parent);
2049
2050 lockdep_print_held_locks(curr);
2051
2052 printk("\nstack backtrace:\n");
2053 dump_stack();
2054 }
2055
2056 static noinline void print_bfs_bug(int ret)
2057 {
2058 if (!debug_locks_off_graph_unlock())
2059 return;
2060
2061
2062
2063
2064 WARN(1, "lockdep bfs error:%d\n", ret);
2065 }
2066
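/*
 * BFS match callback that never matches and merely counts the visited
 * entries; used by the forward/backward dependency counters below.
 */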
2067 static bool noop_count(struct lock_list *entry, void *data)
2068 {
2069 (*(unsigned long *)data)++;
2070 return false;
2071 }
2072
2073 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
2074 {
2075 unsigned long count = 0;
2076 struct lock_list *target_entry;
2077
2078 __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
2079
2080 return count;
2081 }
2082 unsigned long lockdep_count_forward_deps(struct lock_class *class)
2083 {
2084 unsigned long ret, flags;
2085 struct lock_list this;
2086
2087 __bfs_init_root(&this, class);
2088
2089 raw_local_irq_save(flags);
2090 lockdep_lock();
2091 ret = __lockdep_count_forward_deps(&this);
2092 lockdep_unlock();
2093 raw_local_irq_restore(flags);
2094
2095 return ret;
2096 }
2097
2098 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
2099 {
2100 unsigned long count = 0;
2101 struct lock_list *target_entry;
2102
2103 __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
2104
2105 return count;
2106 }
2107
2108 unsigned long lockdep_count_backward_deps(struct lock_class *class)
2109 {
2110 unsigned long ret, flags;
2111 struct lock_list this;
2112
2113 __bfs_init_root(&this, class);
2114
2115 raw_local_irq_save(flags);
2116 lockdep_lock();
2117 ret = __lockdep_count_backward_deps(&this);
2118 lockdep_unlock();
2119 raw_local_irq_restore(flags);
2120
2121 return ret;
2122 }
2123
2124
2125
2126
2127
2128 static noinline enum bfs_result
2129 check_path(struct held_lock *target, struct lock_list *src_entry,
2130 bool (*match)(struct lock_list *entry, void *data),
2131 bool (*skip)(struct lock_list *entry, void *data),
2132 struct lock_list **target_entry)
2133 {
2134 enum bfs_result ret;
2135
2136 ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
2137
2138 if (unlikely(bfs_error(ret)))
2139 print_bfs_bug(ret);
2140
2141 return ret;
2142 }
2143
/*
 * Prove that the dependency graph starting at <src> can not lead back
 * to <target>, i.e. that adding the new src -> target dependency would
 * not create a cycle. On a match the circular-bug report is printed.
 */
2151 static noinline enum bfs_result
2152 check_noncircular(struct held_lock *src, struct held_lock *target,
2153 struct lock_trace **const trace)
2154 {
2155 enum bfs_result ret;
2156 struct lock_list *target_entry;
2157 struct lock_list src_entry;
2158
2159 bfs_init_root(&src_entry, src);
2160
2161 debug_atomic_inc(nr_cyclic_checks);
2162
2163 ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
2164
2165 if (unlikely(ret == BFS_RMATCH)) {
2166 if (!*trace) {
2167
2168
2169
2170
2171
2172 *trace = save_trace();
2173 }
2174
2175 print_circular_bug(&src_entry, target_entry, src, target);
2176 }
2177
2178 return ret;
2179 }
2180
2181 #ifdef CONFIG_TRACE_IRQFLAGS
/*
 * The irq-safety checks below walk the dependency graph and combine or
 * match the usage masks of the classes they reach. A class that is
 * reached only through -(*R)-> (recursive-read) dependencies has its
 * *_READ usage bits masked out (only LOCKF_IRQ is kept), because a read
 * lock at that point cannot complete the irq-safe -> irq-unsafe
 * inversion on its own.
 */
2224 static inline bool usage_accumulate(struct lock_list *entry, void *mask)
2225 {
2226 if (!entry->only_xr)
2227 *(unsigned long *)mask |= entry->class->usage_mask;
2228 else
2229 *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
2230
2231 return false;
2232 }
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243 static inline bool usage_match(struct lock_list *entry, void *mask)
2244 {
2245 if (!entry->only_xr)
2246 return !!(entry->class->usage_mask & *(unsigned long *)mask);
2247 else
2248 return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
2249 }
2250
2251 static inline bool usage_skip(struct lock_list *entry, void *mask)
2252 {
/*
 * Skip local_lock() classes (LD_LOCK_PERCPU) for irq inversion
 * detection: they only serialize against themselves on the local CPU
 * and cannot take part in the inversions this search looks for, so the
 * search does not descend below them. Their inner wait type is expected
 * to be at least LD_WAIT_CONFIG, otherwise the data is inconsistent.
 */
2279 if (entry->class->lock_type == LD_LOCK_PERCPU) {
2280 if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
2281 return false;
2282
2283 return true;
2284 }
2285
2286 return false;
2287 }
2288
2289
2290
2291
2292
2293
2294
2295
2296 static enum bfs_result
2297 find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
2298 struct lock_list **target_entry)
2299 {
2300 enum bfs_result result;
2301
2302 debug_atomic_inc(nr_find_usage_forwards_checks);
2303
2304 result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2305
2306 return result;
2307 }
2308
2309
2310
2311
2312
2313 static enum bfs_result
2314 find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
2315 struct lock_list **target_entry)
2316 {
2317 enum bfs_result result;
2318
2319 debug_atomic_inc(nr_find_usage_backwards_checks);
2320
2321 result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2322
2323 return result;
2324 }
2325
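/*
 * Print one lock class together with its usage bits and the stack trace
 * recorded for each usage that has been observed.
 */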
2326 static void print_lock_class_header(struct lock_class *class, int depth)
2327 {
2328 int bit;
2329
2330 printk("%*s->", depth, "");
2331 print_lock_name(class);
2332 #ifdef CONFIG_DEBUG_LOCKDEP
2333 printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
2334 #endif
2335 printk(KERN_CONT " {\n");
2336
2337 for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
2338 if (class->usage_mask & (1 << bit)) {
2339 int len = depth;
2340
2341 len += printk("%*s %s", depth, "", usage_str[bit]);
2342 len += printk(KERN_CONT " at:\n");
2343 print_lock_trace(class->usage_traces[bit], len);
2344 }
2345 }
2346 printk("%*s }\n", depth, "");
2347
2348 printk("%*s ... key at: [<%px>] %pS\n",
2349 depth, "", class->key, class->key);
2350 }
2351
/*
 * Dependency path printing. A path found by BFS is a chain of lock_list
 * entries linked through their ->parent pointers, from the matched leaf
 * back to the root. For a forward search each entry's ->trace records
 * where the dependency from its parent to itself was created, so the
 * printer simply walks from the leaf towards the root and prints every
 * class together with its own acquisition trace.
 */
2404 static void __used
2405 print_shortest_lock_dependencies(struct lock_list *leaf,
2406 struct lock_list *root)
2407 {
2408 struct lock_list *entry = leaf;
2409 int depth;
2410
2411
2412 depth = get_lock_depth(leaf);
2413
2414 do {
2415 print_lock_class_header(entry->class, depth);
2416 printk("%*s ... acquired at:\n", depth, "");
2417 print_lock_trace(entry->trace, 2);
2418 printk("\n");
2419
2420 if (depth == 0 && (entry != root)) {
2421 printk("lockdep:%s bad path found in chain graph\n", __func__);
2422 break;
2423 }
2424
2425 entry = get_lock_parent(entry);
2426 depth--;
2427 } while (entry && (depth >= 0));
2428 }
2429
/*
 * For a path found by a backwards search the ->trace of an entry
 * describes the dependency towards its parent rather than towards
 * itself, so the traces are shifted by one when printing: each class is
 * shown with the trace taken from the entry printed just before it.
 */
2450 static void __used
2451 print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
2452 struct lock_list *root)
2453 {
2454 struct lock_list *entry = leaf;
2455 const struct lock_trace *trace = NULL;
2456 int depth;
2457
2458
2459 depth = get_lock_depth(leaf);
2460
2461 do {
2462 print_lock_class_header(entry->class, depth);
2463 if (trace) {
2464 printk("%*s ... acquired at:\n", depth, "");
2465 print_lock_trace(trace, 2);
2466 printk("\n");
2467 }
2468
2469
2470
2471
2472
2473 trace = entry->trace;
2474
2475 if (depth == 0 && (entry != root)) {
2476 printk("lockdep:%s bad path found in chain graph\n", __func__);
2477 break;
2478 }
2479
2480 entry = get_lock_parent(entry);
2481 depth--;
2482 } while (entry && (depth >= 0));
2483 }
2484
2485 static void
2486 print_irq_lock_scenario(struct lock_list *safe_entry,
2487 struct lock_list *unsafe_entry,
2488 struct lock_class *prev_class,
2489 struct lock_class *next_class)
2490 {
2491 struct lock_class *safe_class = safe_entry->class;
2492 struct lock_class *unsafe_class = unsafe_entry->class;
2493 struct lock_class *middle_class = prev_class;
2494
2495 if (middle_class == safe_class)
2496 middle_class = next_class;
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511 if (middle_class != unsafe_class) {
2512 printk("Chain exists of:\n ");
2513 __print_lock_name(safe_class);
2514 printk(KERN_CONT " --> ");
2515 __print_lock_name(middle_class);
2516 printk(KERN_CONT " --> ");
2517 __print_lock_name(unsafe_class);
2518 printk(KERN_CONT "\n\n");
2519 }
2520
2521 printk(" Possible interrupt unsafe locking scenario:\n\n");
2522 printk(" CPU0 CPU1\n");
2523 printk(" ---- ----\n");
2524 printk(" lock(");
2525 __print_lock_name(unsafe_class);
2526 printk(KERN_CONT ");\n");
2527 printk(" local_irq_disable();\n");
2528 printk(" lock(");
2529 __print_lock_name(safe_class);
2530 printk(KERN_CONT ");\n");
2531 printk(" lock(");
2532 __print_lock_name(middle_class);
2533 printk(KERN_CONT ");\n");
2534 printk(" <Interrupt>\n");
2535 printk(" lock(");
2536 __print_lock_name(safe_class);
2537 printk(KERN_CONT ");\n");
2538 printk("\n *** DEADLOCK ***\n\n");
2539 }
2540
2541 static void
2542 print_bad_irq_dependency(struct task_struct *curr,
2543 struct lock_list *prev_root,
2544 struct lock_list *next_root,
2545 struct lock_list *backwards_entry,
2546 struct lock_list *forwards_entry,
2547 struct held_lock *prev,
2548 struct held_lock *next,
2549 enum lock_usage_bit bit1,
2550 enum lock_usage_bit bit2,
2551 const char *irqclass)
2552 {
2553 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2554 return;
2555
2556 pr_warn("\n");
2557 pr_warn("=====================================================\n");
2558 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2559 irqclass, irqclass);
2560 print_kernel_ident();
2561 pr_warn("-----------------------------------------------------\n");
2562 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2563 curr->comm, task_pid_nr(curr),
2564 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
2565 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2566 lockdep_hardirqs_enabled(),
2567 curr->softirqs_enabled);
2568 print_lock(next);
2569
2570 pr_warn("\nand this task is already holding:\n");
2571 print_lock(prev);
2572 pr_warn("which would create a new lock dependency:\n");
2573 print_lock_name(hlock_class(prev));
2574 pr_cont(" ->");
2575 print_lock_name(hlock_class(next));
2576 pr_cont("\n");
2577
2578 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2579 irqclass);
2580 print_lock_name(backwards_entry->class);
2581 pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
2582
2583 print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
2584
2585 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
2586 print_lock_name(forwards_entry->class);
2587 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2588 pr_warn("...");
2589
2590 print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2591
2592 pr_warn("\nother info that might help us debug this:\n\n");
2593 print_irq_lock_scenario(backwards_entry, forwards_entry,
2594 hlock_class(prev), hlock_class(next));
2595
2596 lockdep_print_held_locks(curr);
2597
2598 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2599 print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
2600
2601 pr_warn("\nthe dependencies between the lock to be acquired");
2602 pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2603 next_root->trace = save_trace();
2604 if (!next_root->trace)
2605 return;
2606 print_shortest_lock_dependencies(forwards_entry, next_root);
2607
2608 pr_warn("\nstack backtrace:\n");
2609 dump_stack();
2610 }
2611
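/*
 * Human-readable names of the lockdep usage states, in plain and -READ
 * variants.
 */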
2612 static const char *state_names[] = {
2613 #define LOCKDEP_STATE(__STATE) \
2614 __stringify(__STATE),
2615 #include "lockdep_states.h"
2616 #undef LOCKDEP_STATE
2617 };
2618
2619 static const char *state_rnames[] = {
2620 #define LOCKDEP_STATE(__STATE) \
2621 __stringify(__STATE)"-READ",
2622 #include "lockdep_states.h"
2623 #undef LOCKDEP_STATE
2624 };
2625
2626 static inline const char *state_name(enum lock_usage_bit bit)
2627 {
2628 if (bit & LOCK_USAGE_READ_MASK)
2629 return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2630 else
2631 return state_names[bit >> LOCK_USAGE_DIR_MASK];
2632 }
2633
2634
2635
2636
2637
2638
2639
2640
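/*
 * exclusive_bit() returns the counterpart a usage bit must never be
 * combined with across a dependency: same state, opposite direction
 * (USED_IN <-> ENABLED), with the read bit dropped.
 */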
2641 static int exclusive_bit(int new_bit)
2642 {
2643 int state = new_bit & LOCK_USAGE_STATE_MASK;
2644 int dir = new_bit & LOCK_USAGE_DIR_MASK;
2645
2646
2647
2648
2649 return state | (dir ^ LOCK_USAGE_DIR_MASK);
2650 }
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
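/*
 * invert_dir_mask() performs the same direction flip as exclusive_bit(), but
 * on a whole usage mask at once: every ENABLED_* bit maps to its USED_IN_*
 * counterpart and vice versa (the bit positions differ by
 * LOCK_USAGE_DIR_MASK, which is reused here as the shift count).
 */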
2666 static unsigned long invert_dir_mask(unsigned long mask)
2667 {
2668 unsigned long excl = 0;
2669
2670
2671 excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2672 excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2673
2674 return excl;
2675 }
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
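/*
 * exclusive_mask() computes the mask of usage bits that conflict with any
 * bit in @mask: the USED_IN/ENABLED direction is inverted, and for every
 * resulting bit both the read and write variants are included, since either
 * kind of usage on the other side can complete the inversion.
 */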
2705 static unsigned long exclusive_mask(unsigned long mask)
2706 {
2707 unsigned long excl = invert_dir_mask(mask);
2708
2709 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2710 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2711
2712 return excl;
2713 }
2714
2715
2716
2717
2718
2719
2720
2721
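/*
 * original_mask() is the counterpart of exclusive_mask(): given a mask of
 * usage bits, return every usage bit that could have been the conflicting
 * "original".  Both read and write variants are returned for each bit, as
 * either may have produced the conflict.
 */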
2722 static unsigned long original_mask(unsigned long mask)
2723 {
2724 unsigned long excl = invert_dir_mask(mask);
2725
2726
2727 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2728 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2729
2730 return excl;
2731 }
2732
2733
2734
2735
2736
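/*
 * Find a usage bit in @mask whose conflicting counterpart (write variant
 * first, then the read variant) is set in @excl_mask.  On success the
 * matching pair is returned through @bitp/@excl_bitp and 0 is returned;
 * -1 means no such pair exists.
 */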
2737 static int find_exclusive_match(unsigned long mask,
2738 unsigned long excl_mask,
2739 enum lock_usage_bit *bitp,
2740 enum lock_usage_bit *excl_bitp)
2741 {
2742 int bit, excl, excl_read;
2743
2744 for_each_set_bit(bit, &mask, LOCK_USED) {
2745
2746
2747
2748
2749
2750 excl = exclusive_bit(bit);
2751 excl_read = excl | LOCK_USAGE_READ_MASK;
2752 if (excl_mask & lock_flag(excl)) {
2753 *bitp = bit;
2754 *excl_bitp = excl;
2755 return 0;
2756 } else if (excl_mask & lock_flag(excl_read)) {
2757 *bitp = bit;
2758 *excl_bitp = excl_read;
2759 return 0;
2760 }
2761 }
2762 return -1;
2763 }
2764
2765
2766
2767
2768
2769
2770
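/*
 * Prove that the new prev -> next dependency would not connect an irq-safe
 * lock (one ever used in irq context, reachable backwards from @prev) to an
 * irq-unsafe lock (one ever taken with that irq enabled, reachable forwards
 * from @next).  Returns 1 if the new dependency is safe; prints a report and
 * returns 0 if it would create such a connection or if the graph walk fails.
 */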
2771 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2772 struct held_lock *next)
2773 {
2774 unsigned long usage_mask = 0, forward_mask, backward_mask;
2775 enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2776 struct lock_list *target_entry1;
2777 struct lock_list *target_entry;
2778 struct lock_list this, that;
2779 enum bfs_result ret;
2780
2781
2782
2783
2784
2785 bfs_init_rootb(&this, prev);
2786
2787 ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
2788 if (bfs_error(ret)) {
2789 print_bfs_bug(ret);
2790 return 0;
2791 }
2792
2793 usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2794 if (!usage_mask)
2795 return 1;
2796
2797
2798
2799
2800
2801 forward_mask = exclusive_mask(usage_mask);
2802
2803 bfs_init_root(&that, next);
2804
2805 ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2806 if (bfs_error(ret)) {
2807 print_bfs_bug(ret);
2808 return 0;
2809 }
2810 if (ret == BFS_RNOMATCH)
2811 return 1;
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828 backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
2829
2830 ret = find_usage_backwards(&this, backward_mask, &target_entry);
2831 if (bfs_error(ret)) {
2832 print_bfs_bug(ret);
2833 return 0;
2834 }
2835 if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2836 return 1;
2837
2838
2839
2840
2841
2842 ret = find_exclusive_match(target_entry->class->usage_mask,
2843 target_entry1->class->usage_mask,
2844 &backward_bit, &forward_bit);
2845 if (DEBUG_LOCKS_WARN_ON(ret == -1))
2846 return 1;
2847
2848 print_bad_irq_dependency(curr, &this, &that,
2849 target_entry, target_entry1,
2850 prev, next,
2851 backward_bit, forward_bit,
2852 state_name(backward_bit));
2853
2854 return 0;
2855 }
2856
2857 #else
2858
2859 static inline int check_irq_usage(struct task_struct *curr,
2860 struct held_lock *prev, struct held_lock *next)
2861 {
2862 return 1;
2863 }
2864
2865 static inline bool usage_skip(struct lock_list *entry, void *mask)
2866 {
2867 return false;
2868 }
2869
2870 #endif
2871
2872 #ifdef CONFIG_LOCKDEP_SMALL
2873
2874
2875
2876
2877
2878
2879
2880
2881 static noinline enum bfs_result
2882 check_redundant(struct held_lock *src, struct held_lock *target)
2883 {
2884 enum bfs_result ret;
2885 struct lock_list *target_entry;
2886 struct lock_list src_entry;
2887
2888 bfs_init_root(&src_entry, src);
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899 src_entry.only_xr = src->read == 0;
2900
2901 debug_atomic_inc(nr_redundant_checks);
2902
2903
2904
2905
2906
2907
2908 ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
2909
2910 if (ret == BFS_RMATCH)
2911 debug_atomic_inc(nr_redundant);
2912
2913 return ret;
2914 }
2915
2916 #else
2917
2918 static inline enum bfs_result
2919 check_redundant(struct held_lock *src, struct held_lock *target)
2920 {
2921 return BFS_RNOMATCH;
2922 }
2923
2924 #endif
2925
2926 static void inc_chains(int irq_context)
2927 {
2928 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2929 nr_hardirq_chains++;
2930 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2931 nr_softirq_chains++;
2932 else
2933 nr_process_chains++;
2934 }
2935
2936 static void dec_chains(int irq_context)
2937 {
2938 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2939 nr_hardirq_chains--;
2940 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2941 nr_softirq_chains--;
2942 else
2943 nr_process_chains--;
2944 }
2945
2946 static void
2947 print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
2948 {
2949 struct lock_class *next = hlock_class(nxt);
2950 struct lock_class *prev = hlock_class(prv);
2951
2952 printk(" Possible unsafe locking scenario:\n\n");
2953 printk(" CPU0\n");
2954 printk(" ----\n");
2955 printk(" lock(");
2956 __print_lock_name(prev);
2957 printk(KERN_CONT ");\n");
2958 printk(" lock(");
2959 __print_lock_name(next);
2960 printk(KERN_CONT ");\n");
2961 printk("\n *** DEADLOCK ***\n\n");
2962 printk(" May be due to missing lock nesting notation\n\n");
2963 }
2964
2965 static void
2966 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2967 struct held_lock *next)
2968 {
2969 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2970 return;
2971
2972 pr_warn("\n");
2973 pr_warn("============================================\n");
2974 pr_warn("WARNING: possible recursive locking detected\n");
2975 print_kernel_ident();
2976 pr_warn("--------------------------------------------\n");
2977 pr_warn("%s/%d is trying to acquire lock:\n",
2978 curr->comm, task_pid_nr(curr));
2979 print_lock(next);
2980 pr_warn("\nbut task is already holding lock:\n");
2981 print_lock(prev);
2982
2983 pr_warn("\nother info that might help us debug this:\n");
2984 print_deadlock_scenario(next, prev);
2985 lockdep_print_held_locks(curr);
2986
2987 pr_warn("\nstack backtrace:\n");
2988 dump_stack();
2989 }
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
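/*
 * Check whether we are already holding a lock of the same class as @next
 * (the dependency graph cannot detect this kind of recursion by itself).
 * Taking a recursive read lock (read == 2) while a read lock of the same
 * class is held is allowed.
 *
 * Returns 0 if a deadlock is detected, 1 if everything is OK, and 2 if the
 * same class is held but the recursion is covered by a held nest_lock.
 */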
3001 static int
3002 check_deadlock(struct task_struct *curr, struct held_lock *next)
3003 {
3004 struct held_lock *prev;
3005 struct held_lock *nest = NULL;
3006 int i;
3007
3008 for (i = 0; i < curr->lockdep_depth; i++) {
3009 prev = curr->held_locks + i;
3010
3011 if (prev->instance == next->nest_lock)
3012 nest = prev;
3013
3014 if (hlock_class(prev) != hlock_class(next))
3015 continue;
3016
3017
3018
3019
3020
3021 if ((next->read == 2) && prev->read)
3022 continue;
3023
3024
3025
3026
3027
3028 if (nest)
3029 return 2;
3030
3031 print_deadlock_bug(curr, prev, next);
3032 return 0;
3033 }
3034 return 1;
3035 }
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
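/*
 * Add a new dependency <prev> -> <next> after a chain-cache miss.  The new
 * link must not create a cycle (check_noncircular()), must not connect an
 * irq-safe lock to an irq-unsafe one (check_irq_usage()), and is skipped if
 * it is already present or already implied by the graph (check_redundant()).
 * If all checks pass, the forward and backward lock_list entries are added.
 * Returns 0 on failure, non-zero otherwise.
 */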
3059 static int
3060 check_prev_add(struct task_struct *curr, struct held_lock *prev,
3061 struct held_lock *next, u16 distance,
3062 struct lock_trace **const trace)
3063 {
3064 struct lock_list *entry;
3065 enum bfs_result ret;
3066
3067 if (!hlock_class(prev)->key || !hlock_class(next)->key) {
3068
3069
3070
3071
3072
3073
3074 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
3075 "Detected use-after-free of lock class %px/%s\n",
3076 hlock_class(prev),
3077 hlock_class(prev)->name);
3078 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
3079 "Detected use-after-free of lock class %px/%s\n",
3080 hlock_class(next),
3081 hlock_class(next)->name);
3082 return 2;
3083 }
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095 ret = check_noncircular(next, prev, trace);
3096 if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
3097 return 0;
3098
3099 if (!check_irq_usage(curr, prev, next))
3100 return 0;
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
3111 if (entry->class == hlock_class(next)) {
3112 if (distance == 1)
3113 entry->distance = 1;
3114 entry->dep |= calc_dep(prev, next);
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132 list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) {
3133 if (entry->class == hlock_class(prev)) {
3134 if (distance == 1)
3135 entry->distance = 1;
3136 entry->dep |= calc_depb(prev, next);
3137 return 1;
3138 }
3139 }
3140
3141
3142 return 0;
3143 }
3144 }
3145
3146
3147
3148
3149 ret = check_redundant(prev, next);
3150 if (bfs_error(ret))
3151 return 0;
3152 else if (ret == BFS_RMATCH)
3153 return 2;
3154
3155 if (!*trace) {
3156 *trace = save_trace();
3157 if (!*trace)
3158 return 0;
3159 }
3160
3161
3162
3163
3164
3165 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
3166 &hlock_class(prev)->locks_after, distance,
3167 calc_dep(prev, next), *trace);
3168
3169 if (!ret)
3170 return 0;
3171
3172 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
3173 &hlock_class(next)->locks_before, distance,
3174 calc_depb(prev, next), *trace);
3175 if (!ret)
3176 return 0;
3177
3178 return 2;
3179 }
3180
3181
3182
3183
3184
3185
3186
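/*
 * Add the dependency on @next for every relevant lock currently held by
 * @curr, walking the held-lock stack from the most recent entry downwards
 * within the same irq context.  The walk stops after the first non-trylock
 * entry: that entry already added its own direct dependencies, so @next is
 * connected to everything below it transitively.
 */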
3187 static int
3188 check_prevs_add(struct task_struct *curr, struct held_lock *next)
3189 {
3190 struct lock_trace *trace = NULL;
3191 int depth = curr->lockdep_depth;
3192 struct held_lock *hlock;
3193
3194
3195
3196
3197
3198
3199 if (!depth)
3200 goto out_bug;
3201
3202
3203
3204
3205 if (curr->held_locks[depth].irq_context !=
3206 curr->held_locks[depth-1].irq_context)
3207 goto out_bug;
3208
3209 for (;;) {
3210 u16 distance = curr->lockdep_depth - depth + 1;
3211 hlock = curr->held_locks + depth - 1;
3212
3213 if (hlock->check) {
3214 int ret = check_prev_add(curr, hlock, next, distance, &trace);
3215 if (!ret)
3216 return 0;
3217
3218
3219
3220
3221
3222
3223
3224 if (!hlock->trylock)
3225 break;
3226 }
3227
3228 depth--;
3229
3230
3231
3232 if (!depth)
3233 break;
3234
3235
3236
3237 if (curr->held_locks[depth].irq_context !=
3238 curr->held_locks[depth-1].irq_context)
3239 break;
3240 }
3241 return 1;
3242 out_bug:
3243 if (!debug_locks_off_graph_unlock())
3244 return 0;
3245
3246
3247
3248
3249
3250
3251 WARN_ON(1);
3252
3253 return 0;
3254 }
3255
3256 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
3257 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
3258 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
3259 unsigned long nr_zapped_lock_chains;
3260 unsigned int nr_free_chain_hlocks;
3261 unsigned int nr_lost_chain_hlocks;
3262 unsigned int nr_large_chain_blocks;
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
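/*
 * chain_hlocks[] doubles as a simple allocator for lock chain entries.
 * Free blocks are kept on per-size bucket freelists (chain_block_buckets[]);
 * bucket 0 holds the blocks larger than MAX_CHAIN_BUCKETS, kept sorted with
 * the largest first.  A free block encodes the offset of the next free block
 * in its first two u16 entries (tagged with CHAIN_BLK_FLAG) and, for
 * bucket-0 blocks only, its own size in the following two entries.
 */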
3281 #define MAX_CHAIN_BUCKETS 16
3282 #define CHAIN_BLK_FLAG (1U << 15)
3283 #define CHAIN_BLK_LIST_END 0xFFFFU
3284
3285 static int chain_block_buckets[MAX_CHAIN_BUCKETS];
3286
3287 static inline int size_to_bucket(int size)
3288 {
3289 if (size > MAX_CHAIN_BUCKETS)
3290 return 0;
3291
3292 return size - 1;
3293 }
3294
3295
3296
3297
3298 #define for_each_chain_block(bucket, prev, curr) \
3299 for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3300 (curr) >= 0; \
3301 (prev) = (curr), (curr) = chain_block_next(curr))
3302
3303
3304
3305
3306 static inline int chain_block_next(int offset)
3307 {
3308 int next = chain_hlocks[offset];
3309
3310 WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
3311
3312 if (next == CHAIN_BLK_LIST_END)
3313 return -1;
3314
3315 next &= ~CHAIN_BLK_FLAG;
3316 next <<= 16;
3317 next |= chain_hlocks[offset + 1];
3318
3319 return next;
3320 }
3321
3322
3323
3324
3325 static inline int chain_block_size(int offset)
3326 {
3327 return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
3328 }
3329
3330 static inline void init_chain_block(int offset, int next, int bucket, int size)
3331 {
3332 chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
3333 chain_hlocks[offset + 1] = (u16)next;
3334
3335 if (size && !bucket) {
3336 chain_hlocks[offset + 2] = size >> 16;
3337 chain_hlocks[offset + 3] = (u16)size;
3338 }
3339 }
3340
3341 static inline void add_chain_block(int offset, int size)
3342 {
3343 int bucket = size_to_bucket(size);
3344 int next = chain_block_buckets[bucket];
3345 int prev, curr;
3346
3347 if (unlikely(size < 2)) {
3348
3349
3350
3351
3352
3353
3354
3355 if (size)
3356 nr_lost_chain_hlocks++;
3357 return;
3358 }
3359
3360 nr_free_chain_hlocks += size;
3361 if (!bucket) {
3362 nr_large_chain_blocks++;
3363
3364
3365
3366
3367 for_each_chain_block(0, prev, curr) {
3368 if (size >= chain_block_size(curr))
3369 break;
3370 }
3371 init_chain_block(offset, curr, 0, size);
3372 if (prev < 0)
3373 chain_block_buckets[0] = offset;
3374 else
3375 init_chain_block(prev, offset, 0, 0);
3376 return;
3377 }
3378
3379
3380
3381 init_chain_block(offset, next, bucket, size);
3382 chain_block_buckets[bucket] = offset;
3383 }
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396 static inline void del_chain_block(int bucket, int size, int next)
3397 {
3398 nr_free_chain_hlocks -= size;
3399 chain_block_buckets[bucket] = next;
3400
3401 if (!bucket)
3402 nr_large_chain_blocks--;
3403 }
3404
3405 static void init_chain_block_buckets(void)
3406 {
3407 int i;
3408
3409 for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
3410 chain_block_buckets[i] = -1;
3411
3412 add_chain_block(0, ARRAY_SIZE(chain_hlocks));
3413 }
3414
3415
3416
3417
3418
3419
3420
3421 static int alloc_chain_hlocks(int req)
3422 {
3423 int bucket, curr, size;
3424
3425
3426
3427
3428
3429 BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
3430
3431 init_data_structures_once();
3432
3433 if (nr_free_chain_hlocks < req)
3434 return -1;
3435
3436
3437
3438
3439
3440 req = max(req, 2);
3441 bucket = size_to_bucket(req);
3442 curr = chain_block_buckets[bucket];
3443
3444 if (bucket) {
3445 if (curr >= 0) {
3446 del_chain_block(bucket, req, chain_block_next(curr));
3447 return curr;
3448 }
3449
3450 curr = chain_block_buckets[0];
3451 }
3452
3453
3454
3455
3456
3457 if (curr >= 0) {
3458 size = chain_block_size(curr);
3459 if (likely(size >= req)) {
3460 del_chain_block(0, size, chain_block_next(curr));
3461 add_chain_block(curr + req, size - req);
3462 return curr;
3463 }
3464 }
3465
3466
3467
3468
3469 for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
3470 bucket = size_to_bucket(size);
3471 curr = chain_block_buckets[bucket];
3472 if (curr < 0)
3473 continue;
3474
3475 del_chain_block(bucket, size, chain_block_next(curr));
3476 add_chain_block(curr + req, size - req);
3477 return curr;
3478 }
3479
3480 return -1;
3481 }
3482
3483 static inline void free_chain_hlocks(int base, int size)
3484 {
3485 add_chain_block(base, max(size, 2));
3486 }
3487
3488 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
3489 {
3490 u16 chain_hlock = chain_hlocks[chain->base + i];
3491 unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
3492
3493 return lock_classes + class_idx;
3494 }
3495
3496
3497
3498
3499 static inline int get_first_held_lock(struct task_struct *curr,
3500 struct held_lock *hlock)
3501 {
3502 int i;
3503 struct held_lock *hlock_curr;
3504
3505 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
3506 hlock_curr = curr->held_locks + i;
3507 if (hlock_curr->irq_context != hlock->irq_context)
3508 break;
3509
3510 }
3511
3512 return ++i;
3513 }
3514
3515 #ifdef CONFIG_DEBUG_LOCKDEP
3516
3517
3518
3519 static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key)
3520 {
3521 u64 new_chain_key = iterate_chain_key(chain_key, hlock_id);
3522
3523 printk(" hlock_id:%d -> chain_key:%016Lx",
3524 (unsigned int)hlock_id,
3525 (unsigned long long)new_chain_key);
3526 return new_chain_key;
3527 }
3528
3529 static void
3530 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
3531 {
3532 struct held_lock *hlock;
3533 u64 chain_key = INITIAL_CHAIN_KEY;
3534 int depth = curr->lockdep_depth;
3535 int i = get_first_held_lock(curr, hlock_next);
3536
3537 printk("depth: %u (irq_context %u)\n", depth - i + 1,
3538 hlock_next->irq_context);
3539 for (; i < depth; i++) {
3540 hlock = curr->held_locks + i;
3541 chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
3542
3543 print_lock(hlock);
3544 }
3545
3546 print_chain_key_iteration(hlock_id(hlock_next), chain_key);
3547 print_lock(hlock_next);
3548 }
3549
3550 static void print_chain_keys_chain(struct lock_chain *chain)
3551 {
3552 int i;
3553 u64 chain_key = INITIAL_CHAIN_KEY;
3554 u16 hlock_id;
3555
3556 printk("depth: %u\n", chain->depth);
3557 for (i = 0; i < chain->depth; i++) {
3558 hlock_id = chain_hlocks[chain->base + i];
3559 chain_key = print_chain_key_iteration(hlock_id, chain_key);
3560
3561 print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
3562 printk("\n");
3563 }
3564 }
3565
3566 static void print_collision(struct task_struct *curr,
3567 struct held_lock *hlock_next,
3568 struct lock_chain *chain)
3569 {
3570 pr_warn("\n");
3571 pr_warn("============================\n");
3572 pr_warn("WARNING: chain_key collision\n");
3573 print_kernel_ident();
3574 pr_warn("----------------------------\n");
3575 pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
3576 pr_warn("Hash chain already cached but the contents don't match!\n");
3577
3578 pr_warn("Held locks:");
3579 print_chain_keys_held_locks(curr, hlock_next);
3580
3581 pr_warn("Locks in cached chain:");
3582 print_chain_keys_chain(chain);
3583
3584 pr_warn("\nstack backtrace:\n");
3585 dump_stack();
3586 }
3587 #endif
3588
3589
3590
3591
3592
3593
3594
3595 static int check_no_collision(struct task_struct *curr,
3596 struct held_lock *hlock,
3597 struct lock_chain *chain)
3598 {
3599 #ifdef CONFIG_DEBUG_LOCKDEP
3600 int i, j, id;
3601
3602 i = get_first_held_lock(curr, hlock);
3603
3604 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
3605 print_collision(curr, hlock, chain);
3606 return 0;
3607 }
3608
3609 for (j = 0; j < chain->depth - 1; j++, i++) {
3610 id = hlock_id(&curr->held_locks[i]);
3611
3612 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
3613 print_collision(curr, hlock, chain);
3614 return 0;
3615 }
3616 }
3617 #endif
3618 return 1;
3619 }
3620
3621
3622
3623
3624
3625 long lockdep_next_lockchain(long i)
3626 {
3627 i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3628 return i < ARRAY_SIZE(lock_chains) ? i : -2;
3629 }
3630
3631 unsigned long lock_chain_count(void)
3632 {
3633 return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3634 }
3635
3636
3637 static struct lock_chain *alloc_lock_chain(void)
3638 {
3639 int idx = find_first_zero_bit(lock_chains_in_use,
3640 ARRAY_SIZE(lock_chains));
3641
3642 if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3643 return NULL;
3644 __set_bit(idx, lock_chains_in_use);
3645 return lock_chains + idx;
3646 }
3647
3648
3649
3650
3651
3652
3653
3654
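/*
 * Add a new dependency chain with @chain_key into the chain hash table and
 * record its held-lock ids in chain_hlocks[].  Must be called with the graph
 * lock held.  Returns 0 on failure (out of chains or chain entries, graph
 * lock released), 1 on success with the graph lock still held.
 */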
3655 static inline int add_chain_cache(struct task_struct *curr,
3656 struct held_lock *hlock,
3657 u64 chain_key)
3658 {
3659 struct hlist_head *hash_head = chainhashentry(chain_key);
3660 struct lock_chain *chain;
3661 int i, j;
3662
3663
3664
3665
3666
3667
3668 if (lockdep_assert_locked())
3669 return 0;
3670
3671 chain = alloc_lock_chain();
3672 if (!chain) {
3673 if (!debug_locks_off_graph_unlock())
3674 return 0;
3675
3676 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
3677 dump_stack();
3678 return 0;
3679 }
3680 chain->chain_key = chain_key;
3681 chain->irq_context = hlock->irq_context;
3682 i = get_first_held_lock(curr, hlock);
3683 chain->depth = curr->lockdep_depth + 1 - i;
3684
3685 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
3686 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
3687 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
3688
3689 j = alloc_chain_hlocks(chain->depth);
3690 if (j < 0) {
3691 if (!debug_locks_off_graph_unlock())
3692 return 0;
3693
3694 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
3695 dump_stack();
3696 return 0;
3697 }
3698
3699 chain->base = j;
3700 for (j = 0; j < chain->depth - 1; j++, i++) {
3701 int lock_id = hlock_id(curr->held_locks + i);
3702
3703 chain_hlocks[chain->base + j] = lock_id;
3704 }
3705 chain_hlocks[chain->base + j] = hlock_id(hlock);
3706 hlist_add_head_rcu(&chain->entry, hash_head);
3707 debug_atomic_inc(chain_lookup_misses);
3708 inc_chains(chain->irq_context);
3709
3710 return 1;
3711 }
3712
3713
3714
3715
3716
3717 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
3718 {
3719 struct hlist_head *hash_head = chainhashentry(chain_key);
3720 struct lock_chain *chain;
3721
3722 hlist_for_each_entry_rcu(chain, hash_head, entry) {
3723 if (READ_ONCE(chain->chain_key) == chain_key) {
3724 debug_atomic_inc(chain_lookup_hits);
3725 return chain;
3726 }
3727 }
3728 return NULL;
3729 }
3730
3731
3732
3733
3734
3735
3736
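/*
 * Look up the chain for @chain_key.  If it is already cached, return 0: the
 * chain has been validated before and the graph lock is not taken.
 * Otherwise take the graph lock, re-check, add the new chain and return 1
 * with the graph lock still held so the caller can validate it.
 */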
3737 static inline int lookup_chain_cache_add(struct task_struct *curr,
3738 struct held_lock *hlock,
3739 u64 chain_key)
3740 {
3741 struct lock_class *class = hlock_class(hlock);
3742 struct lock_chain *chain = lookup_chain_cache(chain_key);
3743
3744 if (chain) {
3745 cache_hit:
3746 if (!check_no_collision(curr, hlock, chain))
3747 return 0;
3748
3749 if (very_verbose(class)) {
3750 printk("\nhash chain already cached, key: "
3751 "%016Lx tail class: [%px] %s\n",
3752 (unsigned long long)chain_key,
3753 class->key, class->name);
3754 }
3755
3756 return 0;
3757 }
3758
3759 if (very_verbose(class)) {
3760 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3761 (unsigned long long)chain_key, class->key, class->name);
3762 }
3763
3764 if (!graph_lock())
3765 return 0;
3766
3767
3768
3769
3770 chain = lookup_chain_cache(chain_key);
3771 if (chain) {
3772 graph_unlock();
3773 goto cache_hit;
3774 }
3775
3776 if (!add_chain_cache(curr, hlock, chain_key))
3777 return 0;
3778
3779 return 1;
3780 }
3781
3782 static int validate_chain(struct task_struct *curr,
3783 struct held_lock *hlock,
3784 int chain_head, u64 chain_key)
3785 {
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796 if (!hlock->trylock && hlock->check &&
3797 lookup_chain_cache_add(curr, hlock, chain_key)) {
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816 int ret = check_deadlock(curr, hlock);
3817
3818 if (!ret)
3819 return 0;
3820
3821
3822
3823
3824
3825
3826
3827
3828 if (!chain_head && ret != 2) {
3829 if (!check_prevs_add(curr, hlock))
3830 return 0;
3831 }
3832
3833 graph_unlock();
3834 } else {
3835
3836 if (unlikely(!debug_locks))
3837 return 0;
3838 }
3839
3840 return 1;
3841 }
3842 #else
3843 static inline int validate_chain(struct task_struct *curr,
3844 struct held_lock *hlock,
3845 int chain_head, u64 chain_key)
3846 {
3847 return 1;
3848 }
3849
3850 static void init_chain_block_buckets(void) { }
3851 #endif
3852
3853
3854
3855
3856
3857 static void check_chain_key(struct task_struct *curr)
3858 {
3859 #ifdef CONFIG_DEBUG_LOCKDEP
3860 struct held_lock *hlock, *prev_hlock = NULL;
3861 unsigned int i;
3862 u64 chain_key = INITIAL_CHAIN_KEY;
3863
3864 for (i = 0; i < curr->lockdep_depth; i++) {
3865 hlock = curr->held_locks + i;
3866 if (chain_key != hlock->prev_chain_key) {
3867 debug_locks_off();
3868
3869
3870
3871
3872 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3873 curr->lockdep_depth, i,
3874 (unsigned long long)chain_key,
3875 (unsigned long long)hlock->prev_chain_key);
3876 return;
3877 }
3878
3879
3880
3881
3882
3883 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3884 return;
3885
3886 if (prev_hlock && (prev_hlock->irq_context !=
3887 hlock->irq_context))
3888 chain_key = INITIAL_CHAIN_KEY;
3889 chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
3890 prev_hlock = hlock;
3891 }
3892 if (chain_key != curr->curr_chain_key) {
3893 debug_locks_off();
3894
3895
3896
3897
3898 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3899 curr->lockdep_depth, i,
3900 (unsigned long long)chain_key,
3901 (unsigned long long)curr->curr_chain_key);
3902 }
3903 #endif
3904 }
3905
3906 #ifdef CONFIG_PROVE_LOCKING
3907 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3908 enum lock_usage_bit new_bit);
3909
3910 static void print_usage_bug_scenario(struct held_lock *lock)
3911 {
3912 struct lock_class *class = hlock_class(lock);
3913
3914 printk(" Possible unsafe locking scenario:\n\n");
3915 printk(" CPU0\n");
3916 printk(" ----\n");
3917 printk(" lock(");
3918 __print_lock_name(class);
3919 printk(KERN_CONT ");\n");
3920 printk(" <Interrupt>\n");
3921 printk(" lock(");
3922 __print_lock_name(class);
3923 printk(KERN_CONT ");\n");
3924 printk("\n *** DEADLOCK ***\n\n");
3925 }
3926
3927 static void
3928 print_usage_bug(struct task_struct *curr, struct held_lock *this,
3929 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3930 {
3931 if (!debug_locks_off() || debug_locks_silent)
3932 return;
3933
3934 pr_warn("\n");
3935 pr_warn("================================\n");
3936 pr_warn("WARNING: inconsistent lock state\n");
3937 print_kernel_ident();
3938 pr_warn("--------------------------------\n");
3939
3940 pr_warn("inconsistent {%s} -> {%s} usage.\n",
3941 usage_str[prev_bit], usage_str[new_bit]);
3942
3943 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3944 curr->comm, task_pid_nr(curr),
3945 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3946 lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3947 lockdep_hardirqs_enabled(),
3948 lockdep_softirqs_enabled(curr));
3949 print_lock(this);
3950
3951 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3952 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3953
3954 print_irqtrace_events(curr);
3955 pr_warn("\nother info that might help us debug this:\n");
3956 print_usage_bug_scenario(this);
3957
3958 lockdep_print_held_locks(curr);
3959
3960 pr_warn("\nstack backtrace:\n");
3961 dump_stack();
3962 }
3963
3964
3965
3966
3967 static inline int
3968 valid_state(struct task_struct *curr, struct held_lock *this,
3969 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
3970 {
3971 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3972 graph_unlock();
3973 print_usage_bug(curr, this, bad_bit, new_bit);
3974 return 0;
3975 }
3976 return 1;
3977 }
3978
3979
3980
3981
3982
3983 static void
3984 print_irq_inversion_bug(struct task_struct *curr,
3985 struct lock_list *root, struct lock_list *other,
3986 struct held_lock *this, int forwards,
3987 const char *irqclass)
3988 {
3989 struct lock_list *entry = other;
3990 struct lock_list *middle = NULL;
3991 int depth;
3992
3993 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3994 return;
3995
3996 pr_warn("\n");
3997 pr_warn("========================================================\n");
3998 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3999 print_kernel_ident();
4000 pr_warn("--------------------------------------------------------\n");
4001 pr_warn("%s/%d just changed the state of lock:\n",
4002 curr->comm, task_pid_nr(curr));
4003 print_lock(this);
4004 if (forwards)
4005 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
4006 else
4007 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
4008 print_lock_name(other->class);
4009 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
4010
4011 pr_warn("\nother info that might help us debug this:\n");
4012
4013
4014 depth = get_lock_depth(other);
4015 do {
4016 if (depth == 0 && (entry != root)) {
4017 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
4018 break;
4019 }
4020 middle = entry;
4021 entry = get_lock_parent(entry);
4022 depth--;
4023 } while (entry && entry != root && (depth >= 0));
4024 if (forwards)
4025 print_irq_lock_scenario(root, other,
4026 middle ? middle->class : root->class, other->class);
4027 else
4028 print_irq_lock_scenario(other, root,
4029 middle ? middle->class : other->class, root->class);
4030
4031 lockdep_print_held_locks(curr);
4032
4033 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
4034 root->trace = save_trace();
4035 if (!root->trace)
4036 return;
4037 print_shortest_lock_dependencies(other, root);
4038
4039 pr_warn("\nstack backtrace:\n");
4040 dump_stack();
4041 }
4042
4043
4044
4045
4046
4047 static int
4048 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
4049 enum lock_usage_bit bit)
4050 {
4051 enum bfs_result ret;
4052 struct lock_list root;
4053 struct lock_list *target_entry;
4054 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4055 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4056
4057 bfs_init_root(&root, this);
4058 ret = find_usage_forwards(&root, usage_mask, &target_entry);
4059 if (bfs_error(ret)) {
4060 print_bfs_bug(ret);
4061 return 0;
4062 }
4063 if (ret == BFS_RNOMATCH)
4064 return 1;
4065
4066
4067 if (target_entry->class->usage_mask & lock_flag(bit)) {
4068 print_irq_inversion_bug(curr, &root, target_entry,
4069 this, 1, state_name(bit));
4070 } else {
4071 print_irq_inversion_bug(curr, &root, target_entry,
4072 this, 1, state_name(read_bit));
4073 }
4074
4075 return 0;
4076 }
4077
4078
4079
4080
4081
4082 static int
4083 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
4084 enum lock_usage_bit bit)
4085 {
4086 enum bfs_result ret;
4087 struct lock_list root;
4088 struct lock_list *target_entry;
4089 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
4090 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
4091
4092 bfs_init_rootb(&root, this);
4093 ret = find_usage_backwards(&root, usage_mask, &target_entry);
4094 if (bfs_error(ret)) {
4095 print_bfs_bug(ret);
4096 return 0;
4097 }
4098 if (ret == BFS_RNOMATCH)
4099 return 1;
4100
4101
4102 if (target_entry->class->usage_mask & lock_flag(bit)) {
4103 print_irq_inversion_bug(curr, &root, target_entry,
4104 this, 0, state_name(bit));
4105 } else {
4106 print_irq_inversion_bug(curr, &root, target_entry,
4107 this, 0, state_name(read_bit));
4108 }
4109
4110 return 0;
4111 }
4112
4113 void print_irqtrace_events(struct task_struct *curr)
4114 {
4115 const struct irqtrace_events *trace = &curr->irqtrace;
4116
4117 printk("irq event stamp: %u\n", trace->irq_events);
4118 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
4119 trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
4120 (void *)trace->hardirq_enable_ip);
4121 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
4122 trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
4123 (void *)trace->hardirq_disable_ip);
4124 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
4125 trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
4126 (void *)trace->softirq_enable_ip);
4127 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
4128 trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
4129 (void *)trace->softirq_disable_ip);
4130 }
4131
4132 static int HARDIRQ_verbose(struct lock_class *class)
4133 {
4134 #if HARDIRQ_VERBOSE
4135 return class_filter(class);
4136 #endif
4137 return 0;
4138 }
4139
4140 static int SOFTIRQ_verbose(struct lock_class *class)
4141 {
4142 #if SOFTIRQ_VERBOSE
4143 return class_filter(class);
4144 #endif
4145 return 0;
4146 }
4147
4148 static int (*state_verbose_f[])(struct lock_class *class) = {
4149 #define LOCKDEP_STATE(__STATE) \
4150 __STATE##_verbose,
4151 #include "lockdep_states.h"
4152 #undef LOCKDEP_STATE
4153 };
4154
4155 static inline int state_verbose(enum lock_usage_bit bit,
4156 struct lock_class *class)
4157 {
4158 return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
4159 }
4160
4161 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
4162 enum lock_usage_bit bit, const char *name);
4163
4164 static int
4165 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
4166 enum lock_usage_bit new_bit)
4167 {
4168 int excl_bit = exclusive_bit(new_bit);
4169 int read = new_bit & LOCK_USAGE_READ_MASK;
4170 int dir = new_bit & LOCK_USAGE_DIR_MASK;
4171
4172
4173
4174
4175
4176 if (!valid_state(curr, this, new_bit, excl_bit))
4177 return 0;
4178
4179
4180
4181
4182 if (!read && !valid_state(curr, this, new_bit,
4183 excl_bit + LOCK_USAGE_READ_MASK))
4184 return 0;
4185
4186
4187
4188
4189
4190
4191 if (dir) {
4192
4193
4194
4195
4196 if (!check_usage_backwards(curr, this, excl_bit))
4197 return 0;
4198 } else {
4199
4200
4201
4202
4203 if (!check_usage_forwards(curr, this, excl_bit))
4204 return 0;
4205 }
4206
4207 if (state_verbose(new_bit, hlock_class(this)))
4208 return 2;
4209
4210 return 1;
4211 }
4212
4213
4214
4215
4216 static int
4217 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
4218 {
4219 struct held_lock *hlock;
4220 int i;
4221
4222 for (i = 0; i < curr->lockdep_depth; i++) {
4223 enum lock_usage_bit hlock_bit = base_bit;
4224 hlock = curr->held_locks + i;
4225
4226 if (hlock->read)
4227 hlock_bit += LOCK_USAGE_READ_MASK;
4228
4229 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
4230
4231 if (!hlock->check)
4232 continue;
4233
4234 if (!mark_lock(curr, hlock, hlock_bit))
4235 return 0;
4236 }
4237
4238 return 1;
4239 }
4240
4241
4242
4243
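/*
 * Hardirqs are about to be enabled: mark all currently held locks as
 * hardirq-enabled, and, if softirqs are enabled as well, also as
 * softirq-enabled (disabled hardirqs prevented that bit from being set
 * earlier).
 */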
4244 static void __trace_hardirqs_on_caller(void)
4245 {
4246 struct task_struct *curr = current;
4247
4248
4249
4250
4251
4252 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
4253 return;
4254
4255
4256
4257
4258
4259 if (curr->softirqs_enabled)
4260 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
4261 }
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271 void lockdep_hardirqs_on_prepare(void)
4272 {
4273 if (unlikely(!debug_locks))
4274 return;
4275
4276
4277
4278
4279 if (unlikely(in_nmi()))
4280 return;
4281
4282 if (unlikely(this_cpu_read(lockdep_recursion)))
4283 return;
4284
4285 if (unlikely(lockdep_hardirqs_enabled())) {
4286
4287
4288
4289
4290
4291 __debug_atomic_inc(redundant_hardirqs_on);
4292 return;
4293 }
4294
4295
4296
4297
4298
4299
4300 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4301 return;
4302
4303
4304
4305
4306 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
4307 return;
4308
4309
4310
4311
4312
4313 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
4314 return;
4315
4316 current->hardirq_chain_key = current->curr_chain_key;
4317
4318 lockdep_recursion_inc();
4319 __trace_hardirqs_on_caller();
4320 lockdep_recursion_finish();
4321 }
4322 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
4323
4324 void noinstr lockdep_hardirqs_on(unsigned long ip)
4325 {
4326 struct irqtrace_events *trace = &current->irqtrace;
4327
4328 if (unlikely(!debug_locks))
4329 return;
4330
4331
4332
4333
4334
4335
4336
4337
4338 if (unlikely(in_nmi())) {
4339 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4340 return;
4341
4342
4343
4344
4345
4346
4347
4348 goto skip_checks;
4349 }
4350
4351 if (unlikely(this_cpu_read(lockdep_recursion)))
4352 return;
4353
4354 if (lockdep_hardirqs_enabled()) {
4355
4356
4357
4358
4359
4360 __debug_atomic_inc(redundant_hardirqs_on);
4361 return;
4362 }
4363
4364
4365
4366
4367
4368
4369 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4370 return;
4371
4372
4373
4374
4375
4376 DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
4377 current->curr_chain_key);
4378
4379 skip_checks:
4380
4381 __this_cpu_write(hardirqs_enabled, 1);
4382 trace->hardirq_enable_ip = ip;
4383 trace->hardirq_enable_event = ++trace->irq_events;
4384 debug_atomic_inc(hardirqs_on_events);
4385 }
4386 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
4387
4388
4389
4390
4391 void noinstr lockdep_hardirqs_off(unsigned long ip)
4392 {
4393 if (unlikely(!debug_locks))
4394 return;
4395
4396
4397
4398
4399
4400
4401 if (in_nmi()) {
4402 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4403 return;
4404 } else if (__this_cpu_read(lockdep_recursion))
4405 return;
4406
4407
4408
4409
4410
4411 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4412 return;
4413
4414 if (lockdep_hardirqs_enabled()) {
4415 struct irqtrace_events *trace = &current->irqtrace;
4416
4417
4418
4419
4420 __this_cpu_write(hardirqs_enabled, 0);
4421 trace->hardirq_disable_ip = ip;
4422 trace->hardirq_disable_event = ++trace->irq_events;
4423 debug_atomic_inc(hardirqs_off_events);
4424 } else {
4425 debug_atomic_inc(redundant_hardirqs_off);
4426 }
4427 }
4428 EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
4429
4430
4431
4432
4433 void lockdep_softirqs_on(unsigned long ip)
4434 {
4435 struct irqtrace_events *trace = &current->irqtrace;
4436
4437 if (unlikely(!lockdep_enabled()))
4438 return;
4439
4440
4441
4442
4443
4444 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4445 return;
4446
4447 if (current->softirqs_enabled) {
4448 debug_atomic_inc(redundant_softirqs_on);
4449 return;
4450 }
4451
4452 lockdep_recursion_inc();
4453
4454
4455
4456 current->softirqs_enabled = 1;
4457 trace->softirq_enable_ip = ip;
4458 trace->softirq_enable_event = ++trace->irq_events;
4459 debug_atomic_inc(softirqs_on_events);
4460
4461
4462
4463
4464
4465 if (lockdep_hardirqs_enabled())
4466 mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
4467 lockdep_recursion_finish();
4468 }
4469
4470
4471
4472
4473 void lockdep_softirqs_off(unsigned long ip)
4474 {
4475 if (unlikely(!lockdep_enabled()))
4476 return;
4477
4478
4479
4480
4481 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4482 return;
4483
4484 if (current->softirqs_enabled) {
4485 struct irqtrace_events *trace = &current->irqtrace;
4486
4487
4488
4489
4490 current->softirqs_enabled = 0;
4491 trace->softirq_disable_ip = ip;
4492 trace->softirq_disable_event = ++trace->irq_events;
4493 debug_atomic_inc(softirqs_off_events);
4494
4495
4496
4497 DEBUG_LOCKS_WARN_ON(!softirq_count());
4498 } else
4499 debug_atomic_inc(redundant_softirqs_off);
4500 }
4501
4502 static int
4503 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4504 {
4505 if (!check)
4506 goto lock_used;
4507
4508
4509
4510
4511
4512 if (!hlock->trylock) {
4513 if (hlock->read) {
4514 if (lockdep_hardirq_context())
4515 if (!mark_lock(curr, hlock,
4516 LOCK_USED_IN_HARDIRQ_READ))
4517 return 0;
4518 if (curr->softirq_context)
4519 if (!mark_lock(curr, hlock,
4520 LOCK_USED_IN_SOFTIRQ_READ))
4521 return 0;
4522 } else {
4523 if (lockdep_hardirq_context())
4524 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
4525 return 0;
4526 if (curr->softirq_context)
4527 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
4528 return 0;
4529 }
4530 }
4531 if (!hlock->hardirqs_off) {
4532 if (hlock->read) {
4533 if (!mark_lock(curr, hlock,
4534 LOCK_ENABLED_HARDIRQ_READ))
4535 return 0;
4536 if (curr->softirqs_enabled)
4537 if (!mark_lock(curr, hlock,
4538 LOCK_ENABLED_SOFTIRQ_READ))
4539 return 0;
4540 } else {
4541 if (!mark_lock(curr, hlock,
4542 LOCK_ENABLED_HARDIRQ))
4543 return 0;
4544 if (curr->softirqs_enabled)
4545 if (!mark_lock(curr, hlock,
4546 LOCK_ENABLED_SOFTIRQ))
4547 return 0;
4548 }
4549 }
4550
4551 lock_used:
4552
4553 if (!mark_lock(curr, hlock, LOCK_USED))
4554 return 0;
4555
4556 return 1;
4557 }
4558
4559 static inline unsigned int task_irq_context(struct task_struct *task)
4560 {
4561 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
4562 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
4563 }
4564
4565 static int separate_irq_context(struct task_struct *curr,
4566 struct held_lock *hlock)
4567 {
4568 unsigned int depth = curr->lockdep_depth;
4569
4570
4571
4572
4573 if (depth) {
4574 struct held_lock *prev_hlock;
4575
4576 prev_hlock = curr->held_locks + depth-1;
4577
4578
4579
4580
4581
4582 if (prev_hlock->irq_context != hlock->irq_context)
4583 return 1;
4584 }
4585 return 0;
4586 }
4587
4588
4589
4590
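/*
 * Mark a lock with a usage bit and validate the resulting state against the
 * usage already recorded for the class.  Returns 0 on failure, 1 on success;
 * 2 is returned for verbose classes after extra debug output has been
 * printed.
 */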
4591 static int mark_lock(struct task_struct *curr, struct held_lock *this,
4592 enum lock_usage_bit new_bit)
4593 {
4594 unsigned int new_mask, ret = 1;
4595
4596 if (new_bit >= LOCK_USAGE_STATES) {
4597 DEBUG_LOCKS_WARN_ON(1);
4598 return 0;
4599 }
4600
4601 if (new_bit == LOCK_USED && this->read)
4602 new_bit = LOCK_USED_READ;
4603
4604 new_mask = 1 << new_bit;
4605
4606
4607
4608
4609
4610 if (likely(hlock_class(this)->usage_mask & new_mask))
4611 return 1;
4612
4613 if (!graph_lock())
4614 return 0;
4615
4616
4617
4618 if (unlikely(hlock_class(this)->usage_mask & new_mask))
4619 goto unlock;
4620
4621 if (!hlock_class(this)->usage_mask)
4622 debug_atomic_dec(nr_unused_locks);
4623
4624 hlock_class(this)->usage_mask |= new_mask;
4625
4626 if (new_bit < LOCK_TRACE_STATES) {
4627 if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
4628 return 0;
4629 }
4630
4631 if (new_bit < LOCK_USED) {
4632 ret = mark_lock_irq(curr, this, new_bit);
4633 if (!ret)
4634 return 0;
4635 }
4636
4637 unlock:
4638 graph_unlock();
4639
4640
4641
4642
4643 if (ret == 2) {
4644 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
4645 print_lock(this);
4646 print_irqtrace_events(curr);
4647 dump_stack();
4648 }
4649
4650 return ret;
4651 }
4652
4653 static inline short task_wait_context(struct task_struct *curr)
4654 {
4655
4656
4657
4658
4659 if (lockdep_hardirq_context()) {
4660
4661
4662
4663 if (curr->hardirq_threaded || curr->irq_config)
4664 return LD_WAIT_CONFIG;
4665
4666 return LD_WAIT_SPIN;
4667 } else if (curr->softirq_context) {
4668
4669
4670
4671 return LD_WAIT_CONFIG;
4672 }
4673
4674 return LD_WAIT_MAX;
4675 }
4676
4677 static int
4678 print_lock_invalid_wait_context(struct task_struct *curr,
4679 struct held_lock *hlock)
4680 {
4681 short curr_inner;
4682
4683 if (!debug_locks_off())
4684 return 0;
4685 if (debug_locks_silent)
4686 return 0;
4687
4688 pr_warn("\n");
4689 pr_warn("=============================\n");
4690 pr_warn("[ BUG: Invalid wait context ]\n");
4691 print_kernel_ident();
4692 pr_warn("-----------------------------\n");
4693
4694 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4695 print_lock(hlock);
4696
4697 pr_warn("other info that might help us debug this:\n");
4698
4699 curr_inner = task_wait_context(curr);
4700 pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
4701
4702 lockdep_print_held_locks(curr);
4703
4704 pr_warn("stack backtrace:\n");
4705 dump_stack();
4706
4707 return 0;
4708 }
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
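/*
 * Verify the wait-type context: the outer wait type of @next must not exceed
 * the strictest (smallest) inner wait type among the locks already held in
 * the current irq context, so that e.g. a mutex is never taken under a
 * raw_spinlock.  Trylocks and unannotated classes are exempt.
 */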
4725 static int check_wait_context(struct task_struct *curr, struct held_lock *next)
4726 {
4727 u8 next_inner = hlock_class(next)->wait_type_inner;
4728 u8 next_outer = hlock_class(next)->wait_type_outer;
4729 u8 curr_inner;
4730 int depth;
4731
4732 if (!next_inner || next->trylock)
4733 return 0;
4734
4735 if (!next_outer)
4736 next_outer = next_inner;
4737
4738
4739
4740
4741 for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
4742 struct held_lock *prev = curr->held_locks + depth;
4743 if (prev->irq_context != next->irq_context)
4744 break;
4745 }
4746 depth++;
4747
4748 curr_inner = task_wait_context(curr);
4749
4750 for (; depth < curr->lockdep_depth; depth++) {
4751 struct held_lock *prev = curr->held_locks + depth;
4752 u8 prev_inner = hlock_class(prev)->wait_type_inner;
4753
4754 if (prev_inner) {
4755
4756
4757
4758
4759
4760
4761 curr_inner = min(curr_inner, prev_inner);
4762 }
4763 }
4764
4765 if (next_outer > curr_inner)
4766 return print_lock_invalid_wait_context(curr, next);
4767
4768 return 0;
4769 }
4770
4771 #else
4772
4773 static inline int
4774 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4775 {
4776 return 1;
4777 }
4778
4779 static inline unsigned int task_irq_context(struct task_struct *task)
4780 {
4781 return 0;
4782 }
4783
4784 static inline int separate_irq_context(struct task_struct *curr,
4785 struct held_lock *hlock)
4786 {
4787 return 0;
4788 }
4789
4790 static inline int check_wait_context(struct task_struct *curr,
4791 struct held_lock *next)
4792 {
4793 return 0;
4794 }
4795
4796 #endif
4797
4798
4799
4800
4801 void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
4802 struct lock_class_key *key, int subclass,
4803 u8 inner, u8 outer, u8 lock_type)
4804 {
4805 int i;
4806
4807 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
4808 lock->class_cache[i] = NULL;
4809
4810 #ifdef CONFIG_LOCK_STAT
4811 lock->cpu = raw_smp_processor_id();
4812 #endif
4813
4814
4815
4816
4817 if (DEBUG_LOCKS_WARN_ON(!name)) {
4818 lock->name = "NULL";
4819 return;
4820 }
4821
4822 lock->name = name;
4823
4824 lock->wait_type_outer = outer;
4825 lock->wait_type_inner = inner;
4826 lock->lock_type = lock_type;
4827
4828
4829
4830
4831 if (DEBUG_LOCKS_WARN_ON(!key))
4832 return;
4833
4834
4835
4836
4837 if (!static_obj(key) && !is_dynamic_key(key)) {
4838 if (debug_locks)
4839 printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
4840 DEBUG_LOCKS_WARN_ON(1);
4841 return;
4842 }
4843 lock->key = key;
4844
4845 if (unlikely(!debug_locks))
4846 return;
4847
4848 if (subclass) {
4849 unsigned long flags;
4850
4851 if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
4852 return;
4853
4854 raw_local_irq_save(flags);
4855 lockdep_recursion_inc();
4856 register_lock_class(lock, subclass, 1);
4857 lockdep_recursion_finish();
4858 raw_local_irq_restore(flags);
4859 }
4860 }
4861 EXPORT_SYMBOL_GPL(lockdep_init_map_type);
4862
4863 struct lock_class_key __lockdep_no_validate__;
4864 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
4865
4866 static void
4867 print_lock_nested_lock_not_held(struct task_struct *curr,
4868 struct held_lock *hlock)
4869 {
4870 if (!debug_locks_off())
4871 return;
4872 if (debug_locks_silent)
4873 return;
4874
4875 pr_warn("\n");
4876 pr_warn("==================================\n");
4877 pr_warn("WARNING: Nested lock was not taken\n");
4878 print_kernel_ident();
4879 pr_warn("----------------------------------\n");
4880
4881 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4882 print_lock(hlock);
4883
4884 pr_warn("\nbut this task is not holding:\n");
4885 pr_warn("%s\n", hlock->nest_lock->name);
4886
4887 pr_warn("\nstack backtrace:\n");
4888 dump_stack();
4889
4890 pr_warn("\nother info that might help us debug this:\n");
4891 lockdep_print_held_locks(curr);
4892
4893 pr_warn("\nstack backtrace:\n");
4894 dump_stack();
4895 }
4896
4897 static int __lock_is_held(const struct lockdep_map *lock, int read);
4898
4899
4900
4901
4902
4903
4904
4905
4906
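/*
 * This gets called for every lock acquisition (spin_lock(), mutex_lock(),
 * etc.): it maintains the held-lock stack, computes the chain key and, on a
 * chain-cache miss, validates the new dependencies via validate_chain().
 * Returns 0 on failure, 1 on success, and 2 when the acquisition was merged
 * into the previous held_lock by bumping its reference count.
 */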
4907 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4908 int trylock, int read, int check, int hardirqs_off,
4909 struct lockdep_map *nest_lock, unsigned long ip,
4910 int references, int pin_count)
4911 {
4912 struct task_struct *curr = current;
4913 struct lock_class *class = NULL;
4914 struct held_lock *hlock;
4915 unsigned int depth;
4916 int chain_head = 0;
4917 int class_idx;
4918 u64 chain_key;
4919
4920 if (unlikely(!debug_locks))
4921 return 0;
4922
4923 if (!prove_locking || lock->key == &__lockdep_no_validate__)
4924 check = 0;
4925
4926 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
4927 class = lock->class_cache[subclass];
4928
4929
4930
4931 if (unlikely(!class)) {
4932 class = register_lock_class(lock, subclass, 0);
4933 if (!class)
4934 return 0;
4935 }
4936
4937 debug_class_ops_inc(class);
4938
4939 if (very_verbose(class)) {
4940 printk("\nacquire class [%px] %s", class->key, class->name);
4941 if (class->name_version > 1)
4942 printk(KERN_CONT "#%d", class->name_version);
4943 printk(KERN_CONT "\n");
4944 dump_stack();
4945 }
4946
4947
4948
4949
4950
4951
4952 depth = curr->lockdep_depth;
4953
4954
4955
4956 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
4957 return 0;
4958
4959 class_idx = class - lock_classes;
4960
4961 if (depth) {
4962 hlock = curr->held_locks + depth - 1;
4963 if (hlock->class_idx == class_idx && nest_lock) {
4964 if (!references)
4965 references++;
4966
4967 if (!hlock->references)
4968 hlock->references++;
4969
4970 hlock->references += references;
4971
4972
4973 if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
4974 return 0;
4975
4976 return 2;
4977 }
4978 }
4979
4980 hlock = curr->held_locks + depth;
4981
4982
4983
4984
4985 if (DEBUG_LOCKS_WARN_ON(!class))
4986 return 0;
4987 hlock->class_idx = class_idx;
4988 hlock->acquire_ip = ip;
4989 hlock->instance = lock;
4990 hlock->nest_lock = nest_lock;
4991 hlock->irq_context = task_irq_context(curr);
4992 hlock->trylock = trylock;
4993 hlock->read = read;
4994 hlock->check = check;
4995 hlock->hardirqs_off = !!hardirqs_off;
4996 hlock->references = references;
4997 #ifdef CONFIG_LOCK_STAT
4998 hlock->waittime_stamp = 0;
4999 hlock->holdtime_stamp = lockstat_clock();
5000 #endif
5001 hlock->pin_count = pin_count;
5002
5003 if (check_wait_context(curr, hlock))
5004 return 0;
5005
5006
5007 if (!mark_usage(curr, hlock, check))
5008 return 0;
5009
5010
5011
5012
5013
5014
5015
5016
5017
5018
5019
5020
5021
5022
5023 if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
5024 return 0;
5025
5026 chain_key = curr->curr_chain_key;
5027 if (!depth) {
5028
5029
5030
5031 if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
5032 return 0;
5033 chain_head = 1;
5034 }
5035
5036 hlock->prev_chain_key = chain_key;
5037 if (separate_irq_context(curr, hlock)) {
5038 chain_key = INITIAL_CHAIN_KEY;
5039 chain_head = 1;
5040 }
5041 chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
5042
5043 if (nest_lock && !__lock_is_held(nest_lock, -1)) {
5044 print_lock_nested_lock_not_held(curr, hlock);
5045 return 0;
5046 }
5047
5048 if (!debug_locks_silent) {
5049 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
5050 WARN_ON_ONCE(!hlock_class(hlock)->key);
5051 }
5052
5053 if (!validate_chain(curr, hlock, chain_head, chain_key))
5054 return 0;
5055
5056 curr->curr_chain_key = chain_key;
5057 curr->lockdep_depth++;
5058 check_chain_key(curr);
5059 #ifdef CONFIG_DEBUG_LOCKDEP
5060 if (unlikely(!debug_locks))
5061 return 0;
5062 #endif
5063 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
5064 debug_locks_off();
5065 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
5066 printk(KERN_DEBUG "depth: %i max: %lu!\n",
5067 curr->lockdep_depth, MAX_LOCK_DEPTH);
5068
5069 lockdep_print_held_locks(current);
5070 debug_show_all_locks();
5071 dump_stack();
5072
5073 return 0;
5074 }
5075
5076 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
5077 max_lockdep_depth = curr->lockdep_depth;
5078
5079 return 1;
5080 }
5081
5082 static void print_unlock_imbalance_bug(struct task_struct *curr,
5083 struct lockdep_map *lock,
5084 unsigned long ip)
5085 {
5086 if (!debug_locks_off())
5087 return;
5088 if (debug_locks_silent)
5089 return;
5090
5091 pr_warn("\n");
5092 pr_warn("=====================================\n");
5093 pr_warn("WARNING: bad unlock balance detected!\n");
5094 print_kernel_ident();
5095 pr_warn("-------------------------------------\n");
5096 pr_warn("%s/%d is trying to release lock (",
5097 curr->comm, task_pid_nr(curr));
5098 print_lockdep_cache(lock);
5099 pr_cont(") at:\n");
5100 print_ip_sym(KERN_WARNING, ip);
5101 pr_warn("but there are no more locks to release!\n");
5102 pr_warn("\nother info that might help us debug this:\n");
5103 lockdep_print_held_locks(curr);
5104
5105 pr_warn("\nstack backtrace:\n");
5106 dump_stack();
5107 }
5108
5109 static noinstr int match_held_lock(const struct held_lock *hlock,
5110 const struct lockdep_map *lock)
5111 {
5112 if (hlock->instance == lock)
5113 return 1;
5114
5115 if (hlock->references) {
5116 const struct lock_class *class = lock->class_cache[0];
5117
5118 if (!class)
5119 class = look_up_lock_class(lock, 0);
5120
5121
5122
5123
5124
5125
5126
5127 if (!class)
5128 return 0;
5129
5130
5131
5132
5133
5134
5135 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
5136 return 0;
5137
5138 if (hlock->class_idx == class - lock_classes)
5139 return 1;
5140 }
5141
5142 return 0;
5143 }
5144
5145
5146 static struct held_lock *find_held_lock(struct task_struct *curr,
5147 struct lockdep_map *lock,
5148 unsigned int depth, int *idx)
5149 {
5150 struct held_lock *ret, *hlock, *prev_hlock;
5151 int i;
5152
5153 i = depth - 1;
5154 hlock = curr->held_locks + i;
5155 ret = hlock;
5156 if (match_held_lock(hlock, lock))
5157 goto out;
5158
5159 ret = NULL;
5160 for (i--, prev_hlock = hlock--;
5161 i >= 0;
5162 i--, prev_hlock = hlock--) {
5163
5164
5165
5166 if (prev_hlock->irq_context != hlock->irq_context) {
5167 ret = NULL;
5168 break;
5169 }
5170 if (match_held_lock(hlock, lock)) {
5171 ret = hlock;
5172 break;
5173 }
5174 }
5175
5176 out:
5177 *idx = i;
5178 return ret;
5179 }
5180
5181 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
5182 int idx, unsigned int *merged)
5183 {
5184 struct held_lock *hlock;
5185 int first_idx = idx;
5186
5187 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
5188 return 0;
5189
5190 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
5191 switch (__lock_acquire(hlock->instance,
5192 hlock_class(hlock)->subclass,
5193 hlock->trylock,
5194 hlock->read, hlock->check,
5195 hlock->hardirqs_off,
5196 hlock->nest_lock, hlock->acquire_ip,
5197 hlock->references, hlock->pin_count)) {
5198 case 0:
5199 return 1;
5200 case 1:
5201 break;
5202 case 2:
5203 *merged += (idx == first_idx);
5204 break;
5205 default:
5206 WARN_ON(1);
5207 return 0;
5208 }
5209 }
5210 return 0;
5211 }
5212
5213 static int
5214 __lock_set_class(struct lockdep_map *lock, const char *name,
5215 struct lock_class_key *key, unsigned int subclass,
5216 unsigned long ip)
5217 {
5218 struct task_struct *curr = current;
5219 unsigned int depth, merged = 0;
5220 struct held_lock *hlock;
5221 struct lock_class *class;
5222 int i;
5223
5224 if (unlikely(!debug_locks))
5225 return 0;
5226
5227 depth = curr->lockdep_depth;
5228
5229
5230
5231
5232 if (DEBUG_LOCKS_WARN_ON(!depth))
5233 return 0;
5234
5235 hlock = find_held_lock(curr, lock, depth, &i);
5236 if (!hlock) {
5237 print_unlock_imbalance_bug(curr, lock, ip);
5238 return 0;
5239 }
5240
5241 lockdep_init_map_type(lock, name, key, 0,
5242 lock->wait_type_inner,
5243 lock->wait_type_outer,
5244 lock->lock_type);
5245 class = register_lock_class(lock, subclass, 0);
5246 hlock->class_idx = class - lock_classes;
5247
5248 curr->lockdep_depth = i;
5249 curr->curr_chain_key = hlock->prev_chain_key;
5250
5251 if (reacquire_held_locks(curr, depth, i, &merged))
5252 return 0;
5253
5254
5255
5256
5257
5258 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
5259 return 0;
5260 return 1;
5261 }
5262
5263 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5264 {
5265 struct task_struct *curr = current;
5266 unsigned int depth, merged = 0;
5267 struct held_lock *hlock;
5268 int i;
5269
5270 if (unlikely(!debug_locks))
5271 return 0;
5272
5273 depth = curr->lockdep_depth;
5274
5275
5276
5277
5278 if (DEBUG_LOCKS_WARN_ON(!depth))
5279 return 0;
5280
5281 hlock = find_held_lock(curr, lock, depth, &i);
5282 if (!hlock) {
5283 print_unlock_imbalance_bug(curr, lock, ip);
5284 return 0;
5285 }
5286
5287 curr->lockdep_depth = i;
5288 curr->curr_chain_key = hlock->prev_chain_key;
5289
5290 WARN(hlock->read, "downgrading a read lock");
5291 hlock->read = 1;
5292 hlock->acquire_ip = ip;
5293
5294 if (reacquire_held_locks(curr, depth, i, &merged))
5295 return 0;
5296
5297
5298 if (DEBUG_LOCKS_WARN_ON(merged))
5299 return 0;
5300
5301
5302
5303
5304
5305 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
5306 return 0;
5307
5308 return 1;
5309 }
5310
5311
5312
5313
5314
5315
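/*
 * Remove @lock from the list of currently held locks - this gets called on
 * mutex_unlock()/spin_unlock*().  The lock does not have to be the most
 * recently acquired one: an out-of-order release is handled by truncating
 * the held-lock stack at the released entry and re-acquiring everything that
 * was held above it.
 */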
5316 static int
5317 __lock_release(struct lockdep_map *lock, unsigned long ip)
5318 {
5319 struct task_struct *curr = current;
5320 unsigned int depth, merged = 1;
5321 struct held_lock *hlock;
5322 int i;
5323
5324 if (unlikely(!debug_locks))
5325 return 0;
5326
5327 depth = curr->lockdep_depth;
5328
5329
5330
5331
5332 if (depth <= 0) {
5333 print_unlock_imbalance_bug(curr, lock, ip);
5334 return 0;
5335 }
5336
5337
5338
5339
5340
5341 hlock = find_held_lock(curr, lock, depth, &i);
5342 if (!hlock) {
5343 print_unlock_imbalance_bug(curr, lock, ip);
5344 return 0;
5345 }
5346
5347 if (hlock->instance == lock)
5348 lock_release_holdtime(hlock);
5349
5350 WARN(hlock->pin_count, "releasing a pinned lock\n");
5351
5352 if (hlock->references) {
5353 hlock->references--;
5354 if (hlock->references) {
5355
5356
5357
5358
5359
5360 return 1;
5361 }
5362 }
5363
5364
5365
5366
5367
5368
5369
5370 curr->lockdep_depth = i;
5371 curr->curr_chain_key = hlock->prev_chain_key;
5372
	/*
	 * The most likely case is when the unlock is on the innermost
	 * lock. In this case, we are done!
	 */
5377 if (i == depth-1)
5378 return 1;
5379
5380 if (reacquire_held_locks(curr, depth, i + 1, &merged))
5381 return 0;
5382
	/*
	 * After re-acquiring the remaining locks, the depth must have
	 * shrunk by the released lock plus any merged entries:
	 */
5388 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
5389
	/*
	 * Since reacquire_held_locks() would have called check_chain_key()
	 * indirectly via __lock_acquire(), we don't need to do it again
	 * on return.
	 */
5395 return 0;
5396 }
5397
5398 static __always_inline
5399 int __lock_is_held(const struct lockdep_map *lock, int read)
5400 {
5401 struct task_struct *curr = current;
5402 int i;
5403
5404 for (i = 0; i < curr->lockdep_depth; i++) {
5405 struct held_lock *hlock = curr->held_locks + i;
5406
5407 if (match_held_lock(hlock, lock)) {
5408 if (read == -1 || !!hlock->read == read)
5409 return LOCK_STATE_HELD;
5410
5411 return LOCK_STATE_NOT_HELD;
5412 }
5413 }
5414
5415 return LOCK_STATE_NOT_HELD;
5416 }
5417
5418 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
5419 {
5420 struct pin_cookie cookie = NIL_COOKIE;
5421 struct task_struct *curr = current;
5422 int i;
5423
5424 if (unlikely(!debug_locks))
5425 return cookie;
5426
5427 for (i = 0; i < curr->lockdep_depth; i++) {
5428 struct held_lock *hlock = curr->held_locks + i;
5429
5430 if (match_held_lock(hlock, lock)) {
			/*
			 * Grab 16 bits of entropy; this is sufficient to not
			 * be guessable and still allows some pin nesting in
			 * our u32 pin_count.
			 */
5436 cookie.val = 1 + (sched_clock() & 0xffff);
5437 hlock->pin_count += cookie.val;
5438 return cookie;
5439 }
5440 }
5441
5442 WARN(1, "pinning an unheld lock\n");
5443 return cookie;
5444 }
5445
5446 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5447 {
5448 struct task_struct *curr = current;
5449 int i;
5450
5451 if (unlikely(!debug_locks))
5452 return;
5453
5454 for (i = 0; i < curr->lockdep_depth; i++) {
5455 struct held_lock *hlock = curr->held_locks + i;
5456
5457 if (match_held_lock(hlock, lock)) {
5458 hlock->pin_count += cookie.val;
5459 return;
5460 }
5461 }
5462
5463 WARN(1, "pinning an unheld lock\n");
5464 }
5465
5466 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5467 {
5468 struct task_struct *curr = current;
5469 int i;
5470
5471 if (unlikely(!debug_locks))
5472 return;
5473
5474 for (i = 0; i < curr->lockdep_depth; i++) {
5475 struct held_lock *hlock = curr->held_locks + i;
5476
5477 if (match_held_lock(hlock, lock)) {
5478 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
5479 return;
5480
5481 hlock->pin_count -= cookie.val;
5482
5483 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
5484 hlock->pin_count = 0;
5485
5486 return;
5487 }
5488 }
5489
5490 WARN(1, "unpinning an unheld lock\n");
5491 }
5492
/*
 * Check whether we follow the irq-flags state precisely:
 */
5496 static noinstr void check_flags(unsigned long flags)
5497 {
5498 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
5499 if (!debug_locks)
5500 return;
5501
	/* Allow instrumentation while we emit the warnings below: */
5503 instrumentation_begin();
5504
5505 if (irqs_disabled_flags(flags)) {
5506 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
5507 printk("possible reason: unannotated irqs-off.\n");
5508 }
5509 } else {
5510 if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
5511 printk("possible reason: unannotated irqs-on.\n");
5512 }
5513 }
5514
5515 #ifndef CONFIG_PREEMPT_RT
	/*
	 * We don't accurately track softirq state in hardirq
	 * contexts, so only check when not in a hardirq context:
	 */
5521 if (!hardirq_count()) {
5522 if (softirq_count()) {
5523
5524 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
5525 } else {
5526
5527 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
5528 }
5529 }
5530 #endif
5531
5532 if (!debug_locks)
5533 print_irqtrace_events(current);
5534
5535 instrumentation_end();
5536 #endif
5537 }
5538
5539 void lock_set_class(struct lockdep_map *lock, const char *name,
5540 struct lock_class_key *key, unsigned int subclass,
5541 unsigned long ip)
5542 {
5543 unsigned long flags;
5544
5545 if (unlikely(!lockdep_enabled()))
5546 return;
5547
5548 raw_local_irq_save(flags);
5549 lockdep_recursion_inc();
5550 check_flags(flags);
5551 if (__lock_set_class(lock, name, key, subclass, ip))
5552 check_chain_key(current);
5553 lockdep_recursion_finish();
5554 raw_local_irq_restore(flags);
5555 }
5556 EXPORT_SYMBOL_GPL(lock_set_class);
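
/*
 * Callers rarely use lock_set_class() directly; it normally sits behind a
 * thin annotation wrapper. As a rough sketch (see <linux/lockdep.h> for the
 * authoritative definition), lock_set_subclass() re-registers a held lock
 * under a different subclass while keeping its name and key:
 *
 *	#define lock_set_subclass(l, s, i) \
 *		lock_set_class(l, (l)->name, (l)->key, s, i)
 */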
5557
5558 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5559 {
5560 unsigned long flags;
5561
5562 if (unlikely(!lockdep_enabled()))
5563 return;
5564
5565 raw_local_irq_save(flags);
5566 lockdep_recursion_inc();
5567 check_flags(flags);
5568 if (__lock_downgrade(lock, ip))
5569 check_chain_key(current);
5570 lockdep_recursion_finish();
5571 raw_local_irq_restore(flags);
5572 }
5573 EXPORT_SYMBOL_GPL(lock_downgrade);
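
/*
 * Illustrative caller: a write-to-read downgrade primitive annotates the
 * conversion before performing it, along the lines of (simplified sketch):
 *
 *	void downgrade_write(struct rw_semaphore *sem)
 *	{
 *		lock_downgrade(&sem->dep_map, _RET_IP_);
 *		__downgrade_write(sem);
 *	}
 */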
5574
5575
5576 static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
5577 {
5578 #ifdef CONFIG_PROVE_LOCKING
5579 struct lock_class *class = look_up_lock_class(lock, subclass);
5580 unsigned long mask = LOCKF_USED;
5581
	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
5583 if (!class)
5584 return;
5585
	/*
	 * READ locks only conflict with USED: if we only ever take READ
	 * locks, no deadlock is possible (this is what RCU relies on).
	 */
5590 if (!hlock->read)
5591 mask |= LOCKF_USED_READ;
5592
5593 if (!(class->usage_mask & mask))
5594 return;
5595
5596 hlock->class_idx = class - lock_classes;
5597
5598 print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
5599 #endif
5600 }
5601
5602 static bool lockdep_nmi(void)
5603 {
5604 if (raw_cpu_read(lockdep_recursion))
5605 return false;
5606
5607 if (!in_nmi())
5608 return false;
5609
5610 return true;
5611 }
5612
/*
 * read_lock() is recursive if:
 * 1. We force lockdep to think this way in selftests or
 * 2. The implementation is not a queued read/write lock or
 * 3. The locker is in an in_interrupt() context.
 */
5619 bool read_lock_is_recursive(void)
5620 {
5621 return force_read_lock_recursive ||
5622 !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
5623 in_interrupt();
5624 }
5625 EXPORT_SYMBOL_GPL(read_lock_is_recursive);
5626
/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
5631 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
5632 int trylock, int read, int check,
5633 struct lockdep_map *nest_lock, unsigned long ip)
5634 {
5635 unsigned long flags;
5636
5637 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5638
5639 if (!debug_locks)
5640 return;
5641
5642 if (unlikely(!lockdep_enabled())) {
		/* In NMI context, only do the limited LOCK_USED check for non-trylock acquisitions: */
5644 if (lockdep_nmi() && !trylock) {
5645 struct held_lock hlock;
5646
5647 hlock.acquire_ip = ip;
5648 hlock.instance = lock;
5649 hlock.nest_lock = nest_lock;
5650 hlock.irq_context = 2;
5651 hlock.trylock = trylock;
5652 hlock.read = read;
5653 hlock.check = check;
5654 hlock.hardirqs_off = true;
5655 hlock.references = 0;
5656
5657 verify_lock_unused(lock, &hlock, subclass);
5658 }
5659 return;
5660 }
5661
5662 raw_local_irq_save(flags);
5663 check_flags(flags);
5664
5665 lockdep_recursion_inc();
5666 __lock_acquire(lock, subclass, trylock, read, check,
5667 irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
5668 lockdep_recursion_finish();
5669 raw_local_irq_restore(flags);
5670 }
5671 EXPORT_SYMBOL_GPL(lock_acquire);
5672
5673 void lock_release(struct lockdep_map *lock, unsigned long ip)
5674 {
5675 unsigned long flags;
5676
5677 trace_lock_release(lock, ip);
5678
5679 if (unlikely(!lockdep_enabled()))
5680 return;
5681
5682 raw_local_irq_save(flags);
5683 check_flags(flags);
5684
5685 lockdep_recursion_inc();
5686 if (__lock_release(lock, ip))
5687 check_chain_key(current);
5688 lockdep_recursion_finish();
5689 raw_local_irq_restore(flags);
5690 }
5691 EXPORT_SYMBOL_GPL(lock_release);
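
/*
 * Lock primitives typically reach lock_acquire()/lock_release() through
 * per-type annotation wrappers. A simplified sketch of the exclusive
 * (spinlock-style) wrappers - see <linux/lockdep.h> for the real ones:
 *
 *	#define spin_acquire(l, s, t, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
 *	#define spin_release(l, i)		lock_release(l, i)
 *
 * where read=0 requests an exclusive acquisition and check=1 enables full
 * validation.
 */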
5692
5693 noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
5694 {
5695 unsigned long flags;
5696 int ret = LOCK_STATE_NOT_HELD;
5697
	/*
	 * Avoid false negatives in lockdep_assert_held() and
	 * lockdep_assert_not_held() when lockdep is disabled:
	 */
5702 if (unlikely(!lockdep_enabled()))
5703 return LOCK_STATE_UNKNOWN;
5704
5705 raw_local_irq_save(flags);
5706 check_flags(flags);
5707
5708 lockdep_recursion_inc();
5709 ret = __lock_is_held(lock, read);
5710 lockdep_recursion_finish();
5711 raw_local_irq_restore(flags);
5712
5713 return ret;
5714 }
5715 EXPORT_SYMBOL_GPL(lock_is_held_type);
5716 NOKPROBE_SYMBOL(lock_is_held_type);
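
/*
 * This is the backend of the lockdep_assert_held*() family. Roughly
 * (simplified from <linux/lockdep.h>):
 *
 *	lockdep_is_held(lock)		-> lock_is_held_type(&(lock)->dep_map, -1)
 *	lockdep_is_held_type(l, r)	-> lock_is_held_type(&(l)->dep_map, r)
 *
 * and lockdep_assert_held(lock) warns only when the result is
 * LOCK_STATE_NOT_HELD, so LOCK_STATE_UNKNOWN (lockdep turned off) never
 * produces a false positive.
 */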
5717
5718 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
5719 {
5720 struct pin_cookie cookie = NIL_COOKIE;
5721 unsigned long flags;
5722
5723 if (unlikely(!lockdep_enabled()))
5724 return cookie;
5725
5726 raw_local_irq_save(flags);
5727 check_flags(flags);
5728
5729 lockdep_recursion_inc();
5730 cookie = __lock_pin_lock(lock);
5731 lockdep_recursion_finish();
5732 raw_local_irq_restore(flags);
5733
5734 return cookie;
5735 }
5736 EXPORT_SYMBOL_GPL(lock_pin_lock);
5737
5738 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5739 {
5740 unsigned long flags;
5741
5742 if (unlikely(!lockdep_enabled()))
5743 return;
5744
5745 raw_local_irq_save(flags);
5746 check_flags(flags);
5747
5748 lockdep_recursion_inc();
5749 __lock_repin_lock(lock, cookie);
5750 lockdep_recursion_finish();
5751 raw_local_irq_restore(flags);
5752 }
5753 EXPORT_SYMBOL_GPL(lock_repin_lock);
5754
5755 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5756 {
5757 unsigned long flags;
5758
5759 if (unlikely(!lockdep_enabled()))
5760 return;
5761
5762 raw_local_irq_save(flags);
5763 check_flags(flags);
5764
5765 lockdep_recursion_inc();
5766 __lock_unpin_lock(lock, cookie);
5767 lockdep_recursion_finish();
5768 raw_local_irq_restore(flags);
5769 }
5770 EXPORT_SYMBOL_GPL(lock_unpin_lock);
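
/*
 * Pinning usage pattern (sketch): a caller that must not see a held lock
 * dropped behind its back pins it and later unpins it with the same cookie;
 * any unlock of a pinned lock triggers the "releasing a pinned lock" warning
 * above.
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = lock_pin_lock(&lock->dep_map);
 *	... call code that is not allowed to drop 'lock' ...
 *	lock_unpin_lock(&lock->dep_map, cookie);
 *
 * The scheduler's runqueue pin/unpin helpers are built on this interface.
 */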
5771
5772 #ifdef CONFIG_LOCK_STAT
5773 static void print_lock_contention_bug(struct task_struct *curr,
5774 struct lockdep_map *lock,
5775 unsigned long ip)
5776 {
5777 if (!debug_locks_off())
5778 return;
5779 if (debug_locks_silent)
5780 return;
5781
5782 pr_warn("\n");
5783 pr_warn("=================================\n");
5784 pr_warn("WARNING: bad contention detected!\n");
5785 print_kernel_ident();
5786 pr_warn("---------------------------------\n");
5787 pr_warn("%s/%d is trying to contend lock (",
5788 curr->comm, task_pid_nr(curr));
5789 print_lockdep_cache(lock);
5790 pr_cont(") at:\n");
5791 print_ip_sym(KERN_WARNING, ip);
5792 pr_warn("but there are no locks held!\n");
5793 pr_warn("\nother info that might help us debug this:\n");
5794 lockdep_print_held_locks(curr);
5795
5796 pr_warn("\nstack backtrace:\n");
5797 dump_stack();
5798 }
5799
5800 static void
5801 __lock_contended(struct lockdep_map *lock, unsigned long ip)
5802 {
5803 struct task_struct *curr = current;
5804 struct held_lock *hlock;
5805 struct lock_class_stats *stats;
5806 unsigned int depth;
5807 int i, contention_point, contending_point;
5808
5809 depth = curr->lockdep_depth;
5810
	/*
	 * Contending on a lock while not holding anything at all
	 * makes no sense:
	 */
5814 if (DEBUG_LOCKS_WARN_ON(!depth))
5815 return;
5816
5817 hlock = find_held_lock(curr, lock, depth, &i);
5818 if (!hlock) {
5819 print_lock_contention_bug(curr, lock, ip);
5820 return;
5821 }
5822
5823 if (hlock->instance != lock)
5824 return;
5825
5826 hlock->waittime_stamp = lockstat_clock();
5827
5828 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
5829 contending_point = lock_point(hlock_class(hlock)->contending_point,
5830 lock->ip);
5831
5832 stats = get_lock_stats(hlock_class(hlock));
5833 if (contention_point < LOCKSTAT_POINTS)
5834 stats->contention_point[contention_point]++;
5835 if (contending_point < LOCKSTAT_POINTS)
5836 stats->contending_point[contending_point]++;
5837 if (lock->cpu != smp_processor_id())
5838 stats->bounces[bounce_contended + !!hlock->read]++;
5839 }
5840
5841 static void
5842 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
5843 {
5844 struct task_struct *curr = current;
5845 struct held_lock *hlock;
5846 struct lock_class_stats *stats;
5847 unsigned int depth;
5848 u64 now, waittime = 0;
5849 int i, cpu;
5850
5851 depth = curr->lockdep_depth;
5852
	/*
	 * Acquiring a lock without having announced any acquisition
	 * at all makes no sense:
	 */
5856 if (DEBUG_LOCKS_WARN_ON(!depth))
5857 return;
5858
5859 hlock = find_held_lock(curr, lock, depth, &i);
5860 if (!hlock) {
5861 print_lock_contention_bug(curr, lock, _RET_IP_);
5862 return;
5863 }
5864
5865 if (hlock->instance != lock)
5866 return;
5867
5868 cpu = smp_processor_id();
5869 if (hlock->waittime_stamp) {
5870 now = lockstat_clock();
5871 waittime = now - hlock->waittime_stamp;
5872 hlock->holdtime_stamp = now;
5873 }
5874
5875 stats = get_lock_stats(hlock_class(hlock));
5876 if (waittime) {
5877 if (hlock->read)
5878 lock_time_inc(&stats->read_waittime, waittime);
5879 else
5880 lock_time_inc(&stats->write_waittime, waittime);
5881 }
5882 if (lock->cpu != cpu)
5883 stats->bounces[bounce_acquired + !!hlock->read]++;
5884
5885 lock->cpu = cpu;
5886 lock->ip = ip;
5887 }
5888
5889 void lock_contended(struct lockdep_map *lock, unsigned long ip)
5890 {
5891 unsigned long flags;
5892
5893 trace_lock_contended(lock, ip);
5894
5895 if (unlikely(!lock_stat || !lockdep_enabled()))
5896 return;
5897
5898 raw_local_irq_save(flags);
5899 check_flags(flags);
5900 lockdep_recursion_inc();
5901 __lock_contended(lock, ip);
5902 lockdep_recursion_finish();
5903 raw_local_irq_restore(flags);
5904 }
5905 EXPORT_SYMBOL_GPL(lock_contended);
5906
5907 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
5908 {
5909 unsigned long flags;
5910
5911 trace_lock_acquired(lock, ip);
5912
5913 if (unlikely(!lock_stat || !lockdep_enabled()))
5914 return;
5915
5916 raw_local_irq_save(flags);
5917 check_flags(flags);
5918 lockdep_recursion_inc();
5919 __lock_acquired(lock, ip);
5920 lockdep_recursion_finish();
5921 raw_local_irq_restore(flags);
5922 }
5923 EXPORT_SYMBOL_GPL(lock_acquired);
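
/*
 * These two hooks are normally emitted by the LOCK_CONTENDED() helper, which
 * wraps a primitive's fast/slow path roughly like this (simplified sketch):
 *
 *	#define LOCK_CONTENDED(_lock, try, lock)			\
 *	do {								\
 *		if (!try(_lock)) {					\
 *			lock_contended(&(_lock)->dep_map, _RET_IP_);	\
 *			lock(_lock);					\
 *		}							\
 *		lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 *	} while (0)
 *
 * i.e. wait time is only accounted when the trylock fast path fails.
 */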
5924 #endif
5925
/*
 * Used by the testsuite - sanitize the validator state
 * after a simulated failure:
 */
5931 void lockdep_reset(void)
5932 {
5933 unsigned long flags;
5934 int i;
5935
5936 raw_local_irq_save(flags);
5937 lockdep_init_task(current);
5938 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
5939 nr_hardirq_chains = 0;
5940 nr_softirq_chains = 0;
5941 nr_process_chains = 0;
5942 debug_locks = 1;
5943 for (i = 0; i < CHAINHASH_SIZE; i++)
5944 INIT_HLIST_HEAD(chainhash_table + i);
5945 raw_local_irq_restore(flags);
5946 }
5947
/* Remove a class from a lock chain. Must be called with the graph lock held. */
5949 static void remove_class_from_lock_chain(struct pending_free *pf,
5950 struct lock_chain *chain,
5951 struct lock_class *class)
5952 {
5953 #ifdef CONFIG_PROVE_LOCKING
5954 int i;
5955
5956 for (i = chain->base; i < chain->base + chain->depth; i++) {
5957 if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes)
5958 continue;
		/*
		 * Each lock class occurs at most once in a lock chain so once
		 * we found a match we can break out of this loop.
		 */
5963 goto free_lock_chain;
5964 }
5965
5966 return;
5967
5968 free_lock_chain:
5969 free_chain_hlocks(chain->base, chain->depth);
5970
5971 WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
5972 dec_chains(chain->irq_context);
5973
	/*
	 * Note: calling hlist_del_rcu() from inside a
	 * hlist_for_each_entry_rcu() loop is safe.
	 */
5978 hlist_del_rcu(&chain->entry);
5979 __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
5980 nr_zapped_lock_chains++;
5981 #endif
5982 }
5983
/* Must be called with the graph lock held. */
5985 static void remove_class_from_lock_chains(struct pending_free *pf,
5986 struct lock_class *class)
5987 {
5988 struct lock_chain *chain;
5989 struct hlist_head *head;
5990 int i;
5991
5992 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
5993 head = chainhash_table + i;
5994 hlist_for_each_entry_rcu(chain, head, entry) {
5995 remove_class_from_lock_chain(pf, chain, class);
5996 }
5997 }
5998 }
5999
/*
 * Remove all references to a lock class. The caller must hold the graph lock.
 */
6003 static void zap_class(struct pending_free *pf, struct lock_class *class)
6004 {
6005 struct lock_list *entry;
6006 int i;
6007
6008 WARN_ON_ONCE(!class->key);
6009
	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
6014 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
6015 entry = list_entries + i;
6016 if (entry->class != class && entry->links_to != class)
6017 continue;
6018 __clear_bit(i, list_entries_in_use);
6019 nr_list_entries--;
6020 list_del_rcu(&entry->entry);
6021 }
6022 if (list_empty(&class->locks_after) &&
6023 list_empty(&class->locks_before)) {
6024 list_move_tail(&class->lock_entry, &pf->zapped);
6025 hlist_del_rcu(&class->hash_entry);
6026 WRITE_ONCE(class->key, NULL);
6027 WRITE_ONCE(class->name, NULL);
6028 nr_lock_classes--;
6029 __clear_bit(class - lock_classes, lock_classes_in_use);
6030 if (class - lock_classes == max_lock_class_idx)
6031 max_lock_class_idx--;
6032 } else {
6033 WARN_ONCE(true, "%s() failed for class %s\n", __func__,
6034 class->name);
6035 }
6036
6037 remove_class_from_lock_chains(pf, class);
6038 nr_zapped_classes++;
6039 }
6040
6041 static void reinit_class(struct lock_class *class)
6042 {
6043 WARN_ON_ONCE(!class->lock_entry.next);
6044 WARN_ON_ONCE(!list_empty(&class->locks_after));
6045 WARN_ON_ONCE(!list_empty(&class->locks_before));
6046 memset_startat(class, 0, key);
6047 WARN_ON_ONCE(!class->lock_entry.next);
6048 WARN_ON_ONCE(!list_empty(&class->locks_after));
6049 WARN_ON_ONCE(!list_empty(&class->locks_before));
6050 }
6051
6052 static inline int within(const void *addr, void *start, unsigned long size)
6053 {
6054 return addr >= start && addr < start + size;
6055 }
6056
6057 static bool inside_selftest(void)
6058 {
6059 return current == lockdep_selftest_task_struct;
6060 }
6061
6062
6063 static struct pending_free *get_pending_free(void)
6064 {
6065 return delayed_free.pf + delayed_free.index;
6066 }
6067
6068 static void free_zapped_rcu(struct rcu_head *cb);
6069
/*
 * Schedule an RCU callback if no RCU callback is pending. Must be called with
 * the graph lock held.
 */
6074 static void call_rcu_zapped(struct pending_free *pf)
6075 {
6076 WARN_ON_ONCE(inside_selftest());
6077
6078 if (list_empty(&pf->zapped))
6079 return;
6080
6081 if (delayed_free.scheduled)
6082 return;
6083
6084 delayed_free.scheduled = true;
6085
6086 WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
6087 delayed_free.index ^= 1;
6088
6089 call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
6090 }
6091
/* The caller must hold the graph lock. May be called from RCU context. */
6093 static void __free_zapped_classes(struct pending_free *pf)
6094 {
6095 struct lock_class *class;
6096
6097 check_data_structures();
6098
6099 list_for_each_entry(class, &pf->zapped, lock_entry)
6100 reinit_class(class);
6101
6102 list_splice_init(&pf->zapped, &free_lock_classes);
6103
6104 #ifdef CONFIG_PROVE_LOCKING
6105 bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
6106 pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
6107 bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
6108 #endif
6109 }
6110
6111 static void free_zapped_rcu(struct rcu_head *ch)
6112 {
6113 struct pending_free *pf;
6114 unsigned long flags;
6115
6116 if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
6117 return;
6118
6119 raw_local_irq_save(flags);
6120 lockdep_lock();
6121
	/* Process the pending_free that was closed when this callback was scheduled: */
6123 pf = delayed_free.pf + (delayed_free.index ^ 1);
6124 __free_zapped_classes(pf);
6125 delayed_free.scheduled = false;
6126
	/*
	 * If there's anything on the open list, close and start a new callback.
	 */
6130 call_rcu_zapped(delayed_free.pf + delayed_free.index);
6131
6132 lockdep_unlock();
6133 raw_local_irq_restore(flags);
6134 }
6135
/*
 * Remove all lock classes from the class hash table and from the
 * all_lock_classes list whose key or name is in the address range [start,
 * start + size). Move these lock classes to the zapped list. Must be called
 * with the graph lock held.
 */
6142 static void __lockdep_free_key_range(struct pending_free *pf, void *start,
6143 unsigned long size)
6144 {
6145 struct lock_class *class;
6146 struct hlist_head *head;
6147 int i;
6148
	/* Zap all classes whose key or name lies in the freed range: */
6150 for (i = 0; i < CLASSHASH_SIZE; i++) {
6151 head = classhash_table + i;
6152 hlist_for_each_entry_rcu(class, head, hash_entry) {
6153 if (!within(class->key, start, size) &&
6154 !within(class->name, start, size))
6155 continue;
6156 zap_class(pf, class);
6157 }
6158 }
6159 }
6160
/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed, and possibly re-used by other modules.
 *
 * We will have had one synchronize_rcu() before getting here, so we're
 * guaranteed nobody will look up these exact classes; they're properly dead
 * but still allocated.
 */
6169 static void lockdep_free_key_range_reg(void *start, unsigned long size)
6170 {
6171 struct pending_free *pf;
6172 unsigned long flags;
6173
6174 init_data_structures_once();
6175
6176 raw_local_irq_save(flags);
6177 lockdep_lock();
6178 pf = get_pending_free();
6179 __lockdep_free_key_range(pf, start, size);
6180 call_rcu_zapped(pf);
6181 lockdep_unlock();
6182 raw_local_irq_restore(flags);
6183
	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 */
6188 synchronize_rcu();
6189 }
6190
/*
 * Free all lockdep keys in the range [start, start+size). Does not sleep.
 * Ignores debug_locks. Must only be used by the lockdep selftests.
 */
6195 static void lockdep_free_key_range_imm(void *start, unsigned long size)
6196 {
6197 struct pending_free *pf = delayed_free.pf;
6198 unsigned long flags;
6199
6200 init_data_structures_once();
6201
6202 raw_local_irq_save(flags);
6203 lockdep_lock();
6204 __lockdep_free_key_range(pf, start, size);
6205 __free_zapped_classes(pf);
6206 lockdep_unlock();
6207 raw_local_irq_restore(flags);
6208 }
6209
6210 void lockdep_free_key_range(void *start, unsigned long size)
6211 {
6212 init_data_structures_once();
6213
6214 if (inside_selftest())
6215 lockdep_free_key_range_imm(start, size);
6216 else
6217 lockdep_free_key_range_reg(start, size);
6218 }
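
/*
 * Typical (non-selftest) caller: module unload, which must zap every class
 * whose key or name points into memory that is about to be freed. Sketch
 * with illustrative variable names:
 *
 *	lockdep_free_key_range(module_region_base, module_region_size);
 *
 * After this returns, no lock class may reference that range any more.
 */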
6219
/*
 * Check whether any element of the @lock->class_cache[] array refers to a
 * registered lock class. The caller must hold either the graph lock or the
 * RCU read lock.
 */
6225 static bool lock_class_cache_is_registered(struct lockdep_map *lock)
6226 {
6227 struct lock_class *class;
6228 struct hlist_head *head;
6229 int i, j;
6230
6231 for (i = 0; i < CLASSHASH_SIZE; i++) {
6232 head = classhash_table + i;
6233 hlist_for_each_entry_rcu(class, head, hash_entry) {
6234 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
6235 if (lock->class_cache[j] == class)
6236 return true;
6237 }
6238 }
6239 return false;
6240 }
6241
/* The caller must hold the graph lock. Does not sleep. */
6243 static void __lockdep_reset_lock(struct pending_free *pf,
6244 struct lockdep_map *lock)
6245 {
6246 struct lock_class *class;
6247 int j;
6248
	/*
	 * Remove all classes this lock might have:
	 */
6252 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
6256 class = look_up_lock_class(lock, j);
6257 if (class)
6258 zap_class(pf, class);
6259 }
6260
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
6264 if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
6265 debug_locks_off();
6266 }
6267
/*
 * Remove all information lockdep has about a lock if debug_locks == 1. Free
 * released data structures from RCU context.
 */
6272 static void lockdep_reset_lock_reg(struct lockdep_map *lock)
6273 {
6274 struct pending_free *pf;
6275 unsigned long flags;
6276 int locked;
6277
6278 raw_local_irq_save(flags);
6279 locked = graph_lock();
6280 if (!locked)
6281 goto out_irq;
6282
6283 pf = get_pending_free();
6284 __lockdep_reset_lock(pf, lock);
6285 call_rcu_zapped(pf);
6286
6287 graph_unlock();
6288 out_irq:
6289 raw_local_irq_restore(flags);
6290 }
6291
/*
 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
 * lockdep selftests.
 */
6296 static void lockdep_reset_lock_imm(struct lockdep_map *lock)
6297 {
6298 struct pending_free *pf = delayed_free.pf;
6299 unsigned long flags;
6300
6301 raw_local_irq_save(flags);
6302 lockdep_lock();
6303 __lockdep_reset_lock(pf, lock);
6304 __free_zapped_classes(pf);
6305 lockdep_unlock();
6306 raw_local_irq_restore(flags);
6307 }
6308
6309 void lockdep_reset_lock(struct lockdep_map *lock)
6310 {
6311 init_data_structures_once();
6312
6313 if (inside_selftest())
6314 lockdep_reset_lock_imm(lock);
6315 else
6316 lockdep_reset_lock_reg(lock);
6317 }
6318
/*
 * Unregister a dynamically allocated key.
 *
 * Unlike lockdep_register_key(), a search is always done to find a matching
 * key irrespective of debug_locks, to avoid a potential invalid access to
 * freed memory in a lock_class entry.
 */
6326 void lockdep_unregister_key(struct lock_class_key *key)
6327 {
6328 struct hlist_head *hash_head = keyhashentry(key);
6329 struct lock_class_key *k;
6330 struct pending_free *pf;
6331 unsigned long flags;
6332 bool found = false;
6333
6334 might_sleep();
6335
6336 if (WARN_ON_ONCE(static_obj(key)))
6337 return;
6338
6339 raw_local_irq_save(flags);
6340 lockdep_lock();
6341
6342 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
6343 if (k == key) {
6344 hlist_del_rcu(&k->hash_entry);
6345 found = true;
6346 break;
6347 }
6348 }
6349 WARN_ON_ONCE(!found && debug_locks);
6350 if (found) {
6351 pf = get_pending_free();
6352 __lockdep_free_key_range(pf, key, 1);
6353 call_rcu_zapped(pf);
6354 }
6355 lockdep_unlock();
6356 raw_local_irq_restore(flags);
6357
	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
6359 synchronize_rcu();
6360 }
6361 EXPORT_SYMBOL_GPL(lockdep_unregister_key);
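
/*
 * Dynamic key lifecycle sketch (struct foo and its members are illustrative):
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		struct lock_class_key	key;
 *	};
 *
 *	lockdep_register_key(&foo->key);
 *	lockdep_set_class(&foo->lock, &foo->key);
 *	...
 *	lockdep_unregister_key(&foo->key);	(may sleep: waits for RCU)
 *	kfree(foo);
 *
 * Unregistering before freeing 'foo' is what prevents lockdep from later
 * dereferencing a freed key.
 */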
6362
6363 void __init lockdep_init(void)
6364 {
6365 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
6366
6367 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
6368 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
6369 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
6370 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
6371 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
6372 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
6373 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
6374
6375 printk(" memory used by lock dependency info: %zu kB\n",
6376 (sizeof(lock_classes) +
6377 sizeof(lock_classes_in_use) +
6378 sizeof(classhash_table) +
6379 sizeof(list_entries) +
6380 sizeof(list_entries_in_use) +
6381 sizeof(chainhash_table) +
6382 sizeof(delayed_free)
6383 #ifdef CONFIG_PROVE_LOCKING
6384 + sizeof(lock_cq)
6385 + sizeof(lock_chains)
6386 + sizeof(lock_chains_in_use)
6387 + sizeof(chain_hlocks)
6388 #endif
6389 ) / 1024
6390 );
6391
6392 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
6393 printk(" memory used for stack traces: %zu kB\n",
6394 (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
6395 );
6396 #endif
6397
6398 printk(" per task-struct memory footprint: %zu bytes\n",
6399 sizeof(((struct task_struct *)NULL)->held_locks));
6400 }
6401
6402 static void
6403 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
6404 const void *mem_to, struct held_lock *hlock)
6405 {
6406 if (!debug_locks_off())
6407 return;
6408 if (debug_locks_silent)
6409 return;
6410
6411 pr_warn("\n");
6412 pr_warn("=========================\n");
6413 pr_warn("WARNING: held lock freed!\n");
6414 print_kernel_ident();
6415 pr_warn("-------------------------\n");
6416 pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
6417 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
6418 print_lock(hlock);
6419 lockdep_print_held_locks(curr);
6420
6421 pr_warn("\nstack backtrace:\n");
6422 dump_stack();
6423 }
6424
6425 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
6426 const void* lock_from, unsigned long lock_len)
6427 {
6428 return lock_from + lock_len <= mem_from ||
6429 mem_from + mem_len <= lock_from;
6430 }
6431
/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <mem_from> to <mem_from + mem_len>:
 */
6437 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
6438 {
6439 struct task_struct *curr = current;
6440 struct held_lock *hlock;
6441 unsigned long flags;
6442 int i;
6443
6444 if (unlikely(!debug_locks))
6445 return;
6446
6447 raw_local_irq_save(flags);
6448 for (i = 0; i < curr->lockdep_depth; i++) {
6449 hlock = curr->held_locks + i;
6450
6451 if (not_in_range(mem_from, mem_len, hlock->instance,
6452 sizeof(*hlock->instance)))
6453 continue;
6454
6455 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
6456 break;
6457 }
6458 raw_local_irq_restore(flags);
6459 }
6460 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
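
/*
 * Allocator free paths are the usual callers. A hypothetical sketch, with
 * 'obj' and 'obj_size' standing in for whatever the allocator tracks:
 *
 *	debug_check_no_locks_freed(obj, obj_size);
 *	... actually free the memory ...
 */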
6461
6462 static void print_held_locks_bug(void)
6463 {
6464 if (!debug_locks_off())
6465 return;
6466 if (debug_locks_silent)
6467 return;
6468
6469 pr_warn("\n");
6470 pr_warn("====================================\n");
6471 pr_warn("WARNING: %s/%d still has locks held!\n",
6472 current->comm, task_pid_nr(current));
6473 print_kernel_ident();
6474 pr_warn("------------------------------------\n");
6475 lockdep_print_held_locks(current);
6476 pr_warn("\nstack backtrace:\n");
6477 dump_stack();
6478 }
6479
6480 void debug_check_no_locks_held(void)
6481 {
6482 if (unlikely(current->lockdep_depth > 0))
6483 print_held_locks_bug();
6484 }
6485 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
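
/*
 * Intended to be called at points where a task must not own any lock at all,
 * for example (sketch) late in the task-exit path:
 *
 *	...
 *	debug_check_no_locks_held();
 *	...
 */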
6486
6487 #ifdef __KERNEL__
6488 void debug_show_all_locks(void)
6489 {
6490 struct task_struct *g, *p;
6491
6492 if (unlikely(!debug_locks)) {
6493 pr_warn("INFO: lockdep is turned off.\n");
6494 return;
6495 }
6496 pr_warn("\nShowing all locks held in the system:\n");
6497
6498 rcu_read_lock();
6499 for_each_process_thread(g, p) {
6500 if (!p->lockdep_depth)
6501 continue;
6502 lockdep_print_held_locks(p);
6503 touch_nmi_watchdog();
6504 touch_all_softlockup_watchdogs();
6505 }
6506 rcu_read_unlock();
6507
6508 pr_warn("\n");
6509 pr_warn("=============================================\n\n");
6510 }
6511 EXPORT_SYMBOL_GPL(debug_show_all_locks);
6512 #endif
6513
/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
6518 void debug_show_held_locks(struct task_struct *task)
6519 {
6520 if (unlikely(!debug_locks)) {
6521 printk("INFO: lockdep is turned off.\n");
6522 return;
6523 }
6524 lockdep_print_held_locks(task);
6525 }
6526 EXPORT_SYMBOL_GPL(debug_show_held_locks);
6527
6528 asmlinkage __visible void lockdep_sys_exit(void)
6529 {
6530 struct task_struct *curr = current;
6531
6532 if (unlikely(curr->lockdep_depth)) {
6533 if (!debug_locks_off())
6534 return;
6535 pr_warn("\n");
6536 pr_warn("================================================\n");
6537 pr_warn("WARNING: lock held when returning to user space!\n");
6538 print_kernel_ident();
6539 pr_warn("------------------------------------------------\n");
6540 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
6541 curr->comm, curr->pid);
6542 lockdep_print_held_locks(curr);
6543 }
6544
	/*
	 * The lock history for each syscall should be independent. So wipe the
	 * slate clean on return to userspace.
	 */
6549 lockdep_invariant_state(false);
6550 }
6551
6552 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
6553 {
6554 struct task_struct *curr = current;
6555 int dl = READ_ONCE(debug_locks);
6556
	/* Note: the following can be executed concurrently, so be careful. */
6558 pr_warn("\n");
6559 pr_warn("=============================\n");
6560 pr_warn("WARNING: suspicious RCU usage\n");
6561 print_kernel_ident();
6562 pr_warn("-----------------------------\n");
6563 pr_warn("%s:%d %s!\n", file, line, s);
6564 pr_warn("\nother info that might help us debug this:\n\n");
6565 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s",
6566 !rcu_lockdep_current_cpu_online()
6567 ? "RCU used illegally from offline CPU!\n"
6568 : "",
6569 rcu_scheduler_active, dl,
6570 dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n");
6571
	/*
	 * A CPU that is inside the RCU-idle window is in an "extended
	 * quiescent state", which means RCU is completely ignoring it:
	 * rcu_read_lock() and friends have no effect there, and RCU may
	 * free data structures out from under such a reader. So complain
	 * bitterly if someone calls rcu_read_lock(), rcu_read_lock_bh()
	 * and so on from an extended quiescent state.
	 */
6590 if (!rcu_is_watching())
6591 pr_warn("RCU used illegally from extended quiescent state!\n");
6592
6593 lockdep_print_held_locks(curr);
6594 pr_warn("\nstack backtrace:\n");
6595 dump_stack();
6596 }
6597 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
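
/*
 * This is the complaint path behind RCU_LOCKDEP_WARN(). Simplified sketch of
 * how a caller reaches it (the real macro also limits itself to one warning
 * per call site):
 *
 *	if (debug_lockdep_rcu_enabled() && (condition))
 *		lockdep_rcu_suspicious(__FILE__, __LINE__, msg);
 *
 * e.g.:
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *			 "suspicious rcu_dereference_check() usage");
 */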