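/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */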
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
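	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */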
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

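/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */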
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
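
/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */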
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
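	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */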
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
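	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */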
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
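	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also don't add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */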
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};
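
/*
 * Initialization, self-test and debugging-output methods:
 */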
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

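/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */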
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

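/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */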
#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

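/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */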
extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

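/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-sharing):
 */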
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)						\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,			\
			      (lock)->dep_map.wait_type_outer,			\
			      (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

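/*
 * Usage sketch (illustrative, not part of this header): a subsystem that
 * creates many locks of one type from a common init path can give a group
 * of them a class of their own; "foo" and "foo_key" are placeholder names:
 *
 *	static struct lock_class_key foo_key;
 *
 *	spin_lock_init(&foo->lock);
 *	lockdep_set_class(&foo->lock, &foo_key);
 */
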
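/*
 * Compare locking classes
 */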
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

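/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */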
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1
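
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */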
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)	\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

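/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */
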
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

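/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */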
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
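
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */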
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

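/*
 * Usage sketch (illustrative, not part of this header): a subsystem-private
 * annotation map; "my_dep_map" and "my_key" are placeholder names:
 *
 *	static struct lock_class_key my_key;
 *	static struct lockdep_map my_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_key);
 */
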
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
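
/*
 * Usage sketch (illustrative): a sleeping-lock implementation wires its fast
 * trylock and its slow acquire path into the contention events; the function
 * names below are placeholders, not kernel symbols:
 *
 *	LOCK_CONTENDED(lock, my_trylock, my_lock_slowpath);
 */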

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

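/* Variable used to make lockdep treat read_lock() as recursive in selftests */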
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif /* CONFIG_LOCKDEP */
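
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */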
#define SINGLE_DEPTH_NESTING			1
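
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */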
#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
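
/*
 * Usage sketch (illustrative, not part of this header): a function that
 * takes "some_mutex" only on rare paths can still declare the dependency
 * up front, so lockdep sees it on every run:
 *
 *	might_lock(&some_mutex);
 *
 * where "some_mutex" stands for any lock object with a dep_map.
 */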

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

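/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */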
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled &&				\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else /* !CONFIG_PROVE_LOCKING */
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif /* CONFIG_PROVE_LOCKING */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config), \
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */