/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

/* Maximum number of subclasses per lock class (sizes the subkeys[] array). */
#define MAX_LOCKDEP_SUBCLASSES		8UL
/*
 * Wait-type classification of a lock class, used by the validator to
 * check lock-nesting rules (see Documentation/locking/lockdep-design.rst).
 */
enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	/* Without raw-nesting proving, spinlock_t is ranked like a spin lock. */
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};
/* Broad category of a lock class (normal vs. per-CPU). */
enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0,	/* normal, catch all */
	LD_LOCK_PERCPU,		/* percpu */
	LD_LOCK_MAX,		/* must be last */
};
0039 #ifdef CONFIG_LOCKDEP
0040 
/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate 4 states. Additionally we report on USED and USED_READ.
 */
#define XXX_LOCK_USAGE_STATES		2
/* 4 usage states per lockdep_states.h line, plus USED and USED_READ. */
#define LOCK_TRACE_STATES		(XXX_LOCK_USAGE_STATES*4 + 2)
/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it at single depth, making that subclass
 * highly contended as well.
 */
/* Number of lock classes cached per lockdep_map (see comment above). */
#define NR_LOCKDEP_CACHING_CLASSES	2
/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	/*
	 * Packed to exactly one byte so consecutive subkeys[] elements have
	 * distinct, adjacent addresses — keys are address-based (see above).
	 */
	char __one_byte;
} __attribute__ ((__packed__));
/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		/* linkage in the dynamic-key hash while the key is registered */
		struct hlist_node		hash_entry;
		/* one distinct key address per subclass */
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};
/*
 * NOTE(review): the name suggests classes keyed on this are exempt from
 * validation — confirm against kernel/locking/lockdep.c.
 */
extern struct lock_class_key __lockdep_no_validate__;

/* Opaque stack-trace record, defined by the lockdep implementation. */
struct lock_trace;

/* Number of contention/contending points recorded per lock class. */
#define LOCKSTAT_POINTS		4
/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 *
	 * NOTE(review): the description above appears to match dep_gen_id
	 * rather than name_version; name_version looks like a duplicate-name
	 * counter — verify against kernel/locking/lockdep.c.
	 */
	int				name_version;
	const char			*name;

	u8				wait_type_inner;	/* enum lockdep_wait_type */
	u8				wait_type_outer;	/* enum lockdep_wait_type */
	u8				lock_type;		/* enum lockdep_lock_type */
	/* u8				hole; */

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

0139 #ifdef CONFIG_LOCK_STAT
/* Running min/max/total time accumulator for lock statistics. */
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;	/* presumably the sample count — confirm */
};
/*
 * Lock "bounce" event categories tracked under CONFIG_LOCK_STAT.
 * bounce_acquired/bounce_contended alias the _write variants; by enum
 * order the matching _read variant is at an offset of one.
 */
enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,	/* number of real counters; must follow them */

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};
/* Per-class statistics snapshot (CONFIG_LOCK_STAT only). */
struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

/* Return by value a snapshot of the statistics gathered for @class. */
struct lock_class_stats lock_stats(struct lock_class *class);
/* Reset the statistics gathered for @class. */
void clear_lock_stats(struct lock_class *class);
0170 #endif
0171 
/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	/* cache of the main class and the single-depth subclass (see above) */
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	u8				wait_type_outer; /* can be taken in this context */
	u8				wait_type_inner; /* represents this context */
	u8				lock_type;
	/* u8				hole; */
#ifdef CONFIG_LOCK_STAT
	int				cpu;	/* NOTE(review): presumably last acquiring CPU — confirm */
	unsigned long			ip;	/* NOTE(review): presumably last acquire IP — confirm */
#endif
};

/* Opaque cookie; the name suggests it pairs lock pin/unpin operations — confirm. */
struct pin_cookie { unsigned int val; };

0192 #else /* !CONFIG_LOCKDEP */
0193 
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

/* Likewise, the pin cookie carries no state when lockdep is disabled. */
struct pin_cookie { };

#endif /* !CONFIG_LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */