/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES		8UL
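/*
 * Example (sketch): nesting two instances of the same lock class is
 * annotated with a subclass argument, which must stay below
 * MAX_LOCKDEP_SUBCLASSES; "parent" and "child" are hypothetical:
 *
 *	mutex_lock_nested(&parent->lock, 0);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */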

enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};
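/*
 * Example (sketch): wait types order the contexts a lock may be
 * acquired in, from LD_WAIT_FREE up to LD_WAIT_SLEEP. A lock must not
 * be acquired in a context that cannot afford its wait type; with
 * CONFIG_PROVE_RAW_LOCK_NESTING the validator flags the following,
 * because spinlock_t (LD_WAIT_CONFIG) sleeps on PREEMPT_RT while
 * raw_spinlock_t (LD_WAIT_SPIN) never does. The locks are hypothetical:
 *
 *	raw_spin_lock(&raw_lock);
 *	spin_lock(&normal_lock);	// invalid nesting on RT
 *	spin_unlock(&normal_lock);
 *	raw_spin_unlock(&raw_lock);
 */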

enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0,	/* normal, catch all */
	LD_LOCK_PERCPU,		/* percpu */
	LD_LOCK_MAX,
};
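/*
 * Note (sketch): local_lock_t is the typical LD_LOCK_PERCPU user; the
 * lock type lets the validator apply per-CPU specific rules, see
 * kernel/locking/lockdep.c for the exact semantics.
 */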

#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate 4 states. Additionally we report on USED and USED_READ.
 */
#define XXX_LOCK_USAGE_STATES		2
#define LOCK_TRACE_STATES		(XXX_LOCK_USAGE_STATES*4 + 2)
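/*
 * Worked example: lockdep_states.h currently lists hardirq and softirq,
 * so XXX_LOCK_USAGE_STATES == 2 and LOCK_TRACE_STATES == 2*4 + 2 == 10.
 */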

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes that can be
 * cached in an instance of lockdep_map.
 *
 * Currently the main class (subclass == 0) and the single-depth
 * subclass are cached in lockdep_map. This optimization mainly
 * targets rq->lock, which is highly contended and which
 * double_rq_lock() acquires with single-depth nesting.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
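/*
 * Example (sketch): the double_rq_lock() pattern that benefits from
 * the single-depth cache; the second lock is annotated as a nested
 * subclass:
 *
 *	raw_spin_lock(&rq1->lock);
 *	raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 */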

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};
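/*
 * Example (sketch): a static key needs no registration, while a
 * dynamically allocated key must be registered before use and
 * unregistered before its memory is freed; "obj" is hypothetical,
 * the helpers live in <linux/lockdep.h>:
 *
 *	static struct lock_class_key static_key;	// no registration
 *
 *	lockdep_register_key(&obj->key);		// dynamic key
 *	lockdep_set_class(&obj->lock, &obj->key);
 *	...
 *	lockdep_unregister_key(&obj->key);		// before freeing obj
 */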

extern struct lock_class_key __lockdep_no_validate__;
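/*
 * Example (sketch): __lockdep_no_validate__ backs
 * lockdep_set_novalidate_class(), which opts a lock out of validation
 * when its ordering cannot sensibly be described to lockdep, e.g.:
 *
 *	lockdep_set_novalidate_class(&dev->mutex);
 */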

struct lock_trace;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	unsigned int			subclass;

	/*
	 * Generation counter used during certain kinds of graph walking,
	 * to ensure each node is visited only once:
	 */
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];

	/*
	 * Disambiguates classes that share the same name; duplicates are
	 * reported as "name#2", "name#3", and so on.
	 */
	int				name_version;
	const char			*name;

	u8				wait_type_inner;
	u8				wait_type_outer;
	u8				lock_type;
	/* u8				hole; */

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
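/*
 * With CONFIG_LOCK_STAT=y these statistics are exposed via
 * /proc/lock_stat; writing to that file (e.g. "echo 0 > /proc/lock_stat")
 * clears them.
 */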
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	u8				wait_type_outer; /* can be taken in this context */
	u8				wait_type_inner; /* represents this context */
	u8				lock_type;
	/* u8				hole; */
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
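/*
 * Example (sketch): a custom locking primitive embeds a lockdep_map
 * and wires it up with the helpers from <linux/lockdep.h>; "my_lock"
 * and its fields are hypothetical:
 *
 *	struct my_lock {
 *		atomic_t		state;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static struct lock_class_key my_lock_key;
 *	lockdep_init_map(&lock->dep_map, "my_lock", &my_lock_key, 0);
 *
 * Acquire and release paths are then annotated with lock_acquire()
 * and lock_release() around the real locking operation.
 */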

struct pin_cookie { unsigned int val; };
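/*
 * Example (sketch): pinning asserts that a held lock is not dropped
 * behind the holder's back, e.g. across a callback:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...				// rq->lock must remain held here
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */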

#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

struct pin_cookie { };

#endif /* !CONFIG_LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */