/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock at single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

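/*
 * Usage sketch: because only the *address* of a lock_class_key serves as
 * identity, a subsystem can split a lock instance into its own class by
 * declaring a static key and re-keying the lock via the lockdep_set_class()
 * helper defined below (the my_* names are hypothetical):
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &my_dev_lock_key);
 */
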
extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Whether it's a crosslock.
	 */
	int				cross;
#endif
};

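/*
 * Usage sketch: lock implementations embed a lockdep_map next to the
 * architecture lock word, conventionally named "dep_map" so that the
 * lockdep_set_class()-style helpers below can find it. A simplified,
 * hypothetical lock type would look like:
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw_lock;
 *	#ifdef CONFIG_LOCKDEP
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 */
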
static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

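/*
 * Sketch of the "bit 0 is reused" trick above: since lock_list objects are
 * word-aligned, a flag can be packed into the low bit of the parent
 * pointer, roughly:
 *
 *	lock->parent = (void *)((unsigned long)parent | 1UL);	// mark
 *	parent = (void *)((unsigned long)lock->parent & ~1UL);	// unpack
 */
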
/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64 				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/*
	 * Generation id.
	 *
	 * A value of cross_gen_id will be stored when holding this,
	 * which is globally increased whenever each crosslock is held.
	 */
	unsigned int gen_id;
#endif
};

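/*
 * Conceptually, the chain key described at prev_chain_key above is
 * extended by one step per acquisition, along the lines of (hash() is a
 * placeholder for the actual mixing function):
 *
 *	new_chain_key = hash(prev_chain_key, hlock->class_idx);
 *
 * which makes a collision between two distinct dependency paths in the
 * 64-bit key space very unlikely.
 */
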
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5

/*
 * This is for keeping locks waiting for commit so that true dependencies
 * can be added at commit step.
 */
struct hist_lock {
	/*
	 * Id for each entry in the ring buffer. This is used to
	 * decide whether the ring buffer was overwritten or not.
	 *
	 * For example,
	 *
	 *           |<----------- hist_lock ring buffer size ------->|
	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
	 *
	 *           where 'p' represents an acquisition in process
	 *           context, 'i' represents an acquisition in irq
	 *           context.
	 *
	 * In this example, the ring buffer was overwritten by
	 * acquisitions in irq context, which should be detected on
	 * rollback or commit.
	 */
	unsigned int hist_id;

	/*
	 * Separate stack_trace data. This will be used at commit step.
	 */
	struct stack_trace	trace;
	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

/*
 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
 * be called instead of lockdep_init_map().
 */
struct cross_lock {
	/*
	 * When acquisitions of more than one crosslock overlap, we have
	 * to perform commit for them based on the cross_gen_id of the
	 * first acquisition, which allows us to add more true
	 * dependencies.
	 *
	 * Moreover, when no acquisition of a crosslock is in progress,
	 * we should not perform commit because the lock might not exist
	 * any more, which might cause incorrect memory access. So we
	 * have to track the number of acquisitions of a crosslock.
	 */
	int nr_acquire;

	/*
	 * Separate hlock instance. This will be used at commit step.
	 *
	 * TODO: Use a smaller data structure containing only necessary
	 * data. However, we should make lockdep code able to handle the
	 * smaller one first.
	 */
	struct held_lock	hlock;
};

struct lockdep_map_cross {
	struct lockdep_map map;
	struct cross_lock xlock;
};
#endif

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
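
/*
 * Usage sketch for the above: locks initialized by a shared helper all
 * land in one class, which can produce false reports; re-keying after
 * init splits them. Filesystems do this for directory-inode locks,
 * roughly (the my_* key name is hypothetical):
 *
 *	static struct lock_class_key my_dir_lock_key;
 *
 *	mutex_init(&inode->i_mutex);
 *	if (S_ISDIR(inode->i_mode))
 *		lockdep_set_class(&inode->i_mutex, &my_dir_lock_key);
 */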

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

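/*
 * Usage sketch: a locking primitive forwards its events to lockdep like
 * this (the my_* names are hypothetical):
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		// read=0: exclusive, check=1: full validation
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_spin_lock(&l->raw_lock);
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		arch_spin_unlock(&l->raw_lock);
 *	}
 */
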
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

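/*
 * Usage sketch: lockdep_is_held() is commonly used as the lockdep
 * condition in RCU accessors, e.g. (dev and ptr are hypothetical):
 *
 *	p = rcu_dereference_protected(dev->ptr,
 *				      lockdep_is_held(&dev->lock));
 */
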
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

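/*
 * Usage sketch: internal helpers that rely on the caller holding a lock
 * can document and enforce that (the my_* names are hypothetical):
 *
 *	static void my_update(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->count++;
 *	}
 */
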
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

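/*
 * Usage sketch: pinning catches code that unexpectedly drops and
 * re-acquires a lock while the caller assumes it stays held throughout;
 * the scheduler uses this on rq->lock, roughly:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	... code that must not release rq->lock ...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */
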
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
				       const char *name,
				       struct lock_class_key *key,
				       int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);

/*
 * What we essentially have to initialize is 'nr_acquire'. Other members
 * will be initialized in add_xlock().
 */
#define STATIC_CROSS_LOCK_INIT() \
	{ .nr_acquire = 0,}

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
	{ .map.name = (_name), .map.key = (void *)(_key), \
	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), .cross = 0, }

extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_invariant_state(bool force);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

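/*
 * Usage sketch: lock implementations wrap their slowpath with
 * LOCK_CONTENDED() so a contention event is only recorded when the
 * trylock fastpath fails (the my_* names are hypothetical):
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		... acquire-side annotation ...
 *		LOCK_CONTENDED(l, my_trylock, my_lock_slowpath);
 *	}
 */
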
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

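/*
 * Usage sketch: when two locks of the same class are taken in a fixed,
 * deadlock-free order, annotate the inner one so lockdep does not report
 * a recursive acquisition (parent/child are hypothetical):
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */
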
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

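/*
 * Usage sketch: the lock_map_*() helpers can also express pseudo-lock
 * dependencies that have no lock word at all, e.g. a flush-style "must
 * not be called while holding X" relation (names are hypothetical):
 *
 *	static struct lockdep_map my_flush_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_flush", &my_flush_map);
 *
 *	lock_map_acquire(&my_flush_map);
 *	run_pending_work();
 *	lock_map_release(&my_flush_map);
 */
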
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

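/*
 * Usage sketch: a function that only takes a lock on some paths can
 * still declare "this context must be allowed to take it" up front
 * (the my_* names are hypothetical):
 *
 *	int my_read(struct my_dev *dev, bool blocking)
 *	{
 *		if (blocking)
 *			might_lock(&dev->lock);
 *		...
 *	}
 */
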
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */