/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with the
 * single-depth subclass.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	int				name_version;
	const char			*name;

	short				wait_type_inner;
	short				wait_type_outer;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	short				wait_type_outer; /* can be taken in this context */
	short				wait_type_inner; /* represents this context */
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);
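
/*
 * Example (illustrative sketch, not part of the upstream header): an
 * object that wants its own lock class for a dynamically allocated lock
 * embeds a lock_class_key, registers it before use and unregisters it
 * before the memory is freed.  struct my_obj and its members are
 * hypothetical names.
 *
 *	struct my_obj {
 *		struct mutex lock;
 *		struct lock_class_key key;
 *	};
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	mutex_init(&obj->lock);
 *	lockdep_register_key(&obj->key);
 *	lockdep_set_class(&obj->lock, &obj->key);
 *	...
 *	lockdep_unregister_key(&obj->key);
 *	kfree(obj);
 */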

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, short inner, short outer);

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, short inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
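
/*
 * Example (sketch, hypothetical names): a subsystem-private locking
 * primitive can be hooked into the validator by embedding a lockdep_map
 * and initializing it once per instance; my_lock and my_lock_init() are
 * illustrative only.
 *
 *	struct my_lock {
 *		atomic_t		state;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	static void my_lock_init(struct my_lock *l)
 *	{
 *		lockdep_init_map(&l->dep_map, "my_lock", &my_lock_key, 0);
 *	}
 */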

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
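
/*
 * Example (sketch): one instance of a generic lock sometimes needs its
 * own class so the validator does not conflate it with all the other
 * instances; the dir_inode_key name and the i_rwsem re-classing below
 * are purely illustrative.
 *
 *	static struct lock_class_key dir_inode_key;
 *
 *	static void my_init_dir_inode(struct inode *inode)
 *	{
 *		lockdep_set_class(&inode->i_rwsem, &dir_inode_key);
 *	}
 */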

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
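
/*
 * Example (sketch): a hand-rolled locking primitive would annotate its
 * acquire and release paths roughly like this, using read=0 (exclusive)
 * and check=1 (full validation); my_lock()/my_unlock() and the
 * do_the_*locking() helpers are hypothetical.
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		do_the_locking(l);
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		do_the_unlocking(l);
 *	}
 */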

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
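
/*
 * Example (sketch): functions that rely on the caller holding a lock can
 * document and enforce that requirement; struct foo and update_stats()
 * are illustrative names.
 *
 *	static void update_stats(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->nr_updates++;
 *	}
 */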

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
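
/*
 * Example (sketch): lock pinning asserts that a lock stays held across a
 * region, even against code that would (wrongly) drop and retake it; the
 * scheduler uses this for rq->lock.  A hypothetical usage:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	// any release of rq->lock in here would trigger a warning
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */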

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should #ifdef the call site instead.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
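
/*
 * Example (sketch): a global map used purely for annotation can be
 * initialized statically, with its own address serving as the key, just
 * as static locks use their address; my_dep_map is an illustrative name.
 *
 *	static struct lockdep_map my_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_dep_map);
 */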

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
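
/*
 * Example (sketch): a lock implementation typically tries the fast path
 * first and wraps its slowpath in LOCK_CONTENDED() so that the
 * contended/acquired events reach the lock statistics; my_mutex_trylock()
 * and __my_mutex_lock_slowpath() are hypothetical names.
 *
 *	void my_mutex_lock(struct my_mutex *m)
 *	{
 *		mutex_acquire(&m->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(m, my_mutex_trylock, __my_mutex_lock_slowpath);
 *	}
 */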

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
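
/*
 * Example (sketch): when two locks of the same class legitimately nest,
 * e.g. a parent/child pair, the inner acquisition can be annotated with
 * SINGLE_DEPTH_NESTING so the validator does not flag it as a
 * self-deadlock; the parent/child names are illustrative.
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */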

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
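
/*
 * Example (sketch): the lock_map_*() helpers operate on a bare
 * lockdep_map, which allows "pseudo-locks" in the style of the workqueue
 * code: wrapping a region in acquire/release lets the validator see
 * dependencies against real locks even though nothing is actually
 * locked; my_work_map and run_callback() are illustrative names.
 *
 *	static struct lockdep_map my_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_map);
 *
 *	lock_map_acquire(&my_work_map);
 *	run_callback();
 *	lock_map_release(&my_work_map);
 */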

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)

#define lockdep_assert_in_irq() do {					\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirq_context,			\
			  "Not in hardirq as expected\n");		\
	} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
#endif
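
/*
 * Example (sketch): a function that only takes a lock on its slow path
 * can declare the potential acquisition up front with might_lock(), so
 * the dependency is recorded even on runs that never leave the fast
 * path; the my_dev names and helpers are illustrative.
 *
 *	ssize_t my_read(struct my_dev *dev, char *buf, size_t len)
 *	{
 *		might_lock(&dev->lock);
 *
 *		if (!fast_path(dev, buf, len)) {
 *			mutex_lock(&dev->lock);
 *			...
 *		}
 *		...
 *	}
 */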

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirq_context &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */