/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
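
/*
 * For example, with the HARDIRQ state from lockdep_states.h, the
 * LOCKDEP_STATE() block above expands to the enumerators:
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 */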

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
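
/*
 * Continuing the HARDIRQ example, __LOCKF() turns each usage bit into
 * a mask, e.g.:
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 *	LOCKF_ENABLED_HARDIRQ = (1 << LOCK_ENABLED_HARDIRQ),
 */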

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

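/*
 * A sketch of how these combined masks are queried (the real call
 * sites live in lockdep.c): a class accumulates its LOCKF_* bits in
 * its usage_mask, so a test along the lines of
 *
 *	if (class->usage_mask & LOCKF_USED_IN_IRQ)
 *
 * tells us the lock has been acquired in hardirq or softirq context.
 */
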
/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could go over this limit and cause system
 * boot-up problems. So, reduce the static allocations for the
 * lockdep-related structures so that everything fits within the
 * current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
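
/*
 * The factor 5 above appears to be an assumed average chain depth
 * (held locks per lock chain) used to size the chain_hlocks[] storage
 * in lockdep.c; it is a sizing heuristic, not a hard per-chain limit.
 */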

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
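
/*
 * LOCK_USAGE_STATES/2 yields two characters per IRQ state (one for
 * the write-usage bit, one for the read-usage bit); the leading 1
 * leaves room for the terminating '\0' that get_usage_chars() appends.
 */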

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
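
/*
 * The stubs above keep callers such as lockdep_proc.c building when
 * CONFIG_PROVE_LOCKING is off; the dependency counts then simply read
 * as zero.
 */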

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path and
 * we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}
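
/*
 * Typical use in lockdep.c, from paths that already run with IRQs
 * disabled, e.g. when a chain-hash lookup succeeds:
 *
 *	debug_atomic_inc(chain_lookup_hits);
 */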

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
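
/*
 * debug_atomic_read() is how the /proc/lockdep_stats code in
 * lockdep_proc.c folds the per-cpu counters into a single total,
 * along the lines of (sketch):
 *
 *	seq_printf(m, " chain lookup hits: %llu\n",
 *		   debug_atomic_read(chain_lookup_hits));
 */
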
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif