/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */
/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES,
};
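
/*
 * Illustrative expansion (a sketch, not part of the original header):
 * assuming lockdep_states.h contains LOCKDEP_STATE(HARDIRQ) and
 * LOCKDEP_STATE(SOFTIRQ), the x-macro above turns each such line into
 * four enumerators, e.g. for HARDIRQ:
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 */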

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
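
/*
 * Worked example (illustrative): within each state's group of four
 * usage bits above, bit 0 distinguishes read acquisitions and bit 1
 * distinguishes ENABLED from USED_IN. So for a usage bit such as
 * LOCK_ENABLED_HARDIRQ_READ:
 *
 *	bit & LOCK_USAGE_READ_MASK	-> non-zero (a _READ variant)
 *	bit & LOCK_USAGE_DIR_MASK	-> non-zero (an ENABLED variant)
 *	bit & LOCK_USAGE_STATE_MASK	-> selects the HARDIRQ state group
 */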

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
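
/*
 * Illustrative expansion (a sketch): __LOCKF(USED_IN_HARDIRQ) becomes
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 *
 * i.e. every LOCKF_* constant is the single-bit mask corresponding to
 * its LOCK_* usage-state bit.
 */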

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
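
/*
 * Illustrative expansion (a sketch): with lockdep_states.h listing
 * HARDIRQ and SOFTIRQ, the first initializer above preprocesses to
 *
 *	static const unsigned long LOCKF_ENABLED_IRQ =
 *		LOCKF_ENABLED_HARDIRQ |
 *		LOCKF_ENABLED_SOFTIRQ |
 *		0;
 *
 * The trailing "0" terminates the chain of "x |" fragments emitted by
 * the redefined LOCKDEP_STATE() macro.
 */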

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up
 * problems. So, reduce the static allocations for lockdep-related
 * structures so that everything fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
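/*
 * Illustrative sketch of those two uses (pseudo-code, not the real
 * implementation, which lives in kernel/locking/lockdep.c):
 *
 *	on acquiring lock N:
 *		for each lock H currently held:
 *			record the dependency <H -> N> in H's list
 *			    (consuming MAX_LOCKDEP_ENTRIES slots);
 *		if N can already reach some held H in the dependency
 *		graph, report a circular lock order, i.e. a potential
 *		deadlock.
 */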
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif
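
/*
 * Back-of-the-envelope footprint (illustrative, assuming the backing
 * array stores one unsigned long per entry, 8 bytes on 64-bit): the
 * stack-trace storage alone is
 *
 *	CONFIG_LOCKDEP_SMALL:	262144 * 8 = 2 MB
 *	default:		524288 * 8 = 4 MB
 *
 * which is why sparc's 32MB image limit forces the smaller tables.
 */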

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
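
/*
 * Worked values (illustrative): with the default
 * MAX_LOCKDEP_CHAINS_BITS of 16 this yields 1UL << 16 == 65536 chains
 * and 65536 * 5 == 327680 chain-hlock slots; CONFIG_LOCKDEP_SMALL
 * (15 bits) halves those to 32768 and 163840.
 */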

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
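
/*
 * Sketch of the resulting usage string (per the lockdep design
 * documentation, Documentation/locking/lockdep-design.rst):
 * get_usage_chars() emits one character per irq-state/read pair:
 *
 *	'.'	acquired while irqs disabled and not in irq context
 *	'-'	acquired in irq context
 *	'+'	acquired with irqs enabled
 *	'?'	acquired in irq context with irqs enabled
 */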

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in the fast path,
 * and we want to avoid too much cache-line bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
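
/*
 * Usage sketch (illustrative): a caller on the update path does
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *
 * which warns if irqs are enabled and bumps this CPU's counter, while
 * a reader such as the /proc/lockdep_stats code does
 *
 *	debug_atomic_read(chain_lookup_hits);
 *
 * which sums the per-cpu counters over all possible CPUs.
 */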

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	/* pointer difference gives @class's index in lock_classes[] */
	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif