// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 */
15 #include <linux/export.h>
16 #include <linux/proc_fs.h>
17 #include <linux/seq_file.h>
18 #include <linux/kallsyms.h>
19 #include <linux/debug_locks.h>
20 #include <linux/vmalloc.h>
21 #include <linux/sort.h>
22 #include <linux/uaccess.h>
23 #include <asm/div64.h>
25 #include "lockdep_internals.h"
/*
 * Since iteration of lock_classes is done without holding the lockdep lock,
 * it is not safe to iterate all_lock_classes list directly as the iteration
 * may branch off to free_lock_classes or the zapped list. Iteration is done
 * directly on the lock_classes array by checking the lock_classes_in_use
 * bitmap and max_lock_class_idx.
 */
#define iterate_lock_classes(idx, class)				\
	for (idx = 0, class = lock_classes; idx <= max_lock_class_idx;	\
	     idx++, class++)
38 static void *l_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
40 struct lock_class
*class = v
;
43 *pos
= class - lock_classes
;
44 return (*pos
> max_lock_class_idx
) ? NULL
: class;
47 static void *l_start(struct seq_file
*m
, loff_t
*pos
)
49 unsigned long idx
= *pos
;
51 if (idx
> max_lock_class_idx
)
53 return lock_classes
+ idx
;
/* seq_file .stop for /proc/lockdep: nothing to release. */
static void l_stop(struct seq_file *m, void *v)
{
}
60 static void print_name(struct seq_file
*m
, struct lock_class
*class)
62 char str
[KSYM_NAME_LEN
];
63 const char *name
= class->name
;
66 name
= __get_key_name(class->key
, str
);
67 seq_printf(m
, "%s", name
);
69 seq_printf(m
, "%s", name
);
70 if (class->name_version
> 1)
71 seq_printf(m
, "#%d", class->name_version
);
73 seq_printf(m
, "/%d", class->subclass
);
77 static int l_show(struct seq_file
*m
, void *v
)
79 struct lock_class
*class = v
;
80 struct lock_list
*entry
;
81 char usage
[LOCK_USAGE_CHARS
];
82 int idx
= class - lock_classes
;
84 if (v
== lock_classes
)
85 seq_printf(m
, "all lock classes:\n");
87 if (!test_bit(idx
, lock_classes_in_use
))
90 seq_printf(m
, "%p", class->key
);
91 #ifdef CONFIG_DEBUG_LOCKDEP
92 seq_printf(m
, " OPS:%8ld", debug_class_ops_read(class));
94 if (IS_ENABLED(CONFIG_PROVE_LOCKING
)) {
95 seq_printf(m
, " FD:%5ld", lockdep_count_forward_deps(class));
96 seq_printf(m
, " BD:%5ld", lockdep_count_backward_deps(class));
98 get_usage_chars(class, usage
);
99 seq_printf(m
, " %s", usage
);
103 print_name(m
, class);
106 if (IS_ENABLED(CONFIG_PROVE_LOCKING
)) {
107 list_for_each_entry(entry
, &class->locks_after
, entry
) {
108 if (entry
->distance
== 1) {
109 seq_printf(m
, " -> [%p] ", entry
->class->key
);
110 print_name(m
, entry
->class);
120 static const struct seq_operations lockdep_ops
= {
127 #ifdef CONFIG_PROVE_LOCKING
128 static void *lc_start(struct seq_file
*m
, loff_t
*pos
)
134 return SEQ_START_TOKEN
;
136 return lock_chains
+ (*pos
- 1);
139 static void *lc_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
141 *pos
= lockdep_next_lockchain(*pos
- 1) + 1;
142 return lc_start(m
, pos
);
/* seq_file .stop for /proc/lockdep_chains: nothing to release. */
static void lc_stop(struct seq_file *m, void *v)
{
}
149 static int lc_show(struct seq_file
*m
, void *v
)
151 struct lock_chain
*chain
= v
;
152 struct lock_class
*class;
154 static const char * const irq_strs
[] = {
156 [LOCK_CHAIN_HARDIRQ_CONTEXT
] = "hardirq",
157 [LOCK_CHAIN_SOFTIRQ_CONTEXT
] = "softirq",
158 [LOCK_CHAIN_SOFTIRQ_CONTEXT
|
159 LOCK_CHAIN_HARDIRQ_CONTEXT
] = "hardirq|softirq",
162 if (v
== SEQ_START_TOKEN
) {
163 if (!nr_free_chain_hlocks
)
164 seq_printf(m
, "(buggered) ");
165 seq_printf(m
, "all lock chains:\n");
169 seq_printf(m
, "irq_context: %s\n", irq_strs
[chain
->irq_context
]);
171 for (i
= 0; i
< chain
->depth
; i
++) {
172 class = lock_chain_get_class(chain
, i
);
176 seq_printf(m
, "[%p] ", class->key
);
177 print_name(m
, class);
185 static const struct seq_operations lockdep_chains_ops
= {
191 #endif /* CONFIG_PROVE_LOCKING */
/*
 * Append CONFIG_DEBUG_LOCKDEP-only counters (chain lookups, graph checks,
 * hard/softirq state transition events) to the /proc/lockdep_stats output.
 * Compiles to an empty function when CONFIG_DEBUG_LOCKDEP is off.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
			   hi2 = debug_atomic_read(hardirqs_off_events),
			   hr1 = debug_atomic_read(redundant_hardirqs_on),
			   hr2 = debug_atomic_read(redundant_hardirqs_off),
			   si1 = debug_atomic_read(softirqs_on_events),
			   si2 = debug_atomic_read(softirqs_off_events),
			   sr1 = debug_atomic_read(redundant_softirqs_on),
			   sr2 = debug_atomic_read(redundant_softirqs_off);

	seq_printf(m, " chain lookup misses:           %11llu\n",
		debug_atomic_read(chain_lookup_misses));
	seq_printf(m, " chain lookup hits:             %11llu\n",
		debug_atomic_read(chain_lookup_hits));
	seq_printf(m, " cyclic checks:                 %11llu\n",
		debug_atomic_read(nr_cyclic_checks));
	seq_printf(m, " redundant checks:              %11llu\n",
		debug_atomic_read(nr_redundant_checks));
	seq_printf(m, " redundant links:               %11llu\n",
		debug_atomic_read(nr_redundant));
	seq_printf(m, " find-mask forwards checks:     %11llu\n",
		debug_atomic_read(nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask backwards checks:    %11llu\n",
		debug_atomic_read(nr_find_usage_backwards_checks));

	seq_printf(m, " hardirq on events:             %11llu\n", hi1);
	seq_printf(m, " hardirq off events:            %11llu\n", hi2);
	seq_printf(m, " redundant hardirq ons:         %11llu\n", hr1);
	seq_printf(m, " redundant hardirq offs:        %11llu\n", hr2);
	seq_printf(m, " softirq on events:             %11llu\n", si1);
	seq_printf(m, " softirq off events:            %11llu\n", si2);
	seq_printf(m, " redundant softirq ons:         %11llu\n", sr1);
	seq_printf(m, " redundant softirq offs:        %11llu\n", sr2);
#endif
}
231 static int lockdep_stats_show(struct seq_file
*m
, void *v
)
233 unsigned long nr_unused
= 0, nr_uncategorized
= 0,
234 nr_irq_safe
= 0, nr_irq_unsafe
= 0,
235 nr_softirq_safe
= 0, nr_softirq_unsafe
= 0,
236 nr_hardirq_safe
= 0, nr_hardirq_unsafe
= 0,
237 nr_irq_read_safe
= 0, nr_irq_read_unsafe
= 0,
238 nr_softirq_read_safe
= 0, nr_softirq_read_unsafe
= 0,
239 nr_hardirq_read_safe
= 0, nr_hardirq_read_unsafe
= 0,
240 sum_forward_deps
= 0;
242 #ifdef CONFIG_PROVE_LOCKING
243 struct lock_class
*class;
246 iterate_lock_classes(idx
, class) {
247 if (!test_bit(idx
, lock_classes_in_use
))
250 if (class->usage_mask
== 0)
252 if (class->usage_mask
== LOCKF_USED
)
254 if (class->usage_mask
& LOCKF_USED_IN_IRQ
)
256 if (class->usage_mask
& LOCKF_ENABLED_IRQ
)
258 if (class->usage_mask
& LOCKF_USED_IN_SOFTIRQ
)
260 if (class->usage_mask
& LOCKF_ENABLED_SOFTIRQ
)
262 if (class->usage_mask
& LOCKF_USED_IN_HARDIRQ
)
264 if (class->usage_mask
& LOCKF_ENABLED_HARDIRQ
)
266 if (class->usage_mask
& LOCKF_USED_IN_IRQ_READ
)
268 if (class->usage_mask
& LOCKF_ENABLED_IRQ_READ
)
269 nr_irq_read_unsafe
++;
270 if (class->usage_mask
& LOCKF_USED_IN_SOFTIRQ_READ
)
271 nr_softirq_read_safe
++;
272 if (class->usage_mask
& LOCKF_ENABLED_SOFTIRQ_READ
)
273 nr_softirq_read_unsafe
++;
274 if (class->usage_mask
& LOCKF_USED_IN_HARDIRQ_READ
)
275 nr_hardirq_read_safe
++;
276 if (class->usage_mask
& LOCKF_ENABLED_HARDIRQ_READ
)
277 nr_hardirq_read_unsafe
++;
279 sum_forward_deps
+= lockdep_count_forward_deps(class);
282 #ifdef CONFIG_DEBUG_LOCKDEP
283 DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks
) != nr_unused
);
287 seq_printf(m
, " lock-classes: %11lu [max: %lu]\n",
288 nr_lock_classes
, MAX_LOCKDEP_KEYS
);
289 seq_printf(m
, " direct dependencies: %11lu [max: %lu]\n",
290 nr_list_entries
, MAX_LOCKDEP_ENTRIES
);
291 seq_printf(m
, " indirect dependencies: %11lu\n",
295 * Total number of dependencies:
297 * All irq-safe locks may nest inside irq-unsafe locks,
298 * plus all the other known dependencies:
300 seq_printf(m
, " all direct dependencies: %11lu\n",
301 nr_irq_unsafe
* nr_irq_safe
+
302 nr_hardirq_unsafe
* nr_hardirq_safe
+
305 #ifdef CONFIG_PROVE_LOCKING
306 seq_printf(m
, " dependency chains: %11lu [max: %lu]\n",
307 lock_chain_count(), MAX_LOCKDEP_CHAINS
);
308 seq_printf(m
, " dependency chain hlocks used: %11lu [max: %lu]\n",
309 MAX_LOCKDEP_CHAIN_HLOCKS
-
310 (nr_free_chain_hlocks
+ nr_lost_chain_hlocks
),
311 MAX_LOCKDEP_CHAIN_HLOCKS
);
312 seq_printf(m
, " dependency chain hlocks lost: %11u\n",
313 nr_lost_chain_hlocks
);
316 #ifdef CONFIG_TRACE_IRQFLAGS
317 seq_printf(m
, " in-hardirq chains: %11u\n",
319 seq_printf(m
, " in-softirq chains: %11u\n",
322 seq_printf(m
, " in-process chains: %11u\n",
324 seq_printf(m
, " stack-trace entries: %11lu [max: %lu]\n",
325 nr_stack_trace_entries
, MAX_STACK_TRACE_ENTRIES
);
326 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
327 seq_printf(m
, " number of stack traces: %11llu\n",
328 lockdep_stack_trace_count());
329 seq_printf(m
, " number of stack hash chains: %11llu\n",
330 lockdep_stack_hash_count());
332 seq_printf(m
, " combined max dependencies: %11u\n",
333 (nr_hardirq_chains
+ 1) *
334 (nr_softirq_chains
+ 1) *
335 (nr_process_chains
+ 1)
337 seq_printf(m
, " hardirq-safe locks: %11lu\n",
339 seq_printf(m
, " hardirq-unsafe locks: %11lu\n",
341 seq_printf(m
, " softirq-safe locks: %11lu\n",
343 seq_printf(m
, " softirq-unsafe locks: %11lu\n",
345 seq_printf(m
, " irq-safe locks: %11lu\n",
347 seq_printf(m
, " irq-unsafe locks: %11lu\n",
350 seq_printf(m
, " hardirq-read-safe locks: %11lu\n",
351 nr_hardirq_read_safe
);
352 seq_printf(m
, " hardirq-read-unsafe locks: %11lu\n",
353 nr_hardirq_read_unsafe
);
354 seq_printf(m
, " softirq-read-safe locks: %11lu\n",
355 nr_softirq_read_safe
);
356 seq_printf(m
, " softirq-read-unsafe locks: %11lu\n",
357 nr_softirq_read_unsafe
);
358 seq_printf(m
, " irq-read-safe locks: %11lu\n",
360 seq_printf(m
, " irq-read-unsafe locks: %11lu\n",
363 seq_printf(m
, " uncategorized locks: %11lu\n",
365 seq_printf(m
, " unused locks: %11lu\n",
367 seq_printf(m
, " max locking depth: %11u\n",
369 #ifdef CONFIG_PROVE_LOCKING
370 seq_printf(m
, " max bfs queue depth: %11u\n",
371 max_bfs_queue_depth
);
373 seq_printf(m
, " max lock class index: %11lu\n",
375 lockdep_stats_debug_show(m
);
376 seq_printf(m
, " debug_locks: %11u\n",
380 * Zapped classes and lockdep data buffers reuse statistics.
383 seq_printf(m
, " zapped classes: %11lu\n",
385 #ifdef CONFIG_PROVE_LOCKING
386 seq_printf(m
, " zapped lock chains: %11lu\n",
387 nr_zapped_lock_chains
);
388 seq_printf(m
, " large chain blocks: %11u\n",
389 nr_large_chain_blocks
);
394 #ifdef CONFIG_LOCK_STAT
396 struct lock_stat_data
{
397 struct lock_class
*class;
398 struct lock_class_stats stats
;
401 struct lock_stat_seq
{
402 struct lock_stat_data
*iter_end
;
403 struct lock_stat_data stats
[MAX_LOCKDEP_KEYS
];
407 * sort on absolute number of contentions
409 static int lock_stat_cmp(const void *l
, const void *r
)
411 const struct lock_stat_data
*dl
= l
, *dr
= r
;
412 unsigned long nl
, nr
;
414 nl
= dl
->stats
.read_waittime
.nr
+ dl
->stats
.write_waittime
.nr
;
415 nr
= dr
->stats
.read_waittime
.nr
+ dr
->stats
.write_waittime
.nr
;
/* Emit 'offset' spaces followed by 'length' copies of c, then a newline. */
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int i;

	for (i = 0; i < offset; i++)
		seq_puts(m, " ");
	for (i = 0; i < length; i++)
		seq_printf(m, "%c", c);
	seq_puts(m, "\n");
}
431 static void snprint_time(char *buf
, size_t bufsiz
, s64 nr
)
436 nr
+= 5; /* for display rounding */
437 div
= div_s64_rem(nr
, 1000, &rem
);
438 snprintf(buf
, bufsiz
, "%lld.%02d", (long long)div
, (int)rem
/10);
441 static void seq_time(struct seq_file
*m
, s64 time
)
445 snprint_time(num
, sizeof(num
), time
);
446 seq_printf(m
, " %14s", num
);
449 static void seq_lock_time(struct seq_file
*m
, struct lock_time
*lt
)
451 seq_printf(m
, "%14lu", lt
->nr
);
452 seq_time(m
, lt
->min
);
453 seq_time(m
, lt
->max
);
454 seq_time(m
, lt
->total
);
455 seq_time(m
, lt
->nr
? div64_u64(lt
->total
, lt
->nr
) : 0);
458 static void seq_stats(struct seq_file
*m
, struct lock_stat_data
*data
)
460 const struct lockdep_subclass_key
*ckey
;
461 struct lock_class_stats
*stats
;
462 struct lock_class
*class;
468 stats
= &data
->stats
;
471 if (class->name_version
> 1)
472 namelen
-= 2; /* XXX truncates versions > 9 */
476 rcu_read_lock_sched();
477 cname
= rcu_dereference_sched(class->name
);
478 ckey
= rcu_dereference_sched(class->key
);
480 if (!cname
&& !ckey
) {
481 rcu_read_unlock_sched();
485 char str
[KSYM_NAME_LEN
];
486 const char *key_name
;
488 key_name
= __get_key_name(ckey
, str
);
489 snprintf(name
, namelen
, "%s", key_name
);
491 snprintf(name
, namelen
, "%s", cname
);
493 rcu_read_unlock_sched();
495 namelen
= strlen(name
);
496 if (class->name_version
> 1) {
497 snprintf(name
+namelen
, 3, "#%d", class->name_version
);
500 if (class->subclass
) {
501 snprintf(name
+namelen
, 3, "/%d", class->subclass
);
505 if (stats
->write_holdtime
.nr
) {
506 if (stats
->read_holdtime
.nr
)
507 seq_printf(m
, "%38s-W:", name
);
509 seq_printf(m
, "%40s:", name
);
511 seq_printf(m
, "%14lu ", stats
->bounces
[bounce_contended_write
]);
512 seq_lock_time(m
, &stats
->write_waittime
);
513 seq_printf(m
, " %14lu ", stats
->bounces
[bounce_acquired_write
]);
514 seq_lock_time(m
, &stats
->write_holdtime
);
518 if (stats
->read_holdtime
.nr
) {
519 seq_printf(m
, "%38s-R:", name
);
520 seq_printf(m
, "%14lu ", stats
->bounces
[bounce_contended_read
]);
521 seq_lock_time(m
, &stats
->read_waittime
);
522 seq_printf(m
, " %14lu ", stats
->bounces
[bounce_acquired_read
]);
523 seq_lock_time(m
, &stats
->read_holdtime
);
527 if (stats
->read_waittime
.nr
+ stats
->write_waittime
.nr
== 0)
530 if (stats
->read_holdtime
.nr
)
533 for (i
= 0; i
< LOCKSTAT_POINTS
; i
++) {
536 if (class->contention_point
[i
] == 0)
540 seq_line(m
, '-', 40-namelen
, namelen
);
542 snprintf(ip
, sizeof(ip
), "[<%p>]",
543 (void *)class->contention_point
[i
]);
544 seq_printf(m
, "%40s %14lu %29s %pS\n",
545 name
, stats
->contention_point
[i
],
546 ip
, (void *)class->contention_point
[i
]);
548 for (i
= 0; i
< LOCKSTAT_POINTS
; i
++) {
551 if (class->contending_point
[i
] == 0)
555 seq_line(m
, '-', 40-namelen
, namelen
);
557 snprintf(ip
, sizeof(ip
), "[<%p>]",
558 (void *)class->contending_point
[i
]);
559 seq_printf(m
, "%40s %14lu %29s %pS\n",
560 name
, stats
->contending_point
[i
],
561 ip
, (void *)class->contending_point
[i
]);
565 seq_line(m
, '.', 0, 40 + 1 + 12 * (14 + 1));
570 static void seq_header(struct seq_file
*m
)
572 seq_puts(m
, "lock_stat version 0.4\n");
574 if (unlikely(!debug_locks
))
575 seq_printf(m
, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
577 seq_line(m
, '-', 0, 40 + 1 + 12 * (14 + 1));
578 seq_printf(m
, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
593 seq_line(m
, '-', 0, 40 + 1 + 12 * (14 + 1));
597 static void *ls_start(struct seq_file
*m
, loff_t
*pos
)
599 struct lock_stat_seq
*data
= m
->private;
600 struct lock_stat_data
*iter
;
603 return SEQ_START_TOKEN
;
605 iter
= data
->stats
+ (*pos
- 1);
606 if (iter
>= data
->iter_end
)
612 static void *ls_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
615 return ls_start(m
, pos
);
/* seq_file .stop for /proc/lock_stat: nothing to release. */
static void ls_stop(struct seq_file *m, void *v)
{
}
622 static int ls_show(struct seq_file
*m
, void *v
)
624 if (v
== SEQ_START_TOKEN
)
632 static const struct seq_operations lockstat_ops
= {
639 static int lock_stat_open(struct inode
*inode
, struct file
*file
)
642 struct lock_class
*class;
643 struct lock_stat_seq
*data
= vmalloc(sizeof(struct lock_stat_seq
));
648 res
= seq_open(file
, &lockstat_ops
);
650 struct lock_stat_data
*iter
= data
->stats
;
651 struct seq_file
*m
= file
->private_data
;
654 iterate_lock_classes(idx
, class) {
655 if (!test_bit(idx
, lock_classes_in_use
))
658 iter
->stats
= lock_stats(class);
662 data
->iter_end
= iter
;
664 sort(data
->stats
, data
->iter_end
- data
->stats
,
665 sizeof(struct lock_stat_data
),
666 lock_stat_cmp
, NULL
);
675 static ssize_t
lock_stat_write(struct file
*file
, const char __user
*buf
,
676 size_t count
, loff_t
*ppos
)
678 struct lock_class
*class;
683 if (get_user(c
, buf
))
689 iterate_lock_classes(idx
, class) {
690 if (!test_bit(idx
, lock_classes_in_use
))
692 clear_lock_stats(class);
698 static int lock_stat_release(struct inode
*inode
, struct file
*file
)
700 struct seq_file
*seq
= file
->private_data
;
703 return seq_release(inode
, file
);
706 static const struct proc_ops lock_stat_proc_ops
= {
707 .proc_open
= lock_stat_open
,
708 .proc_write
= lock_stat_write
,
709 .proc_read
= seq_read
,
710 .proc_lseek
= seq_lseek
,
711 .proc_release
= lock_stat_release
,
713 #endif /* CONFIG_LOCK_STAT */
715 static int __init
lockdep_proc_init(void)
717 proc_create_seq("lockdep", S_IRUSR
, NULL
, &lockdep_ops
);
718 #ifdef CONFIG_PROVE_LOCKING
719 proc_create_seq("lockdep_chains", S_IRUSR
, NULL
, &lockdep_chains_ops
);
721 proc_create_single("lockdep_stats", S_IRUSR
, NULL
, lockdep_stats_show
);
722 #ifdef CONFIG_LOCK_STAT
723 proc_create("lock_stat", S_IRUSR
| S_IWUSR
, NULL
, &lock_stat_proc_ops
);
729 __initcall(lockdep_proc_init
);