/*
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * This code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 *  - lock inversion scenarios
 *  - circular lock dependencies
 *  - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *lockdep_selftest_task_struct;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
		/*
		 * The lockdep graph lock isn't locked while we expect it to
		 * be, we're confused now, bye!
		 */
		return DEBUG_LOCKS_WARN_ON(1);
	}

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);

/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that is
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	u64 holdtime;
	struct lock_class_stats *stats;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
/*
 * We keep a global list of all lock classes. The list is only accessed with
 * the lockdep spinlock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);

/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 *	are about to be freed.
 */
struct pending_free {
	struct list_head zapped;
	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};

/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head: Used to schedule an RCU callback for freeing data structures.
 * @index: Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf: Array with information about data structures about to be freed.
 */
static struct delayed_free {
	struct rcu_head		rcu_head;
	int			index;
	int			scheduled;
	struct pending_free	pf[2];
} delayed_free;

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
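/*
 * Illustrative sketch (not part of the validator itself): the chain key for
 * a context's held locks is built up incrementally, one class index at a
 * time, e.g.
 *
 *	u64 chain_key = 0;	(the task's initial chain key)
 *	chain_key = iterate_chain_key(chain_key, idx_of_first_lock);
 *	chain_key = iterate_chain_key(chain_key, idx_of_second_lock);
 *
 * Two contexts that take the same lock classes in the same order compute the
 * same chain_key, which is what lets the chain hash table recognise an
 * already-validated lock sequence and skip re-checking it.
 */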
void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);
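/*
 * Illustrative usage sketch: lockdep_off()/lockdep_on() bracket a region in
 * which lockdep must ignore locking operations. They only bump and drop the
 * per-task recursion counter, so calls have to be balanced:
 *
 *	lockdep_off();
 *	... locking that lockdep should not track or validate ...
 *	lockdep_on();
 */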
void lockdep_set_selftest_task(struct task_struct *task)
{
	lockdep_selftest_task_struct = task;
}
/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

static int save_trace(struct lock_trace *trace)
{
	unsigned long *entries = stack_trace + nr_stack_trace_entries;
	unsigned int max_entries;

	trace->offset = nr_stack_trace_entries;
	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};
const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
			c = '?';
	}

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE)						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}
static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 */
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
	barrier();

	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%p", hlock->instance);
	print_lock_name(lock_classes + class_idx - 1);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *p)
{
	int i, depth = READ_ONCE(p->lockdep_depth);

	if (!depth)
		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
	else
		printk("%d lock%s held by %s/%d:\n", depth,
		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
	if (p->state == TASK_RUNNING && p != current)
		return;
	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(p->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}
/*
 * Is this the address of a static object:
 */
static int static_obj(const void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	if (arch_is_kernel_initmem_freed(addr))
		return 0;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter. The caller must hold the graph
 * lock.
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
static inline struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}
/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

	/*
	 * lockdep_free_key_range() assumes that struct lock_class_key
	 * objects do not overlap. Since we use the address of lock
	 * objects as class key for static objects, check whether the
	 * size of lock_class_key objects does not exceed the size of
	 * the smallest lock object.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("the code is fine but needs lockdep annotation.\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}
781 #ifdef CONFIG_DEBUG_LOCKDEP
783 /* Check whether element @e occurs in list @h */
784 static bool in_list(struct list_head
*e
, struct list_head
*h
)
788 list_for_each(f
, h
) {
797 * Check whether entry @e occurs in any of the locks_after or locks_before
800 static bool in_any_class_list(struct list_head
*e
)
802 struct lock_class
*class;
805 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
806 class = &lock_classes
[i
];
807 if (in_list(e
, &class->locks_after
) ||
808 in_list(e
, &class->locks_before
))
814 static bool class_lock_list_valid(struct lock_class
*c
, struct list_head
*h
)
818 list_for_each_entry(e
, h
, entry
) {
819 if (e
->links_to
!= c
) {
820 printk(KERN_INFO
"class %s: mismatch for lock entry %ld; class %s <> %s",
822 (unsigned long)(e
- list_entries
),
823 e
->links_to
&& e
->links_to
->name
?
824 e
->links_to
->name
: "(?)",
825 e
->class && e
->class->name
? e
->class->name
:
833 #ifdef CONFIG_PROVE_LOCKING
834 static u16 chain_hlocks
[MAX_LOCKDEP_CHAIN_HLOCKS
];
837 static bool check_lock_chain_key(struct lock_chain
*chain
)
839 #ifdef CONFIG_PROVE_LOCKING
843 for (i
= chain
->base
; i
< chain
->base
+ chain
->depth
; i
++)
844 chain_key
= iterate_chain_key(chain_key
, chain_hlocks
[i
] + 1);
846 * The 'unsigned long long' casts avoid that a compiler warning
847 * is reported when building tools/lib/lockdep.
849 if (chain
->chain_key
!= chain_key
) {
850 printk(KERN_INFO
"chain %lld: key %#llx <> %#llx\n",
851 (unsigned long long)(chain
- lock_chains
),
852 (unsigned long long)chain
->chain_key
,
853 (unsigned long long)chain_key
);
860 static bool in_any_zapped_class_list(struct lock_class
*class)
862 struct pending_free
*pf
;
865 for (i
= 0, pf
= delayed_free
.pf
; i
< ARRAY_SIZE(delayed_free
.pf
); i
++, pf
++) {
866 if (in_list(&class->lock_entry
, &pf
->zapped
))
873 static bool __check_data_structures(void)
875 struct lock_class
*class;
876 struct lock_chain
*chain
;
877 struct hlist_head
*head
;
881 /* Check whether all classes occur in a lock list. */
882 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
883 class = &lock_classes
[i
];
884 if (!in_list(&class->lock_entry
, &all_lock_classes
) &&
885 !in_list(&class->lock_entry
, &free_lock_classes
) &&
886 !in_any_zapped_class_list(class)) {
887 printk(KERN_INFO
"class %px/%s is not in any class list\n",
888 class, class->name
? : "(?)");
893 /* Check whether all classes have valid lock lists. */
894 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
895 class = &lock_classes
[i
];
896 if (!class_lock_list_valid(class, &class->locks_before
))
898 if (!class_lock_list_valid(class, &class->locks_after
))
902 /* Check the chain_key of all lock chains. */
903 for (i
= 0; i
< ARRAY_SIZE(chainhash_table
); i
++) {
904 head
= chainhash_table
+ i
;
905 hlist_for_each_entry_rcu(chain
, head
, entry
) {
906 if (!check_lock_chain_key(chain
))
912 * Check whether all list entries that are in use occur in a class
915 for_each_set_bit(i
, list_entries_in_use
, ARRAY_SIZE(list_entries
)) {
916 e
= list_entries
+ i
;
917 if (!in_any_class_list(&e
->entry
)) {
918 printk(KERN_INFO
"list entry %d is not in any class list; class %s <> %s\n",
919 (unsigned int)(e
- list_entries
),
920 e
->class->name
? : "(?)",
921 e
->links_to
->name
? : "(?)");
927 * Check whether all list entries that are not in use do not occur in
930 for_each_clear_bit(i
, list_entries_in_use
, ARRAY_SIZE(list_entries
)) {
931 e
= list_entries
+ i
;
932 if (in_any_class_list(&e
->entry
)) {
933 printk(KERN_INFO
"list entry %d occurs in a class list; class %s <> %s\n",
934 (unsigned int)(e
- list_entries
),
935 e
->class && e
->class->name
? e
->class->name
:
937 e
->links_to
&& e
->links_to
->name
?
938 e
->links_to
->name
: "(?)");
946 int check_consistency
= 0;
947 module_param(check_consistency
, int, 0644);
949 static void check_data_structures(void)
951 static bool once
= false;
953 if (check_consistency
&& !once
) {
954 if (!__check_data_structures()) {
961 #else /* CONFIG_DEBUG_LOCKDEP */
963 static inline void check_data_structures(void) { }
965 #endif /* CONFIG_DEBUG_LOCKDEP */
968 * Initialize the lock_classes[] array elements, the free_lock_classes list
969 * and also the delayed_free structure.
971 static void init_data_structures_once(void)
973 static bool ds_initialized
, rcu_head_initialized
;
976 if (likely(rcu_head_initialized
))
979 if (system_state
>= SYSTEM_SCHEDULING
) {
980 init_rcu_head(&delayed_free
.rcu_head
);
981 rcu_head_initialized
= true;
987 ds_initialized
= true;
989 INIT_LIST_HEAD(&delayed_free
.pf
[0].zapped
);
990 INIT_LIST_HEAD(&delayed_free
.pf
[1].zapped
);
992 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
993 list_add_tail(&lock_classes
[i
].lock_entry
, &free_lock_classes
);
994 INIT_LIST_HEAD(&lock_classes
[i
].locks_after
);
995 INIT_LIST_HEAD(&lock_classes
[i
].locks_before
);
999 static inline struct hlist_head
*keyhashentry(const struct lock_class_key
*key
)
1001 unsigned long hash
= hash_long((uintptr_t)key
, KEYHASH_BITS
);
1003 return lock_keys_hash
+ hash
;
1006 /* Register a dynamically allocated key. */
1007 void lockdep_register_key(struct lock_class_key
*key
)
1009 struct hlist_head
*hash_head
;
1010 struct lock_class_key
*k
;
1011 unsigned long flags
;
1013 if (WARN_ON_ONCE(static_obj(key
)))
1015 hash_head
= keyhashentry(key
);
1017 raw_local_irq_save(flags
);
1020 hlist_for_each_entry_rcu(k
, hash_head
, hash_entry
) {
1021 if (WARN_ON_ONCE(k
== key
))
1024 hlist_add_head_rcu(&key
->hash_entry
, hash_head
);
1028 raw_local_irq_restore(flags
);
1030 EXPORT_SYMBOL_GPL(lockdep_register_key
);
1032 /* Check whether a key has been registered as a dynamic key. */
1033 static bool is_dynamic_key(const struct lock_class_key
*key
)
1035 struct hlist_head
*hash_head
;
1036 struct lock_class_key
*k
;
1039 if (WARN_ON_ONCE(static_obj(key
)))
1043 * If lock debugging is disabled lock_keys_hash[] may contain
1044 * pointers to memory that has already been freed. Avoid triggering
1045 * a use-after-free in that case by returning early.
1050 hash_head
= keyhashentry(key
);
1053 hlist_for_each_entry_rcu(k
, hash_head
, hash_entry
) {
1065 * Register a lock's class in the hash-table, if the class is not present
1066 * yet. Otherwise we look it up. We cache the result in the lock object
1067 * itself, so actual lookup of the hash should be once per lock object.
1069 static struct lock_class
*
1070 register_lock_class(struct lockdep_map
*lock
, unsigned int subclass
, int force
)
1072 struct lockdep_subclass_key
*key
;
1073 struct hlist_head
*hash_head
;
1074 struct lock_class
*class;
1076 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1078 class = look_up_lock_class(lock
, subclass
);
1080 goto out_set_class_cache
;
1083 if (!assign_lock_key(lock
))
1085 } else if (!static_obj(lock
->key
) && !is_dynamic_key(lock
->key
)) {
1089 key
= lock
->key
->subkeys
+ subclass
;
1090 hash_head
= classhashentry(key
);
1092 if (!graph_lock()) {
1096 * We have to do the hash-walk again, to avoid races
1099 hlist_for_each_entry_rcu(class, hash_head
, hash_entry
) {
1100 if (class->key
== key
)
1101 goto out_unlock_set
;
1104 init_data_structures_once();
1106 /* Allocate a new lock class and add it to the hash. */
1107 class = list_first_entry_or_null(&free_lock_classes
, typeof(*class),
1110 if (!debug_locks_off_graph_unlock()) {
1114 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1119 debug_atomic_inc(nr_unused_locks
);
1121 class->name
= lock
->name
;
1122 class->subclass
= subclass
;
1123 WARN_ON_ONCE(!list_empty(&class->locks_before
));
1124 WARN_ON_ONCE(!list_empty(&class->locks_after
));
1125 class->name_version
= count_matching_names(class);
1127 * We use RCU's safe list-add method to make
1128 * parallel walking of the hash-list safe:
1130 hlist_add_head_rcu(&class->hash_entry
, hash_head
);
1132 * Remove the class from the free list and add it to the global list
1135 list_move_tail(&class->lock_entry
, &all_lock_classes
);
1137 if (verbose(class)) {
1140 printk("\nnew class %px: %s", class->key
, class->name
);
1141 if (class->name_version
> 1)
1142 printk(KERN_CONT
"#%d", class->name_version
);
1143 printk(KERN_CONT
"\n");
1146 if (!graph_lock()) {
1153 out_set_class_cache
:
1154 if (!subclass
|| force
)
1155 lock
->class_cache
[0] = class;
1156 else if (subclass
< NR_LOCKDEP_CACHING_CLASSES
)
1157 lock
->class_cache
[subclass
] = class;
1160 * Hash collision, did we smoke some? We found a class with a matching
1161 * hash but the subclass -- which is hashed in -- didn't match.
1163 if (DEBUG_LOCKS_WARN_ON(class->subclass
!= subclass
))
1169 #ifdef CONFIG_PROVE_LOCKING
1171 * Allocate a lockdep entry. (assumes the graph_lock held, returns
1172 * with NULL on failure)
1174 static struct lock_list
*alloc_list_entry(void)
1176 int idx
= find_first_zero_bit(list_entries_in_use
,
1177 ARRAY_SIZE(list_entries
));
1179 if (idx
>= ARRAY_SIZE(list_entries
)) {
1180 if (!debug_locks_off_graph_unlock())
1183 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1188 __set_bit(idx
, list_entries_in_use
);
1189 return list_entries
+ idx
;
1193 * Add a new dependency to the head of the list:
1195 static int add_lock_to_list(struct lock_class
*this,
1196 struct lock_class
*links_to
, struct list_head
*head
,
1197 unsigned long ip
, int distance
,
1198 struct lock_trace
*trace
)
1200 struct lock_list
*entry
;
1202 * Lock not present yet - get a new dependency struct and
1203 * add it to the list:
1205 entry
= alloc_list_entry();
1209 entry
->class = this;
1210 entry
->links_to
= links_to
;
1211 entry
->distance
= distance
;
1212 entry
->trace
= *trace
;
1214 * Both allocation and removal are done under the graph lock; but
1215 * iteration is under RCU-sched; see look_up_lock_class() and
1216 * lockdep_free_key_range().
1218 list_add_tail_rcu(&entry
->entry
, head
);
/*
 * The queue size is a power of 2 so that wrapping an index is a cheap
 * mask operation instead of a modulo.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and its helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to a
 * previously held lock if there is a circular dependency between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
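/*
 * Illustrative sketch only (not built, hence the #if 0): how the queue
 * helpers above are typically driven. The BFS core below does exactly this,
 * with struct lock_list pointers cast to unsigned long as the elements.
 */
#if 0
static void __maybe_unused cq_usage_example(void)
{
	struct circular_queue *cq = &lock_cq;
	unsigned long elem;

	__cq_init(cq);			/* empty the queue, bump the dep generation id */
	__cq_enqueue(cq, 42UL);		/* returns -1 if the queue is full */
	while (!__cq_empty(cq)) {
		__cq_dequeue(cq, &elem);	/* elem == 42 on the first pass */
		/* visit(elem) and enqueue its not-yet-visited neighbours */
	}
}
#endif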
1287 static inline void mark_lock_accessed(struct lock_list
*lock
,
1288 struct lock_list
*parent
)
1292 nr
= lock
- list_entries
;
1293 WARN_ON(nr
>= ARRAY_SIZE(list_entries
)); /* Out-of-bounds, input fail */
1294 lock
->parent
= parent
;
1295 lock
->class->dep_gen_id
= lockdep_dependency_gen_id
;
1298 static inline unsigned long lock_accessed(struct lock_list
*lock
)
1302 nr
= lock
- list_entries
;
1303 WARN_ON(nr
>= ARRAY_SIZE(list_entries
)); /* Out-of-bounds, input fail */
1304 return lock
->class->dep_gen_id
== lockdep_dependency_gen_id
;
1307 static inline struct lock_list
*get_lock_parent(struct lock_list
*child
)
1309 return child
->parent
;
1312 static inline int get_lock_depth(struct lock_list
*child
)
1315 struct lock_list
*parent
;
1317 while ((parent
= get_lock_parent(child
))) {
1324 static int __bfs(struct lock_list
*source_entry
,
1326 int (*match
)(struct lock_list
*entry
, void *data
),
1327 struct lock_list
**target_entry
,
1330 struct lock_list
*entry
;
1331 struct list_head
*head
;
1332 struct circular_queue
*cq
= &lock_cq
;
1335 if (match(source_entry
, data
)) {
1336 *target_entry
= source_entry
;
1342 head
= &source_entry
->class->locks_after
;
1344 head
= &source_entry
->class->locks_before
;
1346 if (list_empty(head
))
1350 __cq_enqueue(cq
, (unsigned long)source_entry
);
1352 while (!__cq_empty(cq
)) {
1353 struct lock_list
*lock
;
1355 __cq_dequeue(cq
, (unsigned long *)&lock
);
1363 head
= &lock
->class->locks_after
;
1365 head
= &lock
->class->locks_before
;
1367 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1369 list_for_each_entry_rcu(entry
, head
, entry
) {
1370 if (!lock_accessed(entry
)) {
1371 unsigned int cq_depth
;
1372 mark_lock_accessed(entry
, lock
);
1373 if (match(entry
, data
)) {
1374 *target_entry
= entry
;
1379 if (__cq_enqueue(cq
, (unsigned long)entry
)) {
1383 cq_depth
= __cq_get_elem_count(cq
);
1384 if (max_bfs_queue_depth
< cq_depth
)
1385 max_bfs_queue_depth
= cq_depth
;
static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);
}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);
}
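/*
 * Illustrative sketch only (not built): the typical calling pattern for the
 * BFS wrappers above, mirroring what check_noncircular()/check_redundant()
 * below do with the class_equal() match function (defined further down).
 * "bfs_usage_example" is a hypothetical helper used only for this example.
 */
#if 0
static int __maybe_unused bfs_usage_example(struct held_lock *src_hlock,
					    struct lock_class *target_class)
{
	struct lock_list root = { .class = hlock_class(src_hlock), .parent = NULL };
	struct lock_list *target_entry;

	/* 0: found (and *target_entry set), 1: not reachable, <0: error */
	return __bfs_forwards(&root, target_class, class_equal, &target_entry);
}
#endif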
1412 * Recursive, forwards-direction lock-dependency checking, used for
1413 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1417 static void print_lock_trace(struct lock_trace
*trace
, unsigned int spaces
)
1419 unsigned long *entries
= stack_trace
+ trace
->offset
;
1421 stack_trace_print(entries
, trace
->nr_entries
, spaces
);
1425 * Print a dependency chain entry (this is only done when a deadlock
1426 * has been detected):
1429 print_circular_bug_entry(struct lock_list
*target
, int depth
)
1431 if (debug_locks_silent
)
1433 printk("\n-> #%u", depth
);
1434 print_lock_name(target
->class);
1435 printk(KERN_CONT
":\n");
1436 print_lock_trace(&target
->trace
, 6);
1441 print_circular_lock_scenario(struct held_lock
*src
,
1442 struct held_lock
*tgt
,
1443 struct lock_list
*prt
)
1445 struct lock_class
*source
= hlock_class(src
);
1446 struct lock_class
*target
= hlock_class(tgt
);
1447 struct lock_class
*parent
= prt
->class;
1450 * A direct locking problem where unsafe_class lock is taken
1451 * directly by safe_class lock, then all we need to show
1452 * is the deadlock scenario, as it is obvious that the
1453 * unsafe lock is taken under the safe lock.
1455 * But if there is a chain instead, where the safe lock takes
1456 * an intermediate lock (middle_class) where this lock is
1457 * not the same as the safe lock, then the lock chain is
1458 * used to describe the problem. Otherwise we would need
1459 * to show a different CPU case for each link in the chain
1460 * from the safe_class lock to the unsafe_class lock.
1462 if (parent
!= source
) {
1463 printk("Chain exists of:\n ");
1464 __print_lock_name(source
);
1465 printk(KERN_CONT
" --> ");
1466 __print_lock_name(parent
);
1467 printk(KERN_CONT
" --> ");
1468 __print_lock_name(target
);
1469 printk(KERN_CONT
"\n\n");
1472 printk(" Possible unsafe locking scenario:\n\n");
1473 printk(" CPU0 CPU1\n");
1474 printk(" ---- ----\n");
1476 __print_lock_name(target
);
1477 printk(KERN_CONT
");\n");
1479 __print_lock_name(parent
);
1480 printk(KERN_CONT
");\n");
1482 __print_lock_name(target
);
1483 printk(KERN_CONT
");\n");
1485 __print_lock_name(source
);
1486 printk(KERN_CONT
");\n");
1487 printk("\n *** DEADLOCK ***\n\n");
1491 * When a circular dependency is detected, print the
1495 print_circular_bug_header(struct lock_list
*entry
, unsigned int depth
,
1496 struct held_lock
*check_src
,
1497 struct held_lock
*check_tgt
)
1499 struct task_struct
*curr
= current
;
1501 if (debug_locks_silent
)
1505 pr_warn("======================================================\n");
1506 pr_warn("WARNING: possible circular locking dependency detected\n");
1507 print_kernel_ident();
1508 pr_warn("------------------------------------------------------\n");
1509 pr_warn("%s/%d is trying to acquire lock:\n",
1510 curr
->comm
, task_pid_nr(curr
));
1511 print_lock(check_src
);
1513 pr_warn("\nbut task is already holding lock:\n");
1515 print_lock(check_tgt
);
1516 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1517 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1519 print_circular_bug_entry(entry
, depth
);
1524 static inline int class_equal(struct lock_list
*entry
, void *data
)
1526 return entry
->class == data
;
1529 static noinline
int print_circular_bug(struct lock_list
*this,
1530 struct lock_list
*target
,
1531 struct held_lock
*check_src
,
1532 struct held_lock
*check_tgt
)
1534 struct task_struct
*curr
= current
;
1535 struct lock_list
*parent
;
1536 struct lock_list
*first_parent
;
1539 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
1542 if (!save_trace(&this->trace
))
1545 depth
= get_lock_depth(target
);
1547 print_circular_bug_header(target
, depth
, check_src
, check_tgt
);
1549 parent
= get_lock_parent(target
);
1550 first_parent
= parent
;
1553 print_circular_bug_entry(parent
, --depth
);
1554 parent
= get_lock_parent(parent
);
1557 printk("\nother info that might help us debug this:\n\n");
1558 print_circular_lock_scenario(check_src
, check_tgt
,
1561 lockdep_print_held_locks(curr
);
1563 printk("\nstack backtrace:\n");
1569 static noinline
int print_bfs_bug(int ret
)
1571 if (!debug_locks_off_graph_unlock())
1575 * Breadth-first-search failed, graph got corrupted?
1577 WARN(1, "lockdep bfs error:%d\n", ret
);
1582 static int noop_count(struct lock_list
*entry
, void *data
)
1584 (*(unsigned long *)data
)++;
1588 static unsigned long __lockdep_count_forward_deps(struct lock_list
*this)
1590 unsigned long count
= 0;
1591 struct lock_list
*uninitialized_var(target_entry
);
1593 __bfs_forwards(this, (void *)&count
, noop_count
, &target_entry
);
1597 unsigned long lockdep_count_forward_deps(struct lock_class
*class)
1599 unsigned long ret
, flags
;
1600 struct lock_list
this;
1605 raw_local_irq_save(flags
);
1606 arch_spin_lock(&lockdep_lock
);
1607 ret
= __lockdep_count_forward_deps(&this);
1608 arch_spin_unlock(&lockdep_lock
);
1609 raw_local_irq_restore(flags
);
1614 static unsigned long __lockdep_count_backward_deps(struct lock_list
*this)
1616 unsigned long count
= 0;
1617 struct lock_list
*uninitialized_var(target_entry
);
1619 __bfs_backwards(this, (void *)&count
, noop_count
, &target_entry
);
1624 unsigned long lockdep_count_backward_deps(struct lock_class
*class)
1626 unsigned long ret
, flags
;
1627 struct lock_list
this;
1632 raw_local_irq_save(flags
);
1633 arch_spin_lock(&lockdep_lock
);
1634 ret
= __lockdep_count_backward_deps(&this);
1635 arch_spin_unlock(&lockdep_lock
);
1636 raw_local_irq_restore(flags
);
1642 * Prove that the dependency graph starting at <entry> can not
1643 * lead to <target>. Print an error and return 0 if it does.
1646 check_noncircular(struct lock_list
*root
, struct lock_class
*target
,
1647 struct lock_list
**target_entry
)
1651 debug_atomic_inc(nr_cyclic_checks
);
1653 result
= __bfs_forwards(root
, target
, class_equal
, target_entry
);
1659 check_redundant(struct lock_list
*root
, struct lock_class
*target
,
1660 struct lock_list
**target_entry
)
1664 debug_atomic_inc(nr_redundant_checks
);
1666 result
= __bfs_forwards(root
, target
, class_equal
, target_entry
);
1671 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1673 static inline int usage_accumulate(struct lock_list
*entry
, void *mask
)
1675 *(unsigned long *)mask
|= entry
->class->usage_mask
;
1681 * Forwards and backwards subgraph searching, for the purposes of
1682 * proving that two subgraphs can be connected by a new dependency
1683 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1686 static inline int usage_match(struct lock_list
*entry
, void *mask
)
1688 return entry
->class->usage_mask
& *(unsigned long *)mask
;
1692 * Find a node in the forwards-direction dependency sub-graph starting
1693 * at @root->class that matches @bit.
1695 * Return 0 if such a node exists in the subgraph, and put that node
1696 * into *@target_entry.
1698 * Return 1 otherwise and keep *@target_entry unchanged.
1699 * Return <0 on error.
1702 find_usage_forwards(struct lock_list
*root
, unsigned long usage_mask
,
1703 struct lock_list
**target_entry
)
1707 debug_atomic_inc(nr_find_usage_forwards_checks
);
1709 result
= __bfs_forwards(root
, &usage_mask
, usage_match
, target_entry
);
1715 * Find a node in the backwards-direction dependency sub-graph starting
1716 * at @root->class that matches @bit.
1718 * Return 0 if such a node exists in the subgraph, and put that node
1719 * into *@target_entry.
1721 * Return 1 otherwise and keep *@target_entry unchanged.
1722 * Return <0 on error.
1725 find_usage_backwards(struct lock_list
*root
, unsigned long usage_mask
,
1726 struct lock_list
**target_entry
)
1730 debug_atomic_inc(nr_find_usage_backwards_checks
);
1732 result
= __bfs_backwards(root
, &usage_mask
, usage_match
, target_entry
);
1737 static void print_lock_class_header(struct lock_class
*class, int depth
)
1741 printk("%*s->", depth
, "");
1742 print_lock_name(class);
1743 #ifdef CONFIG_DEBUG_LOCKDEP
1744 printk(KERN_CONT
" ops: %lu", debug_class_ops_read(class));
1746 printk(KERN_CONT
" {\n");
1748 for (bit
= 0; bit
< LOCK_USAGE_STATES
; bit
++) {
1749 if (class->usage_mask
& (1 << bit
)) {
1752 len
+= printk("%*s %s", depth
, "", usage_str
[bit
]);
1753 len
+= printk(KERN_CONT
" at:\n");
1754 print_lock_trace(class->usage_traces
+ bit
, len
);
1757 printk("%*s }\n", depth
, "");
1759 printk("%*s ... key at: [<%px>] %pS\n",
1760 depth
, "", class->key
, class->key
);
1764 * printk the shortest lock dependencies from @start to @end in reverse order:
1767 print_shortest_lock_dependencies(struct lock_list
*leaf
,
1768 struct lock_list
*root
)
1770 struct lock_list
*entry
= leaf
;
1773 /*compute depth from generated tree by BFS*/
1774 depth
= get_lock_depth(leaf
);
1777 print_lock_class_header(entry
->class, depth
);
1778 printk("%*s ... acquired at:\n", depth
, "");
1779 print_lock_trace(&entry
->trace
, 2);
1782 if (depth
== 0 && (entry
!= root
)) {
1783 printk("lockdep:%s bad path found in chain graph\n", __func__
);
1787 entry
= get_lock_parent(entry
);
1789 } while (entry
&& (depth
>= 0));
1795 print_irq_lock_scenario(struct lock_list
*safe_entry
,
1796 struct lock_list
*unsafe_entry
,
1797 struct lock_class
*prev_class
,
1798 struct lock_class
*next_class
)
1800 struct lock_class
*safe_class
= safe_entry
->class;
1801 struct lock_class
*unsafe_class
= unsafe_entry
->class;
1802 struct lock_class
*middle_class
= prev_class
;
1804 if (middle_class
== safe_class
)
1805 middle_class
= next_class
;
1808 * A direct locking problem where unsafe_class lock is taken
1809 * directly by safe_class lock, then all we need to show
1810 * is the deadlock scenario, as it is obvious that the
1811 * unsafe lock is taken under the safe lock.
1813 * But if there is a chain instead, where the safe lock takes
1814 * an intermediate lock (middle_class) where this lock is
1815 * not the same as the safe lock, then the lock chain is
1816 * used to describe the problem. Otherwise we would need
1817 * to show a different CPU case for each link in the chain
1818 * from the safe_class lock to the unsafe_class lock.
1820 if (middle_class
!= unsafe_class
) {
1821 printk("Chain exists of:\n ");
1822 __print_lock_name(safe_class
);
1823 printk(KERN_CONT
" --> ");
1824 __print_lock_name(middle_class
);
1825 printk(KERN_CONT
" --> ");
1826 __print_lock_name(unsafe_class
);
1827 printk(KERN_CONT
"\n\n");
1830 printk(" Possible interrupt unsafe locking scenario:\n\n");
1831 printk(" CPU0 CPU1\n");
1832 printk(" ---- ----\n");
1834 __print_lock_name(unsafe_class
);
1835 printk(KERN_CONT
");\n");
1836 printk(" local_irq_disable();\n");
1838 __print_lock_name(safe_class
);
1839 printk(KERN_CONT
");\n");
1841 __print_lock_name(middle_class
);
1842 printk(KERN_CONT
");\n");
1843 printk(" <Interrupt>\n");
1845 __print_lock_name(safe_class
);
1846 printk(KERN_CONT
");\n");
1847 printk("\n *** DEADLOCK ***\n\n");
1851 print_bad_irq_dependency(struct task_struct
*curr
,
1852 struct lock_list
*prev_root
,
1853 struct lock_list
*next_root
,
1854 struct lock_list
*backwards_entry
,
1855 struct lock_list
*forwards_entry
,
1856 struct held_lock
*prev
,
1857 struct held_lock
*next
,
1858 enum lock_usage_bit bit1
,
1859 enum lock_usage_bit bit2
,
1860 const char *irqclass
)
1862 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
1866 pr_warn("=====================================================\n");
1867 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1868 irqclass
, irqclass
);
1869 print_kernel_ident();
1870 pr_warn("-----------------------------------------------------\n");
1871 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1872 curr
->comm
, task_pid_nr(curr
),
1873 curr
->hardirq_context
, hardirq_count() >> HARDIRQ_SHIFT
,
1874 curr
->softirq_context
, softirq_count() >> SOFTIRQ_SHIFT
,
1875 curr
->hardirqs_enabled
,
1876 curr
->softirqs_enabled
);
1879 pr_warn("\nand this task is already holding:\n");
1881 pr_warn("which would create a new lock dependency:\n");
1882 print_lock_name(hlock_class(prev
));
1884 print_lock_name(hlock_class(next
));
1887 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1889 print_lock_name(backwards_entry
->class);
1890 pr_warn("\n... which became %s-irq-safe at:\n", irqclass
);
1892 print_lock_trace(backwards_entry
->class->usage_traces
+ bit1
, 1);
1894 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass
);
1895 print_lock_name(forwards_entry
->class);
1896 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass
);
1899 print_lock_trace(forwards_entry
->class->usage_traces
+ bit2
, 1);
1901 pr_warn("\nother info that might help us debug this:\n\n");
1902 print_irq_lock_scenario(backwards_entry
, forwards_entry
,
1903 hlock_class(prev
), hlock_class(next
));
1905 lockdep_print_held_locks(curr
);
1907 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass
);
1908 if (!save_trace(&prev_root
->trace
))
1910 print_shortest_lock_dependencies(backwards_entry
, prev_root
);
1912 pr_warn("\nthe dependencies between the lock to be acquired");
1913 pr_warn(" and %s-irq-unsafe lock:\n", irqclass
);
1914 if (!save_trace(&next_root
->trace
))
1916 print_shortest_lock_dependencies(forwards_entry
, next_root
);
1918 pr_warn("\nstack backtrace:\n");
1924 static const char *state_names
[] = {
1925 #define LOCKDEP_STATE(__STATE) \
1926 __stringify(__STATE),
1927 #include "lockdep_states.h"
1928 #undef LOCKDEP_STATE
1931 static const char *state_rnames
[] = {
1932 #define LOCKDEP_STATE(__STATE) \
1933 __stringify(__STATE)"-READ",
1934 #include "lockdep_states.h"
1935 #undef LOCKDEP_STATE
1938 static inline const char *state_name(enum lock_usage_bit bit
)
1940 if (bit
& LOCK_USAGE_READ_MASK
)
1941 return state_rnames
[bit
>> LOCK_USAGE_DIR_MASK
];
1943 return state_names
[bit
>> LOCK_USAGE_DIR_MASK
];
/*
 * The bit number is encoded like:
 *
 *  bit0: 0 exclusive, 1 read lock
 *  bit1: 0 used in irq, 1 irq enabled
 *  bit2-n: state
 */
static int exclusive_bit(int new_bit)
{
	int state = new_bit & LOCK_USAGE_STATE_MASK;
	int dir = new_bit & LOCK_USAGE_DIR_MASK;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ LOCK_USAGE_DIR_MASK);
}
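/*
 * Worked example (illustrative, using the usual HARDIRQ state): the
 * direction bit of LOCK_USED_IN_HARDIRQ is clear ("used in irq") and the
 * read bit is clear. Flipping the direction bit and stripping the read bit
 * yields the usage that is exclusive with it - a lock taken inside a hardirq
 * must never also be taken with hardirqs enabled:
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_SOFTIRQ)      == LOCK_USED_IN_SOFTIRQ
 */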
1965 * Observe that when given a bitmask where each bitnr is encoded as above, a
1966 * right shift of the mask transforms the individual bitnrs as -1 and
1967 * conversely, a left shift transforms into +1 for the individual bitnrs.
1969 * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
1970 * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
1971 * instead by subtracting the bit number by 2, or shifting the mask right by 2.
1973 * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
1975 * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
1976 * all bits set) and recompose with bitnr1 flipped.
1978 static unsigned long invert_dir_mask(unsigned long mask
)
1980 unsigned long excl
= 0;
1983 excl
|= (mask
& LOCKF_ENABLED_IRQ_ALL
) >> LOCK_USAGE_DIR_MASK
;
1984 excl
|= (mask
& LOCKF_USED_IN_IRQ_ALL
) << LOCK_USAGE_DIR_MASK
;
1990 * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
1991 * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
1992 * And then mask out all bitnr0.
1994 static unsigned long exclusive_mask(unsigned long mask
)
1996 unsigned long excl
= invert_dir_mask(mask
);
1999 excl
|= (excl
& LOCKF_IRQ_READ
) >> LOCK_USAGE_READ_MASK
;
2000 excl
&= ~LOCKF_IRQ_READ
;
2006 * Retrieve the _possible_ original mask to which @mask is
2007 * exclusive. Ie: this is the opposite of exclusive_mask().
2008 * Note that 2 possible original bits can match an exclusive
2009 * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2010 * cleared. So both are returned for each exclusive bit.
2012 static unsigned long original_mask(unsigned long mask
)
2014 unsigned long excl
= invert_dir_mask(mask
);
2016 /* Include read in existing usages */
2017 excl
|= (excl
& LOCKF_IRQ
) << LOCK_USAGE_READ_MASK
;
2023 * Find the first pair of bit match between an original
2024 * usage mask and an exclusive usage mask.
2026 static int find_exclusive_match(unsigned long mask
,
2027 unsigned long excl_mask
,
2028 enum lock_usage_bit
*bitp
,
2029 enum lock_usage_bit
*excl_bitp
)
2033 for_each_set_bit(bit
, &mask
, LOCK_USED
) {
2034 excl
= exclusive_bit(bit
);
2035 if (excl_mask
& lock_flag(excl
)) {
2045 * Prove that the new dependency does not connect a hardirq-safe(-read)
2046 * lock with a hardirq-unsafe lock - to achieve this we search
2047 * the backwards-subgraph starting at <prev>, and the
2048 * forwards-subgraph starting at <next>:
2050 static int check_irq_usage(struct task_struct
*curr
, struct held_lock
*prev
,
2051 struct held_lock
*next
)
2053 unsigned long usage_mask
= 0, forward_mask
, backward_mask
;
2054 enum lock_usage_bit forward_bit
= 0, backward_bit
= 0;
2055 struct lock_list
*uninitialized_var(target_entry1
);
2056 struct lock_list
*uninitialized_var(target_entry
);
2057 struct lock_list
this, that
;
2061 * Step 1: gather all hard/soft IRQs usages backward in an
2062 * accumulated usage mask.
2065 this.class = hlock_class(prev
);
2067 ret
= __bfs_backwards(&this, &usage_mask
, usage_accumulate
, NULL
);
2069 return print_bfs_bug(ret
);
2071 usage_mask
&= LOCKF_USED_IN_IRQ_ALL
;
2076 * Step 2: find exclusive uses forward that match the previous
2077 * backward accumulated mask.
2079 forward_mask
= exclusive_mask(usage_mask
);
2082 that
.class = hlock_class(next
);
2084 ret
= find_usage_forwards(&that
, forward_mask
, &target_entry1
);
2086 return print_bfs_bug(ret
);
2091 * Step 3: we found a bad match! Now retrieve a lock from the backward
2092 * list whose usage mask matches the exclusive usage mask from the
2093 * lock found on the forward list.
2095 backward_mask
= original_mask(target_entry1
->class->usage_mask
);
2097 ret
= find_usage_backwards(&this, backward_mask
, &target_entry
);
2099 return print_bfs_bug(ret
);
2100 if (DEBUG_LOCKS_WARN_ON(ret
== 1))
2104 * Step 4: narrow down to a pair of incompatible usage bits
2107 ret
= find_exclusive_match(target_entry
->class->usage_mask
,
2108 target_entry1
->class->usage_mask
,
2109 &backward_bit
, &forward_bit
);
2110 if (DEBUG_LOCKS_WARN_ON(ret
== -1))
2113 return print_bad_irq_dependency(curr
, &this, &that
,
2114 target_entry
, target_entry1
,
2116 backward_bit
, forward_bit
,
2117 state_name(backward_bit
));
2120 static void inc_chains(void)
2122 if (current
->hardirq_context
)
2123 nr_hardirq_chains
++;
2125 if (current
->softirq_context
)
2126 nr_softirq_chains
++;
2128 nr_process_chains
++;
2134 static inline int check_irq_usage(struct task_struct
*curr
,
2135 struct held_lock
*prev
, struct held_lock
*next
)
2140 static inline void inc_chains(void)
2142 nr_process_chains
++;
2148 print_deadlock_scenario(struct held_lock
*nxt
,
2149 struct held_lock
*prv
)
2151 struct lock_class
*next
= hlock_class(nxt
);
2152 struct lock_class
*prev
= hlock_class(prv
);
2154 printk(" Possible unsafe locking scenario:\n\n");
2158 __print_lock_name(prev
);
2159 printk(KERN_CONT
");\n");
2161 __print_lock_name(next
);
2162 printk(KERN_CONT
");\n");
2163 printk("\n *** DEADLOCK ***\n\n");
2164 printk(" May be due to missing lock nesting notation\n\n");
2168 print_deadlock_bug(struct task_struct
*curr
, struct held_lock
*prev
,
2169 struct held_lock
*next
)
2171 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
2175 pr_warn("============================================\n");
2176 pr_warn("WARNING: possible recursive locking detected\n");
2177 print_kernel_ident();
2178 pr_warn("--------------------------------------------\n");
2179 pr_warn("%s/%d is trying to acquire lock:\n",
2180 curr
->comm
, task_pid_nr(curr
));
2182 pr_warn("\nbut task is already holding lock:\n");
2185 pr_warn("\nother info that might help us debug this:\n");
2186 print_deadlock_scenario(next
, prev
);
2187 lockdep_print_held_locks(curr
);
2189 pr_warn("\nstack backtrace:\n");
/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}
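/*
 * Illustrative sketch only: the kind of self-deadlock check_deadlock()
 * catches. Taking the same lock class twice in one context deadlocks, unless
 * both acquisitions are recursive reads (or a nest_lock serializes the
 * nesting). "my_lock" and "my_rwlock" are hypothetical locks used only for
 * this example:
 *
 *	spin_lock(&my_lock);
 *	spin_lock(&my_lock);		// same class already held -> deadlock report
 *
 *	read_lock(&my_rwlock);
 *	read_lock(&my_rwlock);		// read-after-read recursion -> allowed (returns 2)
 */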
2240 * There was a chain-cache miss, and we are about to add a new dependency
2241 * to a previous lock. We recursively validate the following rules:
2243 * - would the adding of the <prev> -> <next> dependency create a
2244 * circular dependency in the graph? [== circular deadlock]
2246 * - does the new prev->next dependency connect any hardirq-safe lock
2247 * (in the full backwards-subgraph starting at <prev>) with any
2248 * hardirq-unsafe lock (in the full forwards-subgraph starting at
2249 * <next>)? [== illegal lock inversion with hardirq contexts]
2251 * - does the new prev->next dependency connect any softirq-safe lock
2252 * (in the full backwards-subgraph starting at <prev>) with any
2253 * softirq-unsafe lock (in the full forwards-subgraph starting at
2254 * <next>)? [== illegal lock inversion with softirq contexts]
2256 * any of these scenarios could lead to a deadlock.
2258 * Then if all the validations pass, we add the forwards and backwards
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, struct lock_trace *trace)
{
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *entry;
	struct lock_list this;
	int ret;

	if (!hlock_class(prev)->key || !hlock_class(next)->key) {
		/*
		 * The warning statements below may trigger a use-after-free
		 * of the class name. It is better to trigger a use-after-free
		 * and to have the class name most of the time instead of not
		 * having the class name available.
		 */
		WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
			  "Detected use-after-free of lock class %px/%s\n",
			  hlock_class(prev),
			  hlock_class(prev)->name);
		WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
			  "Detected use-after-free of lock class %px/%s\n",
			  hlock_class(next),
			  hlock_class(next)->name);
		return 2;
	}

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret)) {
		if (!trace->nr_entries) {
			/*
			 * If save_trace fails here, the printing might
			 * trigger a WARN but because of the !nr_entries it
			 * should not do bad things.
			 */
			save_trace(trace);
		}
		return print_circular_bug(&this, target_entry, next, prev);
	}
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_irq_usage(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 * chains - the second one will be new, but L1 already has
	 * L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 1;
		}
	}

	/*
	 * Is the <prev> -> <next> link redundant?
	 */
	this.class = hlock_class(prev);
	this.parent = NULL;
	ret = check_redundant(&this, hlock_class(next), &target_entry);
	if (!ret) {
		debug_atomic_inc(nr_redundant);
		return 2;
	}
	if (ret < 0)
		return print_bfs_bug(ret);

	if (!trace->nr_entries && !save_trace(trace))
		return 0;

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, trace);
	if (!ret)
		return 0;

	return 2;
}
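
/*
 * A minimal illustration of what the circular check in check_prev_add()
 * catches, assuming two plain spinlocks A and B that are not taken from
 * any real code path. Path 1 records the dependency A -> B; when path 2
 * later acquires A while holding B, <prev> is B and <next> is A, the
 * forward search from hlock_class(A) reaches hlock_class(B) via the
 * earlier A -> B edge, and print_circular_bug() reports the classic
 * AB-BA deadlock candidate:
 *
 *	path 1:				path 2:
 *	  spin_lock(&A);		  spin_lock(&B);
 *	  spin_lock(&B);		  spin_lock(&A);
 *	  ...				  ...
 *	  spin_unlock(&B);		  spin_unlock(&A);
 *	  spin_unlock(&A);		  spin_unlock(&B);
 */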
/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	struct lock_trace trace = { .nr_entries = 0 };
	int depth = curr->lockdep_depth;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth - 1;

		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2 && hlock->check) {
			int ret = check_prev_add(curr, hlock, next, distance,
						 &trace);
			if (!ret)
				return 0;

			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}

		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Clearly we all shouldn't be here, but since we made it we
	 * can reliably say we messed up our state. See the above two
	 * gotos for reasons why we could possibly end up here.
	 */
	WARN_ON(1);

	return 0;
}
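
/*
 * A short worked example of the walk above, assuming (hypothetically) a
 * task that did, innermost last: mutex_lock(&M); spin_trylock(&S1);
 * spin_trylock(&S2); and is now acquiring L. check_prevs_add() calls
 * check_prev_add() for S2 (distance 1), S1 (distance 2) and M
 * (distance 3), then stops: M is the first non-trylock entry, and the
 * locks below it are already connected to M by their own direct
 * dependencies.
 */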
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
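
/*
 * Layout sketch for the arrays above (the numbers are only an example):
 * each lock_chain owns a contiguous window of chain_hlocks[] that starts
 * at chain->base and is chain->depth entries long, and every entry is an
 * index into lock_classes[]. A chain of depth 3 with base 17 describing
 * classes 5, 9 and 12 occupies chain_hlocks[17..19] = { 5, 9, 12 }, so
 * lock_chain_get_class(chain, 1) below returns lock_classes + 9.
 */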
2465 struct lock_class
*lock_chain_get_class(struct lock_chain
*chain
, int i
)
2467 return lock_classes
+ chain_hlocks
[chain
->base
+ i
];
2471 * Returns the index of the first held_lock of the current chain
2473 static inline int get_first_held_lock(struct task_struct
*curr
,
2474 struct held_lock
*hlock
)
2477 struct held_lock
*hlock_curr
;
2479 for (i
= curr
->lockdep_depth
- 1; i
>= 0; i
--) {
2480 hlock_curr
= curr
->held_locks
+ i
;
2481 if (hlock_curr
->irq_context
!= hlock
->irq_context
)
2489 #ifdef CONFIG_DEBUG_LOCKDEP
2491 * Returns the next chain_key iteration
2493 static u64
print_chain_key_iteration(int class_idx
, u64 chain_key
)
2495 u64 new_chain_key
= iterate_chain_key(chain_key
, class_idx
);
2497 printk(" class_idx:%d -> chain_key:%016Lx",
2499 (unsigned long long)new_chain_key
);
2500 return new_chain_key
;
2504 print_chain_keys_held_locks(struct task_struct
*curr
, struct held_lock
*hlock_next
)
2506 struct held_lock
*hlock
;
2508 int depth
= curr
->lockdep_depth
;
2511 printk("depth: %u\n", depth
+ 1);
2512 for (i
= get_first_held_lock(curr
, hlock_next
); i
< depth
; i
++) {
2513 hlock
= curr
->held_locks
+ i
;
2514 chain_key
= print_chain_key_iteration(hlock
->class_idx
, chain_key
);
2519 print_chain_key_iteration(hlock_next
->class_idx
, chain_key
);
2520 print_lock(hlock_next
);
2523 static void print_chain_keys_chain(struct lock_chain
*chain
)
2529 printk("depth: %u\n", chain
->depth
);
2530 for (i
= 0; i
< chain
->depth
; i
++) {
2531 class_id
= chain_hlocks
[chain
->base
+ i
];
2532 chain_key
= print_chain_key_iteration(class_id
+ 1, chain_key
);
2534 print_lock_name(lock_classes
+ class_id
);
2539 static void print_collision(struct task_struct
*curr
,
2540 struct held_lock
*hlock_next
,
2541 struct lock_chain
*chain
)
2544 pr_warn("============================\n");
2545 pr_warn("WARNING: chain_key collision\n");
2546 print_kernel_ident();
2547 pr_warn("----------------------------\n");
2548 pr_warn("%s/%d: ", current
->comm
, task_pid_nr(current
));
2549 pr_warn("Hash chain already cached but the contents don't match!\n");
2551 pr_warn("Held locks:");
2552 print_chain_keys_held_locks(curr
, hlock_next
);
2554 pr_warn("Locks in cached chain:");
2555 print_chain_keys_chain(chain
);
2557 pr_warn("\nstack backtrace:\n");
2563 * Checks whether the chain and the current held locks are consistent
2564 * in depth and also in content. If they are not it most likely means
2565 * that there was a collision during the calculation of the chain_key.
2566 * Returns: 0 not passed, 1 passed
2568 static int check_no_collision(struct task_struct
*curr
,
2569 struct held_lock
*hlock
,
2570 struct lock_chain
*chain
)
2572 #ifdef CONFIG_DEBUG_LOCKDEP
2575 i
= get_first_held_lock(curr
, hlock
);
2577 if (DEBUG_LOCKS_WARN_ON(chain
->depth
!= curr
->lockdep_depth
- (i
- 1))) {
2578 print_collision(curr
, hlock
, chain
);
2582 for (j
= 0; j
< chain
->depth
- 1; j
++, i
++) {
2583 id
= curr
->held_locks
[i
].class_idx
- 1;
2585 if (DEBUG_LOCKS_WARN_ON(chain_hlocks
[chain
->base
+ j
] != id
)) {
2586 print_collision(curr
, hlock
, chain
);
/*
 * Given an index that is >= -1, return the index of the next lock chain.
 * Return -2 if there is no next lock chain.
 */
long lockdep_next_lockchain(long i)
{
	i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
	return i < ARRAY_SIZE(lock_chains) ? i : -2;
}

unsigned long lock_chain_count(void)
{
	return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
}
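
/*
 * A minimal iteration sketch built on the helper above, in the style of
 * the /proc/lockdep_chains seq_file (process() is hypothetical):
 *
 *	long i;
 *
 *	for (i = lockdep_next_lockchain(-1); i != -2;
 *	     i = lockdep_next_lockchain(i))
 *		process(lock_chains + i);
 */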
/* Must be called with the graph lock held. */
static struct lock_chain *alloc_lock_chain(void)
{
	int idx = find_first_zero_bit(lock_chains_in_use,
				      ARRAY_SIZE(lock_chains));

	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
		return NULL;
	__set_bit(idx, lock_chains_in_use);
	return lock_chains + idx;
}
2622 * Adds a dependency chain into chain hashtable. And must be called with
2625 * Return 0 if fail, and graph_lock is released.
2626 * Return 1 if succeed, with graph_lock held.
2628 static inline int add_chain_cache(struct task_struct
*curr
,
2629 struct held_lock
*hlock
,
2632 struct lock_class
*class = hlock_class(hlock
);
2633 struct hlist_head
*hash_head
= chainhashentry(chain_key
);
2634 struct lock_chain
*chain
;
2638 * The caller must hold the graph lock, ensure we've got IRQs
2639 * disabled to make this an IRQ-safe lock.. for recursion reasons
2640 * lockdep won't complain about its own locking errors.
2642 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2645 chain
= alloc_lock_chain();
2647 if (!debug_locks_off_graph_unlock())
2650 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2654 chain
->chain_key
= chain_key
;
2655 chain
->irq_context
= hlock
->irq_context
;
2656 i
= get_first_held_lock(curr
, hlock
);
2657 chain
->depth
= curr
->lockdep_depth
+ 1 - i
;
2659 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks
));
2660 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr
->held_locks
));
2661 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks
[0])) <= ARRAY_SIZE(lock_classes
));
2663 if (likely(nr_chain_hlocks
+ chain
->depth
<= MAX_LOCKDEP_CHAIN_HLOCKS
)) {
2664 chain
->base
= nr_chain_hlocks
;
2665 for (j
= 0; j
< chain
->depth
- 1; j
++, i
++) {
2666 int lock_id
= curr
->held_locks
[i
].class_idx
- 1;
2667 chain_hlocks
[chain
->base
+ j
] = lock_id
;
2669 chain_hlocks
[chain
->base
+ j
] = class - lock_classes
;
2670 nr_chain_hlocks
+= chain
->depth
;
2672 if (!debug_locks_off_graph_unlock())
2675 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2680 hlist_add_head_rcu(&chain
->entry
, hash_head
);
2681 debug_atomic_inc(chain_lookup_misses
);
2688 * Look up a dependency chain. Must be called with either the graph lock or
2689 * the RCU read lock held.
2691 static inline struct lock_chain
*lookup_chain_cache(u64 chain_key
)
2693 struct hlist_head
*hash_head
= chainhashentry(chain_key
);
2694 struct lock_chain
*chain
;
2696 hlist_for_each_entry_rcu(chain
, hash_head
, entry
) {
2697 if (READ_ONCE(chain
->chain_key
) == chain_key
) {
2698 debug_atomic_inc(chain_lookup_hits
);
2706 * If the key is not present yet in dependency chain cache then
2707 * add it and return 1 - in this case the new dependency chain is
2708 * validated. If the key is already hashed, return 0.
2709 * (On return with 1 graph_lock is held.)
2711 static inline int lookup_chain_cache_add(struct task_struct
*curr
,
2712 struct held_lock
*hlock
,
2715 struct lock_class
*class = hlock_class(hlock
);
2716 struct lock_chain
*chain
= lookup_chain_cache(chain_key
);
2720 if (!check_no_collision(curr
, hlock
, chain
))
2723 if (very_verbose(class)) {
2724 printk("\nhash chain already cached, key: "
2725 "%016Lx tail class: [%px] %s\n",
2726 (unsigned long long)chain_key
,
2727 class->key
, class->name
);
2733 if (very_verbose(class)) {
2734 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2735 (unsigned long long)chain_key
, class->key
, class->name
);
2742 * We have to walk the chain again locked - to avoid duplicates:
2744 chain
= lookup_chain_cache(chain_key
);
2750 if (!add_chain_cache(curr
, hlock
, chain_key
))
2756 static int validate_chain(struct task_struct
*curr
, struct lockdep_map
*lock
,
2757 struct held_lock
*hlock
, int chain_head
, u64 chain_key
)
2760 * Trylock needs to maintain the stack of held locks, but it
2761 * does not add new dependencies, because trylock can be done
2764 * We look up the chain_key and do the O(N^2) check and update of
2765 * the dependencies only if this is a new dependency chain.
2766 * (If lookup_chain_cache_add() return with 1 it acquires
2767 * graph_lock for us)
2769 if (!hlock
->trylock
&& hlock
->check
&&
2770 lookup_chain_cache_add(curr
, hlock
, chain_key
)) {
2772 * Check whether last held lock:
2774 * - is irq-safe, if this lock is irq-unsafe
2775 * - is softirq-safe, if this lock is hardirq-unsafe
2777 * And check whether the new lock's dependency graph
2778 * could lead back to the previous lock.
2780 * any of these scenarios could lead to a deadlock. If
2783 int ret
= check_deadlock(curr
, hlock
, lock
, hlock
->read
);
2788 * Mark recursive read, as we jump over it when
2789 * building dependencies (just like we jump over
2795 * Add dependency only if this lock is not the head
2796 * of the chain, and if it's not a secondary read-lock:
2798 if (!chain_head
&& ret
!= 2) {
2799 if (!check_prevs_add(curr
, hlock
))
2805 /* after lookup_chain_cache_add(): */
2806 if (unlikely(!debug_locks
))
2813 static inline int validate_chain(struct task_struct
*curr
,
2814 struct lockdep_map
*lock
, struct held_lock
*hlock
,
2815 int chain_head
, u64 chain_key
)
2820 static void print_lock_trace(struct lock_trace
*trace
, unsigned int spaces
)
2826 * We are building curr_chain_key incrementally, so double-check
2827 * it from scratch, to make sure that it's done correctly:
2829 static void check_chain_key(struct task_struct
*curr
)
2831 #ifdef CONFIG_DEBUG_LOCKDEP
2832 struct held_lock
*hlock
, *prev_hlock
= NULL
;
2836 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
2837 hlock
= curr
->held_locks
+ i
;
2838 if (chain_key
!= hlock
->prev_chain_key
) {
2841 * We got mighty confused, our chain keys don't match
2842 * with what we expect, someone trample on our task state?
2844 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2845 curr
->lockdep_depth
, i
,
2846 (unsigned long long)chain_key
,
2847 (unsigned long long)hlock
->prev_chain_key
);
2851 * Whoops ran out of static storage again?
2853 if (DEBUG_LOCKS_WARN_ON(hlock
->class_idx
> MAX_LOCKDEP_KEYS
))
2856 if (prev_hlock
&& (prev_hlock
->irq_context
!=
2857 hlock
->irq_context
))
2859 chain_key
= iterate_chain_key(chain_key
, hlock
->class_idx
);
2862 if (chain_key
!= curr
->curr_chain_key
) {
2865 * More smoking hash instead of calculating it, damn see these
2866 * numbers float.. I bet that a pink elephant stepped on my memory.
2868 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2869 curr
->lockdep_depth
, i
,
2870 (unsigned long long)chain_key
,
2871 (unsigned long long)curr
->curr_chain_key
);
2876 static int mark_lock(struct task_struct
*curr
, struct held_lock
*this,
2877 enum lock_usage_bit new_bit
);
2879 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2883 print_usage_bug_scenario(struct held_lock
*lock
)
2885 struct lock_class
*class = hlock_class(lock
);
2887 printk(" Possible unsafe locking scenario:\n\n");
2891 __print_lock_name(class);
2892 printk(KERN_CONT
");\n");
2893 printk(" <Interrupt>\n");
2895 __print_lock_name(class);
2896 printk(KERN_CONT
");\n");
2897 printk("\n *** DEADLOCK ***\n\n");
2901 print_usage_bug(struct task_struct
*curr
, struct held_lock
*this,
2902 enum lock_usage_bit prev_bit
, enum lock_usage_bit new_bit
)
2904 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
2908 pr_warn("================================\n");
2909 pr_warn("WARNING: inconsistent lock state\n");
2910 print_kernel_ident();
2911 pr_warn("--------------------------------\n");
2913 pr_warn("inconsistent {%s} -> {%s} usage.\n",
2914 usage_str
[prev_bit
], usage_str
[new_bit
]);
2916 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2917 curr
->comm
, task_pid_nr(curr
),
2918 trace_hardirq_context(curr
), hardirq_count() >> HARDIRQ_SHIFT
,
2919 trace_softirq_context(curr
), softirq_count() >> SOFTIRQ_SHIFT
,
2920 trace_hardirqs_enabled(curr
),
2921 trace_softirqs_enabled(curr
));
2924 pr_warn("{%s} state was registered at:\n", usage_str
[prev_bit
]);
2925 print_lock_trace(hlock_class(this)->usage_traces
+ prev_bit
, 1);
2927 print_irqtrace_events(curr
);
2928 pr_warn("\nother info that might help us debug this:\n");
2929 print_usage_bug_scenario(this);
2931 lockdep_print_held_locks(curr
);
2933 pr_warn("\nstack backtrace:\n");
2940 * Print out an error if an invalid bit is set:
2943 valid_state(struct task_struct
*curr
, struct held_lock
*this,
2944 enum lock_usage_bit new_bit
, enum lock_usage_bit bad_bit
)
2946 if (unlikely(hlock_class(this)->usage_mask
& (1 << bad_bit
)))
2947 return print_usage_bug(curr
, this, bad_bit
, new_bit
);
2953 * print irq inversion bug:
2956 print_irq_inversion_bug(struct task_struct
*curr
,
2957 struct lock_list
*root
, struct lock_list
*other
,
2958 struct held_lock
*this, int forwards
,
2959 const char *irqclass
)
2961 struct lock_list
*entry
= other
;
2962 struct lock_list
*middle
= NULL
;
2965 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
2969 pr_warn("========================================================\n");
2970 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2971 print_kernel_ident();
2972 pr_warn("--------------------------------------------------------\n");
2973 pr_warn("%s/%d just changed the state of lock:\n",
2974 curr
->comm
, task_pid_nr(curr
));
2977 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass
);
2979 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass
);
2980 print_lock_name(other
->class);
2981 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2983 pr_warn("\nother info that might help us debug this:\n");
2985 /* Find a middle lock (if one exists) */
2986 depth
= get_lock_depth(other
);
2988 if (depth
== 0 && (entry
!= root
)) {
2989 pr_warn("lockdep:%s bad path found in chain graph\n", __func__
);
2993 entry
= get_lock_parent(entry
);
2995 } while (entry
&& entry
!= root
&& (depth
>= 0));
2997 print_irq_lock_scenario(root
, other
,
2998 middle
? middle
->class : root
->class, other
->class);
3000 print_irq_lock_scenario(other
, root
,
3001 middle
? middle
->class : other
->class, root
->class);
3003 lockdep_print_held_locks(curr
);
3005 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3006 if (!save_trace(&root
->trace
))
3008 print_shortest_lock_dependencies(other
, root
);
3010 pr_warn("\nstack backtrace:\n");
3017 * Prove that in the forwards-direction subgraph starting at <this>
3018 * there is no lock matching <mask>:
3021 check_usage_forwards(struct task_struct
*curr
, struct held_lock
*this,
3022 enum lock_usage_bit bit
, const char *irqclass
)
3025 struct lock_list root
;
3026 struct lock_list
*uninitialized_var(target_entry
);
3029 root
.class = hlock_class(this);
3030 ret
= find_usage_forwards(&root
, lock_flag(bit
), &target_entry
);
3032 return print_bfs_bug(ret
);
3036 return print_irq_inversion_bug(curr
, &root
, target_entry
,
3041 * Prove that in the backwards-direction subgraph starting at <this>
3042 * there is no lock matching <mask>:
3045 check_usage_backwards(struct task_struct
*curr
, struct held_lock
*this,
3046 enum lock_usage_bit bit
, const char *irqclass
)
3049 struct lock_list root
;
3050 struct lock_list
*uninitialized_var(target_entry
);
3053 root
.class = hlock_class(this);
3054 ret
= find_usage_backwards(&root
, lock_flag(bit
), &target_entry
);
3056 return print_bfs_bug(ret
);
3060 return print_irq_inversion_bug(curr
, &root
, target_entry
,
void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
		(void *)curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
		(void *)curr->hardirq_disable_ip);
	printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
		(void *)curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
		(void *)curr->softirq_disable_ip);
}
3081 static int HARDIRQ_verbose(struct lock_class
*class)
3084 return class_filter(class);
3089 static int SOFTIRQ_verbose(struct lock_class
*class)
3092 return class_filter(class);
3097 #define STRICT_READ_CHECKS 1
3099 static int (*state_verbose_f
[])(struct lock_class
*class) = {
3100 #define LOCKDEP_STATE(__STATE) \
3102 #include "lockdep_states.h"
3103 #undef LOCKDEP_STATE
3106 static inline int state_verbose(enum lock_usage_bit bit
,
3107 struct lock_class
*class)
3109 return state_verbose_f
[bit
>> LOCK_USAGE_DIR_MASK
](class);
3112 typedef int (*check_usage_f
)(struct task_struct
*, struct held_lock
*,
3113 enum lock_usage_bit bit
, const char *name
);
3116 mark_lock_irq(struct task_struct
*curr
, struct held_lock
*this,
3117 enum lock_usage_bit new_bit
)
3119 int excl_bit
= exclusive_bit(new_bit
);
3120 int read
= new_bit
& LOCK_USAGE_READ_MASK
;
3121 int dir
= new_bit
& LOCK_USAGE_DIR_MASK
;
3124 * mark USED_IN has to look forwards -- to ensure no dependency
3125 * has ENABLED state, which would allow recursion deadlocks.
3127 * mark ENABLED has to look backwards -- to ensure no dependee
3128 * has USED_IN state, which, again, would allow recursion deadlocks.
3130 check_usage_f usage
= dir
?
3131 check_usage_backwards
: check_usage_forwards
;
3134 * Validate that this particular lock does not have conflicting
3137 if (!valid_state(curr
, this, new_bit
, excl_bit
))
3141 * Validate that the lock dependencies don't have conflicting usage
3144 if ((!read
|| !dir
|| STRICT_READ_CHECKS
) &&
3145 !usage(curr
, this, excl_bit
, state_name(new_bit
& ~LOCK_USAGE_READ_MASK
)))
3149 * Check for read in write conflicts
3152 if (!valid_state(curr
, this, new_bit
, excl_bit
+ LOCK_USAGE_READ_MASK
))
3155 if (STRICT_READ_CHECKS
&&
3156 !usage(curr
, this, excl_bit
+ LOCK_USAGE_READ_MASK
,
3157 state_name(new_bit
+ LOCK_USAGE_READ_MASK
)))
3161 if (state_verbose(new_bit
, hlock_class(this)))
3168 * Mark all held locks with a usage bit:
3171 mark_held_locks(struct task_struct
*curr
, enum lock_usage_bit base_bit
)
3173 struct held_lock
*hlock
;
3176 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
3177 enum lock_usage_bit hlock_bit
= base_bit
;
3178 hlock
= curr
->held_locks
+ i
;
3181 hlock_bit
+= LOCK_USAGE_READ_MASK
;
3183 BUG_ON(hlock_bit
>= LOCK_USAGE_STATES
);
3188 if (!mark_lock(curr
, hlock
, hlock_bit
))
3196 * Hardirqs will be enabled:
3198 static void __trace_hardirqs_on_caller(unsigned long ip
)
3200 struct task_struct
*curr
= current
;
3202 /* we'll do an OFF -> ON transition: */
3203 curr
->hardirqs_enabled
= 1;
3206 * We are going to turn hardirqs on, so set the
3207 * usage bit for all held locks:
3209 if (!mark_held_locks(curr
, LOCK_ENABLED_HARDIRQ
))
3212 * If we have softirqs enabled, then set the usage
3213 * bit for all held locks. (disabled hardirqs prevented
3214 * this bit from being set before)
3216 if (curr
->softirqs_enabled
)
3217 if (!mark_held_locks(curr
, LOCK_ENABLED_SOFTIRQ
))
3220 curr
->hardirq_enable_ip
= ip
;
3221 curr
->hardirq_enable_event
= ++curr
->irq_events
;
3222 debug_atomic_inc(hardirqs_on_events
);
3225 void lockdep_hardirqs_on(unsigned long ip
)
3227 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3230 if (unlikely(current
->hardirqs_enabled
)) {
3232 * Neither irq nor preemption are disabled here
3233 * so this is racy by nature but losing one hit
3234 * in a stat is not a big deal.
3236 __debug_atomic_inc(redundant_hardirqs_on
);
3241 * We're enabling irqs and according to our state above irqs weren't
3242 * already enabled, yet we find the hardware thinks they are in fact
3243 * enabled.. someone messed up their IRQ state tracing.
3245 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3249 * See the fine text that goes along with this variable definition.
3251 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled
))
3255 * Can't allow enabling interrupts while in an interrupt handler,
3256 * that's general bad form and such. Recursion, limited stack etc..
3258 if (DEBUG_LOCKS_WARN_ON(current
->hardirq_context
))
3261 current
->lockdep_recursion
= 1;
3262 __trace_hardirqs_on_caller(ip
);
3263 current
->lockdep_recursion
= 0;
3265 NOKPROBE_SYMBOL(lockdep_hardirqs_on
);
3268 * Hardirqs were disabled:
3270 void lockdep_hardirqs_off(unsigned long ip
)
3272 struct task_struct
*curr
= current
;
3274 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3278 * So we're supposed to get called after you mask local IRQs, but for
3279 * some reason the hardware doesn't quite think you did a proper job.
3281 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3284 if (curr
->hardirqs_enabled
) {
3286 * We have done an ON -> OFF transition:
3288 curr
->hardirqs_enabled
= 0;
3289 curr
->hardirq_disable_ip
= ip
;
3290 curr
->hardirq_disable_event
= ++curr
->irq_events
;
3291 debug_atomic_inc(hardirqs_off_events
);
3293 debug_atomic_inc(redundant_hardirqs_off
);
3295 NOKPROBE_SYMBOL(lockdep_hardirqs_off
);
3298 * Softirqs will be enabled:
3300 void trace_softirqs_on(unsigned long ip
)
3302 struct task_struct
*curr
= current
;
3304 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3308 * We fancy IRQs being disabled here, see softirq.c, avoids
3309 * funny state and nesting things.
3311 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3314 if (curr
->softirqs_enabled
) {
3315 debug_atomic_inc(redundant_softirqs_on
);
3319 current
->lockdep_recursion
= 1;
3321 * We'll do an OFF -> ON transition:
3323 curr
->softirqs_enabled
= 1;
3324 curr
->softirq_enable_ip
= ip
;
3325 curr
->softirq_enable_event
= ++curr
->irq_events
;
3326 debug_atomic_inc(softirqs_on_events
);
3328 * We are going to turn softirqs on, so set the
3329 * usage bit for all held locks, if hardirqs are
3332 if (curr
->hardirqs_enabled
)
3333 mark_held_locks(curr
, LOCK_ENABLED_SOFTIRQ
);
3334 current
->lockdep_recursion
= 0;
3338 * Softirqs were disabled:
3340 void trace_softirqs_off(unsigned long ip
)
3342 struct task_struct
*curr
= current
;
3344 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3348 * We fancy IRQs being disabled here, see softirq.c
3350 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3353 if (curr
->softirqs_enabled
) {
3355 * We have done an ON -> OFF transition:
3357 curr
->softirqs_enabled
= 0;
3358 curr
->softirq_disable_ip
= ip
;
3359 curr
->softirq_disable_event
= ++curr
->irq_events
;
3360 debug_atomic_inc(softirqs_off_events
);
3362 * Whoops, we wanted softirqs off, so why aren't they?
3364 DEBUG_LOCKS_WARN_ON(!softirq_count());
3366 debug_atomic_inc(redundant_softirqs_off
);
3369 static int mark_irqflags(struct task_struct
*curr
, struct held_lock
*hlock
)
3372 * If non-trylock use in a hardirq or softirq context, then
3373 * mark the lock as used in these contexts:
3375 if (!hlock
->trylock
) {
3377 if (curr
->hardirq_context
)
3378 if (!mark_lock(curr
, hlock
,
3379 LOCK_USED_IN_HARDIRQ_READ
))
3381 if (curr
->softirq_context
)
3382 if (!mark_lock(curr
, hlock
,
3383 LOCK_USED_IN_SOFTIRQ_READ
))
3386 if (curr
->hardirq_context
)
3387 if (!mark_lock(curr
, hlock
, LOCK_USED_IN_HARDIRQ
))
3389 if (curr
->softirq_context
)
3390 if (!mark_lock(curr
, hlock
, LOCK_USED_IN_SOFTIRQ
))
3394 if (!hlock
->hardirqs_off
) {
3396 if (!mark_lock(curr
, hlock
,
3397 LOCK_ENABLED_HARDIRQ_READ
))
3399 if (curr
->softirqs_enabled
)
3400 if (!mark_lock(curr
, hlock
,
3401 LOCK_ENABLED_SOFTIRQ_READ
))
3404 if (!mark_lock(curr
, hlock
,
3405 LOCK_ENABLED_HARDIRQ
))
3407 if (curr
->softirqs_enabled
)
3408 if (!mark_lock(curr
, hlock
,
3409 LOCK_ENABLED_SOFTIRQ
))
static inline unsigned int task_irq_context(struct task_struct *task)
{
	return 2 * !!task->hardirq_context + !!task->softirq_context;
}
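
/*
 * The resulting encoding, for reference: 0 == process context,
 * 1 == softirq context, 2 == hardirq context, 3 == hardirq that
 * interrupted a softirq. Dependencies are never added across held
 * locks with different values (see separate_irq_context() and
 * check_prevs_add()).
 */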
3422 static int separate_irq_context(struct task_struct
*curr
,
3423 struct held_lock
*hlock
)
3425 unsigned int depth
= curr
->lockdep_depth
;
3428 * Keep track of points where we cross into an interrupt context:
3431 struct held_lock
*prev_hlock
;
3433 prev_hlock
= curr
->held_locks
+ depth
-1;
3435 * If we cross into another context, reset the
3436 * hash key (this also prevents the checking and the
3437 * adding of the dependency to 'prev'):
3439 if (prev_hlock
->irq_context
!= hlock
->irq_context
)
3445 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3448 int mark_lock_irq(struct task_struct
*curr
, struct held_lock
*this,
3449 enum lock_usage_bit new_bit
)
3451 WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
3455 static inline int mark_irqflags(struct task_struct
*curr
,
3456 struct held_lock
*hlock
)
3461 static inline unsigned int task_irq_context(struct task_struct
*task
)
3466 static inline int separate_irq_context(struct task_struct
*curr
,
3467 struct held_lock
*hlock
)
3472 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3475 * Mark a lock with a usage bit, and validate the state transition:
3477 static int mark_lock(struct task_struct
*curr
, struct held_lock
*this,
3478 enum lock_usage_bit new_bit
)
3480 unsigned int new_mask
= 1 << new_bit
, ret
= 1;
3483 * If already set then do not dirty the cacheline,
3484 * nor do any checks:
3486 if (likely(hlock_class(this)->usage_mask
& new_mask
))
3492 * Make sure we didn't race:
3494 if (unlikely(hlock_class(this)->usage_mask
& new_mask
)) {
3499 hlock_class(this)->usage_mask
|= new_mask
;
3501 if (!save_trace(hlock_class(this)->usage_traces
+ new_bit
))
3505 #define LOCKDEP_STATE(__STATE) \
3506 case LOCK_USED_IN_##__STATE: \
3507 case LOCK_USED_IN_##__STATE##_READ: \
3508 case LOCK_ENABLED_##__STATE: \
3509 case LOCK_ENABLED_##__STATE##_READ:
3510 #include "lockdep_states.h"
3511 #undef LOCKDEP_STATE
3512 ret
= mark_lock_irq(curr
, this, new_bit
);
3517 debug_atomic_dec(nr_unused_locks
);
3520 if (!debug_locks_off_graph_unlock())
3529 * We must printk outside of the graph_lock:
3532 printk("\nmarked lock as {%s}:\n", usage_str
[new_bit
]);
3534 print_irqtrace_events(curr
);
3542 * Initialize a lock instance's lock-class mapping info:
3544 void lockdep_init_map(struct lockdep_map
*lock
, const char *name
,
3545 struct lock_class_key
*key
, int subclass
)
3549 for (i
= 0; i
< NR_LOCKDEP_CACHING_CLASSES
; i
++)
3550 lock
->class_cache
[i
] = NULL
;
3552 #ifdef CONFIG_LOCK_STAT
3553 lock
->cpu
= raw_smp_processor_id();
3557 * Can't be having no nameless bastards around this place!
3559 if (DEBUG_LOCKS_WARN_ON(!name
)) {
3560 lock
->name
= "NULL";
3567 * No key, no joy, we need to hash something.
3569 if (DEBUG_LOCKS_WARN_ON(!key
))
3572 * Sanity check, the lock-class key must either have been allocated
3573 * statically or must have been registered as a dynamic key.
3575 if (!static_obj(key
) && !is_dynamic_key(key
)) {
3577 printk(KERN_ERR
"BUG: key %px has not been registered!\n", key
);
3578 DEBUG_LOCKS_WARN_ON(1);
3583 if (unlikely(!debug_locks
))
3587 unsigned long flags
;
3589 if (DEBUG_LOCKS_WARN_ON(current
->lockdep_recursion
))
3592 raw_local_irq_save(flags
);
3593 current
->lockdep_recursion
= 1;
3594 register_lock_class(lock
, subclass
, 1);
3595 current
->lockdep_recursion
= 0;
3596 raw_local_irq_restore(flags
);
3599 EXPORT_SYMBOL_GPL(lockdep_init_map
);
3601 struct lock_class_key __lockdep_no_validate__
;
3602 EXPORT_SYMBOL_GPL(__lockdep_no_validate__
);
3605 print_lock_nested_lock_not_held(struct task_struct
*curr
,
3606 struct held_lock
*hlock
,
3609 if (!debug_locks_off())
3611 if (debug_locks_silent
)
3615 pr_warn("==================================\n");
3616 pr_warn("WARNING: Nested lock was not taken\n");
3617 print_kernel_ident();
3618 pr_warn("----------------------------------\n");
3620 pr_warn("%s/%d is trying to lock:\n", curr
->comm
, task_pid_nr(curr
));
3623 pr_warn("\nbut this task is not holding:\n");
3624 pr_warn("%s\n", hlock
->nest_lock
->name
);
3626 pr_warn("\nstack backtrace:\n");
3629 pr_warn("\nother info that might help us debug this:\n");
3630 lockdep_print_held_locks(curr
);
3632 pr_warn("\nstack backtrace:\n");
3638 static int __lock_is_held(const struct lockdep_map
*lock
, int read
);
3641 * This gets called for every mutex_lock*()/spin_lock*() operation.
3642 * We maintain the dependency maps and validate the locking attempt:
3644 * The callers must make sure that IRQs are disabled before calling it,
3645 * otherwise we could get an interrupt which would want to take locks,
3646 * which would end up in lockdep again.
3648 static int __lock_acquire(struct lockdep_map
*lock
, unsigned int subclass
,
3649 int trylock
, int read
, int check
, int hardirqs_off
,
3650 struct lockdep_map
*nest_lock
, unsigned long ip
,
3651 int references
, int pin_count
)
3653 struct task_struct
*curr
= current
;
3654 struct lock_class
*class = NULL
;
3655 struct held_lock
*hlock
;
3661 if (unlikely(!debug_locks
))
3664 if (!prove_locking
|| lock
->key
== &__lockdep_no_validate__
)
3667 if (subclass
< NR_LOCKDEP_CACHING_CLASSES
)
3668 class = lock
->class_cache
[subclass
];
3672 if (unlikely(!class)) {
3673 class = register_lock_class(lock
, subclass
, 0);
3678 debug_class_ops_inc(class);
3680 if (very_verbose(class)) {
3681 printk("\nacquire class [%px] %s", class->key
, class->name
);
3682 if (class->name_version
> 1)
3683 printk(KERN_CONT
"#%d", class->name_version
);
3684 printk(KERN_CONT
"\n");
3689 * Add the lock to the list of currently held locks.
3690 * (we dont increase the depth just yet, up until the
3691 * dependency checks are done)
3693 depth
= curr
->lockdep_depth
;
3695 * Ran out of static storage for our per-task lock stack again have we?
3697 if (DEBUG_LOCKS_WARN_ON(depth
>= MAX_LOCK_DEPTH
))
3700 class_idx
= class - lock_classes
+ 1;
3703 hlock
= curr
->held_locks
+ depth
- 1;
3704 if (hlock
->class_idx
== class_idx
&& nest_lock
) {
3705 if (hlock
->references
) {
3707 * Check: unsigned int references:12, overflow.
3709 if (DEBUG_LOCKS_WARN_ON(hlock
->references
== (1 << 12)-1))
3712 hlock
->references
++;
3714 hlock
->references
= 2;
3721 hlock
= curr
->held_locks
+ depth
;
3723 * Plain impossible, we just registered it and checked it weren't no
3724 * NULL like.. I bet this mushroom I ate was good!
3726 if (DEBUG_LOCKS_WARN_ON(!class))
3728 hlock
->class_idx
= class_idx
;
3729 hlock
->acquire_ip
= ip
;
3730 hlock
->instance
= lock
;
3731 hlock
->nest_lock
= nest_lock
;
3732 hlock
->irq_context
= task_irq_context(curr
);
3733 hlock
->trylock
= trylock
;
3735 hlock
->check
= check
;
3736 hlock
->hardirqs_off
= !!hardirqs_off
;
3737 hlock
->references
= references
;
3738 #ifdef CONFIG_LOCK_STAT
3739 hlock
->waittime_stamp
= 0;
3740 hlock
->holdtime_stamp
= lockstat_clock();
3742 hlock
->pin_count
= pin_count
;
3744 if (check
&& !mark_irqflags(curr
, hlock
))
3747 /* mark it as used: */
3748 if (!mark_lock(curr
, hlock
, LOCK_USED
))
3752 * Calculate the chain hash: it's the combined hash of all the
3753 * lock keys along the dependency chain. We save the hash value
3754 * at every step so that we can get the current hash easily
3755 * after unlock. The chain hash is then used to cache dependency
3758 * The 'key ID' is what is the most compact key value to drive
3759 * the hash, not class->key.
3762 * Whoops, we did it again.. ran straight out of our static allocation.
3764 if (DEBUG_LOCKS_WARN_ON(class_idx
> MAX_LOCKDEP_KEYS
))
3767 chain_key
= curr
->curr_chain_key
;
3770 * How can we have a chain hash when we ain't got no keys?!
3772 if (DEBUG_LOCKS_WARN_ON(chain_key
!= 0))
3777 hlock
->prev_chain_key
= chain_key
;
3778 if (separate_irq_context(curr
, hlock
)) {
3782 chain_key
= iterate_chain_key(chain_key
, class_idx
);
3784 if (nest_lock
&& !__lock_is_held(nest_lock
, -1))
3785 return print_lock_nested_lock_not_held(curr
, hlock
, ip
);
3787 if (!debug_locks_silent
) {
3788 WARN_ON_ONCE(depth
&& !hlock_class(hlock
- 1)->key
);
3789 WARN_ON_ONCE(!hlock_class(hlock
)->key
);
3792 if (!validate_chain(curr
, lock
, hlock
, chain_head
, chain_key
))
3795 curr
->curr_chain_key
= chain_key
;
3796 curr
->lockdep_depth
++;
3797 check_chain_key(curr
);
3798 #ifdef CONFIG_DEBUG_LOCKDEP
3799 if (unlikely(!debug_locks
))
3802 if (unlikely(curr
->lockdep_depth
>= MAX_LOCK_DEPTH
)) {
3804 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3805 printk(KERN_DEBUG
"depth: %i max: %lu!\n",
3806 curr
->lockdep_depth
, MAX_LOCK_DEPTH
);
3808 lockdep_print_held_locks(current
);
3809 debug_show_all_locks();
3815 if (unlikely(curr
->lockdep_depth
> max_lockdep_depth
))
3816 max_lockdep_depth
= curr
->lockdep_depth
;
3822 print_unlock_imbalance_bug(struct task_struct
*curr
, struct lockdep_map
*lock
,
3825 if (!debug_locks_off())
3827 if (debug_locks_silent
)
3831 pr_warn("=====================================\n");
3832 pr_warn("WARNING: bad unlock balance detected!\n");
3833 print_kernel_ident();
3834 pr_warn("-------------------------------------\n");
3835 pr_warn("%s/%d is trying to release lock (",
3836 curr
->comm
, task_pid_nr(curr
));
3837 print_lockdep_cache(lock
);
3840 pr_warn("but there are no more locks to release!\n");
3841 pr_warn("\nother info that might help us debug this:\n");
3842 lockdep_print_held_locks(curr
);
3844 pr_warn("\nstack backtrace:\n");
3850 static int match_held_lock(const struct held_lock
*hlock
,
3851 const struct lockdep_map
*lock
)
3853 if (hlock
->instance
== lock
)
3856 if (hlock
->references
) {
3857 const struct lock_class
*class = lock
->class_cache
[0];
3860 class = look_up_lock_class(lock
, 0);
3863 * If look_up_lock_class() failed to find a class, we're trying
3864 * to test if we hold a lock that has never yet been acquired.
3865 * Clearly if the lock hasn't been acquired _ever_, we're not
3866 * holding it either, so report failure.
3872 * References, but not a lock we're actually ref-counting?
3873 * State got messed up, follow the sites that change ->references
3874 * and try to make sense of it.
3876 if (DEBUG_LOCKS_WARN_ON(!hlock
->nest_lock
))
3879 if (hlock
->class_idx
== class - lock_classes
+ 1)
3886 /* @depth must not be zero */
3887 static struct held_lock
*find_held_lock(struct task_struct
*curr
,
3888 struct lockdep_map
*lock
,
3889 unsigned int depth
, int *idx
)
3891 struct held_lock
*ret
, *hlock
, *prev_hlock
;
3895 hlock
= curr
->held_locks
+ i
;
3897 if (match_held_lock(hlock
, lock
))
3901 for (i
--, prev_hlock
= hlock
--;
3903 i
--, prev_hlock
= hlock
--) {
3905 * We must not cross into another context:
3907 if (prev_hlock
->irq_context
!= hlock
->irq_context
) {
3911 if (match_held_lock(hlock
, lock
)) {
3922 static int reacquire_held_locks(struct task_struct
*curr
, unsigned int depth
,
3925 struct held_lock
*hlock
;
3927 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3930 for (hlock
= curr
->held_locks
+ idx
; idx
< depth
; idx
++, hlock
++) {
3931 if (!__lock_acquire(hlock
->instance
,
3932 hlock_class(hlock
)->subclass
,
3934 hlock
->read
, hlock
->check
,
3935 hlock
->hardirqs_off
,
3936 hlock
->nest_lock
, hlock
->acquire_ip
,
3937 hlock
->references
, hlock
->pin_count
))
3944 __lock_set_class(struct lockdep_map
*lock
, const char *name
,
3945 struct lock_class_key
*key
, unsigned int subclass
,
3948 struct task_struct
*curr
= current
;
3949 struct held_lock
*hlock
;
3950 struct lock_class
*class;
3954 if (unlikely(!debug_locks
))
3957 depth
= curr
->lockdep_depth
;
3959 * This function is about (re)setting the class of a held lock,
3960 * yet we're not actually holding any locks. Naughty user!
3962 if (DEBUG_LOCKS_WARN_ON(!depth
))
3965 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
3967 return print_unlock_imbalance_bug(curr
, lock
, ip
);
3969 lockdep_init_map(lock
, name
, key
, 0);
3970 class = register_lock_class(lock
, subclass
, 0);
3971 hlock
->class_idx
= class - lock_classes
+ 1;
3973 curr
->lockdep_depth
= i
;
3974 curr
->curr_chain_key
= hlock
->prev_chain_key
;
3976 if (reacquire_held_locks(curr
, depth
, i
))
3980 * I took it apart and put it back together again, except now I have
3981 * these 'spare' parts.. where shall I put them.
3983 if (DEBUG_LOCKS_WARN_ON(curr
->lockdep_depth
!= depth
))
3988 static int __lock_downgrade(struct lockdep_map
*lock
, unsigned long ip
)
3990 struct task_struct
*curr
= current
;
3991 struct held_lock
*hlock
;
3995 if (unlikely(!debug_locks
))
3998 depth
= curr
->lockdep_depth
;
4000 * This function is about (re)setting the class of a held lock,
4001 * yet we're not actually holding any locks. Naughty user!
4003 if (DEBUG_LOCKS_WARN_ON(!depth
))
4006 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4008 return print_unlock_imbalance_bug(curr
, lock
, ip
);
4010 curr
->lockdep_depth
= i
;
4011 curr
->curr_chain_key
= hlock
->prev_chain_key
;
4013 WARN(hlock
->read
, "downgrading a read lock");
4015 hlock
->acquire_ip
= ip
;
4017 if (reacquire_held_locks(curr
, depth
, i
))
4021 * I took it apart and put it back together again, except now I have
4022 * these 'spare' parts.. where shall I put them.
4024 if (DEBUG_LOCKS_WARN_ON(curr
->lockdep_depth
!= depth
))
4030 * Remove the lock to the list of currently held locks - this gets
4031 * called on mutex_unlock()/spin_unlock*() (or on a failed
4032 * mutex_lock_interruptible()).
4034 * @nested is an hysterical artifact, needs a tree wide cleanup.
4037 __lock_release(struct lockdep_map
*lock
, int nested
, unsigned long ip
)
4039 struct task_struct
*curr
= current
;
4040 struct held_lock
*hlock
;
4044 if (unlikely(!debug_locks
))
4047 depth
= curr
->lockdep_depth
;
4049 * So we're all set to release this lock.. wait what lock? We don't
4050 * own any locks, you've been drinking again?
4052 if (DEBUG_LOCKS_WARN_ON(depth
<= 0))
4053 return print_unlock_imbalance_bug(curr
, lock
, ip
);
4056 * Check whether the lock exists in the current stack
4059 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4061 return print_unlock_imbalance_bug(curr
, lock
, ip
);
4063 if (hlock
->instance
== lock
)
4064 lock_release_holdtime(hlock
);
4066 WARN(hlock
->pin_count
, "releasing a pinned lock\n");
4068 if (hlock
->references
) {
4069 hlock
->references
--;
4070 if (hlock
->references
) {
4072 * We had, and after removing one, still have
4073 * references, the current lock stack is still
4074 * valid. We're done!
4081 * We have the right lock to unlock, 'hlock' points to it.
4082 * Now we remove it from the stack, and add back the other
4083 * entries (if any), recalculating the hash along the way:
4086 curr
->lockdep_depth
= i
;
4087 curr
->curr_chain_key
= hlock
->prev_chain_key
;
4090 * The most likely case is when the unlock is on the innermost
4091 * lock. In this case, we are done!
4096 if (reacquire_held_locks(curr
, depth
, i
+ 1))
4100 * We had N bottles of beer on the wall, we drank one, but now
4101 * there's not N-1 bottles of beer left on the wall...
4103 DEBUG_LOCKS_WARN_ON(curr
->lockdep_depth
!= depth
-1);
4106 * Since reacquire_held_locks() would have called check_chain_key()
4107 * indirectly via __lock_acquire(), we don't need to do it again
static nokprobe_inline
int __lock_is_held(const struct lockdep_map *lock, int read)
{
	struct task_struct *curr = current;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			if (read == -1 || hlock->read == read)
				return 1;

			return 0;
		}
	}

	return 0;
}
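
/*
 * The usual consumer of this helper is lockdep_assert_held() and its
 * variants. A minimal usage sketch (struct foo and foo_update() are
 * made up):
 *
 *	static void foo_update(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);	/* warns unless f->lock is held */
 *		f->counter++;
 *	}
 */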
static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return cookie;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			/*
			 * Grab 16bits of randomness; this is sufficient to not
			 * be guessable and still allows some pin nesting in
			 * our u32 pin_count.
			 */
			cookie.val = 1 + (prandom_u32() >> 16);
			hlock->pin_count += cookie.val;
			return cookie;
		}
	}

	WARN(1, "pinning an unheld lock\n");
	return cookie;
}
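
/*
 * A usage sketch for the pin/unpin machinery; the scheduler's runqueue
 * lock is the main real user, struct q here is made up. Pinning asserts
 * that the lock is not released, not even temporarily, until the cookie
 * is handed back:
 *
 *	struct pin_cookie cookie;
 *
 *	raw_spin_lock(&q->lock);
 *	cookie = lockdep_pin_lock(&q->lock);
 *	...		(dropping q->lock here would trigger the
 *	...		 "releasing a pinned lock" warning)
 *	lockdep_unpin_lock(&q->lock, cookie);
 *	raw_spin_unlock(&q->lock);
 */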
4161 static void __lock_repin_lock(struct lockdep_map
*lock
, struct pin_cookie cookie
)
4163 struct task_struct
*curr
= current
;
4166 if (unlikely(!debug_locks
))
4169 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
4170 struct held_lock
*hlock
= curr
->held_locks
+ i
;
4172 if (match_held_lock(hlock
, lock
)) {
4173 hlock
->pin_count
+= cookie
.val
;
4178 WARN(1, "pinning an unheld lock\n");
4181 static void __lock_unpin_lock(struct lockdep_map
*lock
, struct pin_cookie cookie
)
4183 struct task_struct
*curr
= current
;
4186 if (unlikely(!debug_locks
))
4189 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
4190 struct held_lock
*hlock
= curr
->held_locks
+ i
;
4192 if (match_held_lock(hlock
, lock
)) {
4193 if (WARN(!hlock
->pin_count
, "unpinning an unpinned lock\n"))
4196 hlock
->pin_count
-= cookie
.val
;
4198 if (WARN((int)hlock
->pin_count
< 0, "pin count corrupted\n"))
4199 hlock
->pin_count
= 0;
4205 WARN(1, "unpinning an unheld lock\n");
/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
    defined(CONFIG_TRACE_IRQFLAGS)
	if (!debug_locks)
		return;

	if (irqs_disabled_flags(flags)) {
		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}

	/*
	 * We don't accurately track softirq state in e.g.
	 * hardirq contexts (such as on 4KSTACKS), so only
	 * check if not in hardirq contexts:
	 */
	if (!hardirq_count()) {
		if (softirq_count()) {
			/* like the above, but with softirqs */
			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
		} else {
			/* lick the above, does it taste good? */
			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
		}
	}

	if (!debug_locks)
		print_irqtrace_events(current);
#endif
}
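
/*
 * The two "unannotated" warnings above typically mean the hardware IRQ
 * state was changed behind lockdep's back, e.g. via the raw accessors
 * (a made-up fragment, assuming CONFIG_TRACE_IRQFLAGS):
 *
 *	raw_local_irq_disable();	(hardware state changes, but
 *	spin_lock(&lock);		 ->hardirqs_enabled is still 1,
 *					 so: "unannotated irqs-off.")
 *
 * local_irq_disable()/local_irq_save() call the trace_hardirqs_*() hooks
 * and keep the software copy of the state in sync.
 */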
4248 void lock_set_class(struct lockdep_map
*lock
, const char *name
,
4249 struct lock_class_key
*key
, unsigned int subclass
,
4252 unsigned long flags
;
4254 if (unlikely(current
->lockdep_recursion
))
4257 raw_local_irq_save(flags
);
4258 current
->lockdep_recursion
= 1;
4260 if (__lock_set_class(lock
, name
, key
, subclass
, ip
))
4261 check_chain_key(current
);
4262 current
->lockdep_recursion
= 0;
4263 raw_local_irq_restore(flags
);
4265 EXPORT_SYMBOL_GPL(lock_set_class
);
4267 void lock_downgrade(struct lockdep_map
*lock
, unsigned long ip
)
4269 unsigned long flags
;
4271 if (unlikely(current
->lockdep_recursion
))
4274 raw_local_irq_save(flags
);
4275 current
->lockdep_recursion
= 1;
4277 if (__lock_downgrade(lock
, ip
))
4278 check_chain_key(current
);
4279 current
->lockdep_recursion
= 0;
4280 raw_local_irq_restore(flags
);
4282 EXPORT_SYMBOL_GPL(lock_downgrade
);
/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
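
/*
 * Sketch of how a locking primitive hooks into lock_acquire() above and
 * lock_release() below (my_lock and friends are illustrative; real
 * primitives normally go through the spin_acquire()/mutex_acquire()
 * style wrappers from lockdep.h rather than calling these directly):
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		do_the_real_locking(l);		(hypothetical)
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		do_the_real_unlocking(l);	(hypothetical)
 *	}
 */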
4309 void lock_release(struct lockdep_map
*lock
, int nested
,
4312 unsigned long flags
;
4314 if (unlikely(current
->lockdep_recursion
))
4317 raw_local_irq_save(flags
);
4319 current
->lockdep_recursion
= 1;
4320 trace_lock_release(lock
, ip
);
4321 if (__lock_release(lock
, nested
, ip
))
4322 check_chain_key(current
);
4323 current
->lockdep_recursion
= 0;
4324 raw_local_irq_restore(flags
);
4326 EXPORT_SYMBOL_GPL(lock_release
);
4328 int lock_is_held_type(const struct lockdep_map
*lock
, int read
)
4330 unsigned long flags
;
4333 if (unlikely(current
->lockdep_recursion
))
4334 return 1; /* avoid false negative lockdep_assert_held() */
4336 raw_local_irq_save(flags
);
4339 current
->lockdep_recursion
= 1;
4340 ret
= __lock_is_held(lock
, read
);
4341 current
->lockdep_recursion
= 0;
4342 raw_local_irq_restore(flags
);
4346 EXPORT_SYMBOL_GPL(lock_is_held_type
);
4347 NOKPROBE_SYMBOL(lock_is_held_type
);
4349 struct pin_cookie
lock_pin_lock(struct lockdep_map
*lock
)
4351 struct pin_cookie cookie
= NIL_COOKIE
;
4352 unsigned long flags
;
4354 if (unlikely(current
->lockdep_recursion
))
4357 raw_local_irq_save(flags
);
4360 current
->lockdep_recursion
= 1;
4361 cookie
= __lock_pin_lock(lock
);
4362 current
->lockdep_recursion
= 0;
4363 raw_local_irq_restore(flags
);
4367 EXPORT_SYMBOL_GPL(lock_pin_lock
);
4369 void lock_repin_lock(struct lockdep_map
*lock
, struct pin_cookie cookie
)
4371 unsigned long flags
;
4373 if (unlikely(current
->lockdep_recursion
))
4376 raw_local_irq_save(flags
);
4379 current
->lockdep_recursion
= 1;
4380 __lock_repin_lock(lock
, cookie
);
4381 current
->lockdep_recursion
= 0;
4382 raw_local_irq_restore(flags
);
4384 EXPORT_SYMBOL_GPL(lock_repin_lock
);
4386 void lock_unpin_lock(struct lockdep_map
*lock
, struct pin_cookie cookie
)
4388 unsigned long flags
;
4390 if (unlikely(current
->lockdep_recursion
))
4393 raw_local_irq_save(flags
);
4396 current
->lockdep_recursion
= 1;
4397 __lock_unpin_lock(lock
, cookie
);
4398 current
->lockdep_recursion
= 0;
4399 raw_local_irq_restore(flags
);
4401 EXPORT_SYMBOL_GPL(lock_unpin_lock
);
4403 #ifdef CONFIG_LOCK_STAT
4405 print_lock_contention_bug(struct task_struct
*curr
, struct lockdep_map
*lock
,
4408 if (!debug_locks_off())
4410 if (debug_locks_silent
)
4414 pr_warn("=================================\n");
4415 pr_warn("WARNING: bad contention detected!\n");
4416 print_kernel_ident();
4417 pr_warn("---------------------------------\n");
4418 pr_warn("%s/%d is trying to contend lock (",
4419 curr
->comm
, task_pid_nr(curr
));
4420 print_lockdep_cache(lock
);
4423 pr_warn("but there are no locks held!\n");
4424 pr_warn("\nother info that might help us debug this:\n");
4425 lockdep_print_held_locks(curr
);
4427 pr_warn("\nstack backtrace:\n");
4434 __lock_contended(struct lockdep_map
*lock
, unsigned long ip
)
4436 struct task_struct
*curr
= current
;
4437 struct held_lock
*hlock
;
4438 struct lock_class_stats
*stats
;
4440 int i
, contention_point
, contending_point
;
4442 depth
= curr
->lockdep_depth
;
4444 * Whee, we contended on this lock, except it seems we're not
4445 * actually trying to acquire anything much at all..
4447 if (DEBUG_LOCKS_WARN_ON(!depth
))
4450 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4452 print_lock_contention_bug(curr
, lock
, ip
);
4456 if (hlock
->instance
!= lock
)
4459 hlock
->waittime_stamp
= lockstat_clock();
4461 contention_point
= lock_point(hlock_class(hlock
)->contention_point
, ip
);
4462 contending_point
= lock_point(hlock_class(hlock
)->contending_point
,
4465 stats
= get_lock_stats(hlock_class(hlock
));
4466 if (contention_point
< LOCKSTAT_POINTS
)
4467 stats
->contention_point
[contention_point
]++;
4468 if (contending_point
< LOCKSTAT_POINTS
)
4469 stats
->contending_point
[contending_point
]++;
4470 if (lock
->cpu
!= smp_processor_id())
4471 stats
->bounces
[bounce_contended
+ !!hlock
->read
]++;
4475 __lock_acquired(struct lockdep_map
*lock
, unsigned long ip
)
4477 struct task_struct
*curr
= current
;
4478 struct held_lock
*hlock
;
4479 struct lock_class_stats
*stats
;
4481 u64 now
, waittime
= 0;
4484 depth
= curr
->lockdep_depth
;
4486 * Yay, we acquired ownership of this lock we didn't try to
4487 * acquire, how the heck did that happen?
4489 if (DEBUG_LOCKS_WARN_ON(!depth
))
4492 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4494 print_lock_contention_bug(curr
, lock
, _RET_IP_
);
4498 if (hlock
->instance
!= lock
)
4501 cpu
= smp_processor_id();
4502 if (hlock
->waittime_stamp
) {
4503 now
= lockstat_clock();
4504 waittime
= now
- hlock
->waittime_stamp
;
4505 hlock
->holdtime_stamp
= now
;
4508 trace_lock_acquired(lock
, ip
);
4510 stats
= get_lock_stats(hlock_class(hlock
));
4513 lock_time_inc(&stats
->read_waittime
, waittime
);
4515 lock_time_inc(&stats
->write_waittime
, waittime
);
4517 if (lock
->cpu
!= cpu
)
4518 stats
->bounces
[bounce_acquired
+ !!hlock
->read
]++;
4524 void lock_contended(struct lockdep_map
*lock
, unsigned long ip
)
4526 unsigned long flags
;
4528 if (unlikely(!lock_stat
|| !debug_locks
))
4531 if (unlikely(current
->lockdep_recursion
))
4534 raw_local_irq_save(flags
);
4536 current
->lockdep_recursion
= 1;
4537 trace_lock_contended(lock
, ip
);
4538 __lock_contended(lock
, ip
);
4539 current
->lockdep_recursion
= 0;
4540 raw_local_irq_restore(flags
);
4542 EXPORT_SYMBOL_GPL(lock_contended
);
4544 void lock_acquired(struct lockdep_map
*lock
, unsigned long ip
)
4546 unsigned long flags
;
4548 if (unlikely(!lock_stat
|| !debug_locks
))
4551 if (unlikely(current
->lockdep_recursion
))
4554 raw_local_irq_save(flags
);
4556 current
->lockdep_recursion
= 1;
4557 __lock_acquired(lock
, ip
);
4558 current
->lockdep_recursion
= 0;
4559 raw_local_irq_restore(flags
);
4561 EXPORT_SYMBOL_GPL(lock_acquired
);
/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	current->curr_chain_key = 0;
	current->lockdep_depth = 0;
	current->lockdep_recursion = 0;
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_HLIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}
4588 /* Remove a class from a lock chain. Must be called with the graph lock held. */
4589 static void remove_class_from_lock_chain(struct pending_free
*pf
,
4590 struct lock_chain
*chain
,
4591 struct lock_class
*class)
4593 #ifdef CONFIG_PROVE_LOCKING
4594 struct lock_chain
*new_chain
;
4598 for (i
= chain
->base
; i
< chain
->base
+ chain
->depth
; i
++) {
4599 if (chain_hlocks
[i
] != class - lock_classes
)
4601 /* The code below leaks one chain_hlock[] entry. */
4602 if (--chain
->depth
> 0) {
4603 memmove(&chain_hlocks
[i
], &chain_hlocks
[i
+ 1],
4604 (chain
->base
+ chain
->depth
- i
) *
4605 sizeof(chain_hlocks
[0]));
4608 * Each lock class occurs at most once in a lock chain so once
4609 * we found a match we can break out of this loop.
4613 /* Since the chain has not been modified, return. */
4618 for (i
= chain
->base
; i
< chain
->base
+ chain
->depth
; i
++)
4619 chain_key
= iterate_chain_key(chain_key
, chain_hlocks
[i
] + 1);
4620 if (chain
->depth
&& chain
->chain_key
== chain_key
)
4622 /* Overwrite the chain key for concurrent RCU readers. */
4623 WRITE_ONCE(chain
->chain_key
, chain_key
);
4625 * Note: calling hlist_del_rcu() from inside a
4626 * hlist_for_each_entry_rcu() loop is safe.
4628 hlist_del_rcu(&chain
->entry
);
4629 __set_bit(chain
- lock_chains
, pf
->lock_chains_being_freed
);
4630 if (chain
->depth
== 0)
4633 * If the modified lock chain matches an existing lock chain, drop
4634 * the modified lock chain.
4636 if (lookup_chain_cache(chain_key
))
4638 new_chain
= alloc_lock_chain();
4639 if (WARN_ON_ONCE(!new_chain
)) {
4643 *new_chain
= *chain
;
4644 hlist_add_head_rcu(&new_chain
->entry
, chainhashentry(chain_key
));
4648 /* Must be called with the graph lock held. */
4649 static void remove_class_from_lock_chains(struct pending_free
*pf
,
4650 struct lock_class
*class)
4652 struct lock_chain
*chain
;
4653 struct hlist_head
*head
;
4656 for (i
= 0; i
< ARRAY_SIZE(chainhash_table
); i
++) {
4657 head
= chainhash_table
+ i
;
4658 hlist_for_each_entry_rcu(chain
, head
, entry
) {
4659 remove_class_from_lock_chain(pf
, chain
, class);
4665 * Remove all references to a lock class. The caller must hold the graph lock.
4667 static void zap_class(struct pending_free
*pf
, struct lock_class
*class)
4669 struct lock_list
*entry
;
4672 WARN_ON_ONCE(!class->key
);
4675 * Remove all dependencies this lock is
4678 for_each_set_bit(i
, list_entries_in_use
, ARRAY_SIZE(list_entries
)) {
4679 entry
= list_entries
+ i
;
4680 if (entry
->class != class && entry
->links_to
!= class)
4682 __clear_bit(i
, list_entries_in_use
);
4684 list_del_rcu(&entry
->entry
);
4686 if (list_empty(&class->locks_after
) &&
4687 list_empty(&class->locks_before
)) {
4688 list_move_tail(&class->lock_entry
, &pf
->zapped
);
4689 hlist_del_rcu(&class->hash_entry
);
4690 WRITE_ONCE(class->key
, NULL
);
4691 WRITE_ONCE(class->name
, NULL
);
4694 WARN_ONCE(true, "%s() failed for class %s\n", __func__
,
4698 remove_class_from_lock_chains(pf
, class);
4701 static void reinit_class(struct lock_class
*class)
4703 void *const p
= class;
4704 const unsigned int offset
= offsetof(struct lock_class
, key
);
4706 WARN_ON_ONCE(!class->lock_entry
.next
);
4707 WARN_ON_ONCE(!list_empty(&class->locks_after
));
4708 WARN_ON_ONCE(!list_empty(&class->locks_before
));
4709 memset(p
+ offset
, 0, sizeof(*class) - offset
);
4710 WARN_ON_ONCE(!class->lock_entry
.next
);
4711 WARN_ON_ONCE(!list_empty(&class->locks_after
));
4712 WARN_ON_ONCE(!list_empty(&class->locks_before
));
static inline int within(const void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}

static bool inside_selftest(void)
{
	return current == lockdep_selftest_task_struct;
}

/* The caller must hold the graph lock. */
static struct pending_free *get_pending_free(void)
{
	return delayed_free.pf + delayed_free.index;
}
static void free_zapped_rcu(struct rcu_head *cb);

/*
 * Schedule an RCU callback if no RCU callback is pending. Must be called with
 * the graph lock held.
 */
static void call_rcu_zapped(struct pending_free *pf)
{
	WARN_ON_ONCE(inside_selftest());

	if (list_empty(&pf->zapped))
		return;

	if (delayed_free.scheduled)
		return;

	delayed_free.scheduled = true;

	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
	delayed_free.index ^= 1;

	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}
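
/*
 * Life cycle of the two pending_free slots driven by the helper above
 * (an outline of the existing mechanism, not new behaviour):
 *
 *	zap_class()        -> queues classes on delayed_free.pf[index]
 *	call_rcu_zapped()  -> index ^= 1, hands the full slot to RCU
 *	free_zapped_rcu()  -> reinits the zapped classes, splices them
 *	                      back onto free_lock_classes, and re-arms
 *	                      the callback if the other slot filled up
 */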
4755 /* The caller must hold the graph lock. May be called from RCU context. */
4756 static void __free_zapped_classes(struct pending_free
*pf
)
4758 struct lock_class
*class;
4760 check_data_structures();
4762 list_for_each_entry(class, &pf
->zapped
, lock_entry
)
4763 reinit_class(class);
4765 list_splice_init(&pf
->zapped
, &free_lock_classes
);
4767 #ifdef CONFIG_PROVE_LOCKING
4768 bitmap_andnot(lock_chains_in_use
, lock_chains_in_use
,
4769 pf
->lock_chains_being_freed
, ARRAY_SIZE(lock_chains
));
4770 bitmap_clear(pf
->lock_chains_being_freed
, 0, ARRAY_SIZE(lock_chains
));
static void free_zapped_rcu(struct rcu_head *ch)
{
	struct pending_free *pf;
	unsigned long flags;

	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
		return;

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	current->lockdep_recursion = 1;

	/* Free the classes on the closed (just expired) list. */
	pf = delayed_free.pf + (delayed_free.index ^ 1);
	__free_zapped_classes(pf);
	delayed_free.scheduled = false;

	/*
	 * If there's anything on the open list, close and start a new callback.
	 */
	call_rcu_zapped(delayed_free.pf + delayed_free.index);

	current->lockdep_recursion = 0;
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
/*
 * Remove all lock classes from the class hash table and from the
 * all_lock_classes list whose key or name is in the address range [start,
 * start + size). Move these lock classes to the zapped_classes list. Must
 * be called with the graph lock held.
 */
static void __lockdep_free_key_range(struct pending_free *pf, void *start,
				     unsigned long size)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i;

	/* Unhash all classes that were created by a module. */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			if (!within(class->key, start, size) &&
			    !within(class->name, start, size))
				continue;
			zap_class(pf, class);
		}
	}
}
/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed; and possibly re-used by other modules.
 *
 * We will have had one synchronize_rcu() before getting here, so we're
 * guaranteed nobody will look up these exact classes -- they're properly dead
 * but still allocated.
 */
static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
	struct pending_free *pf;
	unsigned long flags;

	init_data_structures_once();

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	current->lockdep_recursion = 1;
	pf = get_pending_free();
	__lockdep_free_key_range(pf, start, size);
	call_rcu_zapped(pf);
	current->lockdep_recursion = 0;
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);

	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 */
	synchronize_rcu();
}
/*
 * Free all lockdep keys in the range [start, start+size). Does not sleep.
 * Ignores debug_locks. Must only be used by the lockdep selftests.
 */
static void lockdep_free_key_range_imm(void *start, unsigned long size)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	init_data_structures_once();

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	__lockdep_free_key_range(pf, start, size);
	__free_zapped_classes(pf);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
void lockdep_free_key_range(void *start, unsigned long size)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_free_key_range_imm(start, size);
	else
		lockdep_free_key_range_reg(start, size);
}
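
/*
 * Illustrative sketch (not part of this file): a caller about to release a
 * region that may contain lock class keys or lock names would notify
 * lockdep first, along these lines. "unload_region" and "struct region"
 * are hypothetical names used only for this example.
 *
 *	static void unload_region(struct region *r)
 *	{
 *		// Zap classes whose key or name lies in
 *		// [r->base, r->base + r->size) before the memory is freed
 *		// and possibly re-used.
 *		lockdep_free_key_range(r->base, r->size);
 *		vfree(r->base);
 *	}
 */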
/*
 * Check whether any element of the @lock->class_cache[] array refers to a
 * registered lock class. The caller must hold either the graph lock or the
 * RCU read lock.
 */
static bool lock_class_cache_is_registered(struct lockdep_map *lock)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
				if (lock->class_cache[j] == class)
					return true;
		}
	}

	return false;
}
/* The caller must hold the graph lock. Does not sleep. */
static void __lockdep_reset_lock(struct pending_free *pf,
				 struct lockdep_map *lock)
{
	struct lock_class *class;
	int j;

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(pf, class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
		debug_locks_off();
}
/*
 * Remove all information lockdep has about a lock if debug_locks == 1. Free
 * released data structures from RCU context.
 */
static void lockdep_reset_lock_reg(struct lockdep_map *lock)
{
	struct pending_free *pf;
	unsigned long flags;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();
	if (!locked)
		goto out_irq;

	pf = get_pending_free();
	__lockdep_reset_lock(pf, lock);
	call_rcu_zapped(pf);

	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);
}
/*
 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
 * lockdep selftests.
 */
static void lockdep_reset_lock_imm(struct lockdep_map *lock)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	__lockdep_reset_lock(pf, lock);
	__free_zapped_classes(pf);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
void lockdep_reset_lock(struct lockdep_map *lock)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_reset_lock_imm(lock);
	else
		lockdep_reset_lock_reg(lock);
}
/* Unregister a dynamically allocated key. */
void lockdep_unregister_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head = keyhashentry(key);
	struct lock_class_key *k;
	struct pending_free *pf;
	unsigned long flags;
	bool found = false;

	might_sleep();

	if (WARN_ON_ONCE(static_obj(key)))
		return;

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto out_irq;

	pf = get_pending_free();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			hlist_del_rcu(&k->hash_entry);
			found = true;
			break;
		}
	}
	WARN_ON_ONCE(!found);
	__lockdep_free_key_range(pf, key, 1);
	call_rcu_zapped(pf);
	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);

	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(lockdep_unregister_key);
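
/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * lockdep_register_key() and lockdep_unregister_key() for a key that lives
 * in dynamically allocated memory. "struct foo" and its fields are
 * hypothetical; lockdep_register_key(), lockdep_set_class() and the
 * spinlock API are the real interfaces.
 *
 *	struct foo {
 *		struct lock_class_key key;
 *		spinlock_t lock;
 *	};
 *
 *	static struct foo *foo_alloc(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (!f)
 *			return NULL;
 *		lockdep_register_key(&f->key);
 *		spin_lock_init(&f->lock);
 *		lockdep_set_class(&f->lock, &f->key);
 *		return f;
 *	}
 *
 *	static void foo_free(struct foo *f)
 *	{
 *		// Must run before the key's memory is freed; may sleep.
 *		lockdep_unregister_key(&f->key);
 *		kfree(f);
 *	}
 */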
void __init lockdep_init(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %zu kB\n",
	       (sizeof(lock_classes) +
		sizeof(classhash_table) +
		sizeof(list_entries) +
		sizeof(list_entries_in_use) +
		sizeof(chainhash_table) +
		sizeof(delayed_free)
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(lock_chains)
		+ sizeof(lock_chains_in_use)
		+ sizeof(chain_hlocks)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %zu bytes\n",
	       sizeof(((struct task_struct *)NULL)->held_locks));
}
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=========================\n");
	pr_warn("WARNING: held lock freed!\n");
	print_kernel_ident();
	pr_warn("-------------------------\n");
	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}
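
/*
 * In other words, not_in_range() reports whether the held lock's object
 * [lock_from, lock_from + lock_len) and the memory being freed
 * [mem_from, mem_from + mem_len) are disjoint. For example, a 64-byte lock
 * object at 0x1000 overlaps a free of 0x0fc0..0x1040, so not_in_range()
 * returns 0 and the caller reports a bug.
 */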
/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	raw_local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
				 sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
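
/*
 * Illustrative sketch (not part of this file): a custom allocator's free
 * path could call debug_check_no_locks_freed() before recycling an object,
 * so that freeing an object that still contains a held lock is reported.
 * "my_pool_free" and "struct my_obj" are hypothetical names.
 *
 *	static void my_pool_free(struct my_obj *obj)
 *	{
 *		debug_check_no_locks_freed(obj, sizeof(*obj));
 *		kfree(obj);
 *	}
 */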
static void print_held_locks_bug(void)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("====================================\n");
	pr_warn("WARNING: %s/%d still has locks held!\n",
	       current->comm, task_pid_nr(current));
	print_kernel_ident();
	pr_warn("------------------------------------\n");
	lockdep_print_held_locks(current);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
void debug_check_no_locks_held(void)
{
	if (unlikely(current->lockdep_depth > 0))
		print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
void debug_show_all_locks(void)
{
	struct task_struct *g, *p;

	if (unlikely(!debug_locks)) {
		pr_warn("INFO: lockdep is turned off.\n");
		return;
	}
	pr_warn("\nShowing all locks held in the system:\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (!p->lockdep_depth)
			continue;
		lockdep_print_held_locks(p);
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
	}
	rcu_read_unlock();

	pr_warn("\n");
	pr_warn("=============================================\n\n");
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
asmlinkage __visible void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		pr_warn("\n");
		pr_warn("================================================\n");
		pr_warn("WARNING: lock held when returning to user space!\n");
		print_kernel_ident();
		pr_warn("------------------------------------------------\n");
		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
				curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}

	/*
	 * The lock history for each syscall should be independent. So wipe the
	 * slate clean on return to userspace.
	 */
	lockdep_invariant_state(false);
}
void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
	struct task_struct *curr = current;

	/* Note: the following can be executed concurrently, so be careful. */
	pr_warn("\n");
	pr_warn("=============================\n");
	pr_warn("WARNING: suspicious RCU usage\n");
	print_kernel_ident();
	pr_warn("-----------------------------\n");
	pr_warn("%s:%d %s!\n", file, line, s);
	pr_warn("\nother info that might help us debug this:\n\n");
	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
	       !rcu_lockdep_current_cpu_online()
			? "RCU used illegally from offline CPU!\n"
			: !rcu_is_watching()
				? "RCU used illegally from idle CPU!\n"
				: "",
	       rcu_scheduler_active, debug_locks);

	/*
	 * If a CPU is in the RCU-free window in idle (ie: in the section
	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
	 * effect on a CPU running in that state. In other words, even if
	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
	 * delete data structures out from under it. RCU really has no
	 * choice here: we need to keep an RCU-free window in idle where
	 * the CPU may possibly enter into low power mode. This way we can
	 * notice an extended quiescent state to other CPUs that started a grace
	 * period. Otherwise we would delay any grace period as long as we run
	 * in the idle task.
	 *
	 * So complain bitterly if someone does call rcu_read_lock(),
	 * rcu_read_lock_bh() and so on from extended quiescent states.
	 */
	if (!rcu_is_watching())
		pr_warn("RCU used illegally from extended quiescent state!\n");

	lockdep_print_held_locks(curr);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
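
/*
 * Typical path into lockdep_rcu_suspicious(): the RCU debug checks, e.g.
 * RCU_LOCKDEP_WARN() as used by rcu_dereference_check(), call it with the
 * offending file/line and a message when a suspicious condition such as
 * "not holding rcu_read_lock()" is detected. Illustrative only; the
 * accessor below is a hypothetical example:
 *
 *	struct foo *foo_get(struct foo __rcu **slot)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *				 "foo_get() needs rcu_read_lock()");
 *		return rcu_dereference_raw(*slot);
 *	}
 */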