// SPDX-License-Identifier: GPL-2.0-only
/*
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 *  - lock inversion scenarios
 *  - circular lock dependencies
 *  - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *lockdep_selftest_task_struct;
static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already):
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
		/*
		 * The lockdep graph lock isn't locked while we expect it to
		 * be, we're confused now, bye!
		 */
		return DEBUG_LOCKS_WARN_ON(1);
	}

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}
/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that is
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
	barrier();

	if (!test_bit(class_idx, lock_classes_in_use)) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}

	/*
	 * At this point, if the passed hlock->class_idx is still garbage,
	 * we just have to live with it
	 */
	return lock_classes + class_idx;
}
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}
static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}
static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}
void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}
static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
/*
 * We keep a global list of all lock classes. The list is only accessed with
 * the lockdep spinlock lock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);
/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 *	are about to be freed.
 */
struct pending_free {
	struct list_head zapped;
	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};
/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 * @index:     Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf:        Array with information about data structures about to be freed.
 */
static struct delayed_free {
	struct rcu_head		rcu_head;
	int			index;
	int			scheduled;
	struct pending_free	pf[2];
} delayed_free;
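/*
 * Illustrative sketch (not part of lockdep): the two pending_free slots act
 * as a double buffer.  Zapped classes are queued on pf[index] while an RCU
 * callback drains the other slot; only after a grace period may the drained
 * elements be reused.  A minimal model of that flow, with the helper names
 * below being hypothetical and synchronize_rcu() standing in for the real
 * call_rcu() based scheme:
 *
 *	static struct pending_free *get_pending_free(void)
 *	{
 *		return delayed_free.pf + delayed_free.index;
 *	}
 *
 *	static void zap_one_class(struct lock_class *class)
 *	{
 *		list_move_tail(&class->lock_entry,
 *			       &get_pending_free()->zapped);
 *	}
 *
 *	static void free_zapped_classes(void)
 *	{
 *		struct pending_free *pf = get_pending_free();
 *
 *		delayed_free.index ^= 1;	// flip to the other slot
 *		synchronize_rcu();		// wait out concurrent readers
 *		list_splice_init(&pf->zapped, &free_lock_classes);
 *	}
 *
 * The real code does this asynchronously and under the graph lock.
 */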
/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS - 1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
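/*
 * Illustrative sketch (not part of lockdep): the chain key is built by
 * folding each held lock's class index into the running 64-bit key, so the
 * same set of locks taken in a different order in general yields a
 * different key.  A self-contained userspace model (the mixing macro is the
 * one from <linux/jhash.h>; the indices 1 and 2 are made up):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define rol32(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
 *	#define __jhash_mix(a, b, c)			\
 *	{						\
 *		a -= c;  a ^= rol32(c, 4);  c += b;	\
 *		b -= a;  b ^= rol32(a, 6);  a += c;	\
 *		c -= b;  c ^= rol32(b, 8);  b += a;	\
 *		a -= c;  a ^= rol32(c, 16); c += b;	\
 *		b -= a;  b ^= rol32(a, 19); a += c;	\
 *		c -= b;  c ^= rol32(b, 4);  b += a;	\
 *	}
 *
 *	static uint64_t chain_step(uint64_t key, uint32_t idx)
 *	{
 *		uint32_t k0 = key, k1 = key >> 32;
 *
 *		__jhash_mix(idx, k0, k1);
 *		return k0 | (uint64_t)k1 << 32;
 *	}
 *
 *	int main(void)
 *	{
 *		uint64_t ab = chain_step(chain_step(0, 1), 2);
 *		uint64_t ba = chain_step(chain_step(0, 2), 1);
 *
 *		printf("A->B %#llx, B->A %#llx\n",
 *		       (unsigned long long)ab, (unsigned long long)ba);
 *		return 0;
 *	}
 *
 * In the kernel the starting value is INITIAL_CHAIN_KEY and idx is the held
 * lock's class_idx.
 */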
void lockdep_init_task(struct task_struct *task)
{
	task->lockdep_depth = 0; /* no locks held yet */
	task->curr_chain_key = INITIAL_CHAIN_KEY;
	task->lockdep_recursion = 0;
}
void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

void lockdep_set_selftest_task(struct task_struct *task)
{
	lockdep_selftest_task_struct = task;
}
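/*
 * Usage note (illustrative, not from this file): lockdep_off()/lockdep_on()
 * bump and drop a per-task counter, so they nest.  Code that must not be
 * tracked (for instance a debugging path that takes locks in an unusual
 * order) brackets itself like:
 *
 *	lockdep_off();
 *	...		// acquisitions here are invisible to the validator
 *	lockdep_on();
 *
 * Every lockdep_off() must be paired with a lockdep_on() on the same task,
 * otherwise checking stays disabled for that task.
 */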
/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}
static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}
unsigned long nr_stack_trace_entries;

#ifdef CONFIG_PROVE_LOCKING
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct lock_trace *trace)
{
	unsigned long *entries = stack_trace + nr_stack_trace_entries;
	unsigned int max_entries;

	trace->offset = nr_stack_trace_entries;
	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES - 1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}
#endif
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif
#ifdef CONFIG_PROVE_LOCKING
/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};
#endif
const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}
static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	/*
	 * The usage character defaults to '.' (i.e., irqs disabled and not in
	 * irq context), which is the safest usage category.
	 */
	char c = '.';

	/*
	 * The order of the following usage checks matters, which will
	 * result in the outcome character as follows:
	 *
	 * - '+': irq is enabled and not in irq context
	 * - '-': in irq context and irq is disabled
	 * - '?': in irq context and irq is enabled
	 */
	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
		c = '+';
		if (class->usage_mask & lock_flag(bit))
			c = '?';
	} else if (class->usage_mask & lock_flag(bit))
		c = '-';

	return c;
}
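/*
 * Worked example (illustrative): take bit == LOCK_USED_IN_HARDIRQ, whose
 * paired direction bit is LOCK_ENABLED_HARDIRQ == bit + LOCK_USAGE_DIR_MASK.
 * The HARDIRQ column of the usage string then reads:
 *
 *	usage_mask contains			character
 *	--------------------------------------	---------
 *	neither bit				'.'
 *	only LOCK_USED_IN_HARDIRQ		'-'  (taken in hardirq context)
 *	only LOCK_ENABLED_HARDIRQ		'+'  (taken with hardirqs on)
 *	both bits				'?'
 */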
void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE)						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}
static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}
static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}", usage);
}
static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}
static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 *
	 * If this indeed happens, lets pretend it does not hurt to continue
	 * to print the lock unless the hlock class_idx does not point to a
	 * registered class. The rationale here is: since we don't attempt
	 * to distinguish whether we are in this situation, if it just
	 * happened we can't count on class_idx to tell either.
	 */
	struct lock_class *lock = hlock_class(hlock);

	if (!lock) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%p", hlock->instance);
	print_lock_name(lock);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *p)
{
	int i, depth = READ_ONCE(p->lockdep_depth);

	if (!depth)
		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
	else
		printk("%d lock%s held by %s/%d:\n", depth,
		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
	if (p->state == TASK_RUNNING && p != current)
		return;
	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(p->held_locks + i);
	}
}
static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}
/*
 * Is this the address of a static object:
 */
static int static_obj(const void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	if (arch_is_kernel_initmem_freed(addr))
		return 0;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter. The caller must hold the graph
 * lock.
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
static inline struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name &&
				     lock->key != &__lockdep_no_validate__);
			return class;
		}
	}

	return NULL;
}
/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

	/*
	 * lockdep_free_key_range() assumes that struct lock_class_key
	 * objects do not overlap. Since we use the address of lock
	 * objects as class key for static objects, check whether the
	 * size of lock_class_key objects does not exceed the size of
	 * the smallest lock object.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("the code is fine but needs lockdep annotation.\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}
#ifdef CONFIG_DEBUG_LOCKDEP

/* Check whether element @e occurs in list @h */
static bool in_list(struct list_head *e, struct list_head *h)
{
	struct list_head *f;

	list_for_each(f, h) {
		if (e == f)
			return true;
	}

	return false;
}
836 * Check whether entry @e occurs in any of the locks_after or locks_before
839 static bool in_any_class_list(struct list_head
*e
)
841 struct lock_class
*class;
844 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
845 class = &lock_classes
[i
];
846 if (in_list(e
, &class->locks_after
) ||
847 in_list(e
, &class->locks_before
))
853 static bool class_lock_list_valid(struct lock_class
*c
, struct list_head
*h
)
857 list_for_each_entry(e
, h
, entry
) {
858 if (e
->links_to
!= c
) {
859 printk(KERN_INFO
"class %s: mismatch for lock entry %ld; class %s <> %s",
861 (unsigned long)(e
- list_entries
),
862 e
->links_to
&& e
->links_to
->name
?
863 e
->links_to
->name
: "(?)",
864 e
->class && e
->class->name
? e
->class->name
:
872 #ifdef CONFIG_PROVE_LOCKING
873 static u16 chain_hlocks
[MAX_LOCKDEP_CHAIN_HLOCKS
];
876 static bool check_lock_chain_key(struct lock_chain
*chain
)
878 #ifdef CONFIG_PROVE_LOCKING
879 u64 chain_key
= INITIAL_CHAIN_KEY
;
882 for (i
= chain
->base
; i
< chain
->base
+ chain
->depth
; i
++)
883 chain_key
= iterate_chain_key(chain_key
, chain_hlocks
[i
]);
885 * The 'unsigned long long' casts avoid that a compiler warning
886 * is reported when building tools/lib/lockdep.
888 if (chain
->chain_key
!= chain_key
) {
889 printk(KERN_INFO
"chain %lld: key %#llx <> %#llx\n",
890 (unsigned long long)(chain
- lock_chains
),
891 (unsigned long long)chain
->chain_key
,
892 (unsigned long long)chain_key
);
899 static bool in_any_zapped_class_list(struct lock_class
*class)
901 struct pending_free
*pf
;
904 for (i
= 0, pf
= delayed_free
.pf
; i
< ARRAY_SIZE(delayed_free
.pf
); i
++, pf
++) {
905 if (in_list(&class->lock_entry
, &pf
->zapped
))
912 static bool __check_data_structures(void)
914 struct lock_class
*class;
915 struct lock_chain
*chain
;
916 struct hlist_head
*head
;
920 /* Check whether all classes occur in a lock list. */
921 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
922 class = &lock_classes
[i
];
923 if (!in_list(&class->lock_entry
, &all_lock_classes
) &&
924 !in_list(&class->lock_entry
, &free_lock_classes
) &&
925 !in_any_zapped_class_list(class)) {
926 printk(KERN_INFO
"class %px/%s is not in any class list\n",
927 class, class->name
? : "(?)");
932 /* Check whether all classes have valid lock lists. */
933 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
934 class = &lock_classes
[i
];
935 if (!class_lock_list_valid(class, &class->locks_before
))
937 if (!class_lock_list_valid(class, &class->locks_after
))
941 /* Check the chain_key of all lock chains. */
942 for (i
= 0; i
< ARRAY_SIZE(chainhash_table
); i
++) {
943 head
= chainhash_table
+ i
;
944 hlist_for_each_entry_rcu(chain
, head
, entry
) {
945 if (!check_lock_chain_key(chain
))
951 * Check whether all list entries that are in use occur in a class
954 for_each_set_bit(i
, list_entries_in_use
, ARRAY_SIZE(list_entries
)) {
955 e
= list_entries
+ i
;
956 if (!in_any_class_list(&e
->entry
)) {
957 printk(KERN_INFO
"list entry %d is not in any class list; class %s <> %s\n",
958 (unsigned int)(e
- list_entries
),
959 e
->class->name
? : "(?)",
960 e
->links_to
->name
? : "(?)");
966 * Check whether all list entries that are not in use do not occur in
969 for_each_clear_bit(i
, list_entries_in_use
, ARRAY_SIZE(list_entries
)) {
970 e
= list_entries
+ i
;
971 if (in_any_class_list(&e
->entry
)) {
972 printk(KERN_INFO
"list entry %d occurs in a class list; class %s <> %s\n",
973 (unsigned int)(e
- list_entries
),
974 e
->class && e
->class->name
? e
->class->name
:
976 e
->links_to
&& e
->links_to
->name
?
977 e
->links_to
->name
: "(?)");
985 int check_consistency
= 0;
986 module_param(check_consistency
, int, 0644);
988 static void check_data_structures(void)
990 static bool once
= false;
992 if (check_consistency
&& !once
) {
993 if (!__check_data_structures()) {
#else /* CONFIG_DEBUG_LOCKDEP */

static inline void check_data_structures(void) { }

#endif /* CONFIG_DEBUG_LOCKDEP */
1007 * Initialize the lock_classes[] array elements, the free_lock_classes list
1008 * and also the delayed_free structure.
1010 static void init_data_structures_once(void)
1012 static bool ds_initialized
, rcu_head_initialized
;
1015 if (likely(rcu_head_initialized
))
1018 if (system_state
>= SYSTEM_SCHEDULING
) {
1019 init_rcu_head(&delayed_free
.rcu_head
);
1020 rcu_head_initialized
= true;
1026 ds_initialized
= true;
1028 INIT_LIST_HEAD(&delayed_free
.pf
[0].zapped
);
1029 INIT_LIST_HEAD(&delayed_free
.pf
[1].zapped
);
1031 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
1032 list_add_tail(&lock_classes
[i
].lock_entry
, &free_lock_classes
);
1033 INIT_LIST_HEAD(&lock_classes
[i
].locks_after
);
1034 INIT_LIST_HEAD(&lock_classes
[i
].locks_before
);
1038 static inline struct hlist_head
*keyhashentry(const struct lock_class_key
*key
)
1040 unsigned long hash
= hash_long((uintptr_t)key
, KEYHASH_BITS
);
1042 return lock_keys_hash
+ hash
;
1045 /* Register a dynamically allocated key. */
1046 void lockdep_register_key(struct lock_class_key
*key
)
1048 struct hlist_head
*hash_head
;
1049 struct lock_class_key
*k
;
1050 unsigned long flags
;
1052 if (WARN_ON_ONCE(static_obj(key
)))
1054 hash_head
= keyhashentry(key
);
1056 raw_local_irq_save(flags
);
1059 hlist_for_each_entry_rcu(k
, hash_head
, hash_entry
) {
1060 if (WARN_ON_ONCE(k
== key
))
1063 hlist_add_head_rcu(&key
->hash_entry
, hash_head
);
1067 raw_local_irq_restore(flags
);
1069 EXPORT_SYMBOL_GPL(lockdep_register_key
);
1071 /* Check whether a key has been registered as a dynamic key. */
1072 static bool is_dynamic_key(const struct lock_class_key
*key
)
1074 struct hlist_head
*hash_head
;
1075 struct lock_class_key
*k
;
1078 if (WARN_ON_ONCE(static_obj(key
)))
1082 * If lock debugging is disabled lock_keys_hash[] may contain
1083 * pointers to memory that has already been freed. Avoid triggering
1084 * a use-after-free in that case by returning early.
1089 hash_head
= keyhashentry(key
);
1092 hlist_for_each_entry_rcu(k
, hash_head
, hash_entry
) {
1104 * Register a lock's class in the hash-table, if the class is not present
1105 * yet. Otherwise we look it up. We cache the result in the lock object
1106 * itself, so actual lookup of the hash should be once per lock object.
1108 static struct lock_class
*
1109 register_lock_class(struct lockdep_map
*lock
, unsigned int subclass
, int force
)
1111 struct lockdep_subclass_key
*key
;
1112 struct hlist_head
*hash_head
;
1113 struct lock_class
*class;
1115 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1117 class = look_up_lock_class(lock
, subclass
);
1119 goto out_set_class_cache
;
1122 if (!assign_lock_key(lock
))
1124 } else if (!static_obj(lock
->key
) && !is_dynamic_key(lock
->key
)) {
1128 key
= lock
->key
->subkeys
+ subclass
;
1129 hash_head
= classhashentry(key
);
1131 if (!graph_lock()) {
1135 * We have to do the hash-walk again, to avoid races
1138 hlist_for_each_entry_rcu(class, hash_head
, hash_entry
) {
1139 if (class->key
== key
)
1140 goto out_unlock_set
;
1143 init_data_structures_once();
1145 /* Allocate a new lock class and add it to the hash. */
1146 class = list_first_entry_or_null(&free_lock_classes
, typeof(*class),
1149 if (!debug_locks_off_graph_unlock()) {
1153 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1158 __set_bit(class - lock_classes
, lock_classes_in_use
);
1159 debug_atomic_inc(nr_unused_locks
);
1161 class->name
= lock
->name
;
1162 class->subclass
= subclass
;
1163 WARN_ON_ONCE(!list_empty(&class->locks_before
));
1164 WARN_ON_ONCE(!list_empty(&class->locks_after
));
1165 class->name_version
= count_matching_names(class);
1167 * We use RCU's safe list-add method to make
1168 * parallel walking of the hash-list safe:
1170 hlist_add_head_rcu(&class->hash_entry
, hash_head
);
1172 * Remove the class from the free list and add it to the global list
1175 list_move_tail(&class->lock_entry
, &all_lock_classes
);
1177 if (verbose(class)) {
1180 printk("\nnew class %px: %s", class->key
, class->name
);
1181 if (class->name_version
> 1)
1182 printk(KERN_CONT
"#%d", class->name_version
);
1183 printk(KERN_CONT
"\n");
1186 if (!graph_lock()) {
1193 out_set_class_cache
:
1194 if (!subclass
|| force
)
1195 lock
->class_cache
[0] = class;
1196 else if (subclass
< NR_LOCKDEP_CACHING_CLASSES
)
1197 lock
->class_cache
[subclass
] = class;
1200 * Hash collision, did we smoke some? We found a class with a matching
1201 * hash but the subclass -- which is hashed in -- didn't match.
1203 if (DEBUG_LOCKS_WARN_ON(class->subclass
!= subclass
))
#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	int idx = find_first_zero_bit(list_entries_in_use,
				      ARRAY_SIZE(list_entries));

	if (idx >= ARRAY_SIZE(list_entries)) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	nr_list_entries++;
	__set_bit(idx, list_entries_in_use);
	return list_entries + idx;
}
/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this,
			    struct lock_class *links_to, struct list_head *head,
			    unsigned long ip, int distance,
			    struct lock_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->links_to = links_to;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}
/*
 * For good efficiency of the modulo operation, we use a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement graph
 * breadth-first search (BFS) algorithm, by which we can determine
 * whether there is a path from a lock to another. In deadlock checks,
 * a path from the next lock to be acquired to a previous held lock
 * indicates that adding the <prev> -> <next> lock dependency will
 * produce a circle in the graph. Breadth-first search instead of
 * depth-first search is used in order to find the shortest (circular)
 * path.
 */
struct circular_queue {
	struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;
static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

/*
 * Dequeue an element from the circular_queue, return a lock_list if
 * the queue is not empty, or NULL if otherwise.
 */
static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
{
	struct lock_list *lock;

	if (__cq_empty(cq))
		return NULL;

	lock = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;

	return lock;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
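/*
 * Worked example (illustrative): with MAX_CIRCULAR_QUEUE_SIZE == 4096 the
 * indices wrap by masking with CQ_MASK == 0xfff instead of taking a modulo.
 * For instance, with front == 4094 and rear == 2 the element count is
 *
 *	(rear - front) & CQ_MASK == (2 - 4094) & 0xfff == 4
 *
 * which matches the four entries enqueued at slots 4094, 4095, 0 and 1.
 * One slot is deliberately kept empty so that "full"
 * (((rear + 1) & CQ_MASK) == front) can be told apart from "empty"
 * (front == rear).
 */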
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}
/*
 * Return the forward or backward dependency list.
 *
 * @lock:   the lock_list to get its class's dependency list
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 */
static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
{
	void *lock_class = lock->class;

	return lock_class + offset;
}
/*
 * Forward- or backward-dependency search, used for both circular dependency
 * checking and hardirq-unsafe/softirq-unsafe checking.
 */
static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int offset)
{
	struct lock_list *entry;
	struct lock_list *lock;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	head = get_dep_list(source_entry, offset);
	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, source_entry);

	while ((lock = __cq_dequeue(cq))) {

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		head = get_dep_list(lock, offset);

		DEBUG_LOCKS_WARN_ON(!irqs_disabled());

		list_for_each_entry_rcu(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}
static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry,
		     offsetof(struct lock_class, locks_after));
}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry,
		     offsetof(struct lock_class, locks_before));
}
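/*
 * Usage sketch (illustrative, not from this file): a caller that wants to
 * know whether @target is reachable from @src along existing dependencies
 * seeds a lock_list with @src's class and walks forwards; the class_equal()
 * match function defined further down is what check_path() passes in:
 *
 *	struct lock_list root = { .class = hlock_class(src) };
 *	struct lock_list *hit;
 *	int ret;
 *
 *	ret = __bfs_forwards(&root, hlock_class(target), class_equal, &hit);
 *	// ret == 0: a path was found, ret == 1: not reachable,
 *	// ret < 0: the graph walk itself failed
 *
 * __bfs_backwards() answers the mirror question via ->locks_before, which is
 * how the irq-safe/irq-unsafe checks walk from a held lock back to everything
 * that can be held underneath it.
 */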
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
	unsigned long *entries = stack_trace + trace->offset;

	stack_trace_print(entries, trace->nr_entries, spaces);
}
1481 * Print a dependency chain entry (this is only done when a deadlock
1482 * has been detected):
1484 static noinline
void
1485 print_circular_bug_entry(struct lock_list
*target
, int depth
)
1487 if (debug_locks_silent
)
1489 printk("\n-> #%u", depth
);
1490 print_lock_name(target
->class);
1491 printk(KERN_CONT
":\n");
1492 print_lock_trace(&target
->trace
, 6);
1496 print_circular_lock_scenario(struct held_lock
*src
,
1497 struct held_lock
*tgt
,
1498 struct lock_list
*prt
)
1500 struct lock_class
*source
= hlock_class(src
);
1501 struct lock_class
*target
= hlock_class(tgt
);
1502 struct lock_class
*parent
= prt
->class;
1505 * A direct locking problem where unsafe_class lock is taken
1506 * directly by safe_class lock, then all we need to show
1507 * is the deadlock scenario, as it is obvious that the
1508 * unsafe lock is taken under the safe lock.
1510 * But if there is a chain instead, where the safe lock takes
1511 * an intermediate lock (middle_class) where this lock is
1512 * not the same as the safe lock, then the lock chain is
1513 * used to describe the problem. Otherwise we would need
1514 * to show a different CPU case for each link in the chain
1515 * from the safe_class lock to the unsafe_class lock.
1517 if (parent
!= source
) {
1518 printk("Chain exists of:\n ");
1519 __print_lock_name(source
);
1520 printk(KERN_CONT
" --> ");
1521 __print_lock_name(parent
);
1522 printk(KERN_CONT
" --> ");
1523 __print_lock_name(target
);
1524 printk(KERN_CONT
"\n\n");
1527 printk(" Possible unsafe locking scenario:\n\n");
1528 printk(" CPU0 CPU1\n");
1529 printk(" ---- ----\n");
1531 __print_lock_name(target
);
1532 printk(KERN_CONT
");\n");
1534 __print_lock_name(parent
);
1535 printk(KERN_CONT
");\n");
1537 __print_lock_name(target
);
1538 printk(KERN_CONT
");\n");
1540 __print_lock_name(source
);
1541 printk(KERN_CONT
");\n");
1542 printk("\n *** DEADLOCK ***\n\n");
1546 * When a circular dependency is detected, print the
1549 static noinline
void
1550 print_circular_bug_header(struct lock_list
*entry
, unsigned int depth
,
1551 struct held_lock
*check_src
,
1552 struct held_lock
*check_tgt
)
1554 struct task_struct
*curr
= current
;
1556 if (debug_locks_silent
)
1560 pr_warn("======================================================\n");
1561 pr_warn("WARNING: possible circular locking dependency detected\n");
1562 print_kernel_ident();
1563 pr_warn("------------------------------------------------------\n");
1564 pr_warn("%s/%d is trying to acquire lock:\n",
1565 curr
->comm
, task_pid_nr(curr
));
1566 print_lock(check_src
);
1568 pr_warn("\nbut task is already holding lock:\n");
1570 print_lock(check_tgt
);
1571 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1572 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1574 print_circular_bug_entry(entry
, depth
);
1577 static inline int class_equal(struct lock_list
*entry
, void *data
)
1579 return entry
->class == data
;
1582 static noinline
void print_circular_bug(struct lock_list
*this,
1583 struct lock_list
*target
,
1584 struct held_lock
*check_src
,
1585 struct held_lock
*check_tgt
)
1587 struct task_struct
*curr
= current
;
1588 struct lock_list
*parent
;
1589 struct lock_list
*first_parent
;
1592 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
1595 if (!save_trace(&this->trace
))
1598 depth
= get_lock_depth(target
);
1600 print_circular_bug_header(target
, depth
, check_src
, check_tgt
);
1602 parent
= get_lock_parent(target
);
1603 first_parent
= parent
;
1606 print_circular_bug_entry(parent
, --depth
);
1607 parent
= get_lock_parent(parent
);
1610 printk("\nother info that might help us debug this:\n\n");
1611 print_circular_lock_scenario(check_src
, check_tgt
,
1614 lockdep_print_held_locks(curr
);
1616 printk("\nstack backtrace:\n");
1620 static noinline
void print_bfs_bug(int ret
)
1622 if (!debug_locks_off_graph_unlock())
1626 * Breadth-first-search failed, graph got corrupted?
1628 WARN(1, "lockdep bfs error:%d\n", ret
);
1631 static int noop_count(struct lock_list
*entry
, void *data
)
1633 (*(unsigned long *)data
)++;
1637 static unsigned long __lockdep_count_forward_deps(struct lock_list
*this)
1639 unsigned long count
= 0;
1640 struct lock_list
*uninitialized_var(target_entry
);
1642 __bfs_forwards(this, (void *)&count
, noop_count
, &target_entry
);
1646 unsigned long lockdep_count_forward_deps(struct lock_class
*class)
1648 unsigned long ret
, flags
;
1649 struct lock_list
this;
1654 raw_local_irq_save(flags
);
1655 arch_spin_lock(&lockdep_lock
);
1656 ret
= __lockdep_count_forward_deps(&this);
1657 arch_spin_unlock(&lockdep_lock
);
1658 raw_local_irq_restore(flags
);
1663 static unsigned long __lockdep_count_backward_deps(struct lock_list
*this)
1665 unsigned long count
= 0;
1666 struct lock_list
*uninitialized_var(target_entry
);
1668 __bfs_backwards(this, (void *)&count
, noop_count
, &target_entry
);
1673 unsigned long lockdep_count_backward_deps(struct lock_class
*class)
1675 unsigned long ret
, flags
;
1676 struct lock_list
this;
1681 raw_local_irq_save(flags
);
1682 arch_spin_lock(&lockdep_lock
);
1683 ret
= __lockdep_count_backward_deps(&this);
1684 arch_spin_unlock(&lockdep_lock
);
1685 raw_local_irq_restore(flags
);
1691 * Check that the dependency graph starting at <src> can lead to
1692 * <target> or not. Print an error and return 0 if it does.
1695 check_path(struct lock_class
*target
, struct lock_list
*src_entry
,
1696 struct lock_list
**target_entry
)
1700 ret
= __bfs_forwards(src_entry
, (void *)target
, class_equal
,
1703 if (unlikely(ret
< 0))
1710 * Prove that the dependency graph starting at <src> can not
1711 * lead to <target>. If it can, there is a circle when adding
1712 * <target> -> <src> dependency.
1714 * Print an error and return 0 if it does.
1717 check_noncircular(struct held_lock
*src
, struct held_lock
*target
,
1718 struct lock_trace
*trace
)
1721 struct lock_list
*uninitialized_var(target_entry
);
1722 struct lock_list src_entry
= {
1723 .class = hlock_class(src
),
1727 debug_atomic_inc(nr_cyclic_checks
);
1729 ret
= check_path(hlock_class(target
), &src_entry
, &target_entry
);
1731 if (unlikely(!ret
)) {
1732 if (!trace
->nr_entries
) {
1734 * If save_trace fails here, the printing might
1735 * trigger a WARN but because of the !nr_entries it
1736 * should not do bad things.
1741 print_circular_bug(&src_entry
, target_entry
, src
, target
);
1747 #ifdef CONFIG_LOCKDEP_SMALL
1749 * Check that the dependency graph starting at <src> can lead to
1750 * <target> or not. If it can, <src> -> <target> dependency is already
1753 * Print an error and return 2 if it does or 1 if it does not.
1756 check_redundant(struct held_lock
*src
, struct held_lock
*target
)
1759 struct lock_list
*uninitialized_var(target_entry
);
1760 struct lock_list src_entry
= {
1761 .class = hlock_class(src
),
1765 debug_atomic_inc(nr_redundant_checks
);
1767 ret
= check_path(hlock_class(target
), &src_entry
, &target_entry
);
1770 debug_atomic_inc(nr_redundant
);
#ifdef CONFIG_TRACE_IRQFLAGS

static inline int usage_accumulate(struct lock_list *entry, void *mask)
{
	*(unsigned long *)mask |= entry->class->usage_mask;

	return 0;
}

/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

static inline int usage_match(struct lock_list *entry, void *mask)
{
	return entry->class->usage_mask & *(unsigned long *)mask;
}
1800 * Find a node in the forwards-direction dependency sub-graph starting
1801 * at @root->class that matches @bit.
1803 * Return 0 if such a node exists in the subgraph, and put that node
1804 * into *@target_entry.
1806 * Return 1 otherwise and keep *@target_entry unchanged.
1807 * Return <0 on error.
1810 find_usage_forwards(struct lock_list
*root
, unsigned long usage_mask
,
1811 struct lock_list
**target_entry
)
1815 debug_atomic_inc(nr_find_usage_forwards_checks
);
1817 result
= __bfs_forwards(root
, &usage_mask
, usage_match
, target_entry
);
1823 * Find a node in the backwards-direction dependency sub-graph starting
1824 * at @root->class that matches @bit.
1826 * Return 0 if such a node exists in the subgraph, and put that node
1827 * into *@target_entry.
1829 * Return 1 otherwise and keep *@target_entry unchanged.
1830 * Return <0 on error.
1833 find_usage_backwards(struct lock_list
*root
, unsigned long usage_mask
,
1834 struct lock_list
**target_entry
)
1838 debug_atomic_inc(nr_find_usage_backwards_checks
);
1840 result
= __bfs_backwards(root
, &usage_mask
, usage_match
, target_entry
);
1845 static void print_lock_class_header(struct lock_class
*class, int depth
)
1849 printk("%*s->", depth
, "");
1850 print_lock_name(class);
1851 #ifdef CONFIG_DEBUG_LOCKDEP
1852 printk(KERN_CONT
" ops: %lu", debug_class_ops_read(class));
1854 printk(KERN_CONT
" {\n");
1856 for (bit
= 0; bit
< LOCK_USAGE_STATES
; bit
++) {
1857 if (class->usage_mask
& (1 << bit
)) {
1860 len
+= printk("%*s %s", depth
, "", usage_str
[bit
]);
1861 len
+= printk(KERN_CONT
" at:\n");
1862 print_lock_trace(class->usage_traces
+ bit
, len
);
1865 printk("%*s }\n", depth
, "");
1867 printk("%*s ... key at: [<%px>] %pS\n",
1868 depth
, "", class->key
, class->key
);
1872 * printk the shortest lock dependencies from @start to @end in reverse order:
1875 print_shortest_lock_dependencies(struct lock_list
*leaf
,
1876 struct lock_list
*root
)
1878 struct lock_list
*entry
= leaf
;
1881 /*compute depth from generated tree by BFS*/
1882 depth
= get_lock_depth(leaf
);
1885 print_lock_class_header(entry
->class, depth
);
1886 printk("%*s ... acquired at:\n", depth
, "");
1887 print_lock_trace(&entry
->trace
, 2);
1890 if (depth
== 0 && (entry
!= root
)) {
1891 printk("lockdep:%s bad path found in chain graph\n", __func__
);
1895 entry
= get_lock_parent(entry
);
1897 } while (entry
&& (depth
>= 0));
1901 print_irq_lock_scenario(struct lock_list
*safe_entry
,
1902 struct lock_list
*unsafe_entry
,
1903 struct lock_class
*prev_class
,
1904 struct lock_class
*next_class
)
1906 struct lock_class
*safe_class
= safe_entry
->class;
1907 struct lock_class
*unsafe_class
= unsafe_entry
->class;
1908 struct lock_class
*middle_class
= prev_class
;
1910 if (middle_class
== safe_class
)
1911 middle_class
= next_class
;
1914 * A direct locking problem where unsafe_class lock is taken
1915 * directly by safe_class lock, then all we need to show
1916 * is the deadlock scenario, as it is obvious that the
1917 * unsafe lock is taken under the safe lock.
1919 * But if there is a chain instead, where the safe lock takes
1920 * an intermediate lock (middle_class) where this lock is
1921 * not the same as the safe lock, then the lock chain is
1922 * used to describe the problem. Otherwise we would need
1923 * to show a different CPU case for each link in the chain
1924 * from the safe_class lock to the unsafe_class lock.
1926 if (middle_class
!= unsafe_class
) {
1927 printk("Chain exists of:\n ");
1928 __print_lock_name(safe_class
);
1929 printk(KERN_CONT
" --> ");
1930 __print_lock_name(middle_class
);
1931 printk(KERN_CONT
" --> ");
1932 __print_lock_name(unsafe_class
);
1933 printk(KERN_CONT
"\n\n");
1936 printk(" Possible interrupt unsafe locking scenario:\n\n");
1937 printk(" CPU0 CPU1\n");
1938 printk(" ---- ----\n");
1940 __print_lock_name(unsafe_class
);
1941 printk(KERN_CONT
");\n");
1942 printk(" local_irq_disable();\n");
1944 __print_lock_name(safe_class
);
1945 printk(KERN_CONT
");\n");
1947 __print_lock_name(middle_class
);
1948 printk(KERN_CONT
");\n");
1949 printk(" <Interrupt>\n");
1951 __print_lock_name(safe_class
);
1952 printk(KERN_CONT
");\n");
1953 printk("\n *** DEADLOCK ***\n\n");
1957 print_bad_irq_dependency(struct task_struct
*curr
,
1958 struct lock_list
*prev_root
,
1959 struct lock_list
*next_root
,
1960 struct lock_list
*backwards_entry
,
1961 struct lock_list
*forwards_entry
,
1962 struct held_lock
*prev
,
1963 struct held_lock
*next
,
1964 enum lock_usage_bit bit1
,
1965 enum lock_usage_bit bit2
,
1966 const char *irqclass
)
1968 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
1972 pr_warn("=====================================================\n");
1973 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1974 irqclass
, irqclass
);
1975 print_kernel_ident();
1976 pr_warn("-----------------------------------------------------\n");
1977 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1978 curr
->comm
, task_pid_nr(curr
),
1979 curr
->hardirq_context
, hardirq_count() >> HARDIRQ_SHIFT
,
1980 curr
->softirq_context
, softirq_count() >> SOFTIRQ_SHIFT
,
1981 curr
->hardirqs_enabled
,
1982 curr
->softirqs_enabled
);
1985 pr_warn("\nand this task is already holding:\n");
1987 pr_warn("which would create a new lock dependency:\n");
1988 print_lock_name(hlock_class(prev
));
1990 print_lock_name(hlock_class(next
));
1993 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1995 print_lock_name(backwards_entry
->class);
1996 pr_warn("\n... which became %s-irq-safe at:\n", irqclass
);
1998 print_lock_trace(backwards_entry
->class->usage_traces
+ bit1
, 1);
2000 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass
);
2001 print_lock_name(forwards_entry
->class);
2002 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass
);
2005 print_lock_trace(forwards_entry
->class->usage_traces
+ bit2
, 1);
2007 pr_warn("\nother info that might help us debug this:\n\n");
2008 print_irq_lock_scenario(backwards_entry
, forwards_entry
,
2009 hlock_class(prev
), hlock_class(next
));
2011 lockdep_print_held_locks(curr
);
2013 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass
);
2014 if (!save_trace(&prev_root
->trace
))
2016 print_shortest_lock_dependencies(backwards_entry
, prev_root
);
2018 pr_warn("\nthe dependencies between the lock to be acquired");
2019 pr_warn(" and %s-irq-unsafe lock:\n", irqclass
);
2020 if (!save_trace(&next_root
->trace
))
2022 print_shortest_lock_dependencies(forwards_entry
, next_root
);
2024 pr_warn("\nstack backtrace:\n");
static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	if (bit & LOCK_USAGE_READ_MASK)
		return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
	else
		return state_names[bit >> LOCK_USAGE_DIR_MASK];
}
/*
 * The bit number is encoded like:
 *
 *  bit0: 0 exclusive, 1 read lock
 *  bit1: 0 used in irq, 1 irq enabled
 *  bit2-n: state
 */
static int exclusive_bit(int new_bit)
{
	int state = new_bit & LOCK_USAGE_STATE_MASK;
	int dir = new_bit & LOCK_USAGE_DIR_MASK;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ LOCK_USAGE_DIR_MASK);
}
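/*
 * Worked example (illustrative): with the encoding above, exclusive_bit()
 * keeps the state bits, flips the direction bit and drops the read bit, so:
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_HARDIRQ)      == LOCK_USED_IN_HARDIRQ
 *
 * i.e. it names the usage that must not be connected to the given one by a
 * dependency: a lock used in hardirq context must never come to depend on a
 * lock taken with hardirqs enabled, and vice versa.
 */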
/*
 * Observe that when given a bitmask where each bitnr is encoded as above, a
 * right shift of the mask transforms the individual bitnrs as -1 and
 * conversely, a left shift transforms into +1 for the individual bitnrs.
 *
 * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
 * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
 * instead by subtracting the bit number by 2, or shifting the mask right by 2.
 *
 * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
 *
 * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
 * all bits set) and recompose with bitnr1 flipped.
 */
static unsigned long invert_dir_mask(unsigned long mask)
{
	unsigned long excl = 0;

	/* Invert dir */
	excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
	excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;

	return excl;
}

/*
 * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
 * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
 * And then mask out all bitnr0.
 */
static unsigned long exclusive_mask(unsigned long mask)
{
	unsigned long excl = invert_dir_mask(mask);

	/* Strip read */
	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
	excl &= ~LOCKF_IRQ_READ;

	return excl;
}

/*
 * Retrieve the _possible_ original mask to which @mask is
 * exclusive. Ie: this is the opposite of exclusive_mask().
 * Note that 2 possible original bits can match an exclusive
 * bit: one has LOCK_USAGE_READ_MASK set, the other has it
 * cleared. So both are returned for each exclusive bit.
 */
static unsigned long original_mask(unsigned long mask)
{
	unsigned long excl = invert_dir_mask(mask);

	/* Include read in existing usages */
	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;

	return excl;
}
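/*
 * Worked example (illustrative): suppose the backward search accumulated
 *
 *	usage_mask == LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ_READ
 *
 * Then exclusive_mask(usage_mask) == LOCKF_ENABLED_HARDIRQ |
 * LOCKF_ENABLED_SOFTIRQ: direction flipped and read stripped, i.e. the
 * usages that must not appear anywhere in the forward subgraph.  Going the
 * other way,
 *
 *	original_mask(LOCKF_ENABLED_HARDIRQ) ==
 *		LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_HARDIRQ_READ
 *
 * because both the read and the write variant of "used in hardirq" are
 * incompatible with "hardirq enabled".
 */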
/*
 * Find the first pair of bit match between an original
 * usage mask and an exclusive usage mask.
 */
static int find_exclusive_match(unsigned long mask,
				unsigned long excl_mask,
				enum lock_usage_bit *bitp,
				enum lock_usage_bit *excl_bitp)
{
	int bit, excl;

	for_each_set_bit(bit, &mask, LOCK_USED) {
		excl = exclusive_bit(bit);
		if (excl_mask & lock_flag(excl)) {
			*bitp = bit;
			*excl_bitp = excl;
			return 0;
		}
	}
	return -1;
}
2149 * Prove that the new dependency does not connect a hardirq-safe(-read)
2150 * lock with a hardirq-unsafe lock - to achieve this we search
2151 * the backwards-subgraph starting at <prev>, and the
2152 * forwards-subgraph starting at <next>:
2154 static int check_irq_usage(struct task_struct
*curr
, struct held_lock
*prev
,
2155 struct held_lock
*next
)
2157 unsigned long usage_mask
= 0, forward_mask
, backward_mask
;
2158 enum lock_usage_bit forward_bit
= 0, backward_bit
= 0;
2159 struct lock_list
*uninitialized_var(target_entry1
);
2160 struct lock_list
*uninitialized_var(target_entry
);
2161 struct lock_list
this, that
;
2165 * Step 1: gather all hard/soft IRQs usages backward in an
2166 * accumulated usage mask.
2169 this.class = hlock_class(prev
);
2171 ret
= __bfs_backwards(&this, &usage_mask
, usage_accumulate
, NULL
);
2177 usage_mask
&= LOCKF_USED_IN_IRQ_ALL
;
2182 * Step 2: find exclusive uses forward that match the previous
2183 * backward accumulated mask.
2185 forward_mask
= exclusive_mask(usage_mask
);
2188 that
.class = hlock_class(next
);
2190 ret
= find_usage_forwards(&that
, forward_mask
, &target_entry1
);
2199 * Step 3: we found a bad match! Now retrieve a lock from the backward
2200 * list whose usage mask matches the exclusive usage mask from the
2201 * lock found on the forward list.
2203 backward_mask
= original_mask(target_entry1
->class->usage_mask
);
2205 ret
= find_usage_backwards(&this, backward_mask
, &target_entry
);
2210 if (DEBUG_LOCKS_WARN_ON(ret
== 1))
2214 * Step 4: narrow down to a pair of incompatible usage bits
2217 ret
= find_exclusive_match(target_entry
->class->usage_mask
,
2218 target_entry1
->class->usage_mask
,
2219 &backward_bit
, &forward_bit
);
2220 if (DEBUG_LOCKS_WARN_ON(ret
== -1))
2223 print_bad_irq_dependency(curr
, &this, &that
,
2224 target_entry
, target_entry1
,
2226 backward_bit
, forward_bit
,
2227 state_name(backward_bit
));
2232 static void inc_chains(void)
2234 if (current
->hardirq_context
)
2235 nr_hardirq_chains
++;
2237 if (current
->softirq_context
)
2238 nr_softirq_chains
++;
2240 nr_process_chains
++;
2246 static inline int check_irq_usage(struct task_struct
*curr
,
2247 struct held_lock
*prev
, struct held_lock
*next
)
2252 static inline void inc_chains(void)
2254 nr_process_chains
++;
2257 #endif /* CONFIG_TRACE_IRQFLAGS */
2260 print_deadlock_scenario(struct held_lock
*nxt
, struct held_lock
*prv
)
2262 struct lock_class
*next
= hlock_class(nxt
);
2263 struct lock_class
*prev
= hlock_class(prv
);
2265 printk(" Possible unsafe locking scenario:\n\n");
2269 __print_lock_name(prev
);
2270 printk(KERN_CONT
");\n");
2272 __print_lock_name(next
);
2273 printk(KERN_CONT
");\n");
2274 printk("\n *** DEADLOCK ***\n\n");
2275 printk(" May be due to missing lock nesting notation\n\n");
2279 print_deadlock_bug(struct task_struct
*curr
, struct held_lock
*prev
,
2280 struct held_lock
*next
)
2282 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
2286 pr_warn("============================================\n");
2287 pr_warn("WARNING: possible recursive locking detected\n");
2288 print_kernel_ident();
2289 pr_warn("--------------------------------------------\n");
2290 pr_warn("%s/%d is trying to acquire lock:\n",
2291 curr
->comm
, task_pid_nr(curr
));
2293 pr_warn("\nbut task is already holding lock:\n");
2296 pr_warn("\nother info that might help us debug this:\n");
2297 print_deadlock_scenario(next
, prev
);
2298 lockdep_print_held_locks(curr
);
2300 pr_warn("\nstack backtrace:\n");
/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
2313 check_deadlock(struct task_struct
*curr
, struct held_lock
*next
)
2315 struct held_lock
*prev
;
2316 struct held_lock
*nest
= NULL
;
2319 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
2320 prev
= curr
->held_locks
+ i
;
2322 if (prev
->instance
== next
->nest_lock
)
2325 if (hlock_class(prev
) != hlock_class(next
))
2329 * Allow read-after-read recursion of the same
2330 * lock class (i.e. read_lock(lock)+read_lock(lock)):
2332 if ((next
->read
== 2) && prev
->read
)
2336 * We're holding the nest_lock, which serializes this lock's
2337 * nesting behaviour.
2342 print_deadlock_bug(curr
, prev
, next
);
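/*
 * Illustrative scenarios for the cases handled by check_deadlock() above
 * (not from this file; struct/field names are made up):
 *
 *	read_lock(&rw);  read_lock(&rw);	// OK: read-after-read recursion
 *	spin_lock(&a);   spin_lock(&a);		// bug: same class re-acquired
 *
 *	// same class taken twice, but every child is serialized by the
 *	// parent lock, which is passed in as the nest_lock annotation:
 *	mutex_lock(&parent->mtx);
 *	mutex_lock_nest_lock(&child1->mtx, &parent->mtx);
 *	mutex_lock_nest_lock(&child2->mtx, &parent->mtx);
 *
 * The prev->instance == next->nest_lock test is what lets the nested case
 * through; next->read == 2 && prev->read is what allows the first line.
 */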
/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
2371 check_prev_add(struct task_struct
*curr
, struct held_lock
*prev
,
2372 struct held_lock
*next
, int distance
, struct lock_trace
*trace
)
2374 struct lock_list
*entry
;
2377 if (!hlock_class(prev
)->key
|| !hlock_class(next
)->key
) {
2379 * The warning statements below may trigger a use-after-free
2380 * of the class name. It is better to trigger a use-after free
2381 * and to have the class name most of the time instead of not
2382 * having the class name available.
2384 WARN_ONCE(!debug_locks_silent
&& !hlock_class(prev
)->key
,
2385 "Detected use-after-free of lock class %px/%s\n",
2387 hlock_class(prev
)->name
);
2388 WARN_ONCE(!debug_locks_silent
&& !hlock_class(next
)->key
,
2389 "Detected use-after-free of lock class %px/%s\n",
2391 hlock_class(next
)->name
);
2396 * Prove that the new <prev> -> <next> dependency would not
2397 * create a circular dependency in the graph. (We do this by
2398 * a breadth-first search into the graph starting at <next>,
2399 * and check whether we can reach <prev>.)
2401 * The search is limited by the size of the circular queue (i.e.,
2402 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
2403 * in the graph whose neighbours are to be checked.
2405 ret
= check_noncircular(next
, prev
, trace
);
2406 if (unlikely(ret
<= 0))
2409 if (!check_irq_usage(curr
, prev
, next
))
2413 * For recursive read-locks we do all the dependency checks,
2414 * but we dont store read-triggered dependencies (only
2415 * write-triggered dependencies). This ensures that only the
2416 * write-side dependencies matter, and that if for example a
2417 * write-lock never takes any other locks, then the reads are
2418 * equivalent to a NOP.
2420 if (next
->read
== 2 || prev
->read
== 2)
2423 * Is the <prev> -> <next> dependency already present?
2425 * (this may occur even though this is a new chain: consider
2426 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2427 * chains - the second one will be new, but L1 already has
2428 * L2 added to its dependency list, due to the first chain.)
2430 list_for_each_entry(entry
, &hlock_class(prev
)->locks_after
, entry
) {
2431 if (entry
->class == hlock_class(next
)) {
2433 entry
->distance
= 1;
2438 #ifdef CONFIG_LOCKDEP_SMALL
2440 * Is the <prev> -> <next> link redundant?
2442 ret
= check_redundant(prev
, next
);
2447 if (!trace
->nr_entries
&& !save_trace(trace
))
2451 * Ok, all validations passed, add the new lock
2452 * to the previous lock's dependency list:
2454 ret
= add_lock_to_list(hlock_class(next
), hlock_class(prev
),
2455 &hlock_class(prev
)->locks_after
,
2456 next
->acquire_ip
, distance
, trace
);
2461 ret
= add_lock_to_list(hlock_class(prev
), hlock_class(next
),
2462 &hlock_class(next
)->locks_before
,
2463 next
->acquire_ip
, distance
, trace
);
/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	struct lock_trace trace = { .nr_entries = 0 };
	int depth = curr->lockdep_depth;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth - 1;

		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2 && hlock->check) {
			int ret = check_prev_add(curr, hlock, next, distance,
						 &trace);
			if (!ret)
				return 0;

			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}

		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Clearly we all shouldn't be here, but since we made it we
	 * can reliably say we messed up our state. See the above two
	 * gotos for reasons why we could possibly end up here.
	 */
	WARN_ON(1);

	return 0;
}
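
/*
 * Illustrative ABBA sketch (the lock names are made up for the example,
 * not kernel APIs) of what the <prev> -> <next> validation above catches
 * even when the two paths never actually race:
 *
 *	path 1:	mutex_lock(&a); mutex_lock(&b);	// records a -> b
 *	path 2:	mutex_lock(&b); mutex_lock(&a);	// b -> a would close a cycle
 *
 * check_noncircular() finds the cycle when the second dependency is about
 * to be added, and check_irq_usage() performs the analogous search for
 * irq-safe -> irq-unsafe inversions.
 */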
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}

/*
 * Returns the index of the first held_lock of the current chain
 */
static inline int get_first_held_lock(struct task_struct *curr,
				      struct held_lock *hlock)
{
	int i;
	struct held_lock *hlock_curr;

	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
		hlock_curr = curr->held_locks + i;
		if (hlock_curr->irq_context != hlock->irq_context)
			break;
	}

	return ++i;
}
2579 #ifdef CONFIG_DEBUG_LOCKDEP
2581 * Returns the next chain_key iteration
2583 static u64
print_chain_key_iteration(int class_idx
, u64 chain_key
)
2585 u64 new_chain_key
= iterate_chain_key(chain_key
, class_idx
);
2587 printk(" class_idx:%d -> chain_key:%016Lx",
2589 (unsigned long long)new_chain_key
);
2590 return new_chain_key
;
2594 print_chain_keys_held_locks(struct task_struct
*curr
, struct held_lock
*hlock_next
)
2596 struct held_lock
*hlock
;
2597 u64 chain_key
= INITIAL_CHAIN_KEY
;
2598 int depth
= curr
->lockdep_depth
;
2599 int i
= get_first_held_lock(curr
, hlock_next
);
2601 printk("depth: %u (irq_context %u)\n", depth
- i
+ 1,
2602 hlock_next
->irq_context
);
2603 for (; i
< depth
; i
++) {
2604 hlock
= curr
->held_locks
+ i
;
2605 chain_key
= print_chain_key_iteration(hlock
->class_idx
, chain_key
);
2610 print_chain_key_iteration(hlock_next
->class_idx
, chain_key
);
2611 print_lock(hlock_next
);
2614 static void print_chain_keys_chain(struct lock_chain
*chain
)
2617 u64 chain_key
= INITIAL_CHAIN_KEY
;
2620 printk("depth: %u\n", chain
->depth
);
2621 for (i
= 0; i
< chain
->depth
; i
++) {
2622 class_id
= chain_hlocks
[chain
->base
+ i
];
2623 chain_key
= print_chain_key_iteration(class_id
, chain_key
);
2625 print_lock_name(lock_classes
+ class_id
);
2630 static void print_collision(struct task_struct
*curr
,
2631 struct held_lock
*hlock_next
,
2632 struct lock_chain
*chain
)
2635 pr_warn("============================\n");
2636 pr_warn("WARNING: chain_key collision\n");
2637 print_kernel_ident();
2638 pr_warn("----------------------------\n");
2639 pr_warn("%s/%d: ", current
->comm
, task_pid_nr(current
));
2640 pr_warn("Hash chain already cached but the contents don't match!\n");
2642 pr_warn("Held locks:");
2643 print_chain_keys_held_locks(curr
, hlock_next
);
2645 pr_warn("Locks in cached chain:");
2646 print_chain_keys_chain(chain
);
2648 pr_warn("\nstack backtrace:\n");
2654 * Checks whether the chain and the current held locks are consistent
2655 * in depth and also in content. If they are not it most likely means
2656 * that there was a collision during the calculation of the chain_key.
2657 * Returns: 0 not passed, 1 passed
2659 static int check_no_collision(struct task_struct
*curr
,
2660 struct held_lock
*hlock
,
2661 struct lock_chain
*chain
)
2663 #ifdef CONFIG_DEBUG_LOCKDEP
2666 i
= get_first_held_lock(curr
, hlock
);
2668 if (DEBUG_LOCKS_WARN_ON(chain
->depth
!= curr
->lockdep_depth
- (i
- 1))) {
2669 print_collision(curr
, hlock
, chain
);
2673 for (j
= 0; j
< chain
->depth
- 1; j
++, i
++) {
2674 id
= curr
->held_locks
[i
].class_idx
;
2676 if (DEBUG_LOCKS_WARN_ON(chain_hlocks
[chain
->base
+ j
] != id
)) {
2677 print_collision(curr
, hlock
, chain
);
2686 * Given an index that is >= -1, return the index of the next lock chain.
2687 * Return -2 if there is no next lock chain.
2689 long lockdep_next_lockchain(long i
)
2691 i
= find_next_bit(lock_chains_in_use
, ARRAY_SIZE(lock_chains
), i
+ 1);
2692 return i
< ARRAY_SIZE(lock_chains
) ? i
: -2;
2695 unsigned long lock_chain_count(void)
2697 return bitmap_weight(lock_chains_in_use
, ARRAY_SIZE(lock_chains
));
/* Must be called with the graph lock held. */
static struct lock_chain *alloc_lock_chain(void)
{
	int idx = find_first_zero_bit(lock_chains_in_use,
				      ARRAY_SIZE(lock_chains));

	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
		return NULL;
	__set_bit(idx, lock_chains_in_use);
	return lock_chains + idx;
}
/*
 * Adds a dependency chain into the chain hashtable. Must be called with
 * graph_lock held.
 *
 * Return 0 if fail, and graph_lock is released.
 * Return 1 if succeed, with graph_lock held.
 */
static inline int add_chain_cache(struct task_struct *curr,
				  struct held_lock *hlock,
				  u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	int i, j;

	/*
	 * The caller must hold the graph lock, ensure we've got IRQs
	 * disabled to make this an IRQ-safe lock.. for recursion reasons
	 * lockdep won't complain about its own locking errors.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	chain = alloc_lock_chain();
	if (!chain) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
		dump_stack();
		return 0;
	}
	chain->chain_key = chain_key;
	chain->irq_context = hlock->irq_context;
	i = get_first_held_lock(curr, hlock);
	chain->depth = curr->lockdep_depth + 1 - i;

	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));

	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
		chain->base = nr_chain_hlocks;
		for (j = 0; j < chain->depth - 1; j++, i++) {
			int lock_id = curr->held_locks[i].class_idx;

			chain_hlocks[chain->base + j] = lock_id;
		}
		chain_hlocks[chain->base + j] = class - lock_classes;
		nr_chain_hlocks += chain->depth;
	} else {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
		dump_stack();
		return 0;
	}

	hlist_add_head_rcu(&chain->entry, hash_head);
	debug_atomic_inc(chain_lookup_misses);
	inc_chains();

	return 1;
}
/*
 * Look up a dependency chain. Must be called with either the graph lock or
 * the RCU read lock held.
 */
static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
{
	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;

	hlist_for_each_entry_rcu(chain, hash_head, entry) {
		if (READ_ONCE(chain->chain_key) == chain_key) {
			debug_atomic_inc(chain_lookup_hits);
			return chain;
		}
	}
	return NULL;
}

/*
 * If the key is not present yet in the dependency chain cache then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1, graph_lock is held.)
 */
static inline int lookup_chain_cache_add(struct task_struct *curr,
					 struct held_lock *hlock,
					 u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct lock_chain *chain = lookup_chain_cache(chain_key);

	if (chain) {
cache_hit:
		if (!check_no_collision(curr, hlock, chain))
			return 0;

		if (very_verbose(class)) {
			printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%px] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
		}

		return 0;
	}

	if (very_verbose(class)) {
		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
			(unsigned long long)chain_key, class->key, class->name);
	}

	if (!graph_lock())
		return 0;

	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	chain = lookup_chain_cache(chain_key);
	if (chain) {
		graph_unlock();
		goto cache_hit;
	}

	if (!add_chain_cache(curr, hlock, chain_key))
		return 0;

	return 1;
}

static int validate_chain(struct task_struct *curr,
			  struct held_lock *hlock,
			  int chain_head, u64 chain_key)
{
	/*
	 * Trylock needs to maintain the stack of held locks, but it
	 * does not add new dependencies, because trylock can be done
	 * in any order.
	 *
	 * We look up the chain_key and do the O(N^2) check and update of
	 * the dependencies only if this is a new dependency chain.
	 * (If lookup_chain_cache_add() returns 1 it acquires
	 * graph_lock for us.)
	 */
	if (!hlock->trylock && hlock->check &&
	    lookup_chain_cache_add(curr, hlock, chain_key)) {
		/*
		 * Check whether last held lock:
		 *
		 * - is irq-safe, if this lock is irq-unsafe
		 * - is softirq-safe, if this lock is hardirq-unsafe
		 *
		 * And check whether the new lock's dependency graph
		 * could lead back to the previous lock:
		 *
		 * - within the current held-lock stack
		 * - across our accumulated lock dependency records
		 *
		 * any of these scenarios could lead to a deadlock.
		 */
		/*
		 * The simple case: does the current task hold the same lock
		 * again?
		 */
		int ret = check_deadlock(curr, hlock);

		if (!ret)
			return 0;
		/*
		 * Mark recursive read, as we jump over it when
		 * building dependencies (just like we jump over
		 * trylock entries):
		 */
		if (ret == 2)
			hlock->read = 2;
		/*
		 * Add dependency only if this lock is not the head
		 * of the chain, and if it's not a secondary read-lock:
		 */
		if (!chain_head && ret != 2) {
			if (!check_prevs_add(curr, hlock))
				return 0;
		}

		graph_unlock();
	} else {
		/* after lookup_chain_cache_add(): */
		if (unlikely(!debug_locks))
			return 0;
	}

	return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
				 struct held_lock *hlock,
				 int chain_head, u64 chain_key)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
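
/*
 * Illustrative sketch of the chain-key mechanism used above (the loop
 * below is not a lockdep helper, it only mirrors what validate_chain()
 * and check_chain_key() rely on): the key of a held-lock stack is built
 * by folding each class index into a running hash, so an identical
 * sequence of lock classes always maps to the same cached chain:
 *
 *	u64 key = INITIAL_CHAIN_KEY;
 *
 *	for (i = 0; i < depth; i++)
 *		key = iterate_chain_key(key, held_locks[i].class_idx);
 *
 * A hit on that key in the chain hash means the whole sequence was
 * already validated and the expensive graph walk can be skipped.
 */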
/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	struct held_lock *hlock, *prev_hlock = NULL;
	unsigned int i;
	u64 chain_key = INITIAL_CHAIN_KEY;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		if (chain_key != hlock->prev_chain_key) {
			debug_locks_off();
			/*
			 * We got mighty confused, our chain keys don't match
			 * with what we expect, did someone trample on our task state?
			 */
			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
				curr->lockdep_depth, i,
				(unsigned long long)chain_key,
				(unsigned long long)hlock->prev_chain_key);
			return;
		}

		/*
		 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
		 * it a registered lock class index?
		 */
		if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
			return;

		if (prev_hlock && (prev_hlock->irq_context !=
							hlock->irq_context))
			chain_key = INITIAL_CHAIN_KEY;
		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
		prev_hlock = hlock;
	}
	if (chain_key != curr->curr_chain_key) {
		debug_locks_off();
		/*
		 * More smoking hash instead of calculating it, damn see these
		 * numbers float.. I bet that a pink elephant stepped on my memory.
		 */
		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
	}
#endif
}
2972 #ifdef CONFIG_PROVE_LOCKING
2973 static int mark_lock(struct task_struct
*curr
, struct held_lock
*this,
2974 enum lock_usage_bit new_bit
);
2976 static void print_usage_bug_scenario(struct held_lock
*lock
)
2978 struct lock_class
*class = hlock_class(lock
);
2980 printk(" Possible unsafe locking scenario:\n\n");
2984 __print_lock_name(class);
2985 printk(KERN_CONT
");\n");
2986 printk(" <Interrupt>\n");
2988 __print_lock_name(class);
2989 printk(KERN_CONT
");\n");
2990 printk("\n *** DEADLOCK ***\n\n");
2994 print_usage_bug(struct task_struct
*curr
, struct held_lock
*this,
2995 enum lock_usage_bit prev_bit
, enum lock_usage_bit new_bit
)
2997 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
3001 pr_warn("================================\n");
3002 pr_warn("WARNING: inconsistent lock state\n");
3003 print_kernel_ident();
3004 pr_warn("--------------------------------\n");
3006 pr_warn("inconsistent {%s} -> {%s} usage.\n",
3007 usage_str
[prev_bit
], usage_str
[new_bit
]);
3009 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3010 curr
->comm
, task_pid_nr(curr
),
3011 trace_hardirq_context(curr
), hardirq_count() >> HARDIRQ_SHIFT
,
3012 trace_softirq_context(curr
), softirq_count() >> SOFTIRQ_SHIFT
,
3013 trace_hardirqs_enabled(curr
),
3014 trace_softirqs_enabled(curr
));
3017 pr_warn("{%s} state was registered at:\n", usage_str
[prev_bit
]);
3018 print_lock_trace(hlock_class(this)->usage_traces
+ prev_bit
, 1);
3020 print_irqtrace_events(curr
);
3021 pr_warn("\nother info that might help us debug this:\n");
3022 print_usage_bug_scenario(this);
3024 lockdep_print_held_locks(curr
);
3026 pr_warn("\nstack backtrace:\n");
3031 * Print out an error if an invalid bit is set:
3034 valid_state(struct task_struct
*curr
, struct held_lock
*this,
3035 enum lock_usage_bit new_bit
, enum lock_usage_bit bad_bit
)
3037 if (unlikely(hlock_class(this)->usage_mask
& (1 << bad_bit
))) {
3038 print_usage_bug(curr
, this, bad_bit
, new_bit
);
3046 * print irq inversion bug:
3049 print_irq_inversion_bug(struct task_struct
*curr
,
3050 struct lock_list
*root
, struct lock_list
*other
,
3051 struct held_lock
*this, int forwards
,
3052 const char *irqclass
)
3054 struct lock_list
*entry
= other
;
3055 struct lock_list
*middle
= NULL
;
3058 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
3062 pr_warn("========================================================\n");
3063 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3064 print_kernel_ident();
3065 pr_warn("--------------------------------------------------------\n");
3066 pr_warn("%s/%d just changed the state of lock:\n",
3067 curr
->comm
, task_pid_nr(curr
));
3070 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass
);
3072 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass
);
3073 print_lock_name(other
->class);
3074 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3076 pr_warn("\nother info that might help us debug this:\n");
3078 /* Find a middle lock (if one exists) */
3079 depth
= get_lock_depth(other
);
3081 if (depth
== 0 && (entry
!= root
)) {
3082 pr_warn("lockdep:%s bad path found in chain graph\n", __func__
);
3086 entry
= get_lock_parent(entry
);
3088 } while (entry
&& entry
!= root
&& (depth
>= 0));
3090 print_irq_lock_scenario(root
, other
,
3091 middle
? middle
->class : root
->class, other
->class);
3093 print_irq_lock_scenario(other
, root
,
3094 middle
? middle
->class : other
->class, root
->class);
3096 lockdep_print_held_locks(curr
);
3098 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3099 if (!save_trace(&root
->trace
))
3101 print_shortest_lock_dependencies(other
, root
);
3103 pr_warn("\nstack backtrace:\n");
3108 * Prove that in the forwards-direction subgraph starting at <this>
3109 * there is no lock matching <mask>:
3112 check_usage_forwards(struct task_struct
*curr
, struct held_lock
*this,
3113 enum lock_usage_bit bit
, const char *irqclass
)
3116 struct lock_list root
;
3117 struct lock_list
*uninitialized_var(target_entry
);
3120 root
.class = hlock_class(this);
3121 ret
= find_usage_forwards(&root
, lock_flag(bit
), &target_entry
);
3129 print_irq_inversion_bug(curr
, &root
, target_entry
,
3135 * Prove that in the backwards-direction subgraph starting at <this>
3136 * there is no lock matching <mask>:
3139 check_usage_backwards(struct task_struct
*curr
, struct held_lock
*this,
3140 enum lock_usage_bit bit
, const char *irqclass
)
3143 struct lock_list root
;
3144 struct lock_list
*uninitialized_var(target_entry
);
3147 root
.class = hlock_class(this);
3148 ret
= find_usage_backwards(&root
, lock_flag(bit
), &target_entry
);
3156 print_irq_inversion_bug(curr
, &root
, target_entry
,
void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
		(void *)curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
		(void *)curr->hardirq_disable_ip);
	printk("softirqs last enabled at (%u): [<%px>] %pS\n",
		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
		(void *)curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
		(void *)curr->softirq_disable_ip);
}
3178 static int HARDIRQ_verbose(struct lock_class
*class)
3181 return class_filter(class);
3186 static int SOFTIRQ_verbose(struct lock_class
*class)
3189 return class_filter(class);
3194 #define STRICT_READ_CHECKS 1
3196 static int (*state_verbose_f
[])(struct lock_class
*class) = {
3197 #define LOCKDEP_STATE(__STATE) \
3199 #include "lockdep_states.h"
3200 #undef LOCKDEP_STATE
3203 static inline int state_verbose(enum lock_usage_bit bit
,
3204 struct lock_class
*class)
3206 return state_verbose_f
[bit
>> LOCK_USAGE_DIR_MASK
](class);
3209 typedef int (*check_usage_f
)(struct task_struct
*, struct held_lock
*,
3210 enum lock_usage_bit bit
, const char *name
);
static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int excl_bit = exclusive_bit(new_bit);
	int read = new_bit & LOCK_USAGE_READ_MASK;
	int dir = new_bit & LOCK_USAGE_DIR_MASK;

	/*
	 * mark USED_IN has to look forwards -- to ensure no dependency
	 * has ENABLED state, which would allow recursion deadlocks.
	 *
	 * mark ENABLED has to look backwards -- to ensure no dependee
	 * has USED_IN state, which, again, would allow recursion deadlocks.
	 */
	check_usage_f usage = dir ?
		check_usage_backwards : check_usage_forwards;

	/*
	 * Validate that this particular lock does not have conflicting
	 * usage states.
	 */
	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;

	/*
	 * Validate that the lock dependencies don't have conflicting usage
	 * states.
	 */
	if ((!read || STRICT_READ_CHECKS) &&
			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
		return 0;

	/*
	 * Check for read in write conflicts
	 */
	if (!read) {
		if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
			return 0;

		if (STRICT_READ_CHECKS &&
			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
				state_name(new_bit + LOCK_USAGE_READ_MASK)))
			return 0;
	}

	if (state_verbose(new_bit, hlock_class(this)))
		return 2;

	return 1;
}
3265 * Mark all held locks with a usage bit:
3268 mark_held_locks(struct task_struct
*curr
, enum lock_usage_bit base_bit
)
3270 struct held_lock
*hlock
;
3273 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
3274 enum lock_usage_bit hlock_bit
= base_bit
;
3275 hlock
= curr
->held_locks
+ i
;
3278 hlock_bit
+= LOCK_USAGE_READ_MASK
;
3280 BUG_ON(hlock_bit
>= LOCK_USAGE_STATES
);
3285 if (!mark_lock(curr
, hlock
, hlock_bit
))
3293 * Hardirqs will be enabled:
3295 static void __trace_hardirqs_on_caller(unsigned long ip
)
3297 struct task_struct
*curr
= current
;
3299 /* we'll do an OFF -> ON transition: */
3300 curr
->hardirqs_enabled
= 1;
3303 * We are going to turn hardirqs on, so set the
3304 * usage bit for all held locks:
3306 if (!mark_held_locks(curr
, LOCK_ENABLED_HARDIRQ
))
3309 * If we have softirqs enabled, then set the usage
3310 * bit for all held locks. (disabled hardirqs prevented
3311 * this bit from being set before)
3313 if (curr
->softirqs_enabled
)
3314 if (!mark_held_locks(curr
, LOCK_ENABLED_SOFTIRQ
))
3317 curr
->hardirq_enable_ip
= ip
;
3318 curr
->hardirq_enable_event
= ++curr
->irq_events
;
3319 debug_atomic_inc(hardirqs_on_events
);
3322 void lockdep_hardirqs_on(unsigned long ip
)
3324 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3327 if (unlikely(current
->hardirqs_enabled
)) {
3329 * Neither irq nor preemption are disabled here
3330 * so this is racy by nature but losing one hit
3331 * in a stat is not a big deal.
3333 __debug_atomic_inc(redundant_hardirqs_on
);
3338 * We're enabling irqs and according to our state above irqs weren't
3339 * already enabled, yet we find the hardware thinks they are in fact
3340 * enabled.. someone messed up their IRQ state tracing.
3342 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3346 * See the fine text that goes along with this variable definition.
3348 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled
))
3352 * Can't allow enabling interrupts while in an interrupt handler,
3353 * that's general bad form and such. Recursion, limited stack etc..
3355 if (DEBUG_LOCKS_WARN_ON(current
->hardirq_context
))
3358 current
->lockdep_recursion
= 1;
3359 __trace_hardirqs_on_caller(ip
);
3360 current
->lockdep_recursion
= 0;
3362 NOKPROBE_SYMBOL(lockdep_hardirqs_on
);
3365 * Hardirqs were disabled:
3367 void lockdep_hardirqs_off(unsigned long ip
)
3369 struct task_struct
*curr
= current
;
3371 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3375 * So we're supposed to get called after you mask local IRQs, but for
3376 * some reason the hardware doesn't quite think you did a proper job.
3378 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3381 if (curr
->hardirqs_enabled
) {
3383 * We have done an ON -> OFF transition:
3385 curr
->hardirqs_enabled
= 0;
3386 curr
->hardirq_disable_ip
= ip
;
3387 curr
->hardirq_disable_event
= ++curr
->irq_events
;
3388 debug_atomic_inc(hardirqs_off_events
);
3390 debug_atomic_inc(redundant_hardirqs_off
);
3392 NOKPROBE_SYMBOL(lockdep_hardirqs_off
);
3395 * Softirqs will be enabled:
3397 void trace_softirqs_on(unsigned long ip
)
3399 struct task_struct
*curr
= current
;
3401 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3405 * We fancy IRQs being disabled here, see softirq.c, avoids
3406 * funny state and nesting things.
3408 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3411 if (curr
->softirqs_enabled
) {
3412 debug_atomic_inc(redundant_softirqs_on
);
3416 current
->lockdep_recursion
= 1;
3418 * We'll do an OFF -> ON transition:
3420 curr
->softirqs_enabled
= 1;
3421 curr
->softirq_enable_ip
= ip
;
3422 curr
->softirq_enable_event
= ++curr
->irq_events
;
3423 debug_atomic_inc(softirqs_on_events
);
3425 * We are going to turn softirqs on, so set the
3426 * usage bit for all held locks, if hardirqs are
3429 if (curr
->hardirqs_enabled
)
3430 mark_held_locks(curr
, LOCK_ENABLED_SOFTIRQ
);
3431 current
->lockdep_recursion
= 0;
3435 * Softirqs were disabled:
3437 void trace_softirqs_off(unsigned long ip
)
3439 struct task_struct
*curr
= current
;
3441 if (unlikely(!debug_locks
|| current
->lockdep_recursion
))
3445 * We fancy IRQs being disabled here, see softirq.c
3447 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3450 if (curr
->softirqs_enabled
) {
3452 * We have done an ON -> OFF transition:
3454 curr
->softirqs_enabled
= 0;
3455 curr
->softirq_disable_ip
= ip
;
3456 curr
->softirq_disable_event
= ++curr
->irq_events
;
3457 debug_atomic_inc(softirqs_off_events
);
3459 * Whoops, we wanted softirqs off, so why aren't they?
3461 DEBUG_LOCKS_WARN_ON(!softirq_count());
3463 debug_atomic_inc(redundant_softirqs_off
);
3467 mark_usage(struct task_struct
*curr
, struct held_lock
*hlock
, int check
)
3473 * If non-trylock use in a hardirq or softirq context, then
3474 * mark the lock as used in these contexts:
3476 if (!hlock
->trylock
) {
3478 if (curr
->hardirq_context
)
3479 if (!mark_lock(curr
, hlock
,
3480 LOCK_USED_IN_HARDIRQ_READ
))
3482 if (curr
->softirq_context
)
3483 if (!mark_lock(curr
, hlock
,
3484 LOCK_USED_IN_SOFTIRQ_READ
))
3487 if (curr
->hardirq_context
)
3488 if (!mark_lock(curr
, hlock
, LOCK_USED_IN_HARDIRQ
))
3490 if (curr
->softirq_context
)
3491 if (!mark_lock(curr
, hlock
, LOCK_USED_IN_SOFTIRQ
))
3495 if (!hlock
->hardirqs_off
) {
3497 if (!mark_lock(curr
, hlock
,
3498 LOCK_ENABLED_HARDIRQ_READ
))
3500 if (curr
->softirqs_enabled
)
3501 if (!mark_lock(curr
, hlock
,
3502 LOCK_ENABLED_SOFTIRQ_READ
))
3505 if (!mark_lock(curr
, hlock
,
3506 LOCK_ENABLED_HARDIRQ
))
3508 if (curr
->softirqs_enabled
)
3509 if (!mark_lock(curr
, hlock
,
3510 LOCK_ENABLED_SOFTIRQ
))
3516 /* mark it as used: */
3517 if (!mark_lock(curr
, hlock
, LOCK_USED
))
3523 static inline unsigned int task_irq_context(struct task_struct
*task
)
3525 return 2 * !!task
->hardirq_context
+ !!task
->softirq_context
;
3528 static int separate_irq_context(struct task_struct
*curr
,
3529 struct held_lock
*hlock
)
3531 unsigned int depth
= curr
->lockdep_depth
;
3534 * Keep track of points where we cross into an interrupt context:
3537 struct held_lock
*prev_hlock
;
3539 prev_hlock
= curr
->held_locks
+ depth
-1;
3541 * If we cross into another context, reset the
3542 * hash key (this also prevents the checking and the
3543 * adding of the dependency to 'prev'):
3545 if (prev_hlock
->irq_context
!= hlock
->irq_context
)
3552 * Mark a lock with a usage bit, and validate the state transition:
3554 static int mark_lock(struct task_struct
*curr
, struct held_lock
*this,
3555 enum lock_usage_bit new_bit
)
3557 unsigned int new_mask
= 1 << new_bit
, ret
= 1;
3559 if (new_bit
>= LOCK_USAGE_STATES
) {
3560 DEBUG_LOCKS_WARN_ON(1);
3565 * If already set then do not dirty the cacheline,
3566 * nor do any checks:
3568 if (likely(hlock_class(this)->usage_mask
& new_mask
))
3574 * Make sure we didn't race:
3576 if (unlikely(hlock_class(this)->usage_mask
& new_mask
)) {
3581 hlock_class(this)->usage_mask
|= new_mask
;
3583 if (!save_trace(hlock_class(this)->usage_traces
+ new_bit
))
3588 debug_atomic_dec(nr_unused_locks
);
3591 ret
= mark_lock_irq(curr
, this, new_bit
);
3599 * We must printk outside of the graph_lock:
3602 printk("\nmarked lock as {%s}:\n", usage_str
[new_bit
]);
3604 print_irqtrace_events(curr
);
3611 #else /* CONFIG_PROVE_LOCKING */
3614 mark_usage(struct task_struct
*curr
, struct held_lock
*hlock
, int check
)
3619 static inline unsigned int task_irq_context(struct task_struct
*task
)
3624 static inline int separate_irq_context(struct task_struct
*curr
,
3625 struct held_lock
*hlock
)
3630 #endif /* CONFIG_PROVE_LOCKING */
/*
 * Initialize a lock instance's lock-class mapping info:
 */
void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	int i;

	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		lock->class_cache[i] = NULL;

#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif

	/*
	 * Can't be having no nameless bastards around this place!
	 */
	if (DEBUG_LOCKS_WARN_ON(!name)) {
		lock->name = "NULL";
		return;
	}

	lock->name = name;

	/*
	 * No key, no joy, we need to hash something.
	 */
	if (DEBUG_LOCKS_WARN_ON(!key))
		return;
	/*
	 * Sanity check, the lock-class key must either have been allocated
	 * statically or must have been registered as a dynamic key.
	 */
	if (!static_obj(key) && !is_dynamic_key(key)) {
		if (debug_locks)
			printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
		DEBUG_LOCKS_WARN_ON(1);
		return;
	}
	lock->key = key;

	if (unlikely(!debug_locks))
		return;

	if (subclass) {
		unsigned long flags;

		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
			return;

		raw_local_irq_save(flags);
		current->lockdep_recursion = 1;
		register_lock_class(lock, subclass, 1);
		current->lockdep_recursion = 0;
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(lockdep_init_map);

struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
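
/*
 * Usage sketch for lockdep_init_map() (illustrative only; callers normally
 * reach it through the per-lock-type init helpers such as spin_lock_init()
 * rather than calling it directly). A statically allocated key gives every
 * init site its own lock class; "my_key" and "my_map" are made-up names:
 *
 *	static struct lock_class_key my_key;
 *	struct lockdep_map my_map;
 *
 *	lockdep_init_map(&my_map, "my_map", &my_key, 0);
 */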
3696 print_lock_nested_lock_not_held(struct task_struct
*curr
,
3697 struct held_lock
*hlock
,
3700 if (!debug_locks_off())
3702 if (debug_locks_silent
)
3706 pr_warn("==================================\n");
3707 pr_warn("WARNING: Nested lock was not taken\n");
3708 print_kernel_ident();
3709 pr_warn("----------------------------------\n");
3711 pr_warn("%s/%d is trying to lock:\n", curr
->comm
, task_pid_nr(curr
));
3714 pr_warn("\nbut this task is not holding:\n");
3715 pr_warn("%s\n", hlock
->nest_lock
->name
);
3717 pr_warn("\nstack backtrace:\n");
3720 pr_warn("\nother info that might help us debug this:\n");
3721 lockdep_print_held_locks(curr
);
3723 pr_warn("\nstack backtrace:\n");
3727 static int __lock_is_held(const struct lockdep_map
*lock
, int read
);
3730 * This gets called for every mutex_lock*()/spin_lock*() operation.
3731 * We maintain the dependency maps and validate the locking attempt:
3733 * The callers must make sure that IRQs are disabled before calling it,
3734 * otherwise we could get an interrupt which would want to take locks,
3735 * which would end up in lockdep again.
3737 static int __lock_acquire(struct lockdep_map
*lock
, unsigned int subclass
,
3738 int trylock
, int read
, int check
, int hardirqs_off
,
3739 struct lockdep_map
*nest_lock
, unsigned long ip
,
3740 int references
, int pin_count
)
3742 struct task_struct
*curr
= current
;
3743 struct lock_class
*class = NULL
;
3744 struct held_lock
*hlock
;
3750 if (unlikely(!debug_locks
))
3753 if (!prove_locking
|| lock
->key
== &__lockdep_no_validate__
)
3756 if (subclass
< NR_LOCKDEP_CACHING_CLASSES
)
3757 class = lock
->class_cache
[subclass
];
3761 if (unlikely(!class)) {
3762 class = register_lock_class(lock
, subclass
, 0);
3767 debug_class_ops_inc(class);
3769 if (very_verbose(class)) {
3770 printk("\nacquire class [%px] %s", class->key
, class->name
);
3771 if (class->name_version
> 1)
3772 printk(KERN_CONT
"#%d", class->name_version
);
3773 printk(KERN_CONT
"\n");
3778 * Add the lock to the list of currently held locks.
3779 * (we dont increase the depth just yet, up until the
3780 * dependency checks are done)
3782 depth
= curr
->lockdep_depth
;
3784 * Ran out of static storage for our per-task lock stack again have we?
3786 if (DEBUG_LOCKS_WARN_ON(depth
>= MAX_LOCK_DEPTH
))
3789 class_idx
= class - lock_classes
;
3792 hlock
= curr
->held_locks
+ depth
- 1;
3793 if (hlock
->class_idx
== class_idx
&& nest_lock
) {
3797 if (!hlock
->references
)
3798 hlock
->references
++;
3800 hlock
->references
+= references
;
3803 if (DEBUG_LOCKS_WARN_ON(hlock
->references
< references
))
3810 hlock
= curr
->held_locks
+ depth
;
3812 * Plain impossible, we just registered it and checked it weren't no
3813 * NULL like.. I bet this mushroom I ate was good!
3815 if (DEBUG_LOCKS_WARN_ON(!class))
3817 hlock
->class_idx
= class_idx
;
3818 hlock
->acquire_ip
= ip
;
3819 hlock
->instance
= lock
;
3820 hlock
->nest_lock
= nest_lock
;
3821 hlock
->irq_context
= task_irq_context(curr
);
3822 hlock
->trylock
= trylock
;
3824 hlock
->check
= check
;
3825 hlock
->hardirqs_off
= !!hardirqs_off
;
3826 hlock
->references
= references
;
3827 #ifdef CONFIG_LOCK_STAT
3828 hlock
->waittime_stamp
= 0;
3829 hlock
->holdtime_stamp
= lockstat_clock();
3831 hlock
->pin_count
= pin_count
;
3833 /* Initialize the lock usage bit */
3834 if (!mark_usage(curr
, hlock
, check
))
3838 * Calculate the chain hash: it's the combined hash of all the
3839 * lock keys along the dependency chain. We save the hash value
3840 * at every step so that we can get the current hash easily
3841 * after unlock. The chain hash is then used to cache dependency
3844 * The 'key ID' is what is the most compact key value to drive
3845 * the hash, not class->key.
3848 * Whoops, we did it again.. class_idx is invalid.
3850 if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx
, lock_classes_in_use
)))
3853 chain_key
= curr
->curr_chain_key
;
3856 * How can we have a chain hash when we ain't got no keys?!
3858 if (DEBUG_LOCKS_WARN_ON(chain_key
!= INITIAL_CHAIN_KEY
))
3863 hlock
->prev_chain_key
= chain_key
;
3864 if (separate_irq_context(curr
, hlock
)) {
3865 chain_key
= INITIAL_CHAIN_KEY
;
3868 chain_key
= iterate_chain_key(chain_key
, class_idx
);
3870 if (nest_lock
&& !__lock_is_held(nest_lock
, -1)) {
3871 print_lock_nested_lock_not_held(curr
, hlock
, ip
);
3875 if (!debug_locks_silent
) {
3876 WARN_ON_ONCE(depth
&& !hlock_class(hlock
- 1)->key
);
3877 WARN_ON_ONCE(!hlock_class(hlock
)->key
);
3880 if (!validate_chain(curr
, hlock
, chain_head
, chain_key
))
3883 curr
->curr_chain_key
= chain_key
;
3884 curr
->lockdep_depth
++;
3885 check_chain_key(curr
);
3886 #ifdef CONFIG_DEBUG_LOCKDEP
3887 if (unlikely(!debug_locks
))
3890 if (unlikely(curr
->lockdep_depth
>= MAX_LOCK_DEPTH
)) {
3892 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3893 printk(KERN_DEBUG
"depth: %i max: %lu!\n",
3894 curr
->lockdep_depth
, MAX_LOCK_DEPTH
);
3896 lockdep_print_held_locks(current
);
3897 debug_show_all_locks();
3903 if (unlikely(curr
->lockdep_depth
> max_lockdep_depth
))
3904 max_lockdep_depth
= curr
->lockdep_depth
;
3909 static void print_unlock_imbalance_bug(struct task_struct
*curr
,
3910 struct lockdep_map
*lock
,
3913 if (!debug_locks_off())
3915 if (debug_locks_silent
)
3919 pr_warn("=====================================\n");
3920 pr_warn("WARNING: bad unlock balance detected!\n");
3921 print_kernel_ident();
3922 pr_warn("-------------------------------------\n");
3923 pr_warn("%s/%d is trying to release lock (",
3924 curr
->comm
, task_pid_nr(curr
));
3925 print_lockdep_cache(lock
);
3928 pr_warn("but there are no more locks to release!\n");
3929 pr_warn("\nother info that might help us debug this:\n");
3930 lockdep_print_held_locks(curr
);
3932 pr_warn("\nstack backtrace:\n");
3936 static int match_held_lock(const struct held_lock
*hlock
,
3937 const struct lockdep_map
*lock
)
3939 if (hlock
->instance
== lock
)
3942 if (hlock
->references
) {
3943 const struct lock_class
*class = lock
->class_cache
[0];
3946 class = look_up_lock_class(lock
, 0);
3949 * If look_up_lock_class() failed to find a class, we're trying
3950 * to test if we hold a lock that has never yet been acquired.
3951 * Clearly if the lock hasn't been acquired _ever_, we're not
3952 * holding it either, so report failure.
3958 * References, but not a lock we're actually ref-counting?
3959 * State got messed up, follow the sites that change ->references
3960 * and try to make sense of it.
3962 if (DEBUG_LOCKS_WARN_ON(!hlock
->nest_lock
))
3965 if (hlock
->class_idx
== class - lock_classes
)
3972 /* @depth must not be zero */
3973 static struct held_lock
*find_held_lock(struct task_struct
*curr
,
3974 struct lockdep_map
*lock
,
3975 unsigned int depth
, int *idx
)
3977 struct held_lock
*ret
, *hlock
, *prev_hlock
;
3981 hlock
= curr
->held_locks
+ i
;
3983 if (match_held_lock(hlock
, lock
))
3987 for (i
--, prev_hlock
= hlock
--;
3989 i
--, prev_hlock
= hlock
--) {
3991 * We must not cross into another context:
3993 if (prev_hlock
->irq_context
!= hlock
->irq_context
) {
3997 if (match_held_lock(hlock
, lock
)) {
4008 static int reacquire_held_locks(struct task_struct
*curr
, unsigned int depth
,
4009 int idx
, unsigned int *merged
)
4011 struct held_lock
*hlock
;
4012 int first_idx
= idx
;
4014 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4017 for (hlock
= curr
->held_locks
+ idx
; idx
< depth
; idx
++, hlock
++) {
4018 switch (__lock_acquire(hlock
->instance
,
4019 hlock_class(hlock
)->subclass
,
4021 hlock
->read
, hlock
->check
,
4022 hlock
->hardirqs_off
,
4023 hlock
->nest_lock
, hlock
->acquire_ip
,
4024 hlock
->references
, hlock
->pin_count
)) {
4030 *merged
+= (idx
== first_idx
);
4041 __lock_set_class(struct lockdep_map
*lock
, const char *name
,
4042 struct lock_class_key
*key
, unsigned int subclass
,
4045 struct task_struct
*curr
= current
;
4046 unsigned int depth
, merged
= 0;
4047 struct held_lock
*hlock
;
4048 struct lock_class
*class;
4051 if (unlikely(!debug_locks
))
4054 depth
= curr
->lockdep_depth
;
4056 * This function is about (re)setting the class of a held lock,
4057 * yet we're not actually holding any locks. Naughty user!
4059 if (DEBUG_LOCKS_WARN_ON(!depth
))
4062 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4064 print_unlock_imbalance_bug(curr
, lock
, ip
);
4068 lockdep_init_map(lock
, name
, key
, 0);
4069 class = register_lock_class(lock
, subclass
, 0);
4070 hlock
->class_idx
= class - lock_classes
;
4072 curr
->lockdep_depth
= i
;
4073 curr
->curr_chain_key
= hlock
->prev_chain_key
;
4075 if (reacquire_held_locks(curr
, depth
, i
, &merged
))
4079 * I took it apart and put it back together again, except now I have
4080 * these 'spare' parts.. where shall I put them.
4082 if (DEBUG_LOCKS_WARN_ON(curr
->lockdep_depth
!= depth
- merged
))
4087 static int __lock_downgrade(struct lockdep_map
*lock
, unsigned long ip
)
4089 struct task_struct
*curr
= current
;
4090 unsigned int depth
, merged
= 0;
4091 struct held_lock
*hlock
;
4094 if (unlikely(!debug_locks
))
4097 depth
= curr
->lockdep_depth
;
4099 * This function is about (re)setting the class of a held lock,
4100 * yet we're not actually holding any locks. Naughty user!
4102 if (DEBUG_LOCKS_WARN_ON(!depth
))
4105 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4107 print_unlock_imbalance_bug(curr
, lock
, ip
);
4111 curr
->lockdep_depth
= i
;
4112 curr
->curr_chain_key
= hlock
->prev_chain_key
;
4114 WARN(hlock
->read
, "downgrading a read lock");
4116 hlock
->acquire_ip
= ip
;
4118 if (reacquire_held_locks(curr
, depth
, i
, &merged
))
4121 /* Merging can't happen with unchanged classes.. */
4122 if (DEBUG_LOCKS_WARN_ON(merged
))
4126 * I took it apart and put it back together again, except now I have
4127 * these 'spare' parts.. where shall I put them.
4129 if (DEBUG_LOCKS_WARN_ON(curr
->lockdep_depth
!= depth
))
4136 * Remove the lock to the list of currently held locks - this gets
4137 * called on mutex_unlock()/spin_unlock*() (or on a failed
4138 * mutex_lock_interruptible()).
4140 * @nested is an hysterical artifact, needs a tree wide cleanup.
4143 __lock_release(struct lockdep_map
*lock
, unsigned long ip
)
4145 struct task_struct
*curr
= current
;
4146 unsigned int depth
, merged
= 1;
4147 struct held_lock
*hlock
;
4150 if (unlikely(!debug_locks
))
4153 depth
= curr
->lockdep_depth
;
4155 * So we're all set to release this lock.. wait what lock? We don't
4156 * own any locks, you've been drinking again?
4159 print_unlock_imbalance_bug(curr
, lock
, ip
);
4164 * Check whether the lock exists in the current stack
4167 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4169 print_unlock_imbalance_bug(curr
, lock
, ip
);
4173 if (hlock
->instance
== lock
)
4174 lock_release_holdtime(hlock
);
4176 WARN(hlock
->pin_count
, "releasing a pinned lock\n");
4178 if (hlock
->references
) {
4179 hlock
->references
--;
4180 if (hlock
->references
) {
4182 * We had, and after removing one, still have
4183 * references, the current lock stack is still
4184 * valid. We're done!
4191 * We have the right lock to unlock, 'hlock' points to it.
4192 * Now we remove it from the stack, and add back the other
4193 * entries (if any), recalculating the hash along the way:
4196 curr
->lockdep_depth
= i
;
4197 curr
->curr_chain_key
= hlock
->prev_chain_key
;
4200 * The most likely case is when the unlock is on the innermost
4201 * lock. In this case, we are done!
4206 if (reacquire_held_locks(curr
, depth
, i
+ 1, &merged
))
4210 * We had N bottles of beer on the wall, we drank one, but now
4211 * there's not N-1 bottles of beer left on the wall...
4212 * Pouring two of the bottles together is acceptable.
4214 DEBUG_LOCKS_WARN_ON(curr
->lockdep_depth
!= depth
- merged
);
4217 * Since reacquire_held_locks() would have called check_chain_key()
4218 * indirectly via __lock_acquire(), we don't need to do it again
4224 static nokprobe_inline
4225 int __lock_is_held(const struct lockdep_map
*lock
, int read
)
4227 struct task_struct
*curr
= current
;
4230 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
4231 struct held_lock
*hlock
= curr
->held_locks
+ i
;
4233 if (match_held_lock(hlock
, lock
)) {
4234 if (read
== -1 || hlock
->read
== read
)
4244 static struct pin_cookie
__lock_pin_lock(struct lockdep_map
*lock
)
4246 struct pin_cookie cookie
= NIL_COOKIE
;
4247 struct task_struct
*curr
= current
;
4250 if (unlikely(!debug_locks
))
4253 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
4254 struct held_lock
*hlock
= curr
->held_locks
+ i
;
4256 if (match_held_lock(hlock
, lock
)) {
4258 * Grab 16bits of randomness; this is sufficient to not
4259 * be guessable and still allows some pin nesting in
4260 * our u32 pin_count.
4262 cookie
.val
= 1 + (prandom_u32() >> 16);
4263 hlock
->pin_count
+= cookie
.val
;
4268 WARN(1, "pinning an unheld lock\n");
4272 static void __lock_repin_lock(struct lockdep_map
*lock
, struct pin_cookie cookie
)
4274 struct task_struct
*curr
= current
;
4277 if (unlikely(!debug_locks
))
4280 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
4281 struct held_lock
*hlock
= curr
->held_locks
+ i
;
4283 if (match_held_lock(hlock
, lock
)) {
4284 hlock
->pin_count
+= cookie
.val
;
4289 WARN(1, "pinning an unheld lock\n");
4292 static void __lock_unpin_lock(struct lockdep_map
*lock
, struct pin_cookie cookie
)
4294 struct task_struct
*curr
= current
;
4297 if (unlikely(!debug_locks
))
4300 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
4301 struct held_lock
*hlock
= curr
->held_locks
+ i
;
4303 if (match_held_lock(hlock
, lock
)) {
4304 if (WARN(!hlock
->pin_count
, "unpinning an unpinned lock\n"))
4307 hlock
->pin_count
-= cookie
.val
;
4309 if (WARN((int)hlock
->pin_count
< 0, "pin count corrupted\n"))
4310 hlock
->pin_count
= 0;
4316 WARN(1, "unpinning an unheld lock\n");
4320 * Check whether we follow the irq-flags state precisely:
4322 static void check_flags(unsigned long flags
)
4324 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
4328 if (irqs_disabled_flags(flags
)) {
4329 if (DEBUG_LOCKS_WARN_ON(current
->hardirqs_enabled
)) {
4330 printk("possible reason: unannotated irqs-off.\n");
4333 if (DEBUG_LOCKS_WARN_ON(!current
->hardirqs_enabled
)) {
4334 printk("possible reason: unannotated irqs-on.\n");
4339 * We dont accurately track softirq state in e.g.
4340 * hardirq contexts (such as on 4KSTACKS), so only
4341 * check if not in hardirq contexts:
4343 if (!hardirq_count()) {
4344 if (softirq_count()) {
4345 /* like the above, but with softirqs */
4346 DEBUG_LOCKS_WARN_ON(current
->softirqs_enabled
);
4348 /* lick the above, does it taste good? */
4349 DEBUG_LOCKS_WARN_ON(!current
->softirqs_enabled
);
4354 print_irqtrace_events(current
);
4358 void lock_set_class(struct lockdep_map
*lock
, const char *name
,
4359 struct lock_class_key
*key
, unsigned int subclass
,
4362 unsigned long flags
;
4364 if (unlikely(current
->lockdep_recursion
))
4367 raw_local_irq_save(flags
);
4368 current
->lockdep_recursion
= 1;
4370 if (__lock_set_class(lock
, name
, key
, subclass
, ip
))
4371 check_chain_key(current
);
4372 current
->lockdep_recursion
= 0;
4373 raw_local_irq_restore(flags
);
4375 EXPORT_SYMBOL_GPL(lock_set_class
);
4377 void lock_downgrade(struct lockdep_map
*lock
, unsigned long ip
)
4379 unsigned long flags
;
4381 if (unlikely(current
->lockdep_recursion
))
4384 raw_local_irq_save(flags
);
4385 current
->lockdep_recursion
= 1;
4387 if (__lock_downgrade(lock
, ip
))
4388 check_chain_key(current
);
4389 current
->lockdep_recursion
= 0;
4390 raw_local_irq_restore(flags
);
4392 EXPORT_SYMBOL_GPL(lock_downgrade
);
/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
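
/*
 * Illustrative annotation pattern (a sketch, not a new API): the locking
 * primitives wrap the real acquire/release in lock_acquire()/lock_release()
 * calls, roughly the way the spin_lock() wrappers do:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *	do_raw_spin_lock(lock);
 *	...
 *	lock_release(&lock->dep_map, 0, _RET_IP_);
 *	do_raw_spin_unlock(lock);
 *
 * The exact argument values differ per lock type (trylock/read/check);
 * see the spin_acquire()/LOCK_CONTENDED() macros for the real call sites.
 */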
4419 void lock_release(struct lockdep_map
*lock
, int nested
,
4422 unsigned long flags
;
4424 if (unlikely(current
->lockdep_recursion
))
4427 raw_local_irq_save(flags
);
4429 current
->lockdep_recursion
= 1;
4430 trace_lock_release(lock
, ip
);
4431 if (__lock_release(lock
, ip
))
4432 check_chain_key(current
);
4433 current
->lockdep_recursion
= 0;
4434 raw_local_irq_restore(flags
);
4436 EXPORT_SYMBOL_GPL(lock_release
);
4438 int lock_is_held_type(const struct lockdep_map
*lock
, int read
)
4440 unsigned long flags
;
4443 if (unlikely(current
->lockdep_recursion
))
4444 return 1; /* avoid false negative lockdep_assert_held() */
4446 raw_local_irq_save(flags
);
4449 current
->lockdep_recursion
= 1;
4450 ret
= __lock_is_held(lock
, read
);
4451 current
->lockdep_recursion
= 0;
4452 raw_local_irq_restore(flags
);
4456 EXPORT_SYMBOL_GPL(lock_is_held_type
);
4457 NOKPROBE_SYMBOL(lock_is_held_type
);
struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return cookie;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	cookie = __lock_pin_lock(lock);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);

	return cookie;
}
EXPORT_SYMBOL_GPL(lock_pin_lock);

void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_repin_lock(lock, cookie);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);

void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_unpin_lock(lock, cookie);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
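
/*
 * Pinning sketch (illustrative only; the scheduler's runqueue lock is the
 * main real user): a pin cookie lets a subsystem assert that a lock it
 * handed to a callback was not dropped behind its back:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...call into code that must not release rq->lock...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *
 * An unbalanced release in between trips the "releasing a pinned lock"
 * warning in __lock_release().
 */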
4513 #ifdef CONFIG_LOCK_STAT
4514 static void print_lock_contention_bug(struct task_struct
*curr
,
4515 struct lockdep_map
*lock
,
4518 if (!debug_locks_off())
4520 if (debug_locks_silent
)
4524 pr_warn("=================================\n");
4525 pr_warn("WARNING: bad contention detected!\n");
4526 print_kernel_ident();
4527 pr_warn("---------------------------------\n");
4528 pr_warn("%s/%d is trying to contend lock (",
4529 curr
->comm
, task_pid_nr(curr
));
4530 print_lockdep_cache(lock
);
4533 pr_warn("but there are no locks held!\n");
4534 pr_warn("\nother info that might help us debug this:\n");
4535 lockdep_print_held_locks(curr
);
4537 pr_warn("\nstack backtrace:\n");
4542 __lock_contended(struct lockdep_map
*lock
, unsigned long ip
)
4544 struct task_struct
*curr
= current
;
4545 struct held_lock
*hlock
;
4546 struct lock_class_stats
*stats
;
4548 int i
, contention_point
, contending_point
;
4550 depth
= curr
->lockdep_depth
;
4552 * Whee, we contended on this lock, except it seems we're not
4553 * actually trying to acquire anything much at all..
4555 if (DEBUG_LOCKS_WARN_ON(!depth
))
4558 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4560 print_lock_contention_bug(curr
, lock
, ip
);
4564 if (hlock
->instance
!= lock
)
4567 hlock
->waittime_stamp
= lockstat_clock();
4569 contention_point
= lock_point(hlock_class(hlock
)->contention_point
, ip
);
4570 contending_point
= lock_point(hlock_class(hlock
)->contending_point
,
4573 stats
= get_lock_stats(hlock_class(hlock
));
4574 if (contention_point
< LOCKSTAT_POINTS
)
4575 stats
->contention_point
[contention_point
]++;
4576 if (contending_point
< LOCKSTAT_POINTS
)
4577 stats
->contending_point
[contending_point
]++;
4578 if (lock
->cpu
!= smp_processor_id())
4579 stats
->bounces
[bounce_contended
+ !!hlock
->read
]++;
4583 __lock_acquired(struct lockdep_map
*lock
, unsigned long ip
)
4585 struct task_struct
*curr
= current
;
4586 struct held_lock
*hlock
;
4587 struct lock_class_stats
*stats
;
4589 u64 now
, waittime
= 0;
4592 depth
= curr
->lockdep_depth
;
4594 * Yay, we acquired ownership of this lock we didn't try to
4595 * acquire, how the heck did that happen?
4597 if (DEBUG_LOCKS_WARN_ON(!depth
))
4600 hlock
= find_held_lock(curr
, lock
, depth
, &i
);
4602 print_lock_contention_bug(curr
, lock
, _RET_IP_
);
4606 if (hlock
->instance
!= lock
)
4609 cpu
= smp_processor_id();
4610 if (hlock
->waittime_stamp
) {
4611 now
= lockstat_clock();
4612 waittime
= now
- hlock
->waittime_stamp
;
4613 hlock
->holdtime_stamp
= now
;
4616 trace_lock_acquired(lock
, ip
);
4618 stats
= get_lock_stats(hlock_class(hlock
));
4621 lock_time_inc(&stats
->read_waittime
, waittime
);
4623 lock_time_inc(&stats
->write_waittime
, waittime
);
4625 if (lock
->cpu
!= cpu
)
4626 stats
->bounces
[bounce_acquired
+ !!hlock
->read
]++;
4632 void lock_contended(struct lockdep_map
*lock
, unsigned long ip
)
4634 unsigned long flags
;
4636 if (unlikely(!lock_stat
|| !debug_locks
))
4639 if (unlikely(current
->lockdep_recursion
))
4642 raw_local_irq_save(flags
);
4644 current
->lockdep_recursion
= 1;
4645 trace_lock_contended(lock
, ip
);
4646 __lock_contended(lock
, ip
);
4647 current
->lockdep_recursion
= 0;
4648 raw_local_irq_restore(flags
);
4650 EXPORT_SYMBOL_GPL(lock_contended
);
4652 void lock_acquired(struct lockdep_map
*lock
, unsigned long ip
)
4654 unsigned long flags
;
4656 if (unlikely(!lock_stat
|| !debug_locks
))
4659 if (unlikely(current
->lockdep_recursion
))
4662 raw_local_irq_save(flags
);
4664 current
->lockdep_recursion
= 1;
4665 __lock_acquired(lock
, ip
);
4666 current
->lockdep_recursion
= 0;
4667 raw_local_irq_restore(flags
);
4669 EXPORT_SYMBOL_GPL(lock_acquired
);
/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	lockdep_init_task(current);
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_HLIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}
4694 /* Remove a class from a lock chain. Must be called with the graph lock held. */
4695 static void remove_class_from_lock_chain(struct pending_free
*pf
,
4696 struct lock_chain
*chain
,
4697 struct lock_class
*class)
4699 #ifdef CONFIG_PROVE_LOCKING
4700 struct lock_chain
*new_chain
;
4704 for (i
= chain
->base
; i
< chain
->base
+ chain
->depth
; i
++) {
4705 if (chain_hlocks
[i
] != class - lock_classes
)
4707 /* The code below leaks one chain_hlock[] entry. */
4708 if (--chain
->depth
> 0) {
4709 memmove(&chain_hlocks
[i
], &chain_hlocks
[i
+ 1],
4710 (chain
->base
+ chain
->depth
- i
) *
4711 sizeof(chain_hlocks
[0]));
4714 * Each lock class occurs at most once in a lock chain so once
4715 * we found a match we can break out of this loop.
4719 /* Since the chain has not been modified, return. */
4723 chain_key
= INITIAL_CHAIN_KEY
;
4724 for (i
= chain
->base
; i
< chain
->base
+ chain
->depth
; i
++)
4725 chain_key
= iterate_chain_key(chain_key
, chain_hlocks
[i
]);
4726 if (chain
->depth
&& chain
->chain_key
== chain_key
)
4728 /* Overwrite the chain key for concurrent RCU readers. */
4729 WRITE_ONCE(chain
->chain_key
, chain_key
);
4731 * Note: calling hlist_del_rcu() from inside a
4732 * hlist_for_each_entry_rcu() loop is safe.
4734 hlist_del_rcu(&chain
->entry
);
4735 __set_bit(chain
- lock_chains
, pf
->lock_chains_being_freed
);
4736 if (chain
->depth
== 0)
4739 * If the modified lock chain matches an existing lock chain, drop
4740 * the modified lock chain.
4742 if (lookup_chain_cache(chain_key
))
4744 new_chain
= alloc_lock_chain();
4745 if (WARN_ON_ONCE(!new_chain
)) {
4749 *new_chain
= *chain
;
4750 hlist_add_head_rcu(&new_chain
->entry
, chainhashentry(chain_key
));
4754 /* Must be called with the graph lock held. */
4755 static void remove_class_from_lock_chains(struct pending_free
*pf
,
4756 struct lock_class
*class)
4758 struct lock_chain
*chain
;
4759 struct hlist_head
*head
;
4762 for (i
= 0; i
< ARRAY_SIZE(chainhash_table
); i
++) {
4763 head
= chainhash_table
+ i
;
4764 hlist_for_each_entry_rcu(chain
, head
, entry
) {
4765 remove_class_from_lock_chain(pf
, chain
, class);
/*
 * Remove all references to a lock class. The caller must hold the graph lock.
 */
static void zap_class(struct pending_free *pf, struct lock_class *class)
{
	struct lock_list *entry;
	int i;

	WARN_ON_ONCE(!class->key);

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		entry = list_entries + i;
		if (entry->class != class && entry->links_to != class)
			continue;
		__clear_bit(i, list_entries_in_use);
		list_del_rcu(&entry->entry);
	}
	if (list_empty(&class->locks_after) &&
	    list_empty(&class->locks_before)) {
		list_move_tail(&class->lock_entry, &pf->zapped);
		hlist_del_rcu(&class->hash_entry);
		WRITE_ONCE(class->key, NULL);
		WRITE_ONCE(class->name, NULL);
		__clear_bit(class - lock_classes, lock_classes_in_use);
	} else {
		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
			  class->name);
	}

	remove_class_from_lock_chains(pf, class);
}
static void reinit_class(struct lock_class *class)
{
	void *const p = class;
	const unsigned int offset = offsetof(struct lock_class, key);

	WARN_ON_ONCE(!class->lock_entry.next);
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	WARN_ON_ONCE(!list_empty(&class->locks_before));
	memset(p + offset, 0, sizeof(*class) - offset);
	WARN_ON_ONCE(!class->lock_entry.next);
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	WARN_ON_ONCE(!list_empty(&class->locks_before));
}
static inline int within(const void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}
static bool inside_selftest(void)
{
	return current == lockdep_selftest_task_struct;
}
/* The caller must hold the graph lock. */
static struct pending_free *get_pending_free(void)
{
	return delayed_free.pf + delayed_free.index;
}

static void free_zapped_rcu(struct rcu_head *cb);
/*
 * Schedule an RCU callback if no RCU callback is pending. Must be called with
 * the graph lock held.
 */
static void call_rcu_zapped(struct pending_free *pf)
{
	WARN_ON_ONCE(inside_selftest());

	if (list_empty(&pf->zapped))
		return;

	if (delayed_free.scheduled)
		return;

	delayed_free.scheduled = true;

	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
	delayed_free.index ^= 1;

	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}
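
/*
 * How the deferred freeing fits together: delayed_free.pf[] holds two
 * pending_free batches. delayed_free.index selects the "open" batch that
 * zap_class() is currently filling; the other element is the "closed" batch
 * that a previously scheduled RCU callback is draining. Flipping the index in
 * call_rcu_zapped() closes the open batch, and free_zapped_rcu() later frees
 * it (index ^ 1 from the callback's point of view). If more classes have been
 * zapped into the newly opened batch in the meantime, the callback schedules
 * itself again.
 */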
/* The caller must hold the graph lock. May be called from RCU context. */
static void __free_zapped_classes(struct pending_free *pf)
{
	struct lock_class *class;

	check_data_structures();

	list_for_each_entry(class, &pf->zapped, lock_entry)
		reinit_class(class);

	list_splice_init(&pf->zapped, &free_lock_classes);

#ifdef CONFIG_PROVE_LOCKING
	bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
		      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
	bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
#endif
}
static void free_zapped_rcu(struct rcu_head *ch)
{
	struct pending_free *pf;
	unsigned long flags;

	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
		return;

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	current->lockdep_recursion = 1;

	/* Free the classes and chains in the closed batch. */
	pf = delayed_free.pf + (delayed_free.index ^ 1);
	__free_zapped_classes(pf);
	delayed_free.scheduled = false;

	/*
	 * If there's anything on the open list, close and start a new callback.
	 */
	call_rcu_zapped(delayed_free.pf + delayed_free.index);

	current->lockdep_recursion = 0;
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
/*
 * Remove all lock classes from the class hash table and from the
 * all_lock_classes list whose key or name is in the address range [start,
 * start + size). Move these lock classes to the zapped_classes list. Must
 * be called with the graph lock held.
 */
static void __lockdep_free_key_range(struct pending_free *pf, void *start,
				     unsigned long size)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i;

	/* Unhash all classes that were created by a module. */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			if (!within(class->key, start, size) &&
			    !within(class->name, start, size))
				continue;
			zap_class(pf, class);
		}
	}
}
/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed; and possibly re-used by other modules.
 *
 * We will have had one synchronize_rcu() before getting here, so we're
 * guaranteed nobody will look up these exact classes -- they're properly dead
 * but still allocated.
 */
static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
	struct pending_free *pf;
	unsigned long flags;

	init_data_structures_once();

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	current->lockdep_recursion = 1;
	pf = get_pending_free();
	__lockdep_free_key_range(pf, start, size);
	call_rcu_zapped(pf);
	current->lockdep_recursion = 0;
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);

	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 */
	synchronize_rcu();
}
/*
 * Free all lockdep keys in the range [start, start+size). Does not sleep.
 * Ignores debug_locks. Must only be used by the lockdep selftests.
 */
static void lockdep_free_key_range_imm(void *start, unsigned long size)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	init_data_structures_once();

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	__lockdep_free_key_range(pf, start, size);
	__free_zapped_classes(pf);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
void lockdep_free_key_range(void *start, unsigned long size)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_free_key_range_imm(start, size);
	else
		lockdep_free_key_range_reg(start, size);
}
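
/*
 * Typical (non-selftest) caller, sketched from the module unloading path:
 * when a module's sections are about to be released, every lock class whose
 * key or name lives in that memory has to be zapped first. Roughly (the field
 * names are illustrative of that path, not prescribed by this API):
 *
 *	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
 */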
/*
 * Check whether any element of the @lock->class_cache[] array refers to a
 * registered lock class. The caller must hold either the graph lock or the
 * RCU read lock.
 */
static bool lock_class_cache_is_registered(struct lockdep_map *lock)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
				if (lock->class_cache[j] == class)
					return true;
		}
	}

	return false;
}
/* The caller must hold the graph lock. Does not sleep. */
static void __lockdep_reset_lock(struct pending_free *pf,
				 struct lockdep_map *lock)
{
	struct lock_class *class;
	int j;

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(pf, class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
		debug_locks_off();
}
/*
 * Remove all information lockdep has about a lock if debug_locks == 1. Free
 * released data structures from RCU context.
 */
static void lockdep_reset_lock_reg(struct lockdep_map *lock)
{
	struct pending_free *pf;
	unsigned long flags;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();
	if (!locked)
		goto out_irq;

	pf = get_pending_free();
	__lockdep_reset_lock(pf, lock);
	call_rcu_zapped(pf);

	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);
}
/*
 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
 * lockdep selftests.
 */
static void lockdep_reset_lock_imm(struct lockdep_map *lock)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	__lockdep_reset_lock(pf, lock);
	__free_zapped_classes(pf);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
void lockdep_reset_lock(struct lockdep_map *lock)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_reset_lock_imm(lock);
	else
		lockdep_reset_lock_reg(lock);
}
/* Unregister a dynamically allocated key. */
void lockdep_unregister_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head = keyhashentry(key);
	struct lock_class_key *k;
	struct pending_free *pf;
	unsigned long flags;
	bool found = false;

	might_sleep();

	if (WARN_ON_ONCE(static_obj(key)))
		return;

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto out_irq;

	pf = get_pending_free();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			hlist_del_rcu(&k->hash_entry);
			found = true;
			break;
		}
	}
	WARN_ON_ONCE(!found);
	__lockdep_free_key_range(pf, key, 1);
	call_rcu_zapped(pf);
	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);

	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(lockdep_unregister_key);
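
/*
 * Sketch of the intended pairing for dynamically allocated keys (the struct
 * and function names below are made up for illustration; only the lockdep
 * calls are real):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		struct lock_class_key key;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		lockdep_register_key(&f->key);
 *		spin_lock_init(&f->lock);
 *		lockdep_set_class(&f->lock, &f->key);
 *	}
 *
 *	static void foo_destroy(struct foo *f)
 *	{
 *		lockdep_unregister_key(&f->key);
 *		kfree(f);
 *	}
 */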
void __init lockdep_init(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %zu kB\n",
	       (sizeof(lock_classes) +
		sizeof(lock_classes_in_use) +
		sizeof(classhash_table) +
		sizeof(list_entries) +
		sizeof(list_entries_in_use) +
		sizeof(chainhash_table) +
		sizeof(delayed_free)
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(lock_chains)
		+ sizeof(lock_chains_in_use)
		+ sizeof(chain_hlocks)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %zu bytes\n",
	       sizeof(((struct task_struct *)NULL)->held_locks));
}
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=========================\n");
	pr_warn("WARNING: held lock freed!\n");
	print_kernel_ident();
	pr_warn("-------------------------\n");
	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}
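
/*
 * In other words: the half-open ranges [mem_from, mem_from + mem_len) and
 * [lock_from, lock_from + lock_len) do not overlap iff one of them ends at or
 * before the start of the other. E.g. freeing 128 bytes at 0x1000 does not
 * overlap a lock at 0x1080, but it does overlap a lock at 0x107f.
 */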
/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	raw_local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
				 sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
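
/*
 * The memory allocators are the main callers of the check above: the page and
 * slab free paths pass in the region being released, roughly:
 *
 *	debug_check_no_locks_freed(ptr, size);
 *	// ... then actually hand the memory back ...
 *
 * Note that only locks held by the *current* task are scanned, which keeps
 * the check cheap but means it cannot catch a lock held by another task.
 */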
static void print_held_locks_bug(void)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("====================================\n");
	pr_warn("WARNING: %s/%d still has locks held!\n",
	       current->comm, task_pid_nr(current));
	print_kernel_ident();
	pr_warn("------------------------------------\n");
	lockdep_print_held_locks(current);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

void debug_check_no_locks_held(void)
{
	if (unlikely(current->lockdep_depth > 0))
		print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
void debug_show_all_locks(void)
{
	struct task_struct *g, *p;

	if (unlikely(!debug_locks)) {
		pr_warn("INFO: lockdep is turned off.\n");
		return;
	}
	pr_warn("\nShowing all locks held in the system:\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (!p->lockdep_depth)
			continue;
		lockdep_print_held_locks(p);
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
	}
	rcu_read_unlock();

	pr_warn("\n");
	pr_warn("=============================================\n\n");
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
asmlinkage __visible void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		pr_warn("\n");
		pr_warn("================================================\n");
		pr_warn("WARNING: lock held when returning to user space!\n");
		print_kernel_ident();
		pr_warn("------------------------------------------------\n");
		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
				curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}

	/*
	 * The lock history for each syscall should be independent. So wipe the
	 * slate clean on return to userspace.
	 */
	lockdep_invariant_state(false);
}
void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
	struct task_struct *curr = current;

	/* Note: the following can be executed concurrently, so be careful. */
	pr_warn("\n");
	pr_warn("=============================\n");
	pr_warn("WARNING: suspicious RCU usage\n");
	print_kernel_ident();
	pr_warn("-----------------------------\n");
	pr_warn("%s:%d %s!\n", file, line, s);
	pr_warn("\nother info that might help us debug this:\n\n");
	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
	       !rcu_lockdep_current_cpu_online()
			? "RCU used illegally from offline CPU!\n"
			: !rcu_is_watching()
				? "RCU used illegally from idle CPU!\n"
				: "",
	       rcu_scheduler_active, debug_locks);

	/*
	 * If a CPU is in the RCU-free window in idle (i.e. in the section
	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
	 * effect on a CPU running in that state. In other words, even if
	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
	 * delete data structures out from under it. RCU really has no
	 * choice here: we need to keep an RCU-free window in idle where
	 * the CPU may possibly enter into low power mode. This way other
	 * CPUs that have started a grace period can notice the extended
	 * quiescent state; otherwise we would delay every grace period
	 * for as long as we run in the idle task.
	 *
	 * So complain bitterly if someone does call rcu_read_lock(),
	 * rcu_read_lock_bh() and so on from extended quiescent states.
	 */
	if (!rcu_is_watching())
		pr_warn("RCU used illegally from extended quiescent state!\n");

	lockdep_print_held_locks(curr);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
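
/*
 * A sketch of the kind of usage that ends up in lockdep_rcu_suspicious():
 * dereferencing an RCU-protected pointer without being inside a reader
 * section (or without the protection named in the _check() variant). The
 * pointer below is illustrative; the RCU_LOCKDEP_WARN()-based checks inside
 * rcu_dereference() are what actually call into this function.
 *
 *	struct conf *c;
 *
 *	c = rcu_dereference(global_conf);	// missing rcu_read_lock() ->
 *						// "suspicious RCU usage" splat
 *
 *	rcu_read_lock();
 *	c = rcu_dereference(global_conf);	// fine
 *	rcu_read_unlock();
 */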