/*	$NetBSD: subr_lockdebug.c,v 1.40 2009/10/05 23:39:27 rmind Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.40 2009/10/05 23:39:27 rmind Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rb.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, int, const char *,
			 const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);

static signed int
ld_rbto_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rbto_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
};

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p, from=%08"PRIxPTR")", lock, where);
	return ld;
}
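
/*
 * The lookup/update pattern used by the functions below is always the
 * same: raise the IPL, look the lock record up (it is returned with
 * ld_spinlock held), inspect or update it, then drop ld_spinlock and
 * restore the IPL:
 *
 *	s = splhigh();
 *	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
 *		splx(s);
 *		return;
 *	}
 *	... examine or update *ld ...
 *	__cpu_simple_unlock(&ld->ld_spinlock);
 *	splx(s);
 */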

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("lockdebug_alloc: corrupt table");
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("lockdebug_free: destroying uninitialized object %p"
		    "(ld_lock=%p)", lock, ld->ld_lock);
		lockdebug_abort1(ld, s, __func__, "record follows", true);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}
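
/*
 * For illustration only: a lock primitive is expected to register itself
 * here from its init path and unregister from its destroy path.  A
 * hypothetical "foolock" (the foolock_* names are not part of this file)
 * would do roughly:
 *
 *	static lockops_t foolock_lockops = {
 *		.lo_name = "foolock",
 *		.lo_type = LOCKOPS_SLEEP,
 *		.lo_dump = foolock_dump,
 *	};
 *
 *	void
 *	foolock_init(struct foolock *fl)
 *	{
 *		...
 *		(void)lockdebug_alloc(fl, &foolock_lockops,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 *
 *	void
 *	foolock_destroy(struct foolock *fl)
 *	{
 *		lockdebug_free(fl);
 *		...
 *	}
 */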

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
    bool trylock)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l && !(shared && trylock))
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, s, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(ld, s, __func__, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
    int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(ld, s, __func__, "multiple locks used"
			    " with condition variable", true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, s, __func__, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l)
			ld->ld_lwp = NULL;
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, s, __func__, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current LWP", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current CPU", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
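
/*
 * Taken together, lockdebug_wantlock(), lockdebug_locked() and
 * lockdebug_unlocked() bracket a primitive's acquire and release paths.
 * Sketch for a hypothetical exclusive lock (illustrative names only):
 *
 *	void
 *	foolock_enter(struct foolock *fl)
 *	{
 *		uintptr_t where = (uintptr_t)__builtin_return_address(0);
 *
 *		lockdebug_wantlock(fl, where, false, false);
 *		... spin or sleep until the lock is acquired ...
 *		lockdebug_locked(fl, NULL, where, 0);
 *	}
 *
 *	void
 *	foolock_exit(struct foolock *fl)
 *	{
 *		lockdebug_unlocked(fl,
 *		    (uintptr_t)__builtin_return_address(0), 0);
 *		... release the lock ...
 *	}
 */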

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
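
/*
 * That is, the interlock recorded for the CV by lockdebug_locked() must
 * still be held at wakeup time.  Illustrative caller (the sc_* names are
 * hypothetical); under LOCKDEBUG the cv_broadcast() call is what leads
 * here:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_done = true;
 *	cv_broadcast(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */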

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified and,
 *	optionally, if we hold sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(ld, s, __func__,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		panic("lockdebug_barrier: holding %d shared locks",
		    l->l_shlocks);
	}
}
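
/*
 * Callers use lockdebug_barrier() as an assertion before blocking or
 * context switching, naming the one spin lock they may still hold (or
 * NULL for none) and whether sleep locks are acceptable.  For example
 * (illustrative only):
 *
 *	lockdebug_barrier(NULL, 0);		may hold no locks at all
 *	lockdebug_barrier(&sc->sc_spinlock, 1);	may hold sc_spinlock plus
 *						sleep locks
 */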

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, func,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
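
/*
 * Allocators call this from their free path so that freeing memory that
 * still contains an initialized lock is caught.  Illustrative sketch:
 *
 *	lockdebug_mem_check(__func__, obj, sizeof(*obj));
 *	... actually release the memory backing obj ...
 */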

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked  : %#018lx unlocked : %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    (long)ld->ld_locked, (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */
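
/*
 * Example DDB usage (the address shown is illustrative):
 *
 *	db> show lock 0xffff800012345678
 *
 * If lockdebug_lock_print() is given a NULL address, every lockdebug
 * record currently in use is dumped instead.
 */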

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
    const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(ld, s, func, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}