/*	$NetBSD: kern_timeout.c,v 1.43 2008/10/10 11:42:58 ad Exp $	*/
/*-
 * Copyright (c) 2003, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
 * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.43 2008/10/10 11:42:58 ad Exp $");
/*
 * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
 * value of c_cpu->cc_ticks when the timeout should be called.  There are
 * four levels with 256 buckets each.  See 'Scheme 7' in "Hashed and
 * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
 * a Timer Facility" by George Varghese and Tony Lauck.
 *
 * Some of the "math" in here is a bit tricky.  We have to beware of
 * wrapping ints.
 *
 * We use the fact that any element added to the queue must be added with
 * a positive time.  That means that any element `to' on the queue cannot
 * be scheduled to timeout further in time than INT_MAX, but c->c_time can
 * be positive or negative so comparing it with anything is dangerous.
 * The only way we can use the c->c_time value in any predictable way is
 * when we calculate how far in the future `to' will timeout - "c->c_time
 * - c->c_cpu->cc_ticks".  The result will always be positive for future
 * timeouts and 0 or negative for due timeouts.
 */
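/*
 * Worked example of the wrap-around arithmetic: with 32-bit ticks, if
 * cc_ticks has wrapped to -5 and a callout is scheduled 10 ticks out,
 * its c_time becomes 5.  Comparing c_time against "now" directly gives
 * the wrong answer, but the difference c_time - cc_ticks = 5 - (-5) = 10
 * is still the correct distance, and that difference is the only form of
 * comparison used below.
 */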
#define _CALLOUT_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_output.h>
#endif
#define BUCKETS		1024
#define WHEELSIZE	256
#define WHEELMASK	255
#define WHEELBITS	8

#define MASKWHEEL(wheel, time)	(((time) >> ((wheel)*WHEELBITS)) & WHEELMASK)
#define BUCKET(cc, rel, abs)						\
    (((rel) <= (1 << (2*WHEELBITS)))					\
	? ((rel) <= (1 << WHEELBITS))					\
	    ? &(cc)->cc_wheel[MASKWHEEL(0, (abs))]			\
	    : &(cc)->cc_wheel[MASKWHEEL(1, (abs)) + WHEELSIZE]		\
	: ((rel) <= (1 << (3*WHEELBITS)))				\
	    ? &(cc)->cc_wheel[MASKWHEEL(2, (abs)) + 2*WHEELSIZE]	\
	    : &(cc)->cc_wheel[MASKWHEEL(3, (abs)) + 3*WHEELSIZE])
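/*
 * Illustration of BUCKET(): the relative time selects the wheel level and
 * the absolute expiry time selects the slot within that level.  A timeout
 * 10 ticks out lands on wheel 0 at slot (abs & 255); one 300 ticks out on
 * wheel 1 at slot ((abs >> 8) & 255); one 70000 ticks out on wheel 2; and
 * anything more than 2^24 ticks away on wheel 3.
 */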
#define MOVEBUCKET(cc, wheel, time)					\
    CIRCQ_APPEND(&(cc)->cc_todo,					\
	&(cc)->cc_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE])
/*
 * Circular queue definitions.
 */
#define CIRCQ_INIT(list)						\
do {									\
	(list)->cq_next_l = (list);					\
	(list)->cq_prev_l = (list);					\
} while (/*CONSTCOND*/0)
#define CIRCQ_INSERT(elem, list)					\
do {									\
	(elem)->cq_prev_e = (list)->cq_prev_e;				\
	(elem)->cq_next_l = (list);					\
	(list)->cq_prev_l->cq_next_l = (elem);				\
	(list)->cq_prev_l = (elem);					\
} while (/*CONSTCOND*/0)
#define CIRCQ_APPEND(fst, snd)						\
do {									\
	if (!CIRCQ_EMPTY(snd)) {					\
		(fst)->cq_prev_l->cq_next_l = (snd)->cq_next_l;		\
		(snd)->cq_next_l->cq_prev_l = (fst)->cq_prev_l;		\
		(snd)->cq_prev_l->cq_next_l = (fst);			\
		(fst)->cq_prev_l = (snd)->cq_prev_l;			\
		CIRCQ_INIT(snd);					\
	}								\
} while (/*CONSTCOND*/0)
#define CIRCQ_REMOVE(elem)						\
do {									\
	(elem)->cq_next_l->cq_prev_e = (elem)->cq_prev_e;		\
	(elem)->cq_prev_l->cq_next_e = (elem)->cq_next_e;		\
} while (/*CONSTCOND*/0)
#define CIRCQ_FIRST(list)	((list)->cq_next_e)
#define CIRCQ_NEXT(elem)	((elem)->cq_next_e)
#define CIRCQ_LAST(elem, list)	((elem)->cq_next_l == (list))
#define CIRCQ_EMPTY(list)	((list)->cq_next_l == (list))
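/*
 * A bucket is walked with CIRCQ_FIRST()/CIRCQ_NEXT(), stopping once
 * CIRCQ_LAST() reports the final element; db_show_callout_bucket() below
 * is the canonical example of that loop.
 */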
static void	callout_softclock(void *);
struct callout_cpu {
	kmutex_t	*cc_lock;
	sleepq_t	cc_sleepq;
	u_int		cc_nwait;
	u_int		cc_ticks;
	lwp_t		*cc_lwp;
	callout_impl_t	*cc_active;
	callout_impl_t	*cc_cancel;
	struct evcnt	cc_ev_late;
	struct evcnt	cc_ev_block;
	struct callout_circq cc_todo;		/* Worklist */
	struct callout_circq cc_wheel[BUCKETS];	/* Queues of timeouts */
	char		cc_name1[12];
	char		cc_name2[12];
};
static struct callout_cpu callout_cpu0;
static void *callout_sih;
static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
	struct callout_cpu *cc;
	kmutex_t *lock;

	for (;;) {
		cc = c->c_cpu;
		lock = cc->cc_lock;
		mutex_spin_enter(lock);
		if (__predict_true(cc == c->c_cpu))
			return lock;
		mutex_spin_exit(lock);
	}
}
/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 *	Do just enough to allow callouts to be safely registered.
 */
void
callout_startup(void)
{
	struct callout_cpu *cc;
	int b;

	KASSERT(curcpu()->ci_data.cpu_callout == NULL);

	cc = &callout_cpu0;
	cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	CIRCQ_INIT(&cc->cc_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&cc->cc_wheel[b]);
	curcpu()->ci_data.cpu_callout = cc;
}
/*
 * callout_init_cpu:
 *
 *	Per-CPU initialization.
 */
void
callout_init_cpu(struct cpu_info *ci)
{
	struct callout_cpu *cc;
	int b;

	CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

	if ((cc = ci->ci_data.cpu_callout) == NULL) {
		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
		if (cc == NULL)
			panic("callout_init_cpu (1)");
		cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		CIRCQ_INIT(&cc->cc_todo);
		for (b = 0; b < BUCKETS; b++)
			CIRCQ_INIT(&cc->cc_wheel[b]);
	} else {
		/* Boot CPU, one time only. */
		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
		    callout_softclock, NULL);
		if (callout_sih == NULL)
			panic("callout_init_cpu (2)");
	}

	sleepq_init(&cc->cc_sleepq);

	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name1);

	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name2);

	ci->ci_data.cpu_callout = cc;
}
/*
 * callout_init:
 *
 *	Initialize a callout structure.  This must be quick, so we fill
 *	only the minimum number of fields.
 */
void
callout_init(callout_t *cs, u_int flags)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;

	KASSERT((flags & ~CALLOUT_FLAGMASK) == 0);

	cc = curcpu()->ci_data.cpu_callout;
	c->c_func = NULL;
	c->c_magic = CALLOUT_MAGIC;
	if (__predict_true((flags & CALLOUT_MPSAFE) != 0 && cc != NULL)) {
		c->c_flags = flags;
		c->c_cpu = cc;
		return;
	}
	c->c_flags = flags | CALLOUT_BOUND;
	c->c_cpu = &callout_cpu0;
}
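/*
 * Illustrative usage (not from this file): a typical consumer does
 *
 *	callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
 *	callout_setfunc(&sc->sc_tick, foo_tick, sc);
 *	callout_schedule(&sc->sc_tick, hz);
 *
 * and tears down with callout_halt() followed by callout_destroy().
 * Here "sc", "sc_tick" and "foo_tick" are placeholder names.
 */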
/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	c->c_magic = 0;
}
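/*
 * Note: because callout_destroy() clears c_magic, any later use of the
 * destroyed callout trips the CALLOUT_MAGIC assertions in this file
 * rather than silently corrupting a wheel bucket.
 */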
/*
 * callout_schedule_locked:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.  Must be called with
 *	the callout lock held.
 */
static void
callout_schedule_locked(callout_impl_t *c, kmutex_t *lock, int to_ticks)
{
	struct callout_cpu *cc, *occ;
	int old_time;

	KASSERT(to_ticks >= 0);
	KASSERT(c->c_func != NULL);

	/* Initialize the time here, it won't change. */
	occ = c->c_cpu;
	c->c_flags &= ~(CALLOUT_FIRED | CALLOUT_INVOKING);

	/*
	 * If this timeout is already scheduled and now is moved
	 * earlier, reschedule it now.  Otherwise leave it in place
	 * and let it be rescheduled later.
	 */
	if ((c->c_flags & CALLOUT_PENDING) != 0) {
		/* Leave on existing CPU. */
		old_time = c->c_time;
		c->c_time = to_ticks + occ->cc_ticks;
		if (c->c_time - old_time < 0) {
			CIRCQ_REMOVE(&c->c_list);
			CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
		}
		mutex_spin_exit(lock);
		return;
	}

	cc = curcpu()->ci_data.cpu_callout;
	if ((c->c_flags & CALLOUT_BOUND) != 0 || cc == occ ||
	    !mutex_tryenter(cc->cc_lock)) {
		/* Leave on existing CPU. */
		c->c_time = to_ticks + occ->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &occ->cc_todo);
	} else {
		/* Move to this CPU. */
		c->c_cpu = cc;
		c->c_time = to_ticks + cc->cc_ticks;
		c->c_flags |= CALLOUT_PENDING;
		CIRCQ_INSERT(&c->c_list, &cc->cc_todo);
		mutex_spin_exit(cc->cc_lock);
	}
	mutex_spin_exit(lock);
}
/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	callout_schedule_locked(c, lock, to_ticks);
}
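/*
 * callout_reset() is equivalent to callout_setfunc() followed by
 * callout_schedule(), but takes the callout lock only once.
 */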
/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	callout_schedule_locked(c, lock, to_ticks);
}
/*
 * callout_stop:
 *
 *	Try to cancel a pending callout.  It may be too late: the callout
 *	could be running on another CPU.  If called from interrupt context,
 *	the callout could already be in progress at a lower priority.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	cc = c->c_cpu;
	if (cc->cc_active == c) {
		/*
		 * This is for non-MPSAFE callouts only.  To synchronize
		 * effectively we must be called with kernel_lock held.
		 * It's also taken in callout_softclock.
		 */
		cc->cc_cancel = c;
	}

	mutex_spin_exit(lock);

	return expired;
}
/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 *	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	lwp_t *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}
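/*
 * Illustrative interlock use (not from this file): a driver whose handler
 * takes sc->sc_lock can hold that lock across the halt without deadlocking:
 *
 *	mutex_enter(&sc->sc_lock);
 *	...
 *	callout_halt(&sc->sc_tick, &sc->sc_lock);
 *	...
 *	mutex_exit(&sc->sc_lock);
 *
 * The interlock is dropped and re-acquired here only if the handler is
 * actually in flight; "sc" and "sc_tick" are placeholder names.
 */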
/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue memory barrier to prevent accesses being
		 * reordered.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}
bool
callout_expired(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_FIRED) != 0);
	mutex_spin_exit(lock);

	return rv;
}
bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}
bool
callout_pending(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_PENDING) != 0);
	mutex_spin_exit(lock);

	return rv;
}
bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}
void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}
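/*
 * CALLOUT_INVOKING is set by callout_softclock() just before the handler
 * runs; consumers that defer the real work to a thread can use
 * callout_invoking()/callout_ack() to tell a genuine expiry apart from a
 * reschedule, as described in callout(9).
 */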
/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(cc->cc_lock);

	ticks = ++cc->cc_ticks;

	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}
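/*
 * Worked example of the cascade above: every tick the current wheel-0
 * bucket is dumped onto cc_todo; every 256 ticks (low 8 bits of the tick
 * count zero) a wheel-1 bucket is dumped as well, every 65536 ticks a
 * wheel-2 bucket, and every 2^24 ticks a wheel-3 bucket.  Timeouts that
 * are still in the future are re-sorted into a lower wheel by
 * callout_softclock().
 */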
/*
 * callout_softclock:
 *
 *	Soft interrupt handler, scheduled above if there is work to
 *	be done.  Callouts are made in soft interrupt context.
 */
static void
callout_softclock(void *v)
{
	callout_impl_t *c;
	struct callout_cpu *cc;
	void (*func)(void *);
	void *arg;
	int mpsafe, count, ticks, delta;
	lwp_t *l;

	l = curlwp;
	KASSERT(l->l_cpu == curcpu());
	cc = l->l_cpu->ci_data.cpu_callout;

	mutex_spin_enter(cc->cc_lock);
	cc->cc_lwp = l;
	while (!CIRCQ_EMPTY(&cc->cc_todo)) {
		c = CIRCQ_FIRST(&cc->cc_todo);
		KASSERT(c->c_magic == CALLOUT_MAGIC);
		KASSERT(c->c_func != NULL);
		KASSERT(c->c_cpu == cc);
		KASSERT((c->c_flags & CALLOUT_PENDING) != 0);
		KASSERT((c->c_flags & CALLOUT_FIRED) == 0);
		CIRCQ_REMOVE(&c->c_list);

		/* If due run it, otherwise insert it into the right bucket. */
		ticks = cc->cc_ticks;
		delta = c->c_time - ticks;
		if (delta > 0) {
			CIRCQ_INSERT(&c->c_list, BUCKET(cc, delta, c->c_time));
			continue;
		}
		if (delta < 0)
			cc->cc_ev_late.ev_count++;

		c->c_flags = (c->c_flags & ~CALLOUT_PENDING) |
		    (CALLOUT_FIRED | CALLOUT_INVOKING);
		mpsafe = (c->c_flags & CALLOUT_MPSAFE);
		func = c->c_func;
		arg = c->c_arg;
		cc->cc_active = c;

		mutex_spin_exit(cc->cc_lock);
		KASSERT(func != NULL);
		if (__predict_false(!mpsafe)) {
			KERNEL_LOCK(1, NULL);
			(*func)(arg);
			KERNEL_UNLOCK_ONE(NULL);
		} else
			(*func)(arg);
		mutex_spin_enter(cc->cc_lock);

		/*
		 * We can't touch 'c' here because it might be
		 * freed already.  If LWPs waiting for callout
		 * to complete, awaken them.
		 */
		cc->cc_active = NULL;
		if ((count = cc->cc_nwait) != 0) {
			cc->cc_nwait = 0;
			/* sleepq_wake() drops the lock. */
			sleepq_wake(&cc->cc_sleepq, cc, count, cc->cc_lock);
			mutex_spin_enter(cc->cc_lock);
		}
	}
	cc->cc_lwp = NULL;
	mutex_spin_exit(cc->cc_lock);
}
#ifdef DDB
static void
db_show_callout_bucket(struct callout_cpu *cc, struct callout_circq *bucket)
{
	callout_impl_t *c;
	db_expr_t offset;
	const char *name;
	static char question[] = "?";
	int b;

	if (CIRCQ_EMPTY(bucket))
		return;

	for (c = CIRCQ_FIRST(bucket); /*nothing*/; c = CIRCQ_NEXT(&c->c_list)) {
		db_find_sym_and_offset((db_addr_t)(intptr_t)c->c_func, &name,
		    &offset);
		name = name ? name : question;
		b = (bucket - cc->cc_wheel);
		if (b < 0)
			b = -WHEELSIZE;
		db_printf("%9d %2d/%-4d %16lx %s\n",
		    c->c_time - cc->cc_ticks, b / WHEELSIZE, b,
		    (u_long)c->c_arg, name);
		if (CIRCQ_LAST(&c->c_list, bucket))
			break;
	}
}
void
db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
	CPU_INFO_ITERATOR cii;
	struct callout_cpu *cc;
	struct cpu_info *ci;
	int b;

	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
	db_printf("    ticks  wheel               arg  func\n");

	/*
	 * Don't lock the callwheel; all the other CPUs are paused
	 * anyhow, and we might be called in a circumstance where
	 * some other CPU was paused while holding the lock.
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cc = ci->ci_data.cpu_callout;
		db_show_callout_bucket(cc, &cc->cc_todo);
	}
	for (b = 0; b < BUCKETS; b++) {
		for (CPU_INFO_FOREACH(cii, ci)) {
			cc = ci->ci_data.cpu_callout;
			db_show_callout_bucket(cc, &cc->cc_wheel[b]);
		}
	}
}
#endif /* DDB */