/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif
/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit. */
struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void *vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
static void     intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int      intr_event_schedule_thread(struct intr_event *ie,
                    struct intr_thread *ithd);
static int      intr_filter_loop(struct intr_event *ie,
                    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
                    struct intr_handler *ih);
#else
static int      intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void     ithread_destroy(struct intr_thread *ithread);
static void     ithread_execute_handlers(struct proc *p,
                    struct intr_event *ie);
#ifdef INTR_FILTER
static void     priv_ithread_execute_handler(struct proc *p,
                    struct intr_handler *ih);
#endif
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
static void     start_softintr(void *);
/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTYLOW;
                break;
        case INTR_TYPE_BIO:
                /*
                 * XXX We need to refine this.  BSD/OS distinguishes
                 * between tape and disk priorities.
                 */
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;          /* XXX or PI_CAM? */
                break;
        case INTR_TYPE_AV:              /* Audio/video */
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}
/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;

        /* Determine the overall priority of this event. */
        if (TAILQ_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority. */
        strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
        thread_lock(td);
        sched_prio(td, pri);
        thread_unlock(td);
}
/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        ie->ie_flags &= ~IE_ENTROPY;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                if (ih->ih_flags & IH_ENTROPY)
                        ie->ie_flags |= IE_ENTROPY;
        }

        /*
         * If the handler names were too long, add +'s to indicate missing
         * names.  If we run out of room and still have +'s to add, change
         * the last character from a + to a *.
         */
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and
         * name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
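/*
 * Worked example (handler names are hypothetical): an event named
 * "irq10:" with handlers "em0" and "uhci0" attached gets the fullname
 * "irq10: em0 uhci0".  Handler names that no longer fit in ie_fullname
 * show up as trailing "+" marks, and the last "+" collapses to "*" once
 * even those run out of room.
 */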
int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_pre_ithread = pre_ithread;
        ie->ie_post_ithread = post_ithread;
        ie->ie_post_filter = post_filter;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_flags = flags;
        ie->ie_irq = irq;
        ie->ie_cpu = NOCPU;
        TAILQ_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_lock(&event_lock);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_unlock(&event_lock);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
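/*
 * Usage sketch (hypothetical; the real callers live in machine-dependent
 * interrupt code and differ per platform): MD code typically creates one
 * event per IRQ line, passing its interrupt-controller operations as the
 * pre/post hooks:
 *
 *      error = intr_event_create(&isrc->is_event, isrc, 0, irq,
 *          my_pre_ithread, my_post_ithread, my_post_filter,
 *          my_assign_cpu, "irq%d:", irq);
 *
 * where the "my_*" callbacks are placeholders for the platform's mask,
 * unmask, and EOI routines.
 */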
/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
        cpuset_t mask;
        lwpid_t id;
        int error;

        /* Need a CPU to bind to. */
        if (cpu != NOCPU && CPU_ABSENT(cpu))
                return (EINVAL);

        if (ie->ie_assign_cpu == NULL)
                return (EOPNOTSUPP);
        /*
         * If we have any ithreads try to set their mask first since this
         * can fail.
         */
        mtx_lock(&ie->ie_lock);
        if (ie->ie_thread != NULL) {
                CPU_ZERO(&mask);
                if (cpu == NOCPU)
                        CPU_COPY(cpuset_root, &mask);
                else
                        CPU_SET(cpu, &mask);
                id = ie->ie_thread->it_thread->td_tid;
                mtx_unlock(&ie->ie_lock);
                error = cpuset_setthread(id, &mask);
                if (error)
                        return (error);
        } else
                mtx_unlock(&ie->ie_lock);
        error = ie->ie_assign_cpu(ie->ie_source, cpu);
        if (error)
                return (error);
        mtx_lock(&ie->ie_lock);
        ie->ie_cpu = cpu;
        mtx_unlock(&ie->ie_lock);

        return (error);
}
static struct intr_event *
intr_lookup(int irq)
{
        struct intr_event *ie;

        mtx_lock(&event_lock);
        TAILQ_FOREACH(ie, &event_list, ie_list)
                if (ie->ie_irq == irq &&
                    (ie->ie_flags & IE_SOFT) == 0 &&
                    TAILQ_FIRST(&ie->ie_handlers) != NULL)
                        break;
        mtx_unlock(&event_lock);
        return (ie);
}
int
intr_setaffinity(int irq, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;
        u_char cpu;
        int n;

        mask = m;
        cpu = NOCPU;
        /*
         * If we're setting all cpus we can unbind.  Otherwise make sure
         * only one cpu is in the set.
         */
        if (CPU_CMP(cpuset_root, mask)) {
                for (n = 0; n < CPU_SETSIZE; n++) {
                        if (!CPU_ISSET(n, mask))
                                continue;
                        if (cpu != NOCPU)
                                return (EINVAL);
                        cpu = (u_char)n;
                }
        }
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        intr_event_bind(ie, cpu);
        return (0);
}
int
intr_getaffinity(int irq, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;

        mask = m;
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        CPU_ZERO(mask);
        mtx_lock(&ie->ie_lock);
        if (ie->ie_cpu == NOCPU)
                CPU_COPY(cpuset_root, mask);
        else
                CPU_SET(ie->ie_cpu, mask);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
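/*
 * Illustrative note (an assumption about the userland path, not stated
 * in this file): these two routines back the cpuset(2) affinity calls
 * for CPU_WHICH_IRQ, so binding irq 16 to CPU 2 from userland looks
 * roughly like:
 *
 *      cpuset -l 2 -x 16
 */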
int
intr_event_destroy(struct intr_event *ie)
{

        mtx_lock(&event_lock);
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                mtx_unlock(&event_lock);
                return (EBUSY);
        }
        TAILQ_REMOVE(&event_list, ie, ie_list);
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
        mtx_unlock(&ie->ie_lock);
        mtx_unlock(&event_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}
#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD,
            M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
            &td, RFSTOPPED | RFHIGHPID,
            0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD,
            M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ih, &intrproc,
            &td, RFSTOPPED | RFHIGHPID,
            0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#endif
static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__,
            ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        }
        thread_unlock(td);
}
#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD,
            M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD,
            M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* For filtered handlers, create a private ithread to run on. */
        if (filter != NULL && handler != NULL) {
                mtx_unlock(&ie->ie_lock);
                it = ithread_create("intr: newborn", ih);
                mtx_lock(&ie->ie_lock);
                it->it_event = ie;
                ih->ih_thread = it;
                ithread_update(it); // XXX - do we really need this?!?!?
        } else { /* Create the global per-event thread if we need one. */
                while (ie->ie_thread == NULL && handler != NULL) {
                        if (ie->ie_flags & IE_ADDING_THREAD)
                                msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                        else {
                                ie->ie_flags |= IE_ADDING_THREAD;
                                mtx_unlock(&ie->ie_lock);
                                it = ithread_create("intr: newborn", ih);
                                mtx_lock(&ie->ie_lock);
                                ie->ie_flags &= ~IE_ADDING_THREAD;
                                ie->ie_thread = it;
                                it->it_event = ie;
                                ithread_update(it);
                                wakeup(ie);
                        }
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#endif
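/*
 * Usage sketch (illustrative; the caller shown is a generic stand-in for
 * machine-dependent code): MD interrupt code adds a driver's filter
 * and/or threaded handler to the event for its IRQ with something like:
 *
 *      error = intr_event_add_handler(ie, device_get_nameunit(dev),
 *          filt, func, arg, intr_priority(flags), flags, cookiep);
 *
 * A NULL "filt" with a non-NULL "func" yields a purely threaded handler;
 * the reverse yields a filter-only handler that never schedules a thread.
 */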
/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}
#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            ih->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there is no ithread, then just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another
         * CPU!
         */
        if (ie->ie_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(ie->ie_thread->it_thread);
        if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                ie->ie_thread->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(ie->ie_thread->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (!(ih->ih_flags & IH_FAST)) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}
static int
intr_event_schedule_thread(struct intr_event *ie)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need,
                    td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_thread *it;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            ih->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there are no ithreads (per event and per handler), then
         * just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another
         * CPU!
         */
        if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /* Private or global ithread? */
        it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(it->it_thread);
        if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                it->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(it->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        /*
         * At this point, the handler has been disconnected from the event,
         * so we can kill the private ithread if any.
         */
        if (handler->ih_thread) {
                ithread_destroy(handler->ih_thread);
                handler->ih_thread = NULL;
        }
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (handler != NULL) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}
static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
        struct intr_entropy entropy;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
                return (EINVAL);

        ctd = curthread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need,
                    td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#endif
/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
        struct intr_event *ie;
        int error;

        if (flags & INTR_ENTROPY)
                return (EINVAL);

        ie = (eventp != NULL) ? *eventp : NULL;

        if (ie != NULL) {
                if (!(ie->ie_flags & IE_SOFT))
                        return (EINVAL);
        } else {
                error = intr_event_create(&ie, NULL, IE_SOFT, 0,
                    NULL, NULL, NULL, NULL, "swi%d:", pri);
                if (error)
                        return (error);
                if (eventp != NULL)
                        *eventp = ie;
        }
        error = intr_event_add_handler(ie, name, NULL, handler, arg,
            (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
        if (error)
                return (error);
        if (pri == SWI_CLOCK) {
                struct proc *p;

                p = ie->ie_thread->it_thread->td_proc;
                PROC_LOCK(p);
                p->p_flag |= P_NOLOAD;
                PROC_UNLOCK(p);
        }
        return (0);
}
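/*
 * Usage sketch (illustrative; "mysub_intr" and "mysub_ih" are hypothetical
 * names): a subsystem registers a soft interrupt handler once and later
 * kicks it from elsewhere in the kernel:
 *
 *      static void *mysub_ih;
 *
 *      swi_add(NULL, "mysub", mysub_intr, NULL, SWI_TQ, INTR_MPSAFE,
 *          &mysub_ih);
 *      ...
 *      swi_sched(mysub_ih, 0);         (* run mysub_intr soon *)
 */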
/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
        struct intr_handler *ih = (struct intr_handler *)cookie;
        struct intr_event *ie = ih->ih_event;
        int error;

        CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
            ih->ih_need);

        /*
         * Set ih_need for this handler so that if the ithread is already
         * running it will execute this handler on the next pass.  Otherwise,
         * it will execute it the next time it runs.
         */
        atomic_store_rel_int(&ih->ih_need, 1);

        if (!(flags & SWI_DELAY)) {
                PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
                error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
                error = intr_event_schedule_thread(ie);
#endif
                KASSERT(error == 0, ("stray software interrupt"));
        }
}
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

        return (intr_event_remove_handler(cookie));
}
#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
        struct intr_event *ie;

        ie = ih->ih_event;
        /*
         * If this handler is marked for death, remove it from
         * the list of handlers and wake up the sleeper.
         */
        if (ih->ih_flags & IH_DEAD) {
                mtx_lock(&ie->ie_lock);
                TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                ih->ih_flags &= ~IH_DEAD;
                wakeup(ih);
                mtx_unlock(&ie->ie_lock);
                return;
        }

        /* Execute this handler. */
        CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
            __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
            ih->ih_name, ih->ih_flags);

        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_lock(&Giant);
        ih->ih_handler(ih->ih_argument);
        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_unlock(&Giant);
}
#endif
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
        struct intr_handler *ih, *ihn;

        /* Interrupt handlers should not sleep. */
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_NO_SLEEPING();
        TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

                /*
                 * If this handler is marked for death, remove it from
                 * the list of handlers and wake up the sleeper.
                 */
                if (ih->ih_flags & IH_DEAD) {
                        mtx_lock(&ie->ie_lock);
                        TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                        ih->ih_flags &= ~IH_DEAD;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                        continue;
                }

                /* Skip filter only handlers */
                if (ih->ih_handler == NULL)
                        continue;

                /*
                 * For software interrupt threads, we only execute
                 * handlers that have their need flag set.  Hardware
                 * interrupt threads always invoke all of their handlers.
                 */
                if (ie->ie_flags & IE_SOFT) {
                        if (!ih->ih_need)
                                continue;
                        else
                                atomic_store_rel_int(&ih->ih_need, 0);
                }

                /* Execute this handler. */
                CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
                    __func__, p->p_pid, (void *)ih->ih_handler,
                    ih->ih_argument, ih->ih_name, ih->ih_flags);

                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_lock(&Giant);
                ih->ih_handler(ih->ih_argument);
                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_unlock(&Giant);
        }
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_SLEEPING_OK();

        /*
         * Interrupt storm handling:
         *
         * If this interrupt source is currently storming, then throttle
         * it to only fire the handler once per clock tick.
         *
         * If this interrupt source is not currently storming, but the
         * number of back to back interrupts exceeds the storm threshold,
         * then enter storming mode.
         */
        if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
            !(ie->ie_flags & IE_SOFT)) {
                /* Report the message only once every second. */
                if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
                        printf(
        "interrupt storm detected on \"%s\"; throttling interrupt source\n",
                            ie->ie_name);
                }
                pause("istorm", 1);
        } else
                ie->ie_count++;

        /*
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
        if (ie->ie_post_ithread != NULL)
                ie->ie_post_ithread(ie->ie_source);
}
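/*
 * Worked example of the storm logic above (illustrative numbers): with
 * the default intr_storm_threshold of 1000 and hz=1000, a stuck
 * level-triggered line that re-fires immediately runs its handlers 1000
 * back-to-back times, then logs the storm warning at most once per
 * second and pauses one tick per pass, capping the source at roughly hz
 * handler runs per second.  ie_count is reset when the ithread finally
 * goes back to sleep, so a source that quiets down leaves storm mode on
 * its own.
 */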
#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct intr_thread *ithd;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        ithd = (struct intr_thread *)arg;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 */
                while (ithd->it_need) {
                        /*
                         * This might need a full read and write barrier
                         * to make sure that this write posts before any
                         * of the memory or device accesses in the
                         * handlers.
                         */
                        atomic_store_rel_int(&ithd->it_need, 0);
                        ithread_execute_handlers(p, ie);
                }
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                thread_unlock(td);
        }
}
1243 * o ie: the event connected to this interrupt.
1244 * o frame: some archs (i.e. i386) pass a frame to some.
1245 * handlers as their main argument.
1247 * o 0: everything ok.
1248 * o EINVAL: stray interrupt.
1251 intr_event_handle(struct intr_event
*ie
, struct trapframe
*frame
)
1253 struct intr_handler
*ih
;
1255 int error
, ret
, thread
;
1259 /* An interrupt with no event or handlers is a stray interrupt. */
1260 if (ie
== NULL
|| TAILQ_EMPTY(&ie
->ie_handlers
))
1264 * Execute fast interrupt handlers directly.
1265 * To support clock handlers, if a handler registers
1266 * with a NULL argument, then we pass it a pointer to
1267 * a trapframe as its argument.
1269 td
->td_intr_nesting_level
++;
1273 TAILQ_FOREACH(ih
, &ie
->ie_handlers
, ih_next
) {
1274 if (ih
->ih_filter
== NULL
) {
1278 CTR4(KTR_INTR
, "%s: exec %p(%p) for %s", __func__
,
1279 ih
->ih_filter
, ih
->ih_argument
== NULL
? frame
:
1280 ih
->ih_argument
, ih
->ih_name
);
1281 if (ih
->ih_argument
== NULL
)
1282 ret
= ih
->ih_filter(frame
);
1284 ret
= ih
->ih_filter(ih
->ih_argument
);
1286 * Wrapper handler special handling:
1288 * in some particular cases (like pccard and pccbb),
1289 * the _real_ device handler is wrapped in a couple of
1290 * functions - a filter wrapper and an ithread wrapper.
1291 * In this case (and just in this case), the filter wrapper
1292 * could ask the system to schedule the ithread and mask
1293 * the interrupt source if the wrapped handler is composed
1294 * of just an ithread handler.
1296 * TODO: write a generic wrapper to avoid people rolling
1300 if (ret
== FILTER_SCHEDULE_THREAD
)
1306 if (ie
->ie_pre_ithread
!= NULL
)
1307 ie
->ie_pre_ithread(ie
->ie_source
);
1309 if (ie
->ie_post_filter
!= NULL
)
1310 ie
->ie_post_filter(ie
->ie_source
);
1313 /* Schedule the ithread if needed. */
1315 error
= intr_event_schedule_thread(ie
);
1317 KASSERT(error
== 0, ("bad stray interrupt"));
1320 log(LOG_WARNING
, "bad stray interrupt");
1324 td
->td_intr_nesting_level
--;
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct intr_thread *ithd;
        struct intr_handler *ih;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        int priv;

        td = curthread;
        p = td->td_proc;
        ih = (struct intr_handler *)arg;
        priv = (ih->ih_thread != NULL) ? 1 : 0;
        ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 */
                while (ithd->it_need) {
                        /*
                         * This might need a full read and write barrier
                         * to make sure that this write posts before any
                         * of the memory or device accesses in the
                         * handlers.
                         */
                        atomic_store_rel_int(&ithd->it_need, 0);
                        if (priv)
                                priv_ithread_execute_handler(p, ih);
                        else
                                ithread_execute_handlers(p, ie);
                }
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                thread_unlock(td);
        }
}
/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */
static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
        struct intr_handler *ih;
        void *arg;
        int ret, thread_only;

        ret = 0;
        thread_only = 0;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                /*
                 * Execute fast interrupt handlers directly.
                 * To support clock handlers, if a handler registers
                 * with a NULL argument, then we pass it a pointer to
                 * a trapframe as its argument.
                 */
                arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

                CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
                    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

                if (ih->ih_filter != NULL)
                        ret = ih->ih_filter(arg);
                else {
                        thread_only = 1;
                        continue;
                }

                if (ret & FILTER_STRAY)
                        continue;
                else {
                        *ithd = ih->ih_thread;
                        return (ret);
                }
        }

        /*
         * No filters handled the interrupt and we have at least
         * one handler without a filter.  In this case, we schedule
         * all of the filter-less handlers to run in the ithread.
         */
        if (thread_only) {
                *ithd = ie->ie_thread;
                return (FILTER_SCHEDULE_THREAD);
        }
        return (FILTER_STRAY);
}
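/*
 * Illustrative filter (hypothetical driver code, not part of this file):
 * a filter runs in primary interrupt context, decides whether the line
 * belongs to its device, and may ask for its ithread to be scheduled:
 *
 *      static int
 *      mydrv_filter(void *arg)
 *      {
 *              struct mydrv_softc *sc = arg;
 *
 *              if (!MYDRV_INTR_PENDING(sc))
 *                      return (FILTER_STRAY);
 *              MYDRV_MASK_INTR(sc);
 *              return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
 *      }
 */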
/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (i.e. i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
        struct intr_thread *ithd;
        struct thread *td;
        int thread;

        ithd = NULL;
        td = curthread;

        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
                return (EINVAL);

        td->td_intr_nesting_level++;
        thread = 0;
        critical_enter();
        thread = intr_filter_loop(ie, frame, &ithd);
        if (thread & FILTER_HANDLED) {
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
        } else {
                if (ie->ie_pre_ithread != NULL)
                        ie->ie_pre_ithread(ie->ie_source);
        }
        critical_exit();

        /* Interrupt storm logic */
        if (thread & FILTER_STRAY) {
                ie->ie_count++;
                if (ie->ie_count < intr_storm_threshold)
                        printf("Interrupt stray detection not present\n");
        }

        /* Schedule an ithread if needed. */
        if (thread & FILTER_SCHEDULE_THREAD) {
                if (intr_event_schedule_thread(ie, ithd) != 0)
                        panic("%s: impossible stray interrupt", __func__);
        }
        td->td_intr_nesting_level--;
        return (0);
}
#endif
#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
        int comma;

        db_printf("\t%-10s ", ih->ih_name);
        switch (ih->ih_pri) {
        case PI_REALTIME:
                db_printf("CLK ");
                break;
        case PI_AV:
                db_printf("AV  ");
                break;
        case PI_TTYHIGH:
        case PI_TTYLOW:
                db_printf("TTY ");
                break;
        case PI_TAPE:
                db_printf("TAPE");
                break;
        case PI_NET:
                db_printf("NET ");
                break;
        case PI_DISK:
        case PI_DISKLOW:
                db_printf("DISK");
                break;
        case PI_DULL:
                db_printf("DULL");
                break;
        default:
                if (ih->ih_pri >= PI_SOFT)
                        db_printf("SWI ");
                else
                        db_printf("%4u", ih->ih_pri);
                break;
        }
        db_printf(" ");
        db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
        db_printf("(%p)", ih->ih_argument);
        if (ih->ih_need ||
            (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
            IH_MPSAFE)) != 0) {
                db_printf(" {");
                comma = 0;
                if (ih->ih_flags & IH_EXCLUSIVE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("EXCL");
                        comma = 1;
                }
                if (ih->ih_flags & IH_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ih->ih_flags & IH_DEAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("DEAD");
                        comma = 1;
                }
                if (ih->ih_flags & IH_MPSAFE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("MPSAFE");
                        comma = 1;
                }
                if (ih->ih_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");
}
1615 db_dump_intr_event(struct intr_event
*ie
, int handlers
)
1617 struct intr_handler
*ih
;
1618 struct intr_thread
*it
;
1621 db_printf("%s ", ie
->ie_fullname
);
1624 db_printf("(pid %d)", it
->it_thread
->td_proc
->p_pid
);
1626 db_printf("(no thread)");
1627 if ((ie
->ie_flags
& (IE_SOFT
| IE_ENTROPY
| IE_ADDING_THREAD
)) != 0 ||
1628 (it
!= NULL
&& it
->it_need
)) {
1631 if (ie
->ie_flags
& IE_SOFT
) {
1635 if (ie
->ie_flags
& IE_ENTROPY
) {
1638 db_printf("ENTROPY");
1641 if (ie
->ie_flags
& IE_ADDING_THREAD
) {
1644 db_printf("ADDING_THREAD");
1647 if (it
!= NULL
&& it
->it_need
) {
1657 TAILQ_FOREACH(ih
, &ie
->ie_handlers
, ih_next
)
1658 db_dump_intrhand(ih
);
/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
        struct intr_event *ie;
        int all, verbose;

        verbose = index(modif, 'v') != NULL;
        all = index(modif, 'a') != NULL;
        TAILQ_FOREACH(ie, &event_list, ie_list) {
                if (!all && TAILQ_EMPTY(&ie->ie_handlers))
                        continue;
                db_dump_intr_event(ie, verbose);
                if (db_pager_quit)
                        break;
        }
}
#endif /* DDB */
/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

        if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
                panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);
/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
            req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");
static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_handle_opaque(oidp, intrcnt,
            (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
        u_long *i;
        char *cp;

        cp = intrnames;
        for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
                if (*cp == '\0')
                        break;
                if (*i != 0)
                        db_printf("%s\t%lu\n", cp, *i);
                cp += strlen(cp) + 1;
        }
}
#endif /* DDB */