/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2011, Michael Lotz, mmlr@mlotz.ch.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#include <int.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <arch/debug_console.h>
#include <arch/int.h>
#include <boot/kernel_args.h>
#include <elf.h>
#include <load_tracking.h>
#include <util/AutoLock.h>
#include <util/kqueue.h>
#include <smp.h>

#include "kernel_debug_config.h"
//#define TRACE_INT
#ifdef TRACE_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
struct io_handler {
	struct io_handler *next;
	interrupt_handler func;
	void *data;
	bool use_enable_counter;
	bool no_handled_info;
#if DEBUG_INTERRUPTS
	int64 handled_count;
#endif
};
struct io_vector {
	struct io_handler *handler_list;
	spinlock vector_lock;
	int32 enable_count;
	bool no_lock_vector;
	interrupt_type type;

	spinlock load_lock;
	bigtime_t last_measure_time;
	bigtime_t last_measure_active;
	int32 load;

	irq_assignment* assigned_cpu;

#if DEBUG_INTERRUPTS
	int64 handled_count;
	int64 unhandled_count;
	int trigger_count;
	int ignored_count;
#endif
};
static int32 sLastCPU;

static io_vector sVectors[NUM_IO_VECTORS];
static bool sAllocatedIOInterruptVectors[NUM_IO_VECTORS];
static irq_assignment sVectorCPUAssignments[NUM_IO_VECTORS];
static mutex sIOInterruptVectorAllocationLock
	= MUTEX_INITIALIZER("io_interrupt_vector_allocation");
#if DEBUG_INTERRUPTS
static int
dump_int_statistics(int argc, char **argv)
{
	int i;
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		struct io_handler *io;

		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].enable_count == 0
			&& sVectors[i].handled_count == 0
			&& sVectors[i].unhandled_count == 0
			&& sVectors[i].handler_list == NULL)
			continue;

		kprintf("int %3d, enabled %" B_PRId32 ", handled %8" B_PRId64 ", "
			"unhandled %8" B_PRId64 "%s%s\n", i, sVectors[i].enable_count,
			sVectors[i].handled_count, sVectors[i].unhandled_count,
			B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "",
			sVectors[i].handler_list == NULL ? ", no handler" : "");

		for (io = sVectors[i].handler_list; io != NULL; io = io->next) {
			const char *symbol, *imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address((addr_t)io->func,
				NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (strchr(imageName, '/') != NULL)
					imageName = strrchr(imageName, '/') + 1;

				int length = 4 + strlen(imageName);
				kprintf(" %s:%-*s (%p)", imageName, 45 - length, symbol,
					io->func);
			} else
				kprintf("\t\t\t\t\t func %p", io->func);

			kprintf(", data %p, handled ", io->data);
			if (io->no_handled_info)
				kprintf("<unknown>\n");
			else
				kprintf("%8" B_PRId64 "\n", io->handled_count);
		}

		kprintf("\n");
	}
	return 0;
}
#endif
static int
dump_int_load(int argc, char** argv)
{
	static const char* typeNames[]
		= { "exception", "irq", "local irq", "syscall", "ici", "unknown" };

	for (int i = 0; i < NUM_IO_VECTORS; i++) {
		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].handler_list == NULL
			&& sVectors[i].enable_count == 0)
			continue;

		kprintf("int %3d, type %s, enabled %" B_PRId32 ", load %" B_PRId32
			"%%", i, typeNames[min_c(sVectors[i].type,
				INTERRUPT_TYPE_UNKNOWN)],
			sVectors[i].enable_count,
			sVectors[i].assigned_cpu != NULL
				? sVectors[i].assigned_cpu->load / 10 : 0);

		if (sVectors[i].type == INTERRUPT_TYPE_IRQ) {
			ASSERT(sVectors[i].assigned_cpu != NULL);

			if (sVectors[i].assigned_cpu->cpu != -1)
				kprintf(", cpu %" B_PRId32, sVectors[i].assigned_cpu->cpu);
			else
				kprintf(", cpu -");
		}

		if (B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock))
			kprintf(", ACTIVE");
		kprintf("\n");
	}

	return 0;
}
// #pragma mark - private kernel API


bool
interrupts_enabled(void)
{
	return arch_int_are_interrupts_enabled();
}
status_t
int_init(kernel_args* args)
{
	TRACE(("int_init: entry\n"));

	return arch_int_init(args);
}
status_t
int_init_post_vm(kernel_args* args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
		sVectors[i].type = INTERRUPT_TYPE_UNKNOWN;

		B_INITIALIZE_SPINLOCK(&sVectors[i].load_lock);
		sVectors[i].last_measure_time = 0;
		sVectors[i].last_measure_active = 0;
		sVectors[i].load = 0;

#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;

		sVectorCPUAssignments[i].irq = i;
		sVectorCPUAssignments[i].count = 1;
		sVectorCPUAssignments[i].handlers_count = 0;
		sVectorCPUAssignments[i].load = 0;
		sVectorCPUAssignments[i].cpu = -1;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	add_debugger_command("int_load", &dump_int_load,
		"list interrupt usage statistics");

	return arch_int_init_post_vm(args);
}
status_t
int_init_io(kernel_args* args)
{
	return arch_int_init_io(args);
}


status_t
int_init_post_device_manager(kernel_args* args)
{
	arch_debug_install_interrupt_handlers();

	return arch_int_init_post_device_manager(args);
}
static void
update_int_load(int i)
{
	if (!try_acquire_spinlock(&sVectors[i].load_lock))
		return;

	int32 oldLoad = sVectors[i].load;
	compute_load(sVectors[i].last_measure_time, sVectors[i].last_measure_active,
		sVectors[i].load, system_time());

	if (oldLoad != sVectors[i].load)
		atomic_add(&sVectors[i].assigned_cpu->load, sVectors[i].load - oldLoad);

	release_spinlock(&sVectors[i].load_lock);
}
/*!	Actually process an interrupt via the handlers registered for that
	vector (IRQ).
*/
int
int_io_interrupt_handler(int vector, bool levelTriggered)
{
	int status = B_UNHANDLED_INTERRUPT;
	struct io_handler* io;
	bool handled = false;

	bigtime_t start = system_time();

	// exceptions and syscalls have their own handlers
	ASSERT(sVectors[vector].type != INTERRUPT_TYPE_EXCEPTION
		&& sVectors[vector].type != INTERRUPT_TYPE_SYSCALL);

	if (!sVectors[vector].no_lock_vector)
		acquire_spinlock(&sVectors[vector].vector_lock);

#if !DEBUG_INTERRUPTS
	// The list can be empty at this point
	if (sVectors[vector].handler_list == NULL) {
		dprintf("unhandled io interrupt %d\n", vector);
		if (!sVectors[vector].no_lock_vector)
			release_spinlock(&sVectors[vector].vector_lock);
		return B_UNHANDLED_INTERRUPT;
	}
#endif

	// For level-triggered interrupts, we actually use the return
	// value (i.e. B_HANDLED_INTERRUPT) to decide whether or not we
	// want to call another interrupt handler.
	// For edge-triggered interrupts, however, we always need to call
	// all handlers, as multiple interrupts cannot be identified. We
	// still make sure the return code of this function will issue
	// whatever the driver thought would be useful.
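	// A worked example (illustrative, not from the original source): with
	// handlers A, B and C registered on a level-triggered line, if A returns
	// B_UNHANDLED_INTERRUPT and B returns B_HANDLED_INTERRUPT, the loop below
	// stops before calling C. On an edge-triggered line all three handlers
	// run, and the vector reports B_HANDLED_INTERRUPT if any of them claimed
	// the interrupt.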
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		status = io->func(io->data);

#if DEBUG_INTERRUPTS
		if (status != B_UNHANDLED_INTERRUPT)
			io->handled_count++;
#endif
		if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
			break;

		if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
			handled = true;
	}

#if DEBUG_INTERRUPTS
	sVectors[vector].trigger_count++;
	if (status != B_UNHANDLED_INTERRUPT || handled) {
		sVectors[vector].handled_count++;
	} else {
		sVectors[vector].unhandled_count++;
		sVectors[vector].ignored_count++;
	}

	if (sVectors[vector].trigger_count > 10000) {
		if (sVectors[vector].ignored_count > 9900) {
			struct io_handler *last = sVectors[vector].handler_list;
			while (last && last->next)
				last = last->next;

			if (last != NULL && last->no_handled_info) {
				// we have an interrupt handler installed that does not
				// know whether or not it has actually handled the interrupt,
				// so this unhandled count is inaccurate and we can't just
				// disable
			} else {
				if (sVectors[vector].handler_list == NULL
					|| sVectors[vector].handler_list->next == NULL) {
					// this interrupt vector is not shared, disable it
					sVectors[vector].enable_count = -100;
					arch_int_disable_io_interrupt(vector);
					dprintf("Disabling unhandled io interrupt %d\n", vector);
				} else {
					// this is a shared interrupt vector, we cannot just disable it
					dprintf("More than 99%% interrupts of vector %d are unhandled\n",
						vector);
				}
			}
		}

		sVectors[vector].trigger_count = 0;
		sVectors[vector].ignored_count = 0;
	}
#endif

	if (!sVectors[vector].no_lock_vector)
		release_spinlock(&sVectors[vector].vector_lock);

	SpinLocker vectorLocker(sVectors[vector].load_lock);
	bigtime_t deltaTime = system_time() - start;
	sVectors[vector].last_measure_active += deltaTime;
	vectorLocker.Unlock();

	cpu_ent* cpu = get_cpu_struct();
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		|| sVectors[vector].type == INTERRUPT_TYPE_ICI
		|| sVectors[vector].type == INTERRUPT_TYPE_LOCAL_IRQ) {
		cpu->interrupt_time += deltaTime;
		if (sVectors[vector].type == INTERRUPT_TYPE_IRQ)
			cpu->irq_time += deltaTime;
	}

	update_int_load(vector);

	if (levelTriggered)
		return status;

	// edge triggered return value
	if (handled)
		return B_HANDLED_INTERRUPT;

	return B_UNHANDLED_INTERRUPT;
}
// #pragma mark - public API


#undef disable_interrupts
#undef restore_interrupts


cpu_status
disable_interrupts(void)
{
	return arch_int_disable_interrupts();
}


void
restore_interrupts(cpu_status status)
{
	arch_int_restore_interrupts(status);
}
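/*	Usage sketch (illustrative, not part of the original file): these are the
	classic save/restore primitives. Code that must run with interrupts off
	saves the previous state and restores it afterwards, so the pattern nests
	correctly regardless of whether interrupts were enabled on entry:

		cpu_status state = disable_interrupts();
		// ... touch per-CPU or interrupt-shared state ...
		restore_interrupts(state);

	install_io_interrupt_handler() below uses exactly this pattern around its
	vector spinlock.
*/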
static uint32
assign_cpu(void)
{
	const cpu_topology_node* node;
	do {
		int32 nextID = atomic_add(&sLastCPU, 1);
		node = get_cpu_topology();

		while (node->level != CPU_TOPOLOGY_SMT) {
			int levelSize = node->children_count;
			node = node->children[nextID % levelSize];
			nextID /= levelSize;
		}
	} while (gCPU[node->id].disabled);

	return node->id;
}
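/*	How the walk above spreads IRQs (illustrative example, not from the
	original source): nextID is decomposed digit by digit, one digit per
	topology level, so consecutive values differ first at the topmost level.
	With, say, 2 packages of 2 cores each, successive calls visit
	package 0/core 0, package 1/core 0, package 0/core 1, package 1/core 1:
	consecutive IRQ assignments land on different packages before doubling up
	on cores.
*/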
/*!	Install a handler to be called when an interrupt is triggered
	for the given interrupt number with \a data as the argument.
*/
status_t
install_io_interrupt_handler(long vector, interrupt_handler handler, void *data,
	ulong flags)
{
	struct io_handler *io = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	io = (struct io_handler *)malloc(sizeof(struct io_handler));
	if (io == NULL)
		return B_NO_MEMORY;

	arch_debug_remove_interrupt_handler(vector);
		// There might be a temporary debug interrupt installed on this
		// vector that should be removed now.

	io->func = handler;
	io->data = data;
	io->use_enable_counter = (flags & B_NO_ENABLE_COUNTER) == 0;
	io->no_handled_info = (flags & B_NO_HANDLED_INFO) != 0;
#if DEBUG_INTERRUPTS
	io->handled_count = 0LL;
#endif

	// Disable the interrupts, get the spinlock for this irq only
	// and then insert the handler
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	// Initial attempt to balance IRQs, the scheduler will correct this
	// if some cores end up being overloaded.
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].handler_list == NULL
		&& sVectors[vector].assigned_cpu->cpu == -1) {

		int32 cpuID = assign_cpu();
		arch_int_assign_to_cpu(vector, cpuID);
		sVectors[vector].assigned_cpu->cpu = cpuID;

		cpu_ent* cpu = &gCPU[cpuID];
		SpinLocker _(cpu->irqs_lock);
		atomic_add(&sVectors[vector].assigned_cpu->handlers_count, 1);
		list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	}

	if ((flags & B_NO_HANDLED_INFO) != 0
		&& sVectors[vector].handler_list != NULL) {
		// The driver registering this interrupt handler doesn't know
		// whether or not it actually handled the interrupt after the
		// handler returns. This is incompatible with shared interrupts
		// as we'd potentially steal interrupts from other handlers
		// resulting in interrupt storms. Therefore we enqueue this interrupt
		// handler as the very last one, meaning all other handlers will
		// get their go at any interrupt first.
		struct io_handler *last = sVectors[vector].handler_list;
		while (last->next)
			last = last->next;

		io->next = NULL;
		last->next = io;
	} else {
		// A normal interrupt handler, just add it to the head of the list.
		io->next = sVectors[vector].handler_list;
		sVectors[vector].handler_list = io;
	}

	// If B_NO_ENABLE_COUNTER is set, we're being asked to not alter
	// whether the interrupt should be enabled or not
	if (io->use_enable_counter) {
		if (sVectors[vector].enable_count++ == 0)
			arch_int_enable_io_interrupt(vector);
	}

	// If B_NO_LOCK_VECTOR is specified this is a vector that is not supposed
	// to have multiple handlers and does not require locking of the vector
	// when entering the handler. For example this is used by internally
	// registered interrupt handlers like for handling local APIC interrupts
	// that may run concurrently on multiple CPUs. Locking with a spinlock
	// would in that case defeat the purpose as it would serialize calling the
	// handlers in parallel on different CPUs.
	if (flags & B_NO_LOCK_VECTOR)
		sVectors[vector].no_lock_vector = true;

	release_spinlock(&sVectors[vector].vector_lock);

	restore_interrupts(state);

	return B_OK;
}
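/*	Usage sketch (hypothetical driver code, not part of this file): a driver
	installs its handler with the (vector, handler, data) triple it will later
	pass to remove_io_interrupt_handler(). The handler name, device structure
	and its fields are invented for illustration:

		static int32
		my_device_interrupt(void* data)
		{
			my_device* device = (my_device*)data;
			if (!my_device_raised_interrupt(device))
				return B_UNHANDLED_INTERRUPT;
			my_device_acknowledge(device);
			return B_HANDLED_INTERRUPT;
		}

		install_io_interrupt_handler(device->irq, &my_device_interrupt,
			device, 0);
		// ... and on teardown, the same triple removes it again:
		remove_io_interrupt_handler(device->irq, &my_device_interrupt,
			device);
*/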
/*!	Remove a previously installed interrupt handler */
status_t
remove_io_interrupt_handler(long vector, interrupt_handler handler, void *data)
{
	status_t status = B_BAD_VALUE;
	struct io_handler *io = NULL;
	struct io_handler *last = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	/* lock the structures down so they are not modified while we search */
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	/* loop through the available handlers and try to find a match.
	 * We go forward through the list, which means we start with the
	 * most recently added handlers.
	 */
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		/* we have to match both function and data */
		if (io->func == handler && io->data == data) {
			if (last != NULL)
				last->next = io->next;
			else
				sVectors[vector].handler_list = io->next;

			// Check if we need to disable the interrupt
			if (io->use_enable_counter && --sVectors[vector].enable_count == 0)
				arch_int_disable_io_interrupt(vector);

			status = B_OK;
			break;
		}

		last = io;
	}

	if (sVectors[vector].handler_list == NULL
		&& sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].assigned_cpu != NULL
		&& sVectors[vector].assigned_cpu->handlers_count > 0) {

		int32 oldHandlersCount
			= atomic_add(&sVectors[vector].assigned_cpu->handlers_count, -1);

		if (oldHandlersCount == 1) {
			int32 oldCPU;
			SpinLocker locker;
			cpu_ent* cpu;

			do {
				locker.Unlock();

				oldCPU = sVectors[vector].assigned_cpu->cpu;

				ASSERT(oldCPU != -1);
				cpu = &gCPU[oldCPU];

				locker.SetTo(cpu->irqs_lock, false);
			} while (sVectors[vector].assigned_cpu->cpu != oldCPU);

			sVectors[vector].assigned_cpu->cpu = -1;
			list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
		}
	}

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	// if the handler could be found and removed, we still have to free it
	if (status == B_OK)
		free(io);

	return status;
}
/*!	Mark \a count contiguous interrupts starting at \a startVector as in use.
	This will prevent them from being allocated by others. Only use this when
	the reserved range is hardwired to the given vector, otherwise allocate
	vectors using allocate_io_interrupt_vectors() instead.
*/
status_t
reserve_io_interrupt_vectors(long count, long startVector, interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	for (long i = 0; i < count; i++) {
		if (sAllocatedIOInterruptVectors[startVector + i]) {
			panic("reserved interrupt vector range %ld-%ld overlaps already "
				"allocated vector %ld", startVector, startVector + count - 1,
				startVector + i);
			free_io_interrupt_vectors(i, startVector);
			return B_BUSY;
		}

		sVectors[startVector + i].type = type;
		sVectors[startVector + i].assigned_cpu
			= &sVectorCPUAssignments[startVector + i];
		sVectorCPUAssignments[startVector + i].count = 1;
		sAllocatedIOInterruptVectors[startVector + i] = true;
	}

	dprintf("reserve_io_interrupt_vectors: reserved %ld vectors starting "
		"from %ld\n", count, startVector);
	return B_OK;
}
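/*	Usage sketch (hypothetical, not from this file): an interrupt controller
	driver whose lines are hardwired to fixed vector numbers would reserve
	them up front, for example 16 legacy-style lines starting at vector 0:

		reserve_io_interrupt_vectors(16, 0, INTERRUPT_TYPE_IRQ);

	Dynamically routed vectors should instead come from
	allocate_io_interrupt_vectors() below.
*/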
/*!	Allocate \a count contiguous interrupt vectors. The vectors are allocated
	as available so that they do not overlap with any other reserved vector.
	The first vector to be used is returned in \a startVector on success.
*/
status_t
allocate_io_interrupt_vectors(long count, long *startVector,
	interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	long vector = 0;
	bool runFound = false;
		// must start out false: if every vector is already allocated the
		// loop below never sets it, and we would otherwise hand out vector 0
	for (long i = 0; i < NUM_IO_VECTORS - (count - 1); i++) {
		if (sAllocatedIOInterruptVectors[i])
			continue;

		vector = i;
		runFound = true;
		for (uint16 j = 1; j < count; j++) {
			if (sAllocatedIOInterruptVectors[i + j]) {
				runFound = false;
				i += j;
				break;
			}
		}

		if (runFound)
			break;
	}

	if (!runFound) {
		dprintf("found no free vectors to allocate %ld io interrupts\n", count);
		return B_NO_MEMORY;
	}

	for (long i = 0; i < count; i++) {
		sVectors[vector + i].type = type;
		sVectors[vector + i].assigned_cpu = &sVectorCPUAssignments[vector];
		sAllocatedIOInterruptVectors[vector + i] = true;
	}

	sVectorCPUAssignments[vector].irq = vector;
	sVectorCPUAssignments[vector].count = count;

	*startVector = vector;
	dprintf("allocate_io_interrupt_vectors: allocated %ld vectors starting "
		"from %ld\n", count, vector);
	return B_OK;
}
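/*	Usage sketch (hypothetical, not from this file): a bus manager that needs
	a block of dynamically routed vectors might do:

		long vector;
		if (allocate_io_interrupt_vectors(4, &vector, INTERRUPT_TYPE_IRQ)
				== B_OK) {
			// vector .. vector + 3 now belong to us; install handlers,
			// program the device, and free the range again on teardown.
		}
*/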
/*!	Free/unreserve interrupt vectors previously allocated with the
	{reserve|allocate}_io_interrupt_vectors() functions. The \a count and
	\a startVector can be adjusted from the allocation calls to partially free
	a vector range.
*/
void
free_io_interrupt_vectors(long count, long startVector)
{
	if (startVector + count > NUM_IO_VECTORS) {
		panic("invalid start vector %ld or count %ld supplied to "
			"free_io_interrupt_vectors\n", startVector, count);
		return;
	}

	dprintf("free_io_interrupt_vectors: freeing %ld vectors starting "
		"from %ld\n", count, startVector);

	MutexLocker locker(sIOInterruptVectorAllocationLock);
	for (long i = 0; i < count; i++) {
		if (!sAllocatedIOInterruptVectors[startVector + i]) {
			panic("io interrupt vector %ld was not allocated\n",
				startVector + i);
			continue;
		}

		sVectors[startVector + i].assigned_cpu = NULL;
		sAllocatedIOInterruptVectors[startVector + i] = false;
	}
}
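/*	Partial-free example (hypothetical, not from this file): continuing the
	allocation sketch above, a caller that allocated 4 vectors but only ends
	up using the first two could release the tail of the range:

		free_io_interrupt_vectors(2, vector + 2);
*/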
void
assign_io_interrupt_to_cpu(long vector, int32 newCPU)
{
	ASSERT(sVectors[vector].type == INTERRUPT_TYPE_IRQ);

	int32 oldCPU = sVectors[vector].assigned_cpu->cpu;

	if (newCPU == -1)
		newCPU = assign_cpu();

	if (newCPU == oldCPU)
		return;

	ASSERT(oldCPU != -1);
	cpu_ent* cpu = &gCPU[oldCPU];

	SpinLocker locker(cpu->irqs_lock);
	sVectors[vector].assigned_cpu->cpu = -1;
	list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	locker.Unlock();

	cpu = &gCPU[newCPU];
	locker.SetTo(cpu->irqs_lock, false);
	sVectors[vector].assigned_cpu->cpu = newCPU;
	arch_int_assign_to_cpu(vector, newCPU);
	list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
}