[linux-2.6/linux-mips.git] / arch/mips/kernel/irq.c
blob 9b734d74ae8e0ee43922d34629ed1838d5c0e7c8
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#ifdef CONFIG_KGDB
int kgdb_early_setup;
#endif

static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
int allocate_irqno(void)
{
        int irq;

again:
        irq = find_first_zero_bit(irq_map, NR_IRQS);

        if (irq >= NR_IRQS)
                return -ENOSPC;

        if (test_and_set_bit(irq, irq_map))
                goto again;

        return irq;
}
/*
 * Allocate the 16 legacy interrupts for i8259 devices.  This happens early
 * in the kernel initialization so treating allocation failure as BUG() is
 * ok.
 */
void __init alloc_legacy_irqno(void)
{
        int i;

        for (i = 0; i <= 16; i++)
                BUG_ON(test_and_set_bit(i, irq_map));
}
void free_irqno(unsigned int irq)
{
        smp_mb__before_clear_bit();
        clear_bit(irq, irq_map);
        smp_mb__after_clear_bit();
}
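/*
 * Illustrative sketch, not part of the original file: how a platform
 * interrupt-controller driver might pair allocate_irqno() with free_irqno()
 * to obtain and release a dynamic MIPS IRQ number.  The names example_irq,
 * example_cascade_setup and example_cascade_teardown are invented for this
 * sketch; registration of an irq_chip for the new number is left out.
 */
#if 0	/* example only, excluded from the build */
static int example_irq = -1;

static int example_cascade_setup(void)
{
        example_irq = allocate_irqno();
        if (example_irq < 0)            /* -ENOSPC: irq_map is exhausted */
                return example_irq;

        /* ... register an irq_chip and flow handler for example_irq ... */
        return 0;
}

static void example_cascade_teardown(void)
{
        if (example_irq >= 0) {
                free_irqno(example_irq);        /* hand the number back to irq_map */
                example_irq = -1;
        }
}
#endif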
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        smtc_im_ack_irq(irq);
        printk("unexpected IRQ # %d\n", irq);
}
atomic_t irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
        return 0;
}
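/*
 * For illustration (not in the original file): the line appended to
 * /proc/interrupts by the seq_printf() above looks roughly like
 *
 *	ERR:          0
 *
 * where the counter is irq_err_count, bumped by spurious_interrupt() below.
 */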
asmlinkage void spurious_interrupt(void)
{
        atomic_inc(&irq_err_count);
}
void __init init_IRQ(void)
{
        int i;

#ifdef CONFIG_KGDB
        if (kgdb_early_setup)
                return;
#endif

        for (i = 0; i < NR_IRQS; i++)
                irq_set_noprobe(i);

        arch_init_irq();

#ifdef CONFIG_KGDB
        if (!kgdb_early_setup)
                kgdb_early_setup = 1;
#endif
}
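/*
 * Illustrative sketch, not part of the original file: init_IRQ() defers the
 * real controller setup to the platform's arch_init_irq().  Assuming a
 * hypothetical board whose devices hang directly off the CP0 interrupt
 * lines, the hook could be as small as a call to mips_cpu_irq_init(); real
 * platforms usually also wire up their cascaded board-level controllers
 * here.
 */
#if 0	/* example only, excluded from the build */
#include <asm/irq_cpu.h>

void __init arch_init_irq(void)
{
        /* Register the eight CP0 (MIPS core) interrupt lines. */
        mips_cpu_irq_init();
}
#endif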
#ifdef DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
        unsigned long sp;

        __asm__ __volatile__("move %0, $sp" : "=r" (sp));
        sp &= THREAD_MASK;

        /*
         * Check for stack overflow: is there less than STACK_WARN free?
         * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
         */
        if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                printk("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
}
#else
static inline void check_stack_overflow(void) {}
#endif
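/*
 * Worked example (illustrative, not in the original file): on MIPS,
 * THREAD_MASK is THREAD_SIZE - 1, so "sp &= THREAD_MASK" leaves the offset
 * of the stack pointer within the current thread stack.  Assuming a 32-bit
 * kernel with 4 KiB pages, THREAD_SIZE is 8 KiB and STACK_WARN defaults to
 * THREAD_SIZE / 8 = 1 KiB; struct thread_info sits at the bottom of the
 * region, so the warning fires once less than 1 KiB of stack remains above
 * it.
 */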
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
        irq_enter();
        check_stack_overflow();
        if (!smtc_handle_on_other_cpu(irq))
                generic_handle_irq(irq);
        irq_exit();
}
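/*
 * Illustrative sketch, not part of the original file: do_IRQ() is normally
 * reached from the low-level exception vector through the platform's
 * plat_irq_dispatch().  The sketch below assumes a hypothetical board with
 * the timer on CP0 line 7 and a single device cascade on line 2; which
 * lines are populated and the value of MIPS_CPU_IRQ_BASE are platform
 * choices.
 */
#if 0	/* example only, excluded from the build */
asmlinkage void plat_irq_dispatch(void)
{
        unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

        if (pending & CAUSEF_IP7)               /* CP0 count/compare timer */
                do_IRQ(MIPS_CPU_IRQ_BASE + 7);
        else if (pending & CAUSEF_IP2)          /* board-level cascade */
                do_IRQ(MIPS_CPU_IRQ_BASE + 2);
        else
                spurious_interrupt();
}
#endif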
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */
void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
        irq_enter();
        smtc_im_backstop(irq);
        generic_handle_irq(irq);
        irq_exit();
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */