/*
 * linux/arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-1998 Russell King.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>	/* for CONFIG_DEBUG_ERRORS */
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/system.h>

#ifndef SMP
#define irq_enter(cpu, irq)	(++local_irq_count[cpu])
#define irq_exit(cpu, irq)	(--local_irq_count[cpu])
#else
#error SMP not supported
#endif
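
/*
 * On the uniprocessor build these macros simply count the per-CPU
 * interrupt nesting depth; checks such as in_interrupt() are built on
 * these counters.  A real SMP implementation would need more than a
 * bare increment, hence the #error above.
 */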

#ifndef cliIF
#define cliIF()
#endif

/*
 * Maximum IRQ count.  Currently, this is arbitrary.
 * However, it should not be set too low, to prevent
 * false triggering.  Conversely, if it is set too
 * high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the
 * IRQ at a later time?
 */
#define MAX_IRQ_CNT 100000
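
/*
 * For scale (assuming the usual HZ == 100): 100000 interrupts inside a
 * single 10ms jiffie from (nearly) the same PC is an interrupt rate of
 * roughly 10MHz, far beyond anything a working device should generate.
 */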

unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];
spinlock_t irq_controller_lock;

int setup_arm_irq(int, struct irqaction *);
extern int get_fiq_list(char *);
extern void init_FIQ(void);

struct irqdesc {
	unsigned int	 nomask   : 1;		/* IRQ does not mask in IRQ	  */
	unsigned int	 enabled  : 1;		/* IRQ is currently enabled	  */
	unsigned int	 triggered: 1;		/* IRQ has occurred		  */
	unsigned int	 probing  : 1;		/* IRQ in use for a probe	  */
	unsigned int	 probe_ok : 1;		/* IRQ can be used for probe	  */
	unsigned int	 valid    : 1;		/* IRQ claimable		  */
	unsigned int	 noautoenable : 1;	/* don't automatically enable IRQ */
	unsigned int	 unused   :25;
	void (*mask_ack)(unsigned int irq);	/* Mask and acknowledge IRQ	  */
	void (*mask)(unsigned int irq);		/* Mask IRQ			  */
	void (*unmask)(unsigned int irq);	/* Unmask IRQ			  */
	struct irqaction *action;
	/*
	 * IRQ lock detection
	 */
	unsigned int	 lck_cnt;
	unsigned int	 lck_pc;
	unsigned int	 lck_jif;
};

static struct irqdesc irq_desc[NR_IRQS];

/*
 * Get architecture specific interrupt handlers
 * and interrupt initialisation.
 */
#include <asm/arch/irq.h>

/*
 * Dummy mask/unmask handler
 */
static void dummy_mask_unmask_irq(unsigned int irq)
{
}

void disable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	cliIF();
	irq_desc[irq].enabled = 0;
	irq_desc[irq].mask(irq);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

void enable_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	cliIF();
	irq_desc[irq].probing = 0;
	irq_desc[irq].triggered = 0;
	if (!irq_desc[irq].noautoenable) {
		irq_desc[irq].enabled = 1;
		irq_desc[irq].unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

int get_irq_list(char *buf)
{
	int i;
	struct irqaction * action;
	char *p = buf;

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: %10u %s",
			     i, kstat_irqs(i), action->name);
		for (action = action->next; action; action = action->next) {
			p += sprintf(p, ", %s", action->name);
		}
		*p++ = '\n';
	}

#ifdef CONFIG_ARCH_ACORN
	p += get_fiq_list(p);
#endif
	return p - buf;
}
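
/*
 * Illustrative output of the loop above (values invented for the
 * example): each line carries the IRQ number, the kstat count for
 * that IRQ, and the names of every action sharing the line, e.g.
 *
 *	 17:      54211 serial, mydev
 */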

/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffie with the
 * same instruction pointer (or within 2 instructions).
 */
static void check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			disable_irq(irq);
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
}

/*
 * do_IRQ handles all normal device IRQ's
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqdesc * desc;
	struct irqaction * action;
	int status, cpu;

	irq = fixup_irq(irq);

	desc = irq_desc + irq;

	spin_lock(&irq_controller_lock);
	desc->mask_ack(irq);
	spin_unlock(&irq_controller_lock);

	cpu = smp_processor_id();
	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;

	/* Return with this interrupt masked if no action */
	status = 0;
	action = desc->action;

	if (action) {
		if (desc->nomask) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}
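
		/*
		 * Only the first action's flags are consulted here: if
		 * it was installed without SA_INTERRUPT, the whole chain
		 * runs with interrupts re-enabled; SA_INTERRUPT marks a
		 * "fast" handler chain that runs with interrupts off.
		 */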
		if (!(action->flags & SA_INTERRUPT))
			__sti();

		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);

		if (status & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();

		if (!desc->nomask && desc->enabled) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}
	}

	/*
	 * Debug measure - hopefully we can continue if an
	 * IRQ lockup problem occurs...
	 */
	check_irq_lock(desc, irq, regs);

	irq_exit(cpu, irq);

	/*
	 * This should be conditional: we should really get
	 * a return code from the irq handler to tell us
	 * whether the handler wants us to do software bottom
	 * half handling or not..
	 */
	if (1) {
		if (bh_active & bh_mask)
			do_bottom_half();
	}
}

#if defined(CONFIG_ARCH_ACORN)
void do_ecard_IRQ(int irq, struct pt_regs *regs)
{
	struct irqdesc * desc;
	struct irqaction * action;
	int cpu;

	desc = irq_desc + irq;

	cpu = smp_processor_id();
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;

	action = desc->action;

	if (action) {
		do {
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
	} else {
		spin_lock(&irq_controller_lock);
		desc->mask(irq);
		spin_unlock(&irq_controller_lock);
	}
}
#endif

int setup_arm_irq(int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it
		 * first, outside of the atomic block.
		 * Yes, this might clear the entropy pool if an attempt
		 * is made to load the wrong driver, without actually
		 * installing a new handler, but is this really a
		 * problem?  Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &irq_desc[irq].action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		irq_desc[irq].nomask = (new->flags & SA_IRQNOMASK) ? 1 : 0;
		irq_desc[irq].probing = 0;
		if (!irq_desc[irq].noautoenable) {
			irq_desc[irq].enabled = 1;
			irq_desc[irq].unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}

int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irq_flags, const char * devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler)
		return -EINVAL;

	action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_arm_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}
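
/*
 * Usage sketch (illustrative only, not part of the original file;
 * "mydev_irq", MYDEV_IRQ and dev are hypothetical):
 *
 *	static void mydev_irq(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct mydev *dev = dev_id;
 *		... service the device ...
 *	}
 *
 *	if (request_irq(MYDEV_IRQ, mydev_irq, SA_SHIRQ, "mydev", dev))
 *		printk(KERN_ERR "mydev: IRQ%d busy or invalid\n", MYDEV_IRQ);
 *
 * SA_SHIRQ must be set by every handler on the line for sharing to be
 * accepted (see setup_arm_irq above); pass the same dev_id to
 * free_irq() later to remove exactly this handler.
 */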

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
#ifdef CONFIG_DEBUG_ERRORS
		__backtrace();
#endif
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		goto out;
	}
	printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
out:
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].valid ||
		    !irq_desc[i].probe_ok ||
		    irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		irq_desc[i].unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one of
	 * those we were probing that has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	return irq_found;
}
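
/*
 * Typical autoprobe sequence (an illustrative sketch; "mydev_force_irq"
 * is a hypothetical routine that makes the device assert its interrupt):
 *
 *	unsigned long probed = probe_irq_on();
 *	mydev_force_irq();
 *	... wait long enough for the interrupt to arrive ...
 *	irq = probe_irq_off(probed);
 *
 * probe_irq_off() returns the single triggered IRQ, or NO_IRQ if none
 * or more than one of the probed interrupts fired.  Remember that the
 * cookie returned by probe_irq_on() here is only a count; the actual
 * probing state is kept in irq_desc[].
 */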

void __init init_IRQ(void)
{
	extern void init_dma(void);
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		irq_desc[irq].probe_ok = 0;
		irq_desc[irq].valid    = 0;
		irq_desc[irq].noautoenable = 0;
		irq_desc[irq].mask_ack = dummy_mask_unmask_irq;
		irq_desc[irq].mask     = dummy_mask_unmask_irq;
		irq_desc[irq].unmask   = dummy_mask_unmask_irq;
	}

	irq_init_irq();
#ifdef CONFIG_ARCH_ACORN
	init_FIQ();
#endif
	init_dma();
}