/*
 *	linux/arch/x86_64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
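
/*
 * For reference, the controller template is struct hw_interrupt_type,
 * declared in <linux/irq.h>.  The sketch below follows the 2.6-era
 * definition and is only a reader's aid, not a redeclaration:
 *
 *	struct hw_interrupt_type {
 *		const char * typename;
 *		unsigned int (*startup)(unsigned int irq);
 *		void (*shutdown)(unsigned int irq);
 *		void (*enable)(unsigned int irq);
 *		void (*disable)(unsigned int irq);
 *		void (*ack)(unsigned int irq);
 *		void (*end)(unsigned int irq);
 *		void (*set_affinity)(unsigned int irq, cpumask_t dest);
 *	};
 */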

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * each architecture has to answer this themselves, it doesn't deserve
	 * a generic callback i think.
	 */
	printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	.typename = "none",
	.startup = startup_none,
	.shutdown = shutdown_none,
	.enable = enable_none,
	.disable = disable_none,
	.ack = ack_none,
	.end = end_none
};

atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ",
					kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);

		seq_printf(p, " %s", action->name);
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
		seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif
	}
	return 0;
}

/*
 * Wait for a pending IRQ handler of this irq to finish running on
 * another CPU.
 */
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs *regs, struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int ret, retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	local_irq_disable();

	return retval;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function must not be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning.  If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	synchronize_irq(irq);
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	unsigned irq = regs->orig_rax & 0xff;	/* high bits used in ret_from_ code */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	if (irq > 256) BUG();

	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (unlikely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (irq > 256) BUG();
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	irq_exit();
	return 1;
}
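
/*
 * Sketch of the locking protocol above (a reader's aid, not from the
 * original source): while CPU A runs the handler with IRQ_INPROGRESS
 * set, a second occurrence of the irq on CPU B only sets IRQ_PENDING
 * under desc->lock and returns.  When CPU A retakes the lock and sees
 * IRQ_PENDING, it clears the flag and loops to run the handler once
 * more, so that second edge is not lost.
 */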

int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irqaction *action;

	if (irq >= NR_IRQS)
		return 0;
	action = irq_desc[irq].action;
	if (action) {
		if (irqflags & action->flags & SA_SHIRQ)
			action = NULL;
	}
	return !action;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *	SA_INTERRUPT		Disable local interrupts while processing
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
			       devname, (&irq)[-1]);
	}

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
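
/*
 * Illustrative usage sketch (not part of the original file); the "foo"
 * identifiers are hypothetical driver-side names:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *					 struct pt_regs *regs)
 *	{
 *		struct foo_device *dev = dev_id;
 *
 *		if (!foo_irq_pending(dev))	// not ours (shared line)
 *			return IRQ_NONE;
 *		foo_ack_device(dev);		// clear the board's interrupt
 *		return IRQ_HANDLED;
 *	}
 *
 *	// in the probe path, after the hardware has been initialised:
 *	if (request_irq(FOO_IRQ, foo_interrupt, SA_SHIRQ, "foo", &foo_dev))
 *		printk("foo: could not get IRQ %d\n", FOO_IRQ);
 */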

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function may be called from interrupt context.
 *
 *	Bugs: Attempting to free an irq in a handler for the same irq hangs
 *	the machine.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
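
/*
 * Matching teardown sketch for the request_irq() example above (again
 * hypothetical driver code):
 *
 *	foo_mask_device_irq(&foo_dev);	// quiesce the card first (shared line)
 *	free_irq(FOO_IRQ, &foo_dev);	// returns after running handlers finish
 */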

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
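
/*
 * Illustrative autoprobe sequence (not from the original source);
 * "foo_trigger_interrupt" is a hypothetical helper that makes the
 * device raise its interrupt:
 *
 *	unsigned long mask = probe_irq_on();	// arm unassigned lines
 *	int irq;
 *
 *	foo_trigger_interrupt();		// make the device fire
 *	udelay(100);
 *	irq = probe_irq_off(mask);		// 0: none, <0: ambiguous
 *	if (irq > 0)
 *		printk("foo: device is on IRQ %d\n", irq);
 */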

/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int irq_affinity_write_proc (struct file *file,
					const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t tmp, new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}

#endif
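
/*
 * Usage note: with these proc handlers wired up below, affinity is
 * driven from userspace, e.g. "echo 3 > /proc/irq/19/smp_affinity"
 * restricts IRQ 19 to CPUs 0 and 1 (mask 0x3); a mask containing no
 * online CPU is rejected with -EINVAL by the check above.
 */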

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}

void init_irq_proc (void)
{
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);

	/* create /proc/irq/prof_cpu_mask */
	create_prof_cpu_mask(root_irq_dir);

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}