/*
 *	linux/arch/alpha/kernel/irq.c
 *
 *	Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/machvec.h>
#define vulp	volatile unsigned long *
#define vuip	volatile unsigned int *

/* Only uniprocessor needs this IRQ/BH locking depth, on SMP it lives
   in the per-cpu structure for cache reasons. */
int __local_irq_count;

#if NR_IRQS > 64
#  error Unable to handle more than 64 irq levels.
#endif

#ifdef CONFIG_ALPHA_GENERIC
#define ACTUAL_NR_IRQS	alpha_mv.nr_irqs
#else
#define ACTUAL_NR_IRQS	NR_IRQS
#endif

/* Reserved interrupts.  These must NEVER be requested by any driver!
   IRQ 2 is used by the hardware cascade. */
#define IS_RESERVED_IRQ(irq)	((irq) == 2)

/*
 * Shadow-copy of masked interrupts.
 */
unsigned long alpha_irq_mask = ~0UL;
/*
 * The ack_irq routine used by 80% of the systems.
 */

void
common_ack_irq(unsigned long irq)
{
	if (irq < 16) {
		/* Ack the interrupt making it the lowest priority. */
		/* First the slave .. */
		if (irq > 7) {
			outb(0xE0 | (irq - 8), 0xa0);
			irq = 2;
		}
		/* .. then the master. */
		outb(0xE0 | irq, 0x20);
	}
}
static void
dummy_perf(unsigned long vector, struct pt_regs *regs)
{
	printk(KERN_CRIT "Performance counter interrupt!\n");
}

void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
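/*
 * Illustrative sketch (not part of the original file): a performance-
 * monitoring driver can take over the counter interrupt simply by
 * saving the current perf_irq pointer and installing its own handler.
 * The names old_perf_irq/my_perf_handler are hypothetical.
 */
#if 0
static void (*old_perf_irq)(unsigned long, struct pt_regs *);

static void my_perf_handler(unsigned long vector, struct pt_regs *regs)
{
	/* ... record a sample, e.g. from regs->pc ... */
}

static void install_perf_handler(void)
{
	old_perf_irq = perf_irq;
	perf_irq = my_perf_handler;
}
#endif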
/*
 * Dispatch device interrupts.
 */

/* Handle ISA interrupt via the PICs. */

#if defined(CONFIG_ALPHA_GENERIC)
# define IACK_SC	alpha_mv.iack_sc
#elif defined(CONFIG_ALPHA_APECS)
# define IACK_SC	APECS_IACK_SC
#elif defined(CONFIG_ALPHA_LCA)
# define IACK_SC	LCA_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC	CIA_IACK_SC
#elif defined(CONFIG_ALPHA_PYXIS)
# define IACK_SC	PYXIS_IACK_SC
#elif defined(CONFIG_ALPHA_TSUNAMI)
# define IACK_SC	TSUNAMI_IACK_SC
#elif defined(CONFIG_ALPHA_POLARIS)
# define IACK_SC	POLARIS_IACK_SC
#else
/* This is bogus but necessary to get it to compile on all platforms. */
# define IACK_SC	1L
#endif
static inline void
isa_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
#if 1
	/*
	 * Generate a PCI interrupt acknowledge cycle.  The PIC will
	 * respond with the interrupt vector of the highest priority
	 * interrupt that is pending.  The PALcode sets up the
	 * interrupt vectors such that irq level L generates vector L.
	 */
	int j = *(vuip) IACK_SC;
	j &= 0xff;
	if (j == 7) {
		if (!(inb(0x20) & 0x80)) {
			/* It's only a passive release... */
			return;
		}
	}
	handle_irq(j, j, regs);
#else
	unsigned long pic;

	/*
	 * It seems to me that the probability of two or more *device*
	 * interrupts occurring at almost exactly the same time is
	 * pretty low.  So why pay the price of checking for
	 * additional interrupts here if the common case can be
	 * handled so much easier?
	 */
	/*
	 * The first read gives you *all* interrupting lines.
	 * Therefore, read the mask register and AND out those lines
	 * not enabled.  Note that some documentation claims ports 0x21
	 * and 0xA1 are write only.  This is not true.
	 */
	pic = inb(0x20) | (inb(0xA0) << 8);	/* read isr */
	pic &= ~alpha_irq_mask;			/* apply mask */
	pic &= 0xFFFB;				/* mask out cascade & hibits */

	while (pic) {
		int j = ffz(~pic);
		pic &= pic - 1;
		handle_irq(j, j, regs);
	}
#endif
}
/* Handle interrupts from the SRM, assuming no additional weirdness.  */

static inline void
srm_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	int irq, ack;

	ack = irq = (vector - 0x800) >> 4;
	handle_irq(irq, ack, regs);
}
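/*
 * Worked example of the mapping above: an SRM console vector of 0x900
 * yields (0x900 - 0x800) >> 4 == 16, i.e. irq 16, the first interrupt
 * past the 16 ISA lines.
 */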
/*
 * Initial irq handlers.
 */
static struct irqaction timer_irq = { NULL, 0, 0, NULL, NULL, NULL };
static struct irqaction *irq_action[NR_IRQS];
static inline void
mask_irq(unsigned long irq)
{
	alpha_mv.update_irq_hw(irq, alpha_irq_mask |= 1UL << irq, 0);
}

static inline void
unmask_irq(unsigned long irq)
{
	alpha_mv.update_irq_hw(irq, alpha_irq_mask &= ~(1UL << irq), 1);
}
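/*
 * Illustrative sketch (not from this file): the machine vector's
 * update_irq_hw() hook pushes the shadow mask out to the interrupt
 * controller.  For plain i8259-style hardware the low 16 bits of the
 * mask map onto the two PIC mask registers, roughly as below.  The
 * function name is hypothetical; the real implementations live in the
 * per-platform sys_*.c files.
 */
#if 0
static void
example_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
{
	if (irq >= 8)
		outb(mask >> 8, 0xA1);	/* slave PIC mask register */
	else
		outb(mask, 0x21);	/* master PIC mask register */
}
#endif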
static inline void
disable_irq_nosync(unsigned int irq_nr)
{
	unsigned long flags;

	save_and_cli(flags);
	mask_irq(irq_nr);
	restore_flags(flags);
}

void
disable_irq(unsigned int irq_nr)
{
	/* This works non-SMP, and SMP until we write code to distribute
	   interrupts to more than cpu 0.  */
	disable_irq_nosync(irq_nr);
}

void
enable_irq(unsigned int irq_nr)
{
	unsigned long flags;

	save_and_cli(flags);
	unmask_irq(irq_nr);
	restore_flags(flags);
}
int
check_irq(unsigned int irq)
{
	struct irqaction **p;

	p = irq_action + irq;
	if (*p == NULL)
		return 0;
	return -EBUSY;
}
int
request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
	    unsigned long irqflags, const char * devname, void *dev_id)
{
	int shared = 0;
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS)
		return -EINVAL;
	if (IS_RESERVED_IRQ(irq))
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	p = irq_action + irq;
	action = *p;
	if (action) {
		/* Can't share interrupts unless both agree to. */
		if (!(action->flags & irqflags & SA_SHIRQ))
			return -EBUSY;

		/* Can't share interrupts unless both are the same type. */
		if ((action->flags ^ irqflags) & SA_INTERRUPT)
			return -EBUSY;

		/* Add the new interrupt at the end of the irq queue. */
		do {
			p = &action->next;
			action = *p;
		} while (action);
		shared = 1;
	}

	action = &timer_irq;
	if (irq != TIMER_IRQ) {
		action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	}
	if (!action)
		return -ENOMEM;

	if (irqflags & SA_SAMPLE_RANDOM)
		rand_initialize_irq(irq);

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	save_and_cli(flags);
	*p = action;

	if (!shared)
		unmask_irq(irq);

	restore_flags(flags);
	return 0;
}
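/*
 * Illustrative sketch (not part of this file): typical driver usage of
 * request_irq()/free_irq().  The irq number, handler and device name
 * are hypothetical.
 */
#if 0
static int example_token;

static void example_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... service the device ... */
}

static int example_attach(void)
{
	/* Shared and fed to the entropy pool; the name shows up
	   in /proc/interrupts. */
	if (request_irq(10, example_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
			"example", &example_token))
		return -EBUSY;
	return 0;
}

static void example_detach(void)
{
	free_irq(10, &example_token);
}
#endif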
void
free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS) {
		printk("Trying to free IRQ%d\n", irq);
		return;
	}
	if (IS_RESERVED_IRQ(irq)) {
		printk("Trying to free reserved IRQ %d\n", irq);
		return;
	}
	for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it. */
		save_and_cli(flags);
		*p = action->next;
		if (!irq[irq_action])
			mask_irq(irq);
		restore_flags(flags);
		kfree(action);
		return;
	}
	printk("Trying to free free IRQ%d\n", irq);
}
int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

#ifdef __SMP__
	p += sprintf(p, "           ");
	for (i = 0; i < smp_num_cpus; i++)
		p += sprintf(p, "CPU%d       ", i);
	*p++ = '\n';
#endif

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_action[i];
		if (!action)
			continue;
		p += sprintf(p, "%3d: ", i);
#ifndef __SMP__
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				     kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %c%s",
			     (action->flags & SA_INTERRUPT) ? '+' : ' ',
			     action->name);

		for (action = action->next; action; action = action->next) {
			p += sprintf(p, ", %c%s",
				     (action->flags & SA_INTERRUPT) ? '+' : ' ',
				     action->name);
		}
		*p++ = '\n';
	}
	return p - buf;
}
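/*
 * For illustration only: on a uniprocessor box the code above produces
 * /proc/interrupts lines roughly like the (made-up) sample below, with
 * a '+' flagging SA_INTERRUPT (fast) handlers and shared handlers
 * listed after a comma.
 *
 *   0:    1532989   timer
 *   1:      12743    keyboard
 *   5:       8211   +serial, eth0
 */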
#ifdef __SMP__

/* Who has global_irq_lock. */
int global_irq_holder = NO_PROC_ID;

/* This protects IRQ's. */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

/* Global IRQ locking depth. */
atomic_t global_irq_count = ATOMIC_INIT(0);

/* This protects BH software state (masks, things like that). */
atomic_t global_bh_lock = ATOMIC_INIT(0);
atomic_t global_bh_count = ATOMIC_INIT(0);

static void *previous_irqholder = NULL;

#define MAXCOUNT 100000000

static void show(char * str, void *where);

#define SYNC_OTHER_CPUS(x)	udelay((x)+1)
static inline void
wait_on_irq(int cpu, void *where)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone.  Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count(cpu)
			    || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop.  Release the lock to avoid deadlocks. */
		spin_unlock(&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq", where);
				count = MAXCOUNT;
			}
			__sti();
			SYNC_OTHER_CPUS(cpu);
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (spin_is_locked(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu)
			    && atomic_read(&global_bh_count))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}
static inline void
get_irqlock(int cpu, void *where)
{
	if (!spin_trylock(&global_irq_lock)) {
		/* Do we already hold the lock? */
		if (cpu == global_irq_holder) {
			printk("get_irqlock: already held at %08lx\n",
			       (unsigned long) previous_irqholder);
			return;
		}
		/* Uhhuh.. Somebody else got it.  Wait. */
		spin_lock(&global_irq_lock);
	}

	/*
	 * Ok, we got the lock bit.
	 * But that's actually just the easy part.  Now
	 * we need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu, where);

	global_irq_lock.task = current;
	global_irq_lock.previous = where;
	global_irq_holder = cpu;
	previous_irqholder = where;
}
void
__global_cli(void)
{
	int cpu = smp_processor_id();
	void *where = __builtin_return_address(0);

	/*
	 * Maximize ipl.  If ipl was previously 0 and if this thread
	 * is not in an irq, then take global_irq_lock.
	 */
	if (swpipl(7) == 0 && !local_irq_count(cpu))
		get_irqlock(cpu, where);
}

void
__global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long
__global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
	local_enabled = (!(flags & 7));
	/* default to local */
	retval = 2 + local_enabled;

	/* Check for global flags if we're not in an interrupt. */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	return retval;
}
void
__global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0: __global_cli(); break;
	case 1: __global_sti(); break;
	case 2: __cli(); break;
	case 3: __sti(); break;
	default:
		printk("global_restore_flags: %08lx (%p)\n",
		       flags, __builtin_return_address(0));
	}
}
#undef  INIT_STUCK
#define INIT_STUCK (1<<26)

#undef  STUCK
#define STUCK							\
	if (!--stuck) {						\
		printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n", \
		       irq, cpu, global_irq_holder);		\
		stuck = INIT_STUCK;				\
	}

#undef VERBOSE_IRQLOCK_DEBUGGING
static inline void
irq_enter(int cpu, int irq)
{
#ifdef VERBOSE_IRQLOCK_DEBUGGING
	extern void smp_show_backtrace_all_cpus(void);
#endif
	int stuck = INIT_STUCK;

	hardirq_enter(cpu, irq);
	while (spin_is_locked(&global_irq_lock)) {
		if (cpu == global_irq_holder) {
			int globl_locked = spin_is_locked(&global_irq_lock);
			int globl_icount = atomic_read(&global_irq_count);
			int local_count = local_irq_count(cpu);

			/* It is very important that we load the state
			   variables before we do the first call to
			   printk() as printk() could end up changing
			   them...  */

			printk("CPU[%d]: where [%p] glocked[%d] gicnt[%d]"
			       " licnt[%d]\n",
			       cpu, previous_irqholder, globl_locked,
			       globl_icount, local_count);
#ifdef VERBOSE_IRQLOCK_DEBUGGING
			printk("Performing backtrace on all CPUs,"
			       " write this down!\n");
			smp_show_backtrace_all_cpus();
#endif
			break;
		}
		STUCK;
		barrier();
	}
}
static inline void
irq_exit(int cpu, int irq)
{
	hardirq_exit(cpu, irq);
	release_irqlock(cpu);
}
static void
show(char * str, void *where)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d: %p\n", str, cpu, where);
	printk("irq:  %d [%d %d]\n",
	       atomic_read(&global_irq_count),
	       cpu_data[0].irq_count,
	       cpu_data[1].irq_count);

	printk("bh:   %d [%d %d]\n",
	       atomic_read(&global_bh_count),
	       cpu_data[0].bh_count,
	       cpu_data[1].bh_count);

	stack = (unsigned long *) &str;
	for (i = 40; i; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union &&
		    x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}
static inline void
wait_on_bh(void)
{
	int count = MAXCOUNT;

	do {
		if (!--count) {
			show("wait_on_bh", 0);
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
		barrier();
	} while (atomic_read(&global_bh_count) != 0);
}

/*
 * This is called when we want to synchronize with
 * bottom half handlers.  We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void
synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
/*
 * From its use, I infer that synchronize_irq() stalls a thread until
 * the effects of a command to an external device are known to have
 * taken hold.  Typically, the command is to stop sending interrupts.
 * The strategy here is wait until there is at most one processor
 * (this one) in an irq.  The memory barrier serializes the write to
 * the device and the subsequent accesses of global_irq_count.
 */

#define DEBUG_SYNCHRONIZE_IRQ 0

void
synchronize_irq(void)
{
#if 0
	int cpu = smp_processor_id();
	int local_count;
	int global_count;
	int countdown = 1 << 24;
	void *where = __builtin_return_address(0);

	mb();
	do {
		local_count = local_irq_count(cpu);
		global_count = atomic_read(&global_irq_count);
		if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
			printk("%d:%d/%d\n", cpu, local_count, global_count);
			show("synchronize_irq", where);
			break;
		}
	} while (global_count != local_count);
#else
	if (atomic_read(&global_irq_count)) {
		cli();
		sti();
	}
#endif
}
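/*
 * Illustrative sketch (not part of this file): the usual reason to call
 * synchronize_irq() is to make sure no handler is still running on
 * another CPU before tearing driver state down.  Names below are
 * hypothetical.
 */
#if 0
static void example_shutdown(int irq, void *dev_id)
{
	disable_irq(irq);	/* keep new interrupts out */
	synchronize_irq();	/* wait for handlers running elsewhere */
	free_irq(irq, dev_id);
	/* ... now it is safe to free the driver state ... */
}
#endif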
#else /* !__SMP__ */

#define irq_enter(cpu, irq)	(++local_irq_count(cpu))
#define irq_exit(cpu, irq)	(--local_irq_count(cpu))

#endif /* __SMP__ */
static void
unexpected_irq(int irq, struct pt_regs *regs)
{
#if 1
	printk("device_interrupt: unexpected interrupt %d\n", irq);
#else
	struct irqaction *action;
	int i;

	printk("IO device interrupt, irq = %d\n", irq);
	printk("PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
	printk("Expecting: ");
	for (i = 0; i < ACTUAL_NR_IRQS; i++)
		if ((action = irq_action[i]))
			while (action) {
				printk("[%s:%d] ", action->name, i);
				action = action->next;
			}
	printk("\n");
#endif

#if defined(CONFIG_ALPHA_JENSEN)
	/* ??? Is all this just debugging, or are the inb's and outb's
	   necessary to make things work?  */
	printk("64=%02x, 60=%02x, 3fa=%02x 2fa=%02x\n",
	       inb(0x64), inb(0x60), inb(0x3fa), inb(0x2fa));
#endif
}
void
handle_irq(int irq, int ack, struct pt_regs *regs)
{
	struct irqaction *action;
	int cpu = smp_processor_id();

	if ((unsigned) irq >= ACTUAL_NR_IRQS) {
		printk("device_interrupt: illegal interrupt %d\n", irq);
		return;
	}

#if 0
	/* A useful bit of code to find out if an interrupt is going wild. */
	{
		static unsigned int last_msg, last_cc;
		static int last_irq, count;
		unsigned int cc;

		__asm __volatile("rpcc %0" : "=r"(cc));
		++count;
		if (cc - last_msg > 150000000 || irq != last_irq) {
			printk("handle_irq: irq %d count %d cc %u @ %p\n",
			       irq, count, cc - last_cc, regs->pc);
			count = 0;
			last_msg = cc;
			last_irq = irq;
		}
		last_cc = cc;
	}
#endif

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq] += 1;
	action = irq_action[irq];

	/*
	 * For normal interrupts, we mask it out, and then ACK it.
	 * This way another (more timing-critical) interrupt can
	 * come through while we're doing this one.
	 *
	 * Note! An irq without a handler gets masked and acked, but
	 * never unmasked.  The autoirq stuff depends on this (it looks
	 * at the masks before and after doing the probing).
	 */
	if (ack >= 0) {
		mask_irq(ack);
		alpha_mv.ack_irq(ack);
	}
	if (action) {
		if (action->flags & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		do {
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		if (ack >= 0)
			unmask_irq(ack);
	} else {
		unexpected_irq(irq, regs);
	}
	irq_exit(cpu, irq);
}
/*
 * Start listening for interrupts..
 */

unsigned long
probe_irq_on(void)
{
	struct irqaction * action;
	unsigned long irqs = 0;
	unsigned long delay;
	unsigned int i;

	for (i = ACTUAL_NR_IRQS - 1; i > 0; i--) {
		if (!(PROBE_MASK & (1UL << i)))
			continue;
		action = irq_action[i];
		if (!action) {
			enable_irq(i);
			irqs |= (1UL << i);
		}
	}

	/*
	 * Wait about 100ms for spurious interrupts to mask themselves
	 * out again.
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		barrier();

	/* Now filter out any obviously spurious interrupts. */
	return irqs & ~alpha_irq_mask;
}
/*
 * Get the result of the IRQ probe.. A negative result means that
 * we have several candidates (but we return the lowest-numbered
 * one).
 */

int
probe_irq_off(unsigned long irqs)
{
	int i;

	irqs &= alpha_irq_mask;
	if (!irqs)
		return 0;
	i = ffz(~irqs);
	if (irqs != (1UL << i))
		i = -i;
	return i;
}
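/*
 * Illustrative sketch (not part of this file): the classic autoprobe
 * sequence a legacy ISA driver would use with the two routines above.
 * The trigger step is device specific and only hinted at here.
 */
#if 0
static int example_find_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* unmask unused irqs, settle ~100ms */
	/* ... poke the device so that it raises its interrupt ... */
	irq = probe_irq_off(mask);	/* >0: found, 0: none, <0: several */
	return irq;
}
#endif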
/*
 * The main interrupt entry point.
 */

asmlinkage void
do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
	  unsigned long a3, unsigned long a4, unsigned long a5,
	  struct pt_regs regs)
{
	switch (type) {
	case 0:
		printk("Interprocessor interrupt? You must be kidding\n");
		break;
	case 1:
		handle_irq(RTC_IRQ, -1, &regs);
		return;
	case 2:
		alpha_mv.machine_check(vector, la_ptr, &regs);
		return;
	case 3:
		alpha_mv.device_interrupt(vector, &regs);
		return;
	case 4:
		perf_irq(vector, &regs);
		return;
	default:
		printk("Hardware intr %ld %lx? Huh?\n", type, vector);
	}
	printk("PC = %016lx PS=%04lx\n", regs.pc, regs.ps);
}
#define MCHK_K_TPERR		0x0080
#define MCHK_K_TCPERR		0x0082
#define MCHK_K_HERR		0x0084
#define MCHK_K_ECC_C		0x0086
#define MCHK_K_ECC_NC		0x0088
#define MCHK_K_OS_BUGCHECK	0x008A
#define MCHK_K_PAL_BUGCHECK	0x0090

struct mcheck_info __mcheck_info;
void
process_mcheck_info(unsigned long vector, unsigned long la_ptr,
		    struct pt_regs *regs, const char *machine,
		    int expected)
{
	struct el_common *mchk_header;
	const char *reason;

	/*
	 * See if the machine check is due to a badaddr() and if so,
	 * ignore it.
	 */
	printk(KERN_CRIT "%s machine check %s\n", machine,
	       expected ? "expected." : "NOT expected!!!");

	if (expected) {
		int cpu = smp_processor_id();
		mcheck_expected(cpu) = 0;
		mcheck_taken(cpu) = 1;
		return;
	}

	mchk_header = (struct el_common *)la_ptr;

	printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%lx\n",
	       machine, vector, regs->pc, mchk_header->code);

	switch ((unsigned int) mchk_header->code) {
	/* Machine check reasons.  Defined according to PALcode sources. */
	case 0x80: reason = "tag parity error"; break;
	case 0x82: reason = "tag control parity error"; break;
	case 0x84: reason = "generic hard error"; break;
	case 0x86: reason = "correctable ECC error"; break;
	case 0x88: reason = "uncorrectable ECC error"; break;
	case 0x8A: reason = "OS-specific PAL bugcheck"; break;
	case 0x90: reason = "callsys in kernel mode"; break;
	case 0x96: reason = "i-cache read retryable error"; break;
	case 0x98: reason = "processor detected hard error"; break;

	/* System-specific (these are for Alcor, at least): */
	case 0x203: reason = "system detected uncorrectable ECC error"; break;
	case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
	case 0x205: reason = "parity error detected by CIA"; break;
	case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break;
	case 0x207: reason = "non-existent memory error"; break;
	case 0x208: reason = "MCHK_K_DCSR"; break;
	case 0x209: reason = "PCI SERR detected"; break;
	case 0x20b: reason = "PCI data parity error detected"; break;
	case 0x20d: reason = "PCI address parity error detected"; break;
	case 0x20f: reason = "PCI master abort error"; break;
	case 0x211: reason = "PCI target abort error"; break;
	case 0x213: reason = "scatter/gather PTE invalid error"; break;
	case 0x215: reason = "flash ROM write error"; break;
	case 0x217: reason = "IOA timeout detected"; break;
	case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
	case 0x21b: reason = "EISA fail-safe timer timeout"; break;
	case 0x21d: reason = "EISA bus time-out"; break;
	case 0x21f: reason = "EISA software generated NMI"; break;
	case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break;
	default: reason = "unknown"; break;
	}

	printk(KERN_CRIT "machine check type: %s%s\n",
	       reason, mchk_header->retry ? " (retryable)" : "");

	dik_show_regs(regs, NULL);

	{
		/* Dump the logout area to give all info. */
		long i;
		unsigned long *ptr = (unsigned long *)la_ptr;

		for (i = 0; i < mchk_header->size / sizeof(long); i += 2) {
			printk(KERN_CRIT "   +%8lx %016lx %016lx\n",
			       i*sizeof(long), ptr[i], ptr[i+1]);
		}
	}
}