/* $Id: irq.c,v 1.78 1999/08/31 06:54:54 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
/* Internal flag, should not be visible elsewhere at all. */
#define SA_IMAP_MASKED	0x100
#define SA_DMA_SYNC	0x200
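/* These two bits ride along in action->flags next to the SA_* values a
 * driver passes in; request_irq() below sets or strips SA_IMAP_MASKED
 * itself depending on the bucket, so callers never see either bit.
 */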
#ifdef __SMP__
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (64)));
#ifndef __SMP__
unsigned int __up_workvec[16] __attribute__ ((aligned (64)));
#define irq_work(__cpu, __pil)	&(__up_workvec[(__pil)])
#else
#define irq_work(__cpu, __pil)	&(cpu_data[(__cpu)].irq_worklists[(__pil)])
#endif
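/* A minimal sketch of the data flow, assuming the vector handler in
 * entry.S chains buckets the same way atomic_bucket_insert() below
 * does:
 *
 *	unsigned int *head = irq_work(cpu, pil);
 *	bucket->irq_chain = *head;
 *	*head = __irq(bucket);
 *
 * handler_irq() later xchg()'s the head back to zero and walks that
 * snapshot of the chain.
 */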
/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;
/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
int get_irq_list(char *buf)
{
	int i, len = 0;
	struct irqaction *action;
#ifdef __SMP__
	int j;
#endif

	for(i = 0; i < (NR_IRQS + 1); i++) {
		if(!(action = *(i + irq_action)))
			continue;
		len += sprintf(buf + len, "%3d: ", i);
#ifndef __SMP__
		len += sprintf(buf + len, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf + len, "%10u ",
				       kstat.irqs[cpu_logical_map(j)][i]);
#endif
		len += sprintf(buf + len, "%c %s",
			       (action->flags & SA_INTERRUPT) ? '+' : ' ',
			       action->name);
		for(action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ",%s %s",
				       (action->flags & SA_INTERRUPT) ? " +" : "",
				       action->name);
		}
		len += sprintf(buf + len, "\n");
	}
	return len;
}
/* SBUS SYSIO INO number to Sparc PIL level. */
unsigned char sysio_ino_to_pil[] = {
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
	3,  /* Onboard SCSI */
	5,  /* Onboard Ethernet */
/*XXX*/	8,  /* Onboard BPP */
	0,  /* Bogon */
	13, /* Audio */
/*XXX*/	15, /* PowerFail */
	0,  /* Bogon */
	0,  /* Bogon */
	12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
	11, /* Floppy */
	0,  /* Spare Hardware (bogon for now) */
	0,  /* Keyboard (bogon for now) */
	0,  /* Mouse (bogon for now) */
	0,  /* Serial (bogon for now) */
	0, 0,	/* Bogon, Bogon */
	10, /* Timer 0 */
	11, /* Timer 1 */
	0, 0,	/* Bogon, Bogon */
	15, /* Uncorrectable SBUS Error */
	15, /* Correctable SBUS Error */
	15, /* PCI Error, can be masked */
/*XXX*/	0,  /* Power Management (bogon for now) */
};
/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define offset(x) ((unsigned long)(&(((struct sysio_regs *)0)->x)))
#define bogon ((unsigned long) -1)
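/* The offset() macro is the classic offsetof() idiom: pretend a
 * struct sysio_regs lives at address zero and read off the byte
 * offset of the member.  offsetof() from <linux/stddef.h> computes
 * the same value.
 */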
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
	offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
	offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
	offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
	offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
	offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
	offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
	offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
	/* Onboard devices (not relevant/used on SunFire). */
	offset(imap_scsi), offset(imap_eth), offset(imap_bpp), bogon,
	offset(imap_audio), offset(imap_pfail), bogon, bogon,
	offset(imap_kms), offset(imap_flpy), offset(imap_shw),
	offset(imap_kbd), offset(imap_ms), offset(imap_ser), bogon, bogon,
	offset(imap_tim0), offset(imap_tim1), bogon, bogon,
	offset(imap_ue), offset(imap_ce), offset(imap_sberr),
	offset(imap_pmgmt),
};

#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
static volatile unsigned int *sysio_imap_to_iclr(volatile unsigned int *imap)
{
	unsigned long diff;

	diff = offset(iclr_unused0) - offset(imap_slot0);
	return (volatile unsigned int *) (((unsigned long)imap) + diff);
}
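/* A single diff suffices here because, as the computation above
 * assumes, SYSIO lays the ICLR registers out as one block at a
 * constant distance past the IMAP block in the same member order, so
 * any IMAP pointer translates the same way.
 */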
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	extern int this_is_starfire;
	struct ino_bucket *bucket = __bucket(irq);
	volatile unsigned int *imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == NULL)
		return;

	if(this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
	} else {
		extern unsigned int starfire_translate(volatile unsigned int *imap,
						       unsigned int upaid);

		tid = (starfire_translate(imap, current->processor) << 26);
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * SYSIO_IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	*imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);
}
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	volatile unsigned int *imap;

	imap = bucket->imap;
	if (imap != NULL) {
		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		*imap &= ~(SYSIO_IMAP_VALID);
	}
}
/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = { 0, };
unsigned int build_irq(int pil, int inofixup, volatile unsigned int *iclr, volatile unsigned int *imap)
{
	struct ino_bucket *bucket;
	int ino;

	if(pil == 0) {
		if(iclr != NULL || imap != NULL) {
			prom_printf("Invalid dummy bucket for PIL0 (%p:%p)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == NULL || imap == NULL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO)) + inofixup;
	if(ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}
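/* Usage sketch: bus-specific code reads the device's live IMAP
 * register to recover the INO, and build_irq() hands back an opaque
 * cookie (really the bucket address) which request_irq()/free_irq()
 * expect.  sbus_build_irq() below is the in-file caller.
 */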
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
	struct linux_sbus *sbus = (struct linux_sbus *)buscookie;
	struct sysio_regs *sregs = sbus->iommu->sysio_regs;
	unsigned long offset;
	int pil;
	volatile unsigned int *imap, *iclr;
	int sbus_level = 0;

	pil = sysio_ino_to_pil[ino];
	if(!pil) {
		printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
		panic("Bad SYSIO IRQ translations...");
	}
	offset = sysio_irq_offsets[ino];
	if(offset == ((unsigned long)-1)) {
		printk("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
		       ino, pil);
		panic("BAD SYSIO IRQ offset...");
	}
	offset += ((unsigned long)sregs);
	imap = ((volatile unsigned int *)offset);

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if(ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		unsigned long iclraddr;
		int sbus_slot = (ino & 0x18)>>3;

		sbus_level = ino & 0x7;

		switch(sbus_slot) {
		case 0:
			iclr = &sregs->iclr_slot0;
			break;
		case 1:
			iclr = &sregs->iclr_slot1;
			break;
		case 2:
			iclr = &sregs->iclr_slot2;
			break;
		default:
		case 3:
			iclr = &sregs->iclr_slot3;
			break;
		}

		iclraddr = (unsigned long) iclr;
		iclraddr += ((sbus_level - 1) * 8);
		iclr = (volatile unsigned int *) iclraddr;
	}
	return build_irq(pil, sbus_level, iclr, imap);
}
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
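/* The wrpr pair above toggles PSTATE_IE off (wrpr xor's its two
 * operands), assuming the caller runs with interrupts enabled, so the
 * insertion cannot interleave with the vector handler in entry.S
 * touching the same per-cpu worklist; the final wrpr restores the
 * saved %pstate.
 */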
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}
	if(!handler)
		return -EINVAL;

	if (bucket == &pil0_dummy_bucket)
		irqflags &= ~SA_IMAP_MASKED;
	else {
		irqflags |= SA_IMAP_MASKED;
		if (bucket->flags & IBF_PCI) {
			/*
			 * PCI IRQs should never use SA_INTERRUPT.
			 */
			irqflags &= ~(SA_INTERRUPT);

			/*
			 * Check whether we _should_ use DMA Write Sync
			 * (for devices behind bridges behind APB).
			 *
			 * XXX: Not implemented, yet.
			 */
			if (bucket->flags & IBF_DMA_SYNC)
				irqflags |= SA_DMA_SYNC;
		}
	}

	save_and_cli(flags);

	action = *(bucket->pil + irq_action);
	if(action) {
		if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			restore_flags(flags);
			return -EBUSY;
		}
		if((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d "
			       "denied\n", bucket->pil);
			restore_flags(flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);

	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}

	if ((irqflags & SA_IMAP_MASKED) == 0) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_KERNEL);
				if(vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for(ent = 0; ent < 4; ent++) {
					if(vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if(pending)
			bucket->pending = 0;
	}

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;

	if(tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if(pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	restore_flags(flags);
#ifdef __SMP__
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	restore_flags(flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	restore_flags(flags);
	return -ENOMEM;
}
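/* Sharing model in short: an IMAP-backed bucket carries either a
 * single irqaction in irq_info or, once IBF_MULTI is set, a
 * kmalloc'ed table of up to four actions.  Only IBF_PCI buckets may
 * be shared this way, matching the bridge case described at
 * catch_disabled_ivec() where one IMAP serves several sources.
 */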
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	action = *(bucket->pil + irq_action);
	if(!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return;
	}
	if(dev_id) {
		for( ; action; action = action->next) {
			if(action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if(!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			return;
		}
	} else if(action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		return;
	}
	if(action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		return;
	}

	save_and_cli(flags);
	if(action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	if(action->flags & SA_IMAP_MASKED) {
		volatile unsigned int *imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;

			for(ent = 0; ent < 4; ent++) {
				if(vector[ent] == action)
					vector[ent] = NULL;
				else if(vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if(other == 1) {
				/* Convert back to non-shared bucket. */
				bucket->irq_info = orphan;
				bucket->flags &= ~(IBF_MULTI);
				kfree(vector);
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for(ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if(bp != bucket &&
			   bp->imap == imap &&
			   (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

	kfree(action);
	restore_flags(flags);
}
/* Only uniprocessor needs this IRQ/BH locking depth, on SMP it
 * lives in the per-cpu structure for cache reasons.
 */
#ifndef __SMP__
unsigned int local_irq_count;
unsigned int local_bh_count;

#define irq_enter(cpu, irq)	(local_irq_count++)
#define irq_exit(cpu, irq)	(local_irq_count--)
#else
atomic_t global_bh_lock = ATOMIC_INIT(0);
spinlock_t global_bh_count = SPIN_LOCK_UNLOCKED;

/* Who has global_irq_lock. */
unsigned char global_irq_holder = NO_PROC_ID;

/* This protects IRQ's. */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

/* Global IRQ locking depth. */
atomic_t global_irq_count = ATOMIC_INIT(0);

#define irq_enter(cpu, irq) \
do {	hardirq_enter(cpu); \
	spin_unlock_wait(&global_irq_lock); \
} while(0)
#define irq_exit(cpu, irq)	hardirq_exit(cpu)
static void show(char * str)
{
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%ld %ld]\n",
	       atomic_read(&global_irq_count),
	       cpu_data[0].irq_count, cpu_data[1].irq_count);
	printk("bh:   %d [%ld %ld]\n",
	       (spin_is_locked(&global_bh_count) ? 1 : 0),
	       cpu_data[0].bh_count, cpu_data[1].bh_count);
}
#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if(!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		membar("#LoadLoad");
	} while(spin_is_locked(&global_bh_count));
}
#define SYNC_OTHER_ULTRAS(x)	udelay(x+1)
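/* The (x+1) stagger gives each cpu a slightly different spin delay in
 * wait_on_irq() below, which should keep two cpus from retrying the
 * global lock in lockstep indefinitely.
 */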
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for(;;) {
		membar("#LoadLoad");
		if (!atomic_read (&global_irq_count)) {
			if (local_bh_count || ! spin_is_locked(&global_bh_count))
				break;
		}
		spin_unlock (&global_irq_lock);
		membar("#StoreLoad | #StoreStore");
		for(;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_ULTRAS(cpu);
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (spin_is_locked (&global_irq_lock))
				continue;
			if (!local_bh_count && spin_is_locked (&global_bh_count))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}
void synchronize_bh(void)
{
	if (spin_is_locked (&global_bh_count) && !in_interrupt())
		wait_on_bh();
}

void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		cli();
		sti();
	}
}
static inline void get_irqlock(int cpu)
{
	if (! spin_trylock(&global_irq_lock)) {
		/* Do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it.  Wait. */
		do {
			while (spin_is_locked (&global_irq_lock))
				membar("#LoadLoad");
		} while(! spin_trylock(&global_irq_lock));
	}
	/* We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/* Ok, finally.. */
	global_irq_holder = cpu;
}
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if(flags == 0) {
		int cpu = smp_processor_id();

		__cli();
		if (! local_irq_count)
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (! local_irq_count)
		release_irqlock(cpu);
	__sti();
}
unsigned long __global_save_flags(void)
{
	unsigned long flags, local_enabled, retval;

	__save_flags(flags);
	local_enabled = ((flags == 0) ? 1 : 0);
	retval = 2 + local_enabled;
	if (! local_irq_count) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long pc;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
		printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
		       flags, pc);
	}
	}
}
#endif /* __SMP__ */
void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}
#define FORWARD_VOLUME		12
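/* Forwarding heuristic for handler_irq() below: an interrupt is only
 * pushed to the buddy cpu when that cpu's idle loop has accumulated
 * at least this much idle "volume", i.e. when it is demonstrably not
 * busy.
 */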
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();
#ifdef __SMP__
	extern int this_is_starfire;
	int should_forward = (this_is_starfire == 0 &&
			      irq < 10 &&
			      current->pid != 0);
	unsigned int buddy = 0;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	if(should_forward != 0) {
		buddy = cpu_number_map[cpu] + 1;
		if (buddy >= NR_CPUS ||
		    (buddy = cpu_logical_map(buddy)) == -1)
			buddy = cpu_logical_map(0);

		/* Voo-doo programming. */
		if(cpu_data[buddy].idle_volume < FORWARD_VOLUME)
			should_forward = 0;
		buddy <<= 26;
	}
#endif

	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	if ((irq == 14) && (get_softint() & (1UL << 0)))
		irq = 0;

	clear_softint(1 << irq);

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

#ifndef __SMP__
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;

		nbp = __bucket(bp->irq_chain);
		if((flags & IBF_ACTIVE) != 0) {
			if((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;

				for(ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if(ap != NULL)
						ap->handler(__irq(bp), ap->dev_id, regs);
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if(bp->pil != 0) {
#ifdef __SMP__
				/* Ok, here is what is going on:
				 * 1) Retargeting IRQs on Starfire is very
				 *    expensive so just forget about it on them.
				 * 2) Moving around very high priority interrupts
				 *    is a losing game.
				 * 3) If the current cpu is idle, interrupts are
				 *    useful work, so keep them here.  But do not
				 *    pass to our neighbour if he is not very idle.
				 */
				if (should_forward != 0) {
					/* Push it to our buddy. */
					should_forward = 0;
					*(bp->imap) = (buddy | SYSIO_IMAP_VALID);
				}
#endif
				*(bp->iclr) = SYSIO_ICLR_IDLE;
			}
		} else
			bp->pending = 1;
	}
	irq_exit(cpu, irq);
}
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = (struct ino_bucket *)action->mask;

	floppy_interrupt(irq, dev_cookie, regs);
	*(bucket->iclr) = SYSIO_ICLR_IDLE;

	irq_exit(cpu, irq);
}
#endif
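/* Note the ordering above: the handler runs first and only then is
 * SYSIO_ICLR_IDLE written, re-arming the source.  Fast handlers wired
 * directly into the trap table by install_fast_irq() below bypass
 * handler_irq() entirely, so they presumably must perform the
 * equivalent ICLR write themselves.
 */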
/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
	(0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
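/* Decoding the magic numbers: 0x10800000 is the SPARC "ba" (branch
 * always) opcode with a zero disp22 field, which the macro fills in
 * with the word-granular displacement; a branch 64 bytes forward gets
 * disp22 = 16.  0x01000000 is "sethi 0, %g0", the canonical nop.
 */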
static void install_fast_irq(unsigned int cpu_irq,
			     void (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}
,
1009 void (*handler
)(int, void *, struct pt_regs
*),
1010 unsigned long irqflags
, const char *name
, void *dev_id
)
1012 struct irqaction
*action
;
1013 struct ino_bucket
*bucket
= __bucket(irq
);
1014 unsigned long flags
;
1016 /* No pil0 dummy buckets allowed here. */
1017 if (bucket
< &ivector_table
[0] ||
1018 bucket
>= &ivector_table
[NUM_IVECS
]) {
1019 unsigned int *caller
;
1021 __asm__
__volatile__("mov %%i7, %0" : "=r" (caller
));
1022 printk(KERN_CRIT
"request_fast_irq: Old style IRQ registry attempt "
1023 "from %p, irq %08x.\n", caller
, irq
);
1027 /* Only IMAP style interrupts can be registered as fast. */
1028 if(bucket
->pil
== 0)
1034 if ((bucket
->pil
== 0) || (bucket
->pil
== 14)) {
1035 printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
1039 action
= *(bucket
->pil
+ irq_action
);
1041 if(action
->flags
& SA_SHIRQ
)
1042 panic("Trying to register fast irq when already shared.\n");
1043 if(irqflags
& SA_SHIRQ
)
1044 panic("Trying to register fast irq as shared.\n");
1045 printk("request_fast_irq: Trying to register yet already owned.\n");
1049 save_and_cli(flags
);
1050 if(irqflags
& SA_STATIC_ALLOC
) {
1051 if(static_irq_count
< MAX_STATIC_ALLOC
)
1052 action
= &static_irqaction
[static_irq_count
++];
1054 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
1055 "using kmalloc\n", bucket
->pil
, name
);
1058 action
= (struct irqaction
*)kmalloc(sizeof(struct irqaction
),
1061 restore_flags(flags
);
1064 install_fast_irq(bucket
->pil
, handler
);
1066 bucket
->irq_info
= action
;
1067 bucket
->flags
|= IBF_ACTIVE
;
1069 action
->mask
= (unsigned long) bucket
;
1070 action
->handler
= handler
;
1071 action
->flags
= irqflags
| SA_IMAP_MASKED
;
1072 action
->dev_id
= NULL
;
1073 action
->name
= name
;
1074 action
->next
= NULL
;
1076 *(bucket
->pil
+ irq_action
) = action
;
1079 restore_flags(flags
);
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

int probe_irq_off(unsigned long mask)
{
	return 0;
}
/* This gets the master TICK_INT timer going. */
void init_timers(void (*cfunc)(int, void *, struct pt_regs *),
		 unsigned long *clock)
{
	unsigned long flags;
	extern unsigned long timer_tick_offset;
	int node, err;
#ifdef __SMP__
	extern void smp_tick_init(void);
#endif

	node = linux_cpus[0].prom_node;
	*clock = prom_getint(node, "clock-frequency");
	timer_tick_offset = *clock / HZ;
#ifdef __SMP__
	smp_tick_init();
#endif

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, NULL, NULL), cfunc, (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if(err) {
		prom_printf("Serious problem, cannot register TICK_INT\n");
		prom_halt();
	}

	save_and_cli(flags);

	/* Set things up so user can access tick register for profiling
	 * purposes.
	 */
	__asm__ __volatile__("
	sethi	%%hi(0x80000000), %%g1
	sllx	%%g1, 32, %%g1
	rd	%%tick, %%g2
	andn	%%g2, %%g1, %%g2
	wrpr	%%g2, 0, %%tick
"	: /* no outputs */
	: /* no inputs */
	: "g1", "g2");

	__asm__ __volatile__("
	rd	%%tick, %%g1
	add	%%g1, %0, %%g1
	wr	%%g1, 0x0, %%tick_cmpr"
	: /* no outputs */
	: "r" (timer_tick_offset)
	: "g1");

	restore_flags(flags);
}
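/* The first asm block above clears bit 63 of %tick, the NPT
 * (non-privileged-trap) bit, which is what lets user code read %tick
 * for profiling.  The second block arms %tick_cmpr one tick offset in
 * the future so TICK_INT starts firing at HZ.
 */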
#ifdef __SMP__
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	extern int this_is_starfire;
	struct ino_bucket *bucket = __bucket(p->mask);
	volatile unsigned int *imap = bucket->imap;
	unsigned int tid;

	/* Never change this, it causes problems on Ex000 systems. */
	if (bucket->pil == 12)
		return goal_cpu;

	if(this_is_starfire == 0) {
		tid = __cpu_logical_map[goal_cpu] << 26;
	} else {
		extern unsigned int starfire_translate(volatile unsigned int *imap,
						       unsigned int upaid);

		tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
	}
	*imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);

	goal_cpu++;
	if(goal_cpu >= NR_CPUS ||
	   __cpu_logical_map[goal_cpu] == -1)
		goal_cpu = 0;
	return goal_cpu;
}
/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu = 0;
	int level;

	save_and_cli(flags);
	for(level = 0; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		while(p) {
			if(p->flags & SA_IMAP_MASKED)
				cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	restore_flags(flags);
}
#endif /* __SMP__ */
struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if(tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if(err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__("
	mov	0x40, %%g2
	ldxa	[%%g0] %0, %%g1
	ldxa	[%%g2] %1, %%g1
	stxa	%%g0, [%%g0] %0
	membar	#Sync
"	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_UDB_INTR_R)
	: "g1", "g2");
}
void enable_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}
void __init init_IRQ(void)
{
	static int called = 0;

	if (called == 0) {
		called = 1;
		map_prom_timers();
		kill_prom_timer();
	}
	memset(&ivector_table[0], 0, sizeof(ivector_table));
#ifndef __SMP__
	memset(&__up_workvec[0], 0, sizeof(__up_workvec));
#endif

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* no outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}