/* $Id: irq.c,v 1.78 1999/08/31 06:54:54 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
/* Internal flag, should not be visible elsewhere at all. */
#define SA_IMAP_MASKED		0x100
#define SA_DMA_SYNC		0x200

#ifdef __SMP__
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (64)));
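/* Editorial note: drivers never see ino_bucket pointers directly.
 * build_irq() below hands back __irq(bucket), an opaque 32-bit cookie,
 * and the cookie's bucket is recovered with __bucket(irq); both
 * helpers presumably live in asm/irq.h.
 */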
#ifndef __SMP__
unsigned int __up_workvec[16] __attribute__ ((aligned (64)));
#define irq_work(__cpu, __pil)	&(__up_workvec[(__pil)])
#else
#define irq_work(__cpu, __pil)	&(cpu_data[(__cpu)].irq_worklists[(__pil)])
#endif
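/* Either way, irq_work(cpu, pil) yields the head pointer of that cpu's
 * list of pending ino_buckets for the given PIL; on UP a single static
 * work vector is all that is needed.
 */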
/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;
/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
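/* get_irq_list() generates the body of /proc/interrupts; a '+' after
 * the count marks a fast (SA_INTERRUPT) handler.
 */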
int get_irq_list(char *buf)
{
	int i, len = 0;
	struct irqaction *action;
#ifdef __SMP__
	int j;
#endif

	for(i = 0; i < (NR_IRQS + 1); i++) {
		if(!(action = *(i + irq_action)))
			continue;
		len += sprintf(buf + len, "%3d: ", i);
#ifndef __SMP__
		len += sprintf(buf + len, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf + len, "%10u ",
				       kstat.irqs[cpu_logical_map(j)][i]);
#endif
		len += sprintf(buf + len, "%c %s",
			       (action->flags & SA_INTERRUPT) ? '+' : ' ',
			       action->name);
		for(action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ",%s %s",
				       (action->flags & SA_INTERRUPT) ? " +" : "",
				       action->name);
		}
		len += sprintf(buf + len, "\n");
	}
	return len;
}
/* SBUS SYSIO INO number to Sparc PIL level. */
unsigned char sysio_ino_to_pil[] = {
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
	0, 1, 2, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
	3,  /* Onboard SCSI */
	5,  /* Onboard Ethernet */
/*XXX*/	8,  /* Onboard BPP */
	0,  /* Bogon */
	13, /* Audio */
/*XXX*/	15, /* PowerFail */
	0,  /* Bogon */
	0,  /* Bogon */
	12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
	11, /* Floppy */
	0,  /* Spare Hardware (bogon for now) */
	0,  /* Keyboard (bogon for now) */
	0,  /* Mouse (bogon for now) */
	0,  /* Serial (bogon for now) */
	0, 0, /* Bogon, Bogon */
	10, /* Timer 0 */
	11, /* Timer 1 */
	0, 0, /* Bogon, Bogon */
	15, /* Uncorrectable SBUS Error */
	15, /* Correctable SBUS Error */
	15, /* SBUS Error */
/*XXX*/	0,  /* Power Management (bogon for now) */
};
/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define offset(x) ((unsigned long)(&(((struct sysio_regs *)0)->x)))
#define bogon     ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	offset(imap_slot0), offset(imap_slot0), offset(imap_slot0), offset(imap_slot0),
	offset(imap_slot0), offset(imap_slot0), offset(imap_slot0), offset(imap_slot0),
	offset(imap_slot1), offset(imap_slot1), offset(imap_slot1), offset(imap_slot1),
	offset(imap_slot1), offset(imap_slot1), offset(imap_slot1), offset(imap_slot1),
	offset(imap_slot2), offset(imap_slot2), offset(imap_slot2), offset(imap_slot2),
	offset(imap_slot2), offset(imap_slot2), offset(imap_slot2), offset(imap_slot2),
	offset(imap_slot3), offset(imap_slot3), offset(imap_slot3), offset(imap_slot3),
	offset(imap_slot3), offset(imap_slot3), offset(imap_slot3), offset(imap_slot3),
	/* Onboard devices (not relevant/used on SunFire). */
	offset(imap_scsi), offset(imap_eth), offset(imap_bpp), bogon,
	offset(imap_audio), offset(imap_pfail), bogon, bogon,
	offset(imap_kms), offset(imap_flpy), offset(imap_shw),
	offset(imap_kbd), offset(imap_ms), offset(imap_ser), bogon, bogon,
	offset(imap_tim0), offset(imap_tim1), bogon, bogon,
	offset(imap_ue), offset(imap_ce), offset(imap_sberr),
	offset(imap_pmgmt),
};

#undef bogon

#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
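/* The conversion below relies on the SYSIO register layout keeping
 * every ICLR register at one fixed distance from its corresponding
 * IMAP register, so a single offset (computed from the first entries
 * of the two register banks) converts any IMAP pointer.
 */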
/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
static volatile unsigned int *sysio_imap_to_iclr(volatile unsigned int *imap)
{
	unsigned long diff;

	diff = offset(iclr_unused0) - offset(imap_slot0);
	return (volatile unsigned int *) (((unsigned long)imap) + diff);
}

#undef offset
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	extern int this_is_starfire;
	struct ino_bucket *bucket = __bucket(irq);
	volatile unsigned int *imap;
	unsigned long tid;

	imap = bucket->imap;
	if (!imap)
		return;

	if(this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
	} else {
		extern unsigned int starfire_translate(volatile unsigned int *imap,
						       unsigned int upaid);

		tid = (starfire_translate(imap, current->processor) << 26);
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * SYSIO_IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	*imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);
}
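/* A note on the shifts above (inferred from the field layouts): the
 * non-Starfire path moves the MID field of the UPA config register up
 * by 9 bits so it lands in the IMAP TID field at bits <30:26>, which
 * is also why the Starfire path shifts its translated UPA ID left by
 * 26.
 */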
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	volatile unsigned int *imap;

	imap = bucket->imap;
	if (imap != NULL) {
		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		*imap &= ~(SYSIO_IMAP_VALID);
	}
}
/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	NULL,	/* iclr */
	NULL,	/* imap */
};
unsigned int build_irq(int pil, int inofixup, volatile unsigned int *iclr, volatile unsigned int *imap)
{
	struct ino_bucket *bucket;
	int ino;

	if(pil == 0) {
		if(iclr != NULL || imap != NULL) {
			prom_printf("Invalid dummy bucket for PIL0 (%p:%p)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == NULL || imap == NULL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO)) + inofixup;
	if(ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
	struct linux_sbus *sbus = (struct linux_sbus *)buscookie;
	struct sysio_regs *sregs = sbus->iommu->sysio_regs;
	unsigned long offset;
	int pil;
	volatile unsigned int *imap, *iclr;
	int sbus_level = 0;

	pil = sysio_ino_to_pil[ino];
	if(!pil) {
		printk("sbus_build_irq: Bad SYSIO INO[%x]\n", ino);
		panic("Bad SYSIO IRQ translations...");
	}
	offset = sysio_irq_offsets[ino];
	if(offset == ((unsigned long)-1)) {
		printk("sbus_build_irq: Bad SYSIO INO[%x] cpu[%d]\n",
		       ino, pil);
		panic("BAD SYSIO IRQ offset...");
	}
	offset += ((unsigned long)sregs);
	imap = ((volatile unsigned int *)offset);

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if(ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		unsigned long iclraddr;
		int sbus_slot = (ino & 0x18)>>3;

		sbus_level = ino & 0x7;

		switch(sbus_slot) {
		case 0:
			iclr = &sregs->iclr_slot0;
			break;
		case 1:
			iclr = &sregs->iclr_slot1;
			break;
		case 2:
			iclr = &sregs->iclr_slot2;
			break;
		default:
		case 3:
			iclr = &sregs->iclr_slot3;
			break;
		}

		iclraddr = (unsigned long) iclr;
		iclraddr += ((sbus_level - 1) * 8);
		iclr = (volatile unsigned int *) iclraddr;
	}
	return build_irq(pil, sbus_level, iclr, imap);
}
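/* Link a bucket into this cpu's pending worklist with interrupt
 * delivery turned off: the first wrpr below toggles PSTATE_IE clear
 * (V9 wrpr XORs its two source operands), and the last wrpr puts the
 * saved %pstate value back.
 */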
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}
	if(!handler)
		return -EINVAL;

	if (!bucket->pil)
		irqflags &= ~SA_IMAP_MASKED;
	else {
		irqflags |= SA_IMAP_MASKED;
		if (bucket->flags & IBF_PCI) {
			/*
			 * PCI IRQs should never use SA_INTERRUPT.
			 */
			irqflags &= ~(SA_INTERRUPT);

			/*
			 * Check whether we _should_ use DMA Write Sync
			 * (for devices behind bridges behind APB).
			 *
			 * XXX: Not implemented, yet.
			 */
			if (bucket->flags & IBF_DMA_SYNC)
				irqflags |= SA_DMA_SYNC;
		}
	}
	save_and_cli(flags);

	action = *(bucket->pil + irq_action);
	if(action) {
		if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			restore_flags(flags);
			return -EBUSY;
		}
		if((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d "
			       "denied\n", bucket->pil);
			restore_flags(flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}
	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);

	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}
	if ((irqflags & SA_IMAP_MASKED) == 0) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_KERNEL);
				if(vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for(ent = 0; ent < 4; ent++) {
					if(vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if(pending)
			bucket->pending = 0;
	}
	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;

	if(tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if(pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	restore_flags(flags);

#ifdef __SMP__
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	restore_flags(flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	restore_flags(flags);
	return -ENOMEM;
}
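/* Usage sketch (hypothetical driver; the names are invented): the
 * cookie produced by sbus_build_irq() above is what gets passed in
 * here as 'irq', e.g.
 *
 *	unsigned int cookie = sbus_build_irq(sbus, ino);
 *	if (request_irq(cookie, mydev_intr, SA_SHIRQ, "mydev", mydev))
 *		goto fail;
 */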
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	action = *(bucket->pil + irq_action);
	if(!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return;
	}
	if(dev_id) {
		for( ; action; action = action->next) {
			if(action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if(!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			return;
		}
	} else if(action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		return;
	}

	if(action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		return;
	}

	save_and_cli(flags);
	if(action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	if(action->flags & SA_IMAP_MASKED) {
		volatile unsigned int *imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;
			for(ent = 0; ent < 4; ent++) {
				if(vector[ent] == action)
					vector[ent] = NULL;
				else if(vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if(other) {
				if (other == 1) {
					/* Convert back to non-shared bucket. */
					bucket->irq_info = orphan;
					bucket->flags &= ~(IBF_MULTI);
					kfree(vector);
				}
				goto out;
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for(ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if(bp != bucket &&
			   bp->imap == imap &&
			   (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

out:
	kfree(action);
	restore_flags(flags);
}
/* Only uniprocessor needs this IRQ/BH locking depth, on SMP it
 * lives in the per-cpu structure for cache reasons.
 */
#ifndef __SMP__
unsigned int local_irq_count;
unsigned int local_bh_count;

#define irq_enter(cpu, irq)	(local_irq_count++)
#define irq_exit(cpu, irq)	(local_irq_count--)
#else
atomic_t global_bh_lock = ATOMIC_INIT(0);
spinlock_t global_bh_count = SPIN_LOCK_UNLOCKED;

/* Who has global_irq_lock. */
unsigned char global_irq_holder = NO_PROC_ID;

/* This protects IRQ's. */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

/* Global IRQ locking depth. */
atomic_t global_irq_count = ATOMIC_INIT(0);

#define irq_enter(cpu, irq) \
do {	hardirq_enter(cpu); \
	spin_unlock_wait(&global_irq_lock); \
} while(0)
#define irq_exit(cpu, irq)	hardirq_exit(cpu)
static void show(char * str)
{
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%ld %ld]\n",
	       atomic_read(&global_irq_count),
	       cpu_data[0].irq_count, cpu_data[1].irq_count);
	printk("bh:   %d [%ld %ld]\n",
	       (spin_is_locked(&global_bh_count) ? 1 : 0),
	       cpu_data[0].bh_count, cpu_data[1].bh_count);
}
#define MAXCOUNT 100000000

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;
	do {
		if(!--count) {
			show("wait_on_bh");
			count = 0;
		}
		membar("#LoadLoad");
	} while(spin_is_locked(&global_bh_count));
}
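/* membar() here is, presumably, the asm/system.h wrapper that emits a
 * V9 memory barrier; #LoadLoad keeps the spin_is_locked() re-reads in
 * these busy-wait loops properly ordered.
 */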
#define SYNC_OTHER_ULTRAS(x)	udelay(x+1)

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;
	for(;;) {
		membar("#LoadLoad");
		if (!atomic_read (&global_irq_count)) {
			if (local_bh_count || !spin_is_locked(&global_bh_count))
				break;
		}
		spin_unlock (&global_irq_lock);
		membar("#StoreLoad | #StoreStore");
		for(;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_ULTRAS(cpu);
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (spin_is_locked (&global_irq_lock))
				continue;
			if (!local_bh_count && spin_is_locked (&global_bh_count))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}
void synchronize_bh(void)
{
	if (spin_is_locked (&global_bh_count) && !in_interrupt())
		wait_on_bh();
}

void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		cli();
		sti();
	}
}
static inline void get_irqlock(int cpu)
{
	if (! spin_trylock(&global_irq_lock)) {
		if ((unsigned char) cpu == global_irq_holder)
			return;
		do {
			while (spin_is_locked (&global_irq_lock))
				membar("#LoadLoad");
		} while(! spin_trylock(&global_irq_lock));
	}

	wait_on_irq(cpu);
	global_irq_holder = cpu;
}
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if(flags == 0) {
		int cpu = smp_processor_id();
		__cli();
		if (! local_irq_count)
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (! local_irq_count)
		release_irqlock(cpu);
	__sti();
}
unsigned long __global_save_flags(void)
{
	unsigned long flags, local_enabled, retval;

	__save_flags(flags);
	local_enabled = ((flags == 0) ? 1 : 0);
	retval = 2 + local_enabled;
	if (! local_irq_count) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}
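/* Flag encoding shared with __global_restore_flags() below:
 *   0 - this cpu held the global irq lock (undone via __global_cli)
 *   1 - irqs locally enabled, not in irq context (__global_sti)
 *   2 - irqs locally disabled (__cli)
 *   3 - irqs locally enabled, but inside irq context (__sti)
 */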
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long pc;
		__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
		printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
		       flags, pc);
	}
	}
}

#endif /* __SMP__ */
void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}
/* Tune this... */
#define FORWARD_VOLUME		12

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();
#ifdef __SMP__
	extern int this_is_starfire;
	int should_forward = (this_is_starfire == 0 &&
			      irq < 10 &&
			      current->pid != 0);
	unsigned int buddy = 0;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	if(should_forward != 0) {
		buddy = cpu_number_map[cpu] + 1;
		if (buddy >= NR_CPUS ||
		    (buddy = cpu_logical_map(buddy)) == -1)
			buddy = cpu_logical_map(0);

		/* Voo-doo programming. */
		if(cpu_data[buddy].idle_volume < FORWARD_VOLUME)
			should_forward = 0;
		buddy <<= 26;
	}
#endif

#ifndef __SMP__
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	if ((irq == 14) && (get_softint() & (1UL << 0)))
		irq = 0;
#endif
	clear_softint(1 << irq);

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	/* Sliiiick... */
#ifndef __SMP__
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;

		nbp = __bucket(bp->irq_chain);
		if((flags & IBF_ACTIVE) != 0) {
			if((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;
				for(ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if(ap != NULL)
						ap->handler(__irq(bp), ap->dev_id, regs);
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if(bp->pil != 0) {
#ifdef __SMP__
				/* Ok, here is what is going on:
				 * 1) Retargeting IRQs on Starfire is very
				 *    expensive so just forget about it on them.
				 * 2) Moving around very high priority interrupts
				 *    is a losing game.
				 * 3) If the current cpu is idle, interrupts are
				 *    useful work, so keep them here.  But do not
				 *    pass to our neighbour if he is not very idle.
				 */
				if (should_forward != 0) {
					/* Push it to our buddy. */
					should_forward = 0;
					*(bp->imap) = (buddy | SYSIO_IMAP_VALID);
				}
#endif
				*(bp->iclr) = SYSIO_ICLR_IDLE;
			}
		} else
			bp->pending = 1;
	}
	irq_exit(cpu, irq);
}
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = (struct ino_bucket *)action->mask;

	floppy_interrupt(irq, dev_cookie, regs);
	*(bucket->iclr) = SYSIO_ICLR_IDLE;

	irq_exit(cpu, irq);
}
#endif
/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
	(0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
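/* 0x10800000 is the 'ba' (branch always) opcode; the low 22 bits hold
 * the word displacement.  Worked example: a handler 0x100 bytes past
 * the trap table entry encodes as 0x10800000 | (0x100 >> 2), i.e.
 * 0x10800040.
 */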
static void install_fast_irq(unsigned int cpu_irq,
			     void (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}
int request_fast_irq(unsigned int irq,
		     void (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	/* Only IMAP style interrupts can be registered as fast. */
	if(bucket->pil == 0)
		return -EINVAL;

	if(!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	action = *(bucket->pil + irq_action);
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		return -EBUSY;
	}

	save_and_cli(flags);
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);
	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags | SA_IMAP_MASKED;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	restore_flags(flags);

#ifdef __SMP__
	distribute_irqs();
#endif
	return 0;
}
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

int probe_irq_off(unsigned long mask)
{
	return 0;
}
/* This gets the master TICK_INT timer going. */
void init_timers(void (*cfunc)(int, void *, struct pt_regs *),
		 unsigned long *clock)
{
	unsigned long flags;
	extern unsigned long timer_tick_offset;
	int node, err;
#ifdef __SMP__
	extern void smp_tick_init(void);
#endif

	node = linux_cpus[0].prom_node;
	*clock = prom_getint(node, "clock-frequency");
	timer_tick_offset = *clock / HZ;
#ifdef __SMP__
	smp_tick_init();
#endif

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, NULL, NULL), cfunc, (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);

	if(err) {
		prom_printf("Serious problem, cannot register TICK_INT\n");
		prom_halt();
	}

	save_and_cli(flags);

	/* Set things up so user can access tick register for profiling
	 * purposes.
	 */
	__asm__ __volatile__("
	sethi	%%hi(0x80000000), %%g1
	sllx	%%g1, 32, %%g1
	rd	%%tick, %%g2
	add	%%g2, 6, %%g2
	andn	%%g2, %%g1, %%g2
	wrpr	%%g2, 0, %%tick
"	: /* no outputs */
	: /* no inputs */
	: "g1", "g2");
	__asm__ __volatile__("
	rd	%%tick, %%g1
	add	%%g1, %0, %%g1
	wr	%%g1, 0x0, %%tick_cmpr"
	: /* no outputs */
	: "r" (timer_tick_offset)
	: "g1");

	restore_flags(flags);
	sti();
}
#ifdef __SMP__
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	extern int this_is_starfire;
	struct ino_bucket *bucket = __bucket(p->mask);
	volatile unsigned int *imap = bucket->imap;
	unsigned int tid;

	/* Never change this, it causes problems on Ex000 systems. */
	if (bucket->pil == 12)
		return goal_cpu;

	if(this_is_starfire == 0) {
		tid = __cpu_logical_map[goal_cpu] << 26;
	} else {
		extern unsigned int starfire_translate(volatile unsigned int *imap,
						       unsigned int upaid);

		tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
	}
	*imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);

	goal_cpu++;
	if(goal_cpu >= NR_CPUS ||
	   __cpu_logical_map[goal_cpu] == -1)
		goal_cpu = 0;
	return goal_cpu;
}
/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	save_and_cli(flags);
	cpu = 0;
	for(level = 0; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		while(p) {
			if(p->flags & SA_IMAP_MASKED)
				cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	restore_flags(flags);
}
#endif
struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if(tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if(err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__("
	mov	0x40, %%g2
	ldxa	[%%g0] %0, %%g1
	ldxa	[%%g2] %1, %%g1
	stxa	%%g0, [%%g0] %0
	membar	#Sync
"	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_UDB_INTR_R)
	: "g1", "g2");
}
void enable_prom_timer(void)
{
	if(!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}
void __init init_IRQ(void)
{
	static int called = 0;

	if (called == 0) {
		called = 1;
		map_prom_timers();
		kill_prom_timer();
		memset(&ivector_table[0], 0, sizeof(ivector_table));
#ifndef __SMP__
		memset(&__up_workvec[0], 0, sizeof(__up_workvec));
#endif
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}