/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
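
/* The "real irq" cookie used throughout this file is just a bucket
 * pointer squeezed into 32 bits (the kernel image, and thus this table,
 * lives in the low 32 bits of the address space).  Illustrative round
 * trip, assuming the __irq()/__bucket()/__irq_ino() helpers from
 * asm/irq.h:
 *
 *	unsigned int real_irq = __irq(&ivector_table[ino]);
 *	struct ino_bucket *bp = __bucket(real_irq);
 *	// bp == &ivector_table[ino], __irq_ino(real_irq) == ino
 */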
/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
static struct irqaction timer_irq_action = {
	.name = "timer",
};
static struct irqaction *irq_action[NR_IRQS] = { &timer_irq_action, };
/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);
static unsigned int virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_cur = 1;

static unsigned char virt_irq_alloc(unsigned int real_irq)
{
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	ent = virt_irq_cur;
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		return 0;
	}

	virt_irq_cur = ent + 1;
	virt_to_real_irq_table[ent] = real_irq;

	return ent;
}
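
/* Illustrative behavior of the bump allocator above: the first bucket
 * to register gets virt_irq 1, the next gets 2, and so on.  Slot 0 is
 * intentionally never handed out, so a zero return from
 * virt_to_real_irq() below can be used to mean "not mapped".
 */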
#if 0 /* Currently unused. */
static unsigned char real_to_virt_irq(unsigned int real_irq)
{
	struct ino_bucket *bucket = __bucket(real_irq);

	return bucket->virt_irq;
}
#endif

static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
	return virt_to_real_irq_table[virt_irq];
}
void irq_install_pre_handler(int virt_irq,
			     void (*func)(struct ino_bucket *, void *, void *),
			     void *arg1, void *arg2)
{
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;
	struct irq_desc *d;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);
	d = bucket->irq_info;
	d->pre_handler = func;
	d->pre_handler_arg1 = arg1;
	d->pre_handler_arg2 = arg2;
}
static void register_irq_proc(unsigned int irq);

/*
 * Upper 16 bits of irqaction->flags holds the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	action->flags |= __irq_ino(irq) << 48;

#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)		((action)->mask)
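
/* Worked example of the packing above (values illustrative): for an
 * irq whose ino is 0x23, put_ino_in_irqaction() clears bits 48-63 of
 * ->flags and ORs in 0x23UL << 48.  get_ino_in_irqaction() then
 * returns 0x23, while the low 48 bits still hold the SA_* flag bits
 * untouched.
 */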
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}
extern unsigned long real_hard_smp_processor_id(void);

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
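
/* Illustrative example of the Safari encoding above: for cpuid 0x2b,
 * a = 0x2b & 0x1f = 0x0b (agent id) and n = (0x2b >> 5) & 0x1f = 0x01
 * (node id), so the tid programmed into the IMAP register is
 * (0x0b << IMAP_AID_SHIFT) | (0x01 << IMAP_NID_SHIFT), masked to the
 * AID/NID fields.
 */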
void enable_irq(unsigned int virt_irq)
{
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;
	unsigned long imap, cpuid;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);
	imap = bucket->imap;
	if (unlikely(imap == 0UL))
		return;

	preempt_disable();

	/* This gets the physical processor ID, even on uniprocessor,
	 * so we can always program the interrupt target correctly.
	 */
	cpuid = real_hard_smp_processor_id();

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(real_irq);
		int err;

		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
			       ino, cpuid, err);
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
	} else {
		unsigned int tid = sun4u_compute_tid(imap, cpuid);

		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
		 * of this SYSIO's preconfigured IGN in the SYSIO Control
		 * Register, the hardware just mirrors that value here.
		 * However for Graphics and UPA Slave devices the full
		 * IMAP_INR field can be set by the programmer here.
		 *
		 * Things like FFB can now be handled via the new IRQ
		 * mechanism.
		 */
		upa_writel(tid | IMAP_VALID, imap);
	}

	preempt_enable();
}
void disable_irq(unsigned int virt_irq)
{
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;
	unsigned long imap;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);
	imap = bucket->imap;
	if (unlikely(imap == 0UL))
		return;

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(real_irq);
		int err;

		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): "
			       "err(%d)\n", ino, err);
	} else {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}
static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("IRQ: INO %04x (%016lx:%016lx) --> "
		    "(%d:%016lx:%016lx), halting...\n",
		    ino, bucket->iclr, bucket->imap,
		    inofixup, iclr, imap);
	prom_halt();
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags)
{
	struct ino_bucket *bucket;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	/* RULE: Both must be specified. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %016lx %016lx\n",
			    inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%016lx:%016lx)\n",
			    ino, inofixup, iclr, imap);
		prom_halt();
	}

	bucket = &ivector_table[ino];
	if (bucket->flags & IBF_ACTIVE)
		build_irq_error("IRQ: Trying to build active INO bucket.\n",
				ino, inofixup, iclr, imap, bucket);

	if (bucket->irq_info) {
		if (bucket->imap != imap || bucket->iclr != iclr)
			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
					ino, inofixup, iclr, imap, bucket);

		goto out;
	}

	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	if (!bucket->virt_irq)
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
	bucket->flags = flags;

out:
	return bucket->virt_irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
{
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);

	bucket = &ivector_table[sysino];

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 *
	 * But we need to make them look unique for the disable_irq() logic
	 * in free_irq().
	 */
	bucket->imap = ~0UL - sysino;
	bucket->iclr = ~0UL - sysino;
	if (!bucket->virt_irq)
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
	bucket->flags = flags;

	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}

	return bucket->virt_irq;
}
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id());
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
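
/* The pstate dance above is a hand-rolled local_irq_save()/restore():
 * with PSTATE_IE clear, the interrupt vector trap cannot run on this
 * cpu, so the two-step list push (bucket->irq_chain = *ent; *ent =
 * __irq(bucket)) cannot race with entry.S feeding the same per-cpu
 * worklist.
 */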
static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action;

	action = *(irq_action + pil);
	if (action) {
		if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ))
			return -EBUSY;
	}

	return 0;
}

static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}
static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}
int request_irq(unsigned int virt_irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	unsigned long flags;
	unsigned int real_irq;
	int pending = 0;

	real_irq = virt_to_real_irq(virt_irq);
	if (unlikely(!real_irq))
		return -EINVAL;

	if (unlikely(!handler))
		return -EINVAL;

	bucket = __bucket(real_irq);
	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if (irqflags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(virt_irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(virt_irq, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	bucket->flags |= IBF_ACTIVE;
	pending = bucket->pending;
	if (pending)
		bucket->pending = 0;

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, __irq_ino(real_irq));
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(virt_irq, action);

	enable_irq(virt_irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << PIL_DEVICE_IRQ);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	register_irq_proc(virt_irq);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

EXPORT_SYMBOL(request_irq);
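
/* Illustrative usage sketch for the API above; the device, its handler
 * body, and the "mydev" names are hypothetical:
 *
 *	static irqreturn_t mydev_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct mydev *mp = dev_id;
 *
 *		... service the device ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(mp->irq, mydev_intr, SA_SHIRQ, "mydev", mp);
 *	...
 *	free_irq(mp->irq, mp);
 */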
static struct irqaction *unlink_irq_action(unsigned int virt_irq, void *dev_id)
{
	struct irqaction *action, **pp;

	pp = irq_action + virt_irq;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", virt_irq);
		return NULL;
	}

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (likely(action))
		*pp = action->next;

	return action;
}
void free_irq(unsigned int virt_irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned long flags;
	unsigned int real_irq;
	int ent, i;

	real_irq = virt_to_real_irq(virt_irq);
	if (unlikely(!real_irq))
		return;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(virt_irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(virt_irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(real_irq);
	desc = bucket->irq_info;

	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];

		if (p == action) {
			desc->action_active_mask &= ~(1 << i);
			break;
		}
	}

	if (!desc->action_active_mask) {
		unsigned long imap = bucket->imap;

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for (ent = 0; ent < NUM_IVECS; ent++) {
			struct ino_bucket *bp = &ivector_table[ent];

			if (bp != bucket &&
			    bp->imap == imap &&
			    (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(virt_irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int virt_irq)
{
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket;

	if (unlikely(!real_irq))
		return;

	bucket = __bucket(real_irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86.  */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */
static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
{
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;
	u32 action_mask, i;
	int random;

	bp->flags |= IBF_INPROGRESS;

	if (unlikely(!(flags & IBF_ACTIVE))) {
		bp->pending = 1;
		goto out;
	}

	if (desc->pre_handler)
		desc->pre_handler(bp,
				  desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	action_mask = desc->action_active_mask;
	random = 0;
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (!(action_mask & mask))
			continue;

		action_mask &= ~mask;

		if (p->handler(bp->virt_irq, p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
			break;
	}

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(bp);
		int err;

		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
	} else {
		upa_writel(ICLR_IDLE, bp->iclr);
	}

	/* Test and add entropy */
	if (random & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(bp->virt_irq);
out:
	bp->flags &= ~IBF_INPROGRESS;
}
#ifndef CONFIG_SMP
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);

void timer_irq(int irq, struct pt_regs *regs)
{
	unsigned long clr_mask = 1 << irq;
	unsigned long tick_mask = tick_ops->softint_mask;

	if (get_softint() & tick_mask) {
		irq = 0;
		clr_mask = tick_mask;
	}
	clear_softint(clr_mask);

	irq_enter();
	kstat_this_cpu.irqs[0]++;
	timer_interrupt(irq, NULL, regs);
	irq_exit();
}
#endif
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

	/* XXX at this point we should be able to assert that
	 * XXX irq is PIL_DEVICE_IRQ...
	 */
	clear_softint(1 << irq);

	irq_enter();

	/* Sliiiick... */
	bp = __bucket(xchg32(irq_work(cpu), 0));
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

		kstat_this_cpu.irqs[bp->virt_irq]++;

		bp->irq_chain = 0;
		process_bucket(bp, regs);
		bp = nbp;
	}
	irq_exit();
}
#ifdef CONFIG_BLK_DEV_FD
extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);

/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
extern unsigned long pdma_size;
extern volatile int doing_pdma;
extern unsigned long fdc_status;

irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	if (likely(doing_pdma)) {
		void __iomem *stat = (void __iomem *) fdc_status;
		unsigned char *vaddr = pdma_vaddr;
		unsigned long size = pdma_size;
		u8 val;

		while (size) {
			val = readb(stat);
			if (unlikely(!(val & 0x80))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				return IRQ_HANDLED;
			}
			if (unlikely(!(val & 0x20))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				doing_pdma = 0;
				goto main_interrupt;
			}
			if (val & 0x40) {
				/* read */
				*vaddr++ = readb(stat + 1);
			} else {
				unsigned char data = *vaddr++;

				/* write */
				writeb(data, stat + 1);
			}
			size--;
		}

		pdma_vaddr = vaddr;
		pdma_size = size;

		/* Send Terminal Count pulse to floppy controller. */
		val = readb(auxio_register);
		val |= AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);
		val &= ~AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);

		doing_pdma = 0;
	}

main_interrupt:
	return floppy_interrupt(irq, dev_cookie, regs);
}
EXPORT_SYMBOL(sparc_floppy_irq);
#endif
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);
#ifdef CONFIG_SMP
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(bucket);

		sun4v_intr_settarget(ino, goal_cpu);
		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	} else {
		unsigned long imap = bucket->imap;
		unsigned int tid = sun4u_compute_tid(imap, goal_cpu);

		upa_writel(tid | IMAP_VALID, imap);
	}

	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];

		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must already be mapped. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist = 0;
}
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
	unsigned long num_entries = 128;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}
static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}
static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}
static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
}
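
/* Layout note for the page set up above: the first 64 bytes hold the
 * cpu mondo block, and the u16 cpu list starts at offset 64; the
 * BUILD_BUG_ON guarantees that NR_CPUS u16 entries fit in the
 * remainder of the page.
 */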
/* Allocate and register the mondo and error queues for this cpu.  */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
	struct trap_per_cpu *tb = &trap_block[cpu];

	if (alloc) {
		alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
		alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);

		init_cpu_send_mondo_info(tb, use_bootmem);
	}

	if (load) {
		if (cpu != hard_smp_processor_id()) {
			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
				    cpu, hard_smp_processor_id());
			prom_halt();
		}

		sun4v_register_mondo_queues(cpu);
	}
}
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];
#ifdef CONFIG_SMP

static int irq_affinity_read_proc(char *page, char **start, off_t off,
				  int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static inline void set_intr_affinity(int virt_irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp;
	struct irq_desc *desc;
	struct irqaction *ap;
	unsigned int real_irq;

	real_irq = virt_to_real_irq(virt_irq);
	if (unlikely(!real_irq))
		return;

	bp = __bucket(real_irq);
	desc = bp->irq_info;
	ap = desc->action;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction(ap, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 *
	 * XXX Broken, this doesn't happen anymore...
	 */
}
static int irq_affinity_write_proc(struct file *file,
				   const char __user *buffer,
				   unsigned long count, void *data)
{
	int virt_irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(virt_irq, new_value);

	return full_count;
}

#endif /* CONFIG_SMP */
#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int virt_irq)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[virt_irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", virt_irq);

	/* create /proc/irq/1234 */
	irq_dir[virt_irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[virt_irq]);

		if (entry) {
			entry->data = (void *)(long)virt_irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}
void init_irq_proc(void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}