/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"
#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}
static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
static DEFINE_SPINLOCK(irq_alloc_lock);
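/* Allocate a virtual IRQ number for the given (dev_handle, dev_ino)
 * pair.  Slot 0 is kept as the "invalid" IRQ, so valid entries run
 * from 1 to NR_IRQS - 1; 0 is returned when the table is exhausted.
 */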
unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		irq_table[ent].dev_handle = dev_handle;
		irq_table[ent].dev_ino = dev_ino;
		irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&irq_alloc_lock, flags);

	return ent;
}
#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif
/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, "     Non-maskable interrupts\n");
	return 0;
}
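/* Encode the interrupt target ID for a sun4u IMAP register.  The
 * encoding depends on the platform: Starfire translates the cpuid
 * through the PROM, JBUS parts (Jalapeno/Serrano) use a shifted
 * cpuid, Safari parts split the cpuid into agent and node IDs, and
 * plain UPA uses a shifted cpuid.
 */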
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};
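/* Pick the CPU an interrupt should be targeted at, based on the
 * requested affinity mask.  If the mask covers all online CPUs (or
 * intersects none of them), fall back to the cpumap-based default;
 * otherwise use the first online CPU in the mask.
 */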
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif
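/* sun4u interrupts are enabled by programming the target ID and the
 * Valid bit into the IMAP register, then writing ICLR_IDLE to reset
 * the interrupt state machine.
 */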
static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}
static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}
/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}
static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}
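/* On sun4v the IMAP/ICLR registers are not touched directly; the
 * hypervisor is asked to target, idle and enable the interrupt via
 * the sysino-based intr API.
 */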
static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}
static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}
static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}
static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}
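/* Virtual (cookie based) sun4v interrupts are managed with the
 * (dev_handle, dev_ino) pair recorded in irq_table[], using the
 * hypervisor vintr API instead of the sysino-based calls above.
 */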
static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}
static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};
static void pre_flow_handler(struct irq_data *d)
{
	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
	unsigned int ino = irq_table[d->irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}
void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = irq_get_handler_data(irq);

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	__irq_set_preflow_handler(irq, pre_flow_handler);
}
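/* Map a sun4u (imap, iclr) register pair to a virtual IRQ.  The INO
 * is read from the IMAP register, the matching ino_bucket is looked
 * up in ivector_table[], and a virtual IRQ plus irq_handler_data is
 * attached the first time the INO is seen.
 */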
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap  = imap;
	handler_data->iclr  = iclr;

out:
	return irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
					      "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}
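/* Cookie based virtual interrupts get a standalone ino_bucket,
 * allocated here, whose (complemented) physical address is handed to
 * the hypervisor as the per-device cookie.
 */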
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *handler_data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	irq = irq_alloc(devhandle, devino);
	bucket_set_irq(__pa(bucket), irq);

	irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
				      "IVEC");

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	irq_set_handler_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return irq;
}
void ack_bad_irq(unsigned int irq)
{
	unsigned int ino = irq_table[irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
	       ino, irq);
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
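/* Entry point for IRQ vector traps.  The per-cpu mondo worklist is
 * snapshotted atomically (with interrupts disabled in %pstate), then
 * each pending ino_bucket in the chain is dequeued and dispatched
 * through generic_handle_irq() on the hard IRQ stack.
 */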
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}
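/* Run pending softirqs on the dedicated per-cpu softirq stack rather
 * than on whatever kernel stack we happen to be on.
 */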
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}
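/* When a CPU is taken offline, retarget every non-per-cpu IRQ that
 * has a registered handler and then shut off the local timer
 * interrupt.
 */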
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif
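/* The PROM's tick timer must be silenced before we start taking
 * interrupt vector traps ourselves; the helpers below locate it via
 * the device tree and shut it down.
 */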
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr,
						 unsigned long type,
						 unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}
void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}
/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}
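/* The first 64 bytes of the per-cpu mondo page serve as the mondo
 * block, and the remainder holds the CPU list used when sending cpu
 * mondos, hence the BUILD_BUG_ON() on NR_CPUS below.
 */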
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}
static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}
static struct irqaction timer_irq_action = {
	.name = "timer",
};
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}