/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/iommu.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1"
                             : /* no outputs */
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));
}
static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
        unsigned int ret;

        __asm__ __volatile__("lduwa [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}
static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
        __asm__ __volatile__("stwa %0, [%1] %2"
                             : /* no outputs */
                             : "r" (irq),
                               "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq)),
                               "i" (ASI_PHYS_USE_EC));
}
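/* For illustration only: if bypass and non-bypass accesses could be
 * mixed, bucket_get_irq() would be equivalent to a plain load through
 * the kernel's linear mapping, roughly (hypothetical helper name):
 *
 *	static unsigned int bucket_get_irq_cacheable(unsigned long pa)
 *	{
 *		return ((struct ino_bucket *) __va(pa))->__irq;
 *	}
 *
 * The accessors above instead express the same loads and stores as
 * ldxa/lduwa/stxa/stwa through ASI_PHYS_USE_EC, so every INO bucket
 * access is a physically addressed bypass access.
 */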
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
static unsigned long hvirq_major __initdata;
static int __init early_hvirq_major(char *p)
{
        int rc = kstrtoul(p, 10, &hvirq_major);

        return rc;
}
early_param("hvirq", early_hvirq_major);
static int hv_irq_version;
/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
 * based interfaces, but:
 *
 * 1) Several OSs, Solaris and Linux included, use them even when only
 *    negotiating version 1.0 (or failing to negotiate at all).  So the
 *    hypervisor has a workaround that provides the VIRQ interfaces even
 *    when only version 1.0 of the API is in use.
 *
 * 2) More importantly, with major version 2.0 these VIRQ interfaces
 *    were only actually hooked up for LDC interrupts, even though the
 *    Hypervisor specification clearly stated:
 *
 *	The new interrupt API functions will be available to a guest
 *	when it negotiates version 2.0 in the interrupt API group 0x2. When
 *	a guest negotiates version 2.0, all interrupt sources will only
 *	support using the cookie interface, and any attempt to use the
 *	version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
 *	ENOTSUPPORTED error being returned.
 *
 *    with an emphasis on "all interrupt sources".
 *
 * To correct this, major version 3.0 was created, which does actually
 * support VIRQs for all interrupt sources (not just LDC devices).  So
 * if we want to move completely over to the cookie based VIRQs we must
 * negotiate major version 3.0 or later of HV_GRP_INTR.
 */
static bool sun4v_cookie_only_virqs(void)
{
        if (hv_irq_version >= 3)
                return true;
        return false;
}
static void __init irq_init_hv(void)
{
        unsigned long hv_error, major, minor = 0;

        if (tlb_type != hypervisor)
                return;

        if (hvirq_major)
                major = hvirq_major;
        else
                major = 3;

        hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
        if (!hv_error)
                hv_irq_version = major;
        else
                hv_irq_version = 1;

        pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
                hv_irq_version,
                sun4v_cookie_only_virqs() ? "enabled" : "disabled");
}
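/* Note: the "hvirq" early parameter above makes the negotiated major
 * version tunable from the boot command line.  For example, booting
 * with "hvirq=2" requests major version 2 instead of the default 3;
 * sun4v_cookie_only_virqs() then returns false and the sysino
 * interfaces stay in use.  If registration fails altogether, we fall
 * back to treating the API as major version 1.
 */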
/* This function is for the timer interrupt. */
int __init arch_probe_nr_irqs(void)
{
        return 1;
}
#define DEFAULT_NUM_IVECS	(0xfffU)
static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
#define NUM_IVECS (nr_ivec)
static unsigned int __init size_nr_ivec(void)
{
        if (tlb_type == hypervisor) {
                switch (sun4v_chip_type) {
                /* Athena's devhandle|devino is large. */
                case SUN4V_CHIP_SPARC64X:
                        nr_ivec = 0xffff;
                        break;
                }
        }
        return nr_ivec;
}
struct irq_handler_data {
        union {
                struct {
                        unsigned int dev_handle;
                        unsigned int dev_ino;
                };
                unsigned long sysino;
        };
        struct ino_bucket bucket;
        unsigned long   iclr;
        unsigned long   imap;
};
static inline unsigned int irq_data_to_handle(struct irq_data *data)
{
        struct irq_handler_data *ihd = data->handler_data;

        return ihd->dev_handle;
}
static inline unsigned int irq_data_to_ino(struct irq_data *data)
{
        struct irq_handler_data *ihd = data->handler_data;

        return ihd->dev_ino;
}
static inline unsigned long irq_data_to_sysino(struct irq_data *data)
{
        struct irq_handler_data *ihd = data->handler_data;

        return ihd->sysino;
}
void irq_free(unsigned int irq)
{
        void *data = irq_get_handler_data(irq);

        kfree(data);
        irq_set_handler_data(irq, NULL);
        irq_free_descs(irq, 1);
}
unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
        int irq;

        irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
        if (irq <= 0)
                goto out;

        return irq;
out:
        return 0;
}
static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
{
        unsigned long hv_err, cookie;
        struct ino_bucket *bucket;
        unsigned int irq = 0U;

        hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
        if (hv_err) {
                pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
                goto out;
        }

        if (cookie & ((1UL << 63UL))) {
                cookie = ~cookie;
                bucket = (struct ino_bucket *) __va(cookie);
                irq = bucket->__irq;
        }
out:
        return irq;
}
static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
        struct ino_bucket *bucket;
        unsigned int irq;

        bucket = &ivector_table[sysino];
        irq = bucket_get_irq(__pa(bucket));

        return irq;
}
void ack_bad_irq(unsigned int irq)
{
        pr_crit("BAD IRQ ack %d\n", irq);
}
void irq_install_pre_handler(int irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        pr_warn("IRQ pre handler NOT supported.\n");
}
/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "NMI: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
        seq_printf(p, "     Non-maskable interrupts\n");
        return 0;
}
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}
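/* Worked example for the Safari encoding above, with a hypothetical
 * cpuid of 37 (0b100101): agent id a = 37 & 0x1f = 5 and node id
 * n = (37 >> 5) & 0x1f = 1, which are then placed into the IMAP value
 * via IMAP_AID_SHIFT and IMAP_NID_SHIFT.
 */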
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
        cpumask_t mask;
        int cpuid;

        cpumask_copy(&mask, affinity);
        if (cpumask_equal(&mask, cpu_online_mask)) {
                cpuid = map_to_cpu(irq);
        } else {
                cpumask_t tmp;

                cpumask_and(&tmp, cpu_online_mask, &mask);
                cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
        }

        return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
        real_hard_smp_processor_id()
#endif
static void sun4u_irq_enable(struct irq_data *data)
{
        struct irq_handler_data *handler_data = data->handler_data;

        if (likely(handler_data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(data->irq, data->affinity);
                imap = handler_data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, handler_data->iclr);
        }
}
static int sun4u_set_affinity(struct irq_data *data,
                              const struct cpumask *mask, bool force)
{
        struct irq_handler_data *handler_data = data->handler_data;

        if (likely(handler_data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(data->irq, mask);
                imap = handler_data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, handler_data->iclr);
        }

        return 0;
}
/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
}
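/* A sketch of the lazy-disable flow described above (illustrative, not
 * a trace from real hardware):
 *
 *	free_irq()		-> ->irq_disable() is this no-op
 *	device raises the INO	-> interrupt stays in the "sent" state
 *				   and the handler call is skipped
 *	next request_irq()	-> ->irq_enable() writes ICLR_IDLE,
 *				   resetting the INO state machine
 */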
static void sun4u_irq_eoi(struct irq_data *data)
{
        struct irq_handler_data *handler_data = data->handler_data;

        if (likely(handler_data))
                upa_writeq(ICLR_IDLE, handler_data->iclr);
}
static void sun4v_irq_enable(struct irq_data *data)
{
        unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
        unsigned int ino = irq_data_to_sysino(data);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
        err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                       ino, err);
}
static int sun4v_set_affinity(struct irq_data *data,
                              const struct cpumask *mask, bool force)
{
        unsigned long cpuid = irq_choose_cpu(data->irq, mask);
        unsigned int ino = irq_data_to_sysino(data);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);

        return 0;
}
static void sun4v_irq_disable(struct irq_data *data)
{
        unsigned int ino = irq_data_to_sysino(data);
        int err;

        err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                       "err(%d)\n", ino, err);
}
static void sun4v_irq_eoi(struct irq_data *data)
{
        unsigned int ino = irq_data_to_sysino(data);
        int err;

        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
}
static void sun4v_virq_enable(struct irq_data *data)
{
        unsigned long dev_handle = irq_data_to_handle(data);
        unsigned long dev_ino = irq_data_to_ino(data);
        unsigned long cpuid;
        int err;

        cpuid = irq_choose_cpu(data->irq, data->affinity);

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_ENABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}
static int sun4v_virt_set_affinity(struct irq_data *data,
                                   const struct cpumask *mask, bool force)
{
        unsigned long dev_handle = irq_data_to_handle(data);
        unsigned long dev_ino = irq_data_to_ino(data);
        unsigned long cpuid;
        int err;

        cpuid = irq_choose_cpu(data->irq, mask);

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);

        return 0;
}
static void sun4v_virq_disable(struct irq_data *data)
{
        unsigned long dev_handle = irq_data_to_handle(data);
        unsigned long dev_ino = irq_data_to_ino(data);
        int err;

        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                       "HV_INTR_DISABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}
static void sun4v_virq_eoi(struct irq_data *data)
{
        unsigned long dev_handle = irq_data_to_handle(data);
        unsigned long dev_ino = irq_data_to_ino(data);
        int err;

        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
}
static struct irq_chip sun4u_irq = {
        .name                   = "sun4u",
        .irq_enable             = sun4u_irq_enable,
        .irq_disable            = sun4u_irq_disable,
        .irq_eoi                = sun4u_irq_eoi,
        .irq_set_affinity       = sun4u_set_affinity,
        .flags                  = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_irq = {
        .name                   = "sun4v",
        .irq_enable             = sun4v_irq_enable,
        .irq_disable            = sun4v_irq_disable,
        .irq_eoi                = sun4v_irq_eoi,
        .irq_set_affinity       = sun4v_set_affinity,
        .flags                  = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_virq = {
        .name                   = "vsun4v",
        .irq_enable             = sun4v_virq_enable,
        .irq_disable            = sun4v_virq_disable,
        .irq_eoi                = sun4v_virq_eoi,
        .irq_set_affinity       = sun4v_virt_set_affinity,
        .flags                  = IRQCHIP_EOI_IF_HANDLED,
};
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct irq_handler_data *handler_data;
        struct ino_bucket *bucket;
        unsigned int irq;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        irq = bucket_get_irq(__pa(bucket));
        if (!irq) {
                irq = irq_alloc(0, ino);
                bucket_set_irq(__pa(bucket), irq);
                irq_set_chip_and_handler_name(irq, &sun4u_irq,
                                              handle_fasteoi_irq, "IVEC");
        }

        handler_data = irq_get_handler_data(irq);
        if (unlikely(handler_data))
                goto out;

        handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!handler_data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        irq_set_handler_data(irq, handler_data);

        handler_data->imap = imap;
        handler_data->iclr = iclr;

out:
        return irq;
}
static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
                void (*handler_data_init)(struct irq_handler_data *data,
                u32 devhandle, unsigned int devino),
                struct irq_chip *chip)
{
        struct irq_handler_data *data;
        unsigned int irq;

        irq = irq_alloc(devhandle, devino);
        if (!irq)
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                pr_err("IRQ handler data allocation failed.\n");
                irq_free(irq);
                irq = 0;
                goto out;
        }

        irq_set_handler_data(irq, data);
        handler_data_init(data, devhandle, devino);
        irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
        data->imap = ~0UL;
        data->iclr = ~0UL;
out:
        return irq;
}
static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
                                   unsigned int devino)
{
        struct irq_handler_data *ihd = irq_get_handler_data(irq);
        unsigned long hv_error, cookie;

        /* handler_irq needs to find the irq.  The cookie is tested as a
         * signed value in sun4v_dev_mondo: a negative value marks a
         * cookie (non-ivector_table) delivery.
         */
        ihd->bucket.__irq = irq;
        cookie = ~__pa(&ihd->bucket);

        hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
        if (hv_error)
                pr_err("HV vintr set cookie failed = %ld\n", hv_error);

        return hv_error;
}
static void cookie_handler_data(struct irq_handler_data *data,
                                u32 devhandle, unsigned int devino)
{
        data->dev_handle = devhandle;
        data->dev_ino = devino;
}
static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
                                     struct irq_chip *chip)
{
        unsigned long hv_error;
        unsigned int irq;

        irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);

        hv_error = cookie_assign(irq, devhandle, devino);
        if (hv_error) {
                irq_free(irq);
                irq = 0;
        }

        return irq;
}
static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
        unsigned int irq;

        irq = cookie_exists(devhandle, devino);
        if (irq)
                goto out;

        irq = cookie_build_irq(devhandle, devino, &sun4v_virq);

out:
        return irq;
}
static void sysino_set_bucket(unsigned int irq)
{
        struct irq_handler_data *ihd = irq_get_handler_data(irq);
        struct ino_bucket *bucket;
        unsigned long sysino;

        sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
        BUG_ON(sysino >= nr_ivec);
        bucket = &ivector_table[sysino];
        bucket_set_irq(__pa(bucket), irq);
}
static void sysino_handler_data(struct irq_handler_data *data,
                                u32 devhandle, unsigned int devino)
{
        unsigned long sysino;

        sysino = sun4v_devino_to_sysino(devhandle, devino);
        data->sysino = sysino;
}
static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
                                     struct irq_chip *chip)
{
        unsigned int irq;

        irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
        if (!irq)
                goto out;

        sysino_set_bucket(irq);
out:
        return irq;
}
static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
{
        int irq;

        irq = sysino_exists(devhandle, devino);
        if (irq)
                goto out;

        irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
out:
        return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned int irq;

        if (sun4v_cookie_only_virqs())
                irq = sun4v_build_cookie(devhandle, devino);
        else
                irq = sun4v_build_sysino(devhandle, devino);

        return irq;
}
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        int irq;

        irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
        if (!irq)
                goto out;

        /* This is borrowed from the original function.
         */
        irq_set_status_flags(irq, IRQ_NOAUTOEN);

out:
        return irq;
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
        void *orig_sp;

        clear_softint(1 << pil);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs.  */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                             "wrpr %0, %3, %%pstate\n\t"
                             "ldx  [%2], %1\n\t"
                             "stx  %%g0, [%2]\n\t"
                             "wrpr %0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

        orig_sp = set_hardirq_stack();

        while (bucket_pa) {
                unsigned long next_pa;
                unsigned int irq;

                next_pa = bucket_get_chain_pa(bucket_pa);
                irq = bucket_get_irq(bucket_pa);
                bucket_clear_chain_pa(bucket_pa);

                generic_handle_irq(irq);

                bucket_pa = next_pa;
        }

        restore_hardirq_stack(orig_sp);

        irq_exit();
        set_irq_regs(old_regs);
}
void do_softirq_own_stack(void)
{
        void *orig_sp, *sp = softirq_stack[smp_processor_id()];

        sp += THREAD_SIZE - 192 - STACK_BIAS;

        __asm__ __volatile__("mov %%sp, %0\n\t"
                             "mov %1, %%sp"
                             : "=&r" (orig_sp)
                             : "r" (sp));
        __do_softirq();
        __asm__ __volatile__("mov %0, %%sp"
                             : : "r" (orig_sp));
}
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                struct irq_desc *desc = irq_to_desc(irq);
                struct irq_data *data;
                unsigned long flags;

                if (!desc)
                        continue;
                data = irq_desc_get_irq_data(desc);
                raw_spin_lock_irqsave(&desc->lock, flags);
                if (desc->action && !irqd_is_per_cpu(data)) {
                        if (data->chip->irq_set_affinity)
                                data->chip->irq_set_affinity(data,
                                                             data->affinity,
                                                             false);
                }
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        tick_ops->disable_irq();
}
#endif
static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume if node is not present, PROM uses different tick mechanism
         * which we should not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If PROM is really using this, it must already be mapped. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as on sun4c, the PROM uses a timer which ticks at IRQ 14.
         * We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}
void notrace init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
                                       unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}
void notrace sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}
/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        unsigned long order = get_order(size);
        unsigned long p;

        p = __get_free_pages(GFP_KERNEL, order);
        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}
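/* Worked example, with a hypothetical qmask of 0x1fff: the queue is
 * qmask + 1 = 8KB, PAGE_ALIGN() keeps that 8KB on an 8KB-page kernel,
 * and get_order() yields order 0.  The page allocator returns a block
 * naturally aligned to its own size, which satisfies the alignment
 * rule stated above, and register_one_mondo() will later report
 * (qmask + 1) / 64 = 128 entries to the hypervisor.
 */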
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        unsigned long page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_queue(&tb->nonresum_kernel_buf_pa,
                                tb->nonresum_qmask);
        }
}
static void __init init_send_mondo_info(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                init_cpu_send_mondo_info(tb);
        }
}
static struct irqaction timer_irq_action = {
        .name = "timer",
};
static void __init irq_ivector_init(void)
{
        unsigned long size, order;
        unsigned int ivecs;

        /* If we are doing cookie only VIRQs then we do not need the ivector
         * table to process interrupts.
         */
        if (sun4v_cookie_only_virqs())
                return;

        ivecs = size_nr_ivec();
        size = sizeof(struct ino_bucket) * ivecs;
        order = get_order(size);
        ivector_table = (struct ino_bucket *)
                __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
        }
        __flush_dcache_range((unsigned long) ivector_table,
                             ((unsigned long) ivector_table) + size);

        ivector_table_pa = __pa(ivector_table);
}
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        irq_init_hv();
        irq_ivector_init();
        map_prom_timers();
        kill_prom_timer();

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        init_send_mondo_info();

        if (tlb_type == hypervisor) {
                /* Load up the boot cpu's entries.  */
                sun4v_register_mondo_queues(hard_smp_processor_id());
        }

        /* We need to clear any IRQs pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that the ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
                             "or   %%g1, %0, %%g1\n\t"
                             "wrpr %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_to_desc(0)->action = &timer_irq_action;
}