/*
 * arch/ppc64/kernel/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
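
/*
 * Overview of the PAPR XICS model this file drives: each processor owns
 * a "presentation" area containing an XIRR (external interrupt request
 * register: read to accept an interrupt, written to EOI it), a CPPR
 * (current processor priority register, the top byte of the XIRR) and a
 * QIRR (queued interrupt request register, used to fire the single
 * inter-processor interrupt).  On bare-metal pSeries these registers are
 * memory-mapped; under an LPAR they are reached through hypervisor calls
 * instead.  Both access methods are implemented below behind the
 * xics_ops indirection.
 */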
#include <linux/config.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/xics.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
static unsigned int xics_startup(unsigned int irq);
static void xics_enable_irq(unsigned int irq);
static void xics_disable_irq(unsigned int irq);
static void xics_mask_and_ack_irq(unsigned int irq);
static void xics_end_irq(unsigned int irq);
static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
static struct hw_interrupt_type xics_pic = {
	.typename = " XICS     ",
	.startup = xics_startup,
	.enable = xics_enable_irq,
	.disable = xics_disable_irq,
	.ack = xics_mask_and_ack_irq,
	.end = xics_end_irq,
	.set_affinity = xics_set_affinity
};

static struct hw_interrupt_type xics_8259_pic = {
	.typename = " XICS/8259",
	.ack = xics_mask_and_ack_irq,
};
/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);

#define XICS_IPI		2
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY	5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked SA_INTERRUPT.
 */
#define IPI_PRIORITY		4
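
/*
 * Note on XICS priorities: numerically lower is more favored; 0 is the
 * most favored priority and 0xff the least.  A processor only takes
 * interrupts more favored than its current CPPR, which is why
 * IPI_PRIORITY (4) is "higher" than DEFAULT_PRIORITY (5), and why 0xff
 * is used below both to fully open a cpu's CPPR and to mask a source's
 * XIVE.
 */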
static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
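
/*
 * Per-cpu presentation area layout assumed by the MMIO accessors below:
 * a poll register, the XIRR and the QIRR, each addressable as a word or
 * as bytes (byte 0 of the XIRR is the CPPR).
 */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};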
static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
typedef struct {
	int (*xirr_info_get)(int cpu);
	void (*xirr_info_set)(int cpu, int val);
	void (*cppr_info)(int cpu, u8 val);
	void (*qirr_info)(int cpu, u8 val);
} xics_ops;
/* Direct MMIO access, used on bare-metal pSeries */

static int pSeries_xirr_info_get(int n_cpu)
{
	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static void pSeries_xirr_info_set(int n_cpu, int value)
{
	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static void pSeries_cppr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static void pSeries_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
static xics_ops pSeries_ops = {
	pSeries_xirr_info_get,
	pSeries_xirr_info_set,
	pSeries_cppr_info,
	pSeries_qirr_info
};

static xics_ops *ops = &pSeries_ops;
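
/*
 * Hypervisor call access, used when running as an LPAR: the presentation
 * registers are not memory-mapped in a partition, so the equivalent
 * H_XIRR/H_EOI/H_CPPR/H_IPI hcalls are used instead.
 */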
static inline long plpar_eoi(unsigned long xirr)
{
	return plpar_hcall_norets(H_EOI, xirr);
}

static inline long plpar_cppr(unsigned long cppr)
{
	return plpar_hcall_norets(H_CPPR, cppr);
}

static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
	return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

static inline long plpar_xirr(unsigned long *xirr_ret)
{
	unsigned long dummy;

	return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}
static int pSeriesLP_xirr_info_get(int n_cpu)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_Success)
		panic("bad return code xirr - rc = %lx\n", lpar_rc);
	return (int)return_value;
}

static void pSeriesLP_xirr_info_set(int n_cpu, int value)
{
	unsigned long lpar_rc;
	unsigned long val64 = value & 0xffffffff;

	lpar_rc = plpar_eoi(val64);
	if (lpar_rc != H_Success)
		panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
		      val64);
}

void pSeriesLP_cppr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_Success)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static void pSeriesLP_qirr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_Success)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}

xics_ops pSeriesLP_ops = {
	pSeriesLP_xirr_info_get,
	pSeriesLP_xirr_info_set,
	pSeriesLP_cppr_info,
	pSeriesLP_qirr_info
};
static unsigned int xics_startup(unsigned int virq)
{
	unsigned int irq;

	irq = irq_offset_down(virq);
	if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
			      &virt_irq_to_real_map[irq]) == -ENOMEM)
		printk(KERN_CRIT "Out of memory creating real -> virtual"
		       " IRQ mapping for irq %u (real 0x%x)\n",
		       virq, virt_irq_to_real(irq));
	xics_enable_irq(virq);
	return 0;	/* return value is ignored */
}
static unsigned int real_irq_to_virt(unsigned int real_irq)
{
	unsigned int *ptr;

	ptr = radix_tree_lookup(&irq_map, real_irq);
	if (ptr == NULL)
		return NO_IRQ;
	return ptr - virt_irq_to_real_map;
}
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
	unsigned int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask = irq_affinity[irq];
	cpumask_t tmp = CPU_MASK_NONE;

	if (!distribute_irqs)
		return default_server;

	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		server = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);

		if (cpus_empty(tmp))
			server = default_distrib_server;
		else
			server = get_hard_smp_processor_id(first_cpu(tmp));
	}

	return server;
}
#else
static int get_irq_server(unsigned int irq)
{
	return default_server;
}
#endif /* CONFIG_SMP */
static void xics_enable_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	unsigned int server;

	irq = virt_irq_to_real(irq_offset_down(virq));
	if (irq == XICS_IPI)
		return;

	server = get_irq_server(virq);
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
		       "returned %d\n", irq, call_status);
		printk("set_xive %x, server %x\n", ibm_set_xive, server);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
		       "returned %d\n", irq, call_status);
		return;
	}
}
static void xics_disable_real_irq(unsigned int irq)
{
	int call_status;
	unsigned int server;

	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
		       "ibm_int_off returned %d\n", irq, call_status);
		return;
	}

	server = get_irq_server(irq);
	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
		       " returned %d\n", irq, call_status);
		return;
	}
}
static void xics_disable_irq(unsigned int virq)
{
	unsigned int irq;

	irq = virt_irq_to_real(irq_offset_down(virq));
	xics_disable_real_irq(irq);
}
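
/*
 * An EOI is performed by writing the XIRR back: the low 24 bits carry
 * the source number being ended and the top byte becomes the new CPPR,
 * so 0xff << 24 simultaneously EOIs and drops back to the least favored
 * (fully open) priority.
 */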
static void xics_end_irq(unsigned int irq)
{
	int cpu = smp_processor_id();

	iosync();
	ops->xirr_info_set(cpu, ((0xff << 24) |
				 (virt_irq_to_real(irq_offset_down(irq)))));
}
static void xics_mask_and_ack_irq(unsigned int irq)
{
	int cpu = smp_processor_id();

	if (irq < irq_offset_value()) {
		i8259_pic.ack(irq);
		iosync();
		ops->xirr_info_set(cpu, ((0xff << 24) |
					 xics_irq_8259_cascade_real));
		iosync();
	}
}
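
/*
 * Accepting an interrupt is done by reading the XIRR: the read returns
 * the source number of the most favored pending interrupt (with the
 * previous CPPR in the top byte) and raises the CPPR to that
 * interrupt's priority until the EOI.
 */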
int xics_get_irq(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	unsigned int vec;
	int irq;

	vec = ops->xirr_info_get(cpu);
	/* (vec >> 24) == old priority */
	vec &= 0x00ffffff;

	/* for sanity, this had better be < NR_IRQS - 16 */
	if (vec == xics_irq_8259_cascade_real) {
		irq = i8259_irq(cpu);
		if (irq == -1) {
			/* Spurious cascaded interrupt.  Still must ack xics */
			xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
		}
	} else if (vec == XICS_IRQ_SPURIOUS) {
		irq = -1;
	} else {
		irq = real_irq_to_virt(vec);
		if (irq == NO_IRQ)
			irq = real_irq_to_virt_slowpath(vec);
		if (irq == NO_IRQ) {
			printk(KERN_ERR "Interrupt %u (real) is invalid,"
			       " disabling it.\n", vec);
			xics_disable_real_irq(vec);
		} else
			irq = irq_offset_up(irq);
	}
	return irq;
}
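
/*
 * XICS has a single IPI per processor (fired by writing the QIRR), so
 * the different message types are multiplexed through the per-cpu
 * xics_ipi_message bitmask; writing 0xff to the QIRR clears the IPI
 * before the pending messages are drained.
 */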
#ifdef CONFIG_SMP
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	ops->qirr_info(cpu, 0xff);

	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
		}
		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
		}
#ifdef CONFIG_DEBUGGER
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
		}
#endif
	}
	return IRQ_HANDLED;
}

void xics_cause_IPI(int cpu)
{
	ops->qirr_info(cpu, IPI_PRIORITY);
}
#endif /* CONFIG_SMP */
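
/* Setting the CPPR to 0xff opens this cpu to interrupts of any priority. */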
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0xff);
	iosync();

	/*
	 * Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 *
	 * XXX: undo of teardown on kexec needs this too, as may hotplug
	 */
	rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}
void xics_init_IRQ(void)
{
	int i;
	unsigned long intr_size = 0;
	struct device_node *np;
	uint *ireg, ilen, indx = 0;
	unsigned long intr_base = 0;
	struct xics_interrupt_node {
		unsigned long addr;
		unsigned long size;
	} intnodes[NR_CPUS];
	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
	if (!np)
		panic("xics_init_IRQ: can't find interrupt presentation");
nextnode:
	ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		indx = *ireg;
	}

	ireg = (uint *)get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");
	/* "reg" is a list of (addr, size) pairs, two 32-bit cells each */
	while (ilen) {
		intnodes[indx].addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(uint);
		intnodes[indx].addr |= *ireg++;
		ilen -= sizeof(uint);
		intnodes[indx].size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(uint);
		intnodes[indx].size |= *ireg++;
		ilen -= sizeof(uint);
		indx++;
		if (indx >= NR_CPUS)
			break;
	}
	np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
	if ((indx < NR_CPUS) && np) goto nextnode;
	/* Find the server numbers for the boot cpu. */
	for (np = of_find_node_by_type(NULL, "cpu");
	     np;
	     np = of_find_node_by_type(np, "cpu")) {
		ireg = (uint *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == boot_cpuid_phys) {
			ireg = (uint *)get_property(np,
					"ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				default_distrib_server = ireg[i-1]; /* take last element */
			}
			ireg = (uint *)get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (ireg)
				interrupt_server_size = *ireg;
			break;
		}
	}
	of_node_put(np);
	intr_base = intnodes[0].addr;
	intr_size = intnodes[0].size;

	np = of_find_node_by_type(NULL, "interrupt-controller");
	if (!np) {
		printk(KERN_WARNING "xics: no ISA interrupt controller\n");
		xics_irq_8259_cascade_real = -1;
		xics_irq_8259_cascade = -1;
	} else {
		ireg = (uint *) get_property(np, "interrupts", NULL);
		if (!ireg)
			panic("xics_init_IRQ: can't find ISA interrupts property");

		xics_irq_8259_cascade_real = *ireg;
		xics_irq_8259_cascade
			= virt_irq_create_mapping(xics_irq_8259_cascade_real);
		of_node_put(np);
	}
	if (systemcfg->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
		for_each_cpu(i) {
			int hard_id;

			/* FIXME: Do this dynamically! --RR */
			if (!cpu_present(i))
				continue;

			hard_id = get_hard_smp_processor_id(i);
			xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
						  intnodes[hard_id].size);
		}
#else
		xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
	} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
		ops = &pSeriesLP_ops;
	}
	xics_8259_pic.enable = i8259_pic.enable;
	xics_8259_pic.disable = i8259_pic.disable;
	for (i = 0; i < 16; ++i)
		get_irq_desc(i)->handler = &xics_8259_pic;
	for (; i < NR_IRQS; ++i)
		get_irq_desc(i)->handler = &xics_pic;

	xics_setup_cpu();

	ppc64_boot_msg(0x21, "XICS Done");
}
/*
 * We can't do this in init_IRQ because we need the memory subsystem up for
 * request_irq()
 */
static int __init xics_setup_i8259(void)
{
	if (ppc64_interrupt_controller == IC_PPC_XIC &&
	    xics_irq_8259_cascade != -1) {
		if (request_irq(irq_offset_up(xics_irq_8259_cascade),
				no_action, 0, "8259 cascade", NULL))
			printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
					"cascade\n");
		i8259_init(0);
	}
	return 0;
}
arch_initcall(xics_setup_i8259);
#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
	virt_irq_to_real_map[XICS_IPI] = XICS_IPI;

	/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
	request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
		    "IPI", NULL);
	get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif
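
/*
 * ibm,get-xive returns the current server in xics_status[0] and the
 * current priority in xics_status[1]; the priority is passed back
 * unchanged to ibm,set-xive so only the server (affinity) moves.
 */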
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	unsigned long newmask;
	cpumask_t tmp = CPU_MASK_NONE;

	irq = virt_irq_to_real(irq_offset_down(virq));
	if (irq == XICS_IPI || irq == NO_IRQ)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		newmask = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);
		if (cpus_empty(tmp))
			return;
		newmask = get_hard_smp_processor_id(first_cpu(tmp));
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   irq, newmask, xics_status[1]);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}
void xics_teardown_cpu(int secondary)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0x00);
	iosync();

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary) {
		/*
		 * we need to EOI the IPI if we got here from kexec down IPI
		 *
		 * probably need to check all the other interrupts too
		 * should we be flagging idle loop instead?
		 * or creating some task to be scheduled?
		 */
		ops->xirr_info_set(cpu, XICS_IPI);
		rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
			(1UL << interrupt_server_size) - 1 -
			default_distrib_server, 0);
	}
}
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	unsigned int irq, virq, cpu = smp_processor_id();

	/* Reject any interrupt that was queued to us... */
	ops->cppr_info(cpu, 0);
	iosync();

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	ops->cppr_info(cpu, DEFAULT_PRIORITY);
	iosync();
	for_each_irq(virq) {
		irq_desc_t *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < irq_offset_value())
			continue;

		desc = get_irq_desc(virq);
		irq = virt_irq_to_real(irq_offset_down(virq));

		/* We still need to get IPIs. */
		if (irq == XICS_IPI || irq == NO_IRQ)
			continue;

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->handler == NULL
		    || desc->action == NULL
		    || desc->handler->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
					"ibm,get-xive returns %d\n",
					virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		desc->handler->set_affinity(virq, CPU_MASK_ALL);
		irq_affinity[virq] = CPU_MASK_ALL;
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */