/*
 * arch/ppc64/kernel/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/xics.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
static unsigned int xics_startup(unsigned int irq);
static void xics_enable_irq(unsigned int irq);
static void xics_disable_irq(unsigned int irq);
static void xics_mask_and_ack_irq(unsigned int irq);
static void xics_end_irq(unsigned int irq);
static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
struct hw_interrupt_type xics_pic = {
        .typename = " XICS     ",
        .startup = xics_startup,
        .enable = xics_enable_irq,
        .disable = xics_disable_irq,
        .ack = xics_mask_and_ack_irq,
        .end = xics_end_irq,
        .set_affinity = xics_set_affinity
};
struct hw_interrupt_type xics_8259_pic = {
        .typename = " XICS/8259",
        .ack = xics_mask_and_ack_irq,
        /* .enable and .disable are copied from i8259_pic in xics_init_IRQ() */
};
/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
#define XICS_IRQ_SPURIOUS       0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY        5
/*
 * Mark IPIs as higher priority so we can take them inside interrupts that
 * aren't marked SA_INTERRUPT
 */
#define IPI_PRIORITY            4
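
/*
 * Background note on the XICS priority model (not from the original
 * source): smaller CPPR values are more favored, so IPI_PRIORITY 4 beats
 * DEFAULT_PRIORITY 5, while 0xff is least favored and effectively masks
 * a source or, written to the CPPR, opens the processor to everything.
 */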
/*
 * Presentation-layer register block.  The layout is reconstructed from
 * how xirr and qirr are accessed in this file (xirr.word, xirr.bytes[0],
 * qirr.bytes[0]); xirr_poll and the pad word sit before them.
 */
struct xics_ipl {
        union {
                u32 word;
                u8 bytes[4];
        } xirr_poll;
        union {
                u32 word;
                u8 bytes[4];
        } xirr;
        u32 dummy;
        union {
                u32 word;
                u8 bytes[4];
        } qirr;
};

static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
static unsigned int default_server = 0xFF;
/* also referenced in smp.c... */
unsigned int default_distrib_server = 0;
unsigned int interrupt_server_size = 8;
/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
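
/*
 * The send side (in smp.c, not shown here) presumably does something
 * like:
 *
 *      set_bit(PPC_MSG_RESCHEDULE, &xics_ipi_message[cpu].value);
 *      xics_cause_IPI(cpu);
 *
 * and xics_ipi_action() below drains every set bit, so multiple message
 * types share the one hardware IPI.
 */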
/* RTAS service tokens */
int ibm_get_xive;
int ibm_set_xive;
int ibm_int_on;
int ibm_int_off;
typedef struct {
        int (*xirr_info_get)(int cpu);
        void (*xirr_info_set)(int cpu, int val);
        void (*cppr_info)(int cpu, u8 val);
        void (*qirr_info)(int cpu, u8 val);
} xics_ops;
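
/*
 * Two implementations of this ops table follow: a native one that talks
 * to the presentation-layer registers with MMIO, and an LPAR one that
 * routes the same four operations through hypervisor calls.
 */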
static int pSeries_xirr_info_get(int n_cpu)
{
        return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}
static void pSeries_xirr_info_set(int n_cpu, int value)
{
        out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}
static void pSeries_cppr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}
static void pSeries_qirr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
static xics_ops pSeries_ops = {
        pSeries_xirr_info_get,
        pSeries_xirr_info_set,
        pSeries_cppr_info,
        pSeries_qirr_info
};

static xics_ops *ops = &pSeries_ops;
static inline long plpar_eoi(unsigned long xirr)
{
        return plpar_hcall_norets(H_EOI, xirr);
}
static inline long plpar_cppr(unsigned long cppr)
{
        return plpar_hcall_norets(H_CPPR, cppr);
}
static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
        return plpar_hcall_norets(H_IPI, servernum, mfrr);
}
static inline long plpar_xirr(unsigned long *xirr_ret)
{
        unsigned long dummy;

        return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}
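
/*
 * Under an LPAR the presentation registers are owned by the hypervisor,
 * so each access becomes one of the hcalls above; a failed hcall is
 * treated as fatal, since interrupt delivery cannot be recovered.
 */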
static int pSeriesLP_xirr_info_get(int n_cpu)
{
        unsigned long lpar_rc;
        unsigned long return_value;

        lpar_rc = plpar_xirr(&return_value);
        if (lpar_rc != H_Success)
                panic("bad return code xirr - rc = %lx\n", lpar_rc);
        return (int)return_value;
}
static void pSeriesLP_xirr_info_set(int n_cpu, int value)
{
        unsigned long lpar_rc;
        unsigned long val64 = value & 0xffffffff;

        lpar_rc = plpar_eoi(val64);
        if (lpar_rc != H_Success)
                panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
                      val64);
}
void pSeriesLP_cppr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_cppr(value);
        if (lpar_rc != H_Success)
                panic("bad return code cppr - rc = %lx\n", lpar_rc);
}
static void pSeriesLP_qirr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
        if (lpar_rc != H_Success)
                panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
xics_ops pSeriesLP_ops = {
        pSeriesLP_xirr_info_get,
        pSeriesLP_xirr_info_set,
        pSeriesLP_cppr_info,
        pSeriesLP_qirr_info
};
static unsigned int xics_startup(unsigned int virq)
{
        unsigned int irq;

        irq = irq_offset_down(virq);
        if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
                              &virt_irq_to_real_map[irq]) == -ENOMEM)
                printk(KERN_CRIT "Out of memory creating real -> virtual"
                       " IRQ mapping for irq %u (real 0x%x)\n",
                       virq, virt_irq_to_real(irq));
        xics_enable_irq(virq);
        return 0;       /* return value is ignored */
}
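
/*
 * The radix tree stores pointers into virt_irq_to_real_map, so the
 * virtual irq number can be recovered by plain pointer arithmetic on
 * the lookup result.
 */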
static unsigned int real_irq_to_virt(unsigned int real_irq)
{
        unsigned int *ptr;

        ptr = radix_tree_lookup(&irq_map, real_irq);
        if (ptr == NULL)
                return NO_IRQ;
        return ptr - virt_irq_to_real_map;
}
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
        unsigned int server;
        /* For the moment only implement delivery to all cpus or one cpu */
        cpumask_t cpumask = irq_affinity[irq];
        cpumask_t tmp = CPU_MASK_NONE;

        if (!distribute_irqs)
                return default_server;

        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                server = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);

                if (cpus_empty(tmp))
                        server = default_distrib_server;
                else
                        server = get_hard_smp_processor_id(first_cpu(tmp));
        }

        return server;
}
#else
static int get_irq_server(unsigned int irq)
{
        return default_server;
}
#endif /* CONFIG_SMP */
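
/*
 * Enabling an interrupt is a two step RTAS sequence: point the source
 * at a server and give it a usable priority with ibm,set-xive, then
 * unmask it with ibm,int-on.
 */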
static void xics_enable_irq(unsigned int virq)
{
        unsigned int irq;
        int call_status;
        unsigned int server;

        irq = virt_irq_to_real(irq_offset_down(virq));
        if (irq == XICS_IPI)
                return;

        server = get_irq_server(virq);
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
        if (call_status != 0) {
                printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_set_xive "
                       "returned %x\n", irq, call_status);
                return;
        }

        /* Now unmask the interrupt (often a no-op) */
        call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_int_on "
                       "returned %x\n", irq, call_status);
                return;
        }
}
static void xics_disable_real_irq(unsigned int irq)
{
        int call_status;
        unsigned int server;

        if (irq == XICS_IPI)
                return;

        call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_disable_real_irq: irq=%d: "
                       "ibm_int_off returned %x\n", irq, call_status);
                return;
        }

        server = get_irq_server(irq);
        /* Have to set XIVE to 0xff to be able to remove a slot */
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
        if (call_status != 0) {
                printk(KERN_ERR "xics_disable_irq: irq=%d: ibm_set_xive(0xff)"
                       " returned %x\n", irq, call_status);
                return;
        }
}
static void xics_disable_irq(unsigned int virq)
{
        unsigned int irq;

        irq = virt_irq_to_real(irq_offset_down(virq));
        xics_disable_real_irq(irq);
}
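
/*
 * A write to XIRR is both an EOI and a CPPR update: the top byte is the
 * new processor priority (0xff = accept anything again) and the low 24
 * bits name the source being EOI'd.
 */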
static void xics_end_irq(unsigned int irq)
{
        int cpu = smp_processor_id();

        iosync();
        ops->xirr_info_set(cpu, ((0xff << 24) |
                                 (virt_irq_to_real(irq_offset_down(irq)))));
}
static void xics_mask_and_ack_irq(unsigned int irq)
{
        int cpu = smp_processor_id();

        if (irq < irq_offset_value()) {
                i8259_pic.ack(irq);
                iosync();
                ops->xirr_info_set(cpu, ((0xff << 24) |
                                         xics_irq_8259_cascade_real));
                iosync();
        }
}
int xics_get_irq(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        unsigned int vec;
        int irq;

        vec = ops->xirr_info_get(cpu);
        /*  (vec >> 24) == old priority */
        vec &= 0x00ffffff;

        /* for sanity, this had better be < NR_IRQS - 16 */
        if (vec == xics_irq_8259_cascade_real) {
                irq = i8259_irq(cpu);
                if (irq == -1) {
                        /* Spurious cascaded interrupt.  Still must ack xics */
                        xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
                }
        } else if (vec == XICS_IRQ_SPURIOUS) {
                irq = -1;
        } else {
                irq = real_irq_to_virt(vec);
                if (irq == NO_IRQ)
                        irq = real_irq_to_virt_slowpath(vec);
                if (irq == NO_IRQ) {
                        printk(KERN_ERR "Interrupt %d (real) is invalid,"
                               " disabling it.\n", vec);
                        xics_disable_real_irq(vec);
                        irq = -1;
                } else
                        irq = irq_offset_up(irq);
        }
        return irq;
}
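
/*
 * IPI handling.  The 0xff write to QIRR below deasserts the pending IPI
 * before the per-cpu message word is drained, so a message posted while
 * we are in here re-raises the interrupt rather than being lost.
 */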
#ifdef CONFIG_SMP
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        ops->qirr_info(cpu, 0xff);
        WARN_ON(cpu_is_offline(cpu));

        while (xics_ipi_message[cpu].value) {
                if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
                }
                if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_RESCHEDULE, regs);
                }
                if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
                }
#ifdef CONFIG_DEBUGGER
                if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
                }
#endif
        }
        return IRQ_HANDLED;
}
void xics_cause_IPI(int cpu)
{
        ops->qirr_info(cpu, IPI_PRIORITY);
}
#endif /* CONFIG_SMP */
void xics_setup_cpu(void)
{
        int cpu = smp_processor_id();

        ops->cppr_info(cpu, 0xff);
        iosync();

        /*
         * Put the calling processor into the GIQ.  This is really only
         * necessary from a secondary thread as the OF start-cpu interface
         * performs this function for us on primary threads.
         *
         * XXX: undo of teardown on kexec needs this too, as may hotplug
         */
        rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}
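
/*
 * Joining and leaving the global interrupt queue use the same indicator:
 * the queue is addressed as ((1 << interrupt_server_size) - 1) minus the
 * distribution server number, and the last argument selects join (1,
 * here) or leave (0, in xics_teardown_cpu and xics_migrate_irqs_away).
 */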
void xics_init_IRQ(void)
{
        int i;
        unsigned long intr_size = 0;
        struct device_node *np;
        uint *ireg, ilen, indx = 0;
        unsigned long intr_base = 0;
        struct xics_interrupt_node {
                unsigned long addr;
                unsigned long size;
        } intnodes[NR_CPUS];
        ppc64_boot_msg(0x20, "XICS Init");

        ibm_get_xive = rtas_token("ibm,get-xive");
        ibm_set_xive = rtas_token("ibm,set-xive");
        ibm_int_on = rtas_token("ibm,int-on");
        ibm_int_off = rtas_token("ibm,int-off");
        np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
        if (!np)
                panic("xics_init_IRQ: can't find interrupt presentation");
nextnode:
        ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
        if (ireg) {
                /*
                 * set node starting index for this node
                 */
                indx = *ireg;
        }
        ireg = (uint *)get_property(np, "reg", &ilen);
        if (!ireg)
                panic("xics_init_IRQ: can't find interrupt reg property");
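
        /*
         * Each "reg" entry is four 32-bit cells: a 64-bit address
         * followed by a 64-bit size, each split across two cells.
         */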
        while (ilen) {
                intnodes[indx].addr = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(uint);
                intnodes[indx].addr |= *ireg++;
                ilen -= sizeof(uint);
                intnodes[indx].size = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(uint);
                intnodes[indx].size |= *ireg++;
                ilen -= sizeof(uint);
                indx++;
                if (indx >= NR_CPUS) break;
        }
        np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
        if ((indx < NR_CPUS) && np) goto nextnode;
        /* Find the server numbers for the boot cpu. */
        for (np = of_find_node_by_type(NULL, "cpu");
             np;
             np = of_find_node_by_type(np, "cpu")) {
                ireg = (uint *)get_property(np, "reg", &ilen);
                if (ireg && ireg[0] == boot_cpuid_phys) {
                        ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s",
                                                    &ilen);
                        i = ilen / sizeof(int);
                        if (ireg && i > 0) {
                                default_server = ireg[0];
                                default_distrib_server = ireg[i-1]; /* take last element */
                        }
                        ireg = (uint *)get_property(np,
                                        "ibm,interrupt-server#-size", NULL);
                        if (ireg)
                                interrupt_server_size = *ireg;
                        break;
                }
        }
        of_node_put(np);
        intr_base = intnodes[0].addr;
        intr_size = intnodes[0].size;
        np = of_find_node_by_type(NULL, "interrupt-controller");
        if (!np) {
                printk(KERN_WARNING "xics: no ISA interrupt controller\n");
                xics_irq_8259_cascade_real = -1;
                xics_irq_8259_cascade = -1;
        } else {
                ireg = (uint *) get_property(np, "interrupts", NULL);
                if (!ireg)
                        panic("xics_init_IRQ: can't find ISA interrupts property");

                xics_irq_8259_cascade_real = *ireg;
                xics_irq_8259_cascade
                        = virt_irq_create_mapping(xics_irq_8259_cascade_real);
                of_node_put(np);
        }
        if (systemcfg->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
                /* Map each cpu's presentation area (loop reconstructed) */
                for_each_cpu(i) {
                        int hard_id;

                        /* FIXME: Do this dynamically! --RR */
                        if (!cpu_present(i))
                                continue;

                        hard_id = get_hard_smp_processor_id(i);
                        xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
                                                  intnodes[hard_id].size);
                }
#else
                xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
        } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
                ops = &pSeriesLP_ops;
        }
        xics_8259_pic.enable = i8259_pic.enable;
        xics_8259_pic.disable = i8259_pic.disable;
        for (i = 0; i < 16; ++i)
                get_irq_desc(i)->handler = &xics_8259_pic;
        for (; i < NR_IRQS; ++i)
                get_irq_desc(i)->handler = &xics_pic;

        xics_setup_cpu();

        ppc64_boot_msg(0x21, "XICS Done");
}
/*
 * We can't do this in init_IRQ because we need the memory subsystem up for
 * request_irq()
 */
static int __init xics_setup_i8259(void)
{
        if (ppc64_interrupt_controller == IC_PPC_XIC &&
            xics_irq_8259_cascade != -1) {
                if (request_irq(irq_offset_up(xics_irq_8259_cascade),
                                no_action, 0, "8259 cascade", NULL))
                        printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
                                        "cascade\n");
                i8259_init(0);
        }
        return 0;
}
arch_initcall(xics_setup_i8259);
#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
        virt_irq_to_real_map[XICS_IPI] = XICS_IPI;

        /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
        request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
                    "IPI", NULL);
        get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif /* CONFIG_SMP */
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
        unsigned int irq;
        int status;
        int xics_status[2];
        unsigned long newmask;
        cpumask_t tmp = CPU_MASK_NONE;

        irq = virt_irq_to_real(irq_offset_down(virq));
        if (irq == XICS_IPI || irq == NO_IRQ)
                return;

        status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%d ibm,get-xive "
                       "returns %d\n", irq, status);
                return;
        }

        /* For the moment only implement delivery to all cpus or one cpu */
        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                newmask = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);
                if (cpus_empty(tmp))
                        return;
                newmask = get_hard_smp_processor_id(first_cpu(tmp));
        }

        status = rtas_call(ibm_set_xive, 3, 1, NULL,
                           irq, newmask, xics_status[1]);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%d ibm,set-xive "
                       "returns %d\n", irq, status);
                return;
        }
}
void xics_teardown_cpu(void)
{
        int cpu = smp_processor_id();
        int status;

        ops->cppr_info(cpu, 0x00);
        iosync();

        /*
         * we need to EOI the IPI if we got here from kexec down IPI
         *
         * xics doesn't care if we duplicate an EOI as long as we
         * don't EOI and raise priority.
         *
         * probably need to check all the other interrupts too
         * should we be flagging idle loop instead?
         * or creating some task to be scheduled?
         */
        ops->xirr_info_set(cpu, XICS_IPI);

        status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
        WARN_ON(status != 0);
}
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
        int status;
        unsigned int irq, virq, cpu = smp_processor_id();

        /* Reject any interrupt that was queued to us... */
        ops->cppr_info(cpu, 0);
        iosync();

        /* remove ourselves from the global interrupt queue */
        status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
        WARN_ON(status < 0);

        /* Allow IPIs again... */
        ops->cppr_info(cpu, DEFAULT_PRIORITY);
        iosync();

        for_each_irq(virq) {
                irq_desc_t *desc;
                int xics_status[2];
                unsigned long flags;

                /* We can't set affinity on ISA interrupts */
                if (virq < irq_offset_value())
                        continue;

                desc = get_irq_desc(virq);
                irq = virt_irq_to_real(irq_offset_down(virq));

                /* We need to get IPIs still. */
                if (irq == XICS_IPI || irq == NO_IRQ)
                        continue;

                /* We only need to migrate enabled IRQS */
                if (desc == NULL || desc->handler == NULL
                    || desc->action == NULL
                    || desc->handler->set_affinity == NULL)
                        continue;

                spin_lock_irqsave(&desc->lock, flags);

                status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
                if (status) {
                        printk(KERN_ERR "migrate_irqs_away: irq=%d "
                                        "ibm,get-xive returns %d\n",
                                        virq, status);
                        goto unlock;
                }

                /*
                 * We only support delivery to all cpus or to one cpu.
                 * The irq has to be migrated only in the single cpu
                 * case.
                 */
                if (xics_status[0] != get_hard_smp_processor_id(cpu))
                        goto unlock;

                printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
                       virq, cpu);

                /* Reset affinity to all cpus */
                desc->handler->set_affinity(virq, CPU_MASK_ALL);
                irq_affinity[virq] = CPU_MASK_ALL;
unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */