/*
 * Cell Internal Interrupt Controller
 *
 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO:
 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
 *   vs node numbers in the setup code
 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
 *   a non-active node to the active node)
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

struct iic {
        struct cbe_iic_thread_regs __iomem *regs;
        u8 target_id;
        u8 eoi_stack[16];
        int eoi_ptr;
        struct device_node *node;
};

static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT	2
static struct irq_domain *iic_host;
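
/*
 * Note on hw irq numbers, as used by the encode/decode helpers below:
 * the node id sits above IIC_IRQ_NODE_SHIFT, the interrupt class in the
 * next nibble and the unit id in the low four bits; IPIs and IO
 * exceptions are flagged by their IIC_IRQ_TYPE_* bits.
 */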

/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
{
        unsigned char unit = bits.source & 0xf;
        unsigned char node = bits.source >> 4;
        unsigned char class = bits.class & 3;

        if (bits.flags & CBE_IIC_IRQ_IPI)
                return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
        else
                return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
}

static void iic_mask(struct irq_data *d)
{
}

static void iic_unmask(struct irq_data *d)
{
}
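
/*
 * EOI handling: iic_get_irq() pushes the priority of each interrupt it
 * accepts onto a small per-cpu stack, and iic_eoi() pops the previous
 * entry back into the priority register, so a nested interrupt restores
 * the priority of the one it preempted (eoi_stack[0] is primed to 0xff).
 */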

static void iic_eoi(struct irq_data *d)
{
        struct iic *iic = this_cpu_ptr(&cpu_iic);

        out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
        BUG_ON(iic->eoi_ptr < 0);
}

static struct irq_chip iic_chip = {
        .name = "CELL-IIC",
        .irq_mask = iic_mask,
        .irq_unmask = iic_unmask,
        .irq_eoi = iic_eoi,
};

static void iic_ioexc_eoi(struct irq_data *d)
{
}
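
/*
 * IO exceptions from a node's IIC arrive through a single cascade
 * interrupt: the handler below reads the iic_is status word, acks edge
 * sources before dispatching and level sources afterwards, and forwards
 * each set bit to the virq mapped for that IO exception source.
 */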

static void iic_ioexc_cascade(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cbe_iic_regs __iomem *node_iic =
                (void __iomem *)irq_desc_get_handler_data(desc);
        unsigned int irq = irq_desc_get_irq(desc);
        unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
        unsigned long bits, ack;
        int cascade;

        for (;;) {
                bits = in_be64(&node_iic->iic_is);
                if (bits == 0)
                        break;
                /* pre-ack edge interrupts */
                ack = bits & IIC_ISR_EDGE_MASK;
                if (ack)
                        out_be64(&node_iic->iic_is, ack);
                for (cascade = 63; cascade >= 0; cascade--)
                        if (bits & (0x8000000000000000UL >> cascade)) {
                                unsigned int cirq =
                                        irq_linear_revmap(iic_host,
                                                          base | cascade);
                                if (cirq)
                                        generic_handle_irq(cirq);
                        }
                /* post-ack level interrupts */
                ack = bits & ~IIC_ISR_EDGE_MASK;
                if (ack)
                        out_be64(&node_iic->iic_is, ack);
        }
        chip->irq_eoi(&desc->irq_data);
}

static struct irq_chip iic_ioexc_chip = {
        .name = "CELL-IOEX",
        .irq_mask = iic_mask,
        .irq_unmask = iic_unmask,
        .irq_eoi = iic_ioexc_eoi,
};

/* Get an IRQ number from the pending state register of the IIC */
static unsigned int iic_get_irq(void)
{
        struct cbe_iic_pending_bits pending;
        struct iic *iic;
        unsigned int virq;

        iic = this_cpu_ptr(&cpu_iic);
        *(unsigned long *) &pending =
                in_be64((u64 __iomem *) &iic->regs->pending_destr);
        if (!(pending.flags & CBE_IIC_IRQ_VALID))
                return 0;
        virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
        if (!virq)
                return 0;
        iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
        BUG_ON(iic->eoi_ptr > 15);
        return virq;
}

void iic_setup_cpu(void)
{
        out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
}

u8 iic_get_target_id(int cpu)
{
        return per_cpu(cpu_iic, cpu).target_id;
}

EXPORT_SYMBOL_GPL(iic_get_target_id);

#ifdef CONFIG_SMP

/* Use the highest interrupt priorities for IPI */
static inline int iic_msg_to_irq(int msg)
{
        return IIC_IRQ_TYPE_IPI + 0xf - msg;
}
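
/*
 * An IPI for message 'msg' is raised by writing (0xf - msg) << 4 to the
 * target cpu's generate register (iic_message_pass() below); the receiver
 * then sees an IPI whose priority nibble is 0xf - msg, which
 * iic_pending_to_hwnum() turns back into IIC_IRQ_TYPE_IPI + 0xf - msg,
 * matching the mapping created from iic_msg_to_irq().
 */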

void iic_message_pass(int cpu, int msg)
{
        out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
}

static void iic_request_ipi(int msg)
{
        int virq;

        virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
        if (!virq) {
                printk(KERN_ERR
                       "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
                return;
        }

        /*
         * If smp_request_message_ipi encounters an error it reports it
         * itself; if a message is not needed it returns non-zero.
         */
        if (smp_request_message_ipi(virq, msg))
                irq_dispose_mapping(virq);
}

void iic_request_IPIs(void)
{
        iic_request_ipi(PPC_MSG_CALL_FUNCTION);
        iic_request_ipi(PPC_MSG_RESCHEDULE);
        iic_request_ipi(PPC_MSG_TICK_BROADCAST);
        iic_request_ipi(PPC_MSG_NMI_IPI);
}

#endif /* CONFIG_SMP */

static int iic_host_match(struct irq_domain *h, struct device_node *node,
                          enum irq_domain_bus_token bus_token)
{
        return of_device_is_compatible(node,
                                    "IBM,CBEA-Internal-Interrupt-Controller");
}

static int iic_host_map(struct irq_domain *h, unsigned int virq,
                        irq_hw_number_t hw)
{
        switch (hw & IIC_IRQ_TYPE_MASK) {
        case IIC_IRQ_TYPE_IPI:
                irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
                break;
        case IIC_IRQ_TYPE_IOEXC:
                irq_set_chip_and_handler(virq, &iic_ioexc_chip,
                                         handle_edge_eoi_irq);
                break;
        default:
                irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
        }
        return 0;
}
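
/*
 * Device-tree interrupt specifiers for this controller are a single cell,
 * decoded below as: bits 31-24 node, bits 23-16 IO exception number (ext),
 * bits 15-8 class, bits 7-0 unit.
 */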

static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
                          const u32 *intspec, unsigned int intsize,
                          irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        unsigned int node, ext, unit, class;
        const u32 *val;

        if (!of_device_is_compatible(ct,
                                     "IBM,CBEA-Internal-Interrupt-Controller"))
                return -ENODEV;
        if (intsize != 1)
                return -ENODEV;
        val = of_get_property(ct, "#interrupt-cells", NULL);
        if (val == NULL || *val != 1)
                return -ENODEV;

        node = intspec[0] >> 24;
        ext = (intspec[0] >> 16) & 0xff;
        class = (intspec[0] >> 8) & 0xff;
        unit = intspec[0] & 0xff;

        /* Check if node is in supported range */
        if (node > 1)
                return -EINVAL;

        /* Build up interrupt number, special case for IO exceptions */
        *out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
        if (unit == IIC_UNIT_IIC && class == 1)
                *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
        else
                *out_hwirq |= IIC_IRQ_TYPE_NORMAL |
                        (class << IIC_IRQ_CLASS_SHIFT) | unit;

        /* Dummy flags, ignored by iic code */
        *out_flags = IRQ_TYPE_EDGE_RISING;

        return 0;
}

static const struct irq_domain_ops iic_host_ops = {
        .match = iic_host_match,
        .map = iic_host_map,
        .xlate = iic_host_xlate,
};
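
/*
 * Note: the target_id computed in init_one_iic() below appears to encode
 * the BE node in bit 4 (hw_cpu & 2, i.e. two hw threads per node) and the
 * destination thread in the low nibble (0xe for thread 0, 0xf for thread 1);
 * this is a reading of the expression rather than a documented value.
 */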

static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
                                struct device_node *node)
{
        /* XXX FIXME: should locate the linux CPU number from the HW cpu
         * number properly. We are lucky for now
         */
        struct iic *iic = &per_cpu(cpu_iic, hw_cpu);

        iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
        BUG_ON(iic->regs == NULL);

        iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
        iic->eoi_stack[0] = 0xff;
        iic->node = of_node_get(node);
        out_be64(&iic->regs->prio, 0);

        printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n",
               hw_cpu, iic->target_id, node);
}

static int __init setup_iic(void)
{
        struct device_node *dn;
        struct resource r0, r1;
        unsigned int node, cascade, found = 0;
        struct cbe_iic_regs __iomem *node_iic;
        const u32 *np;

        for (dn = NULL;
             (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
                if (!of_device_is_compatible(dn,
                                     "IBM,CBEA-Internal-Interrupt-Controller"))
                        continue;
                np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
                if (np == NULL) {
                        printk(KERN_WARNING "IIC: CPU association not found\n");
                        of_node_put(dn);
                        return -ENODEV;
                }
                if (of_address_to_resource(dn, 0, &r0) ||
                    of_address_to_resource(dn, 1, &r1)) {
                        printk(KERN_WARNING "IIC: Can't resolve addresses\n");
                        of_node_put(dn);
                        return -ENODEV;
                }
                found++;
                init_one_iic(np[0], r0.start, dn);
                init_one_iic(np[1], r1.start, dn);

                /* Setup cascade for IO exceptions. XXX cleanup tricks to get
                 * node vs CPU etc...
                 * Note that we configure the IIC_IRR here with a hard-coded
                 * priority of 1. We might want to improve that later.
                 */
                node = np[0] >> 1;
                node_iic = cbe_get_cpu_iic_regs(np[0]);
                cascade = node << IIC_IRQ_NODE_SHIFT;
                cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
                cascade |= IIC_UNIT_IIC;
                cascade = irq_create_mapping(iic_host, cascade);
                if (!cascade)
                        continue;
                /*
                 * irq_data is a generic pointer that gets passed back
                 * to us later, so the forced cast is fine.
                 */
                irq_set_handler_data(cascade, (void __force *)node_iic);
                irq_set_chained_handler(cascade, iic_ioexc_cascade);
                out_be64(&node_iic->iic_ir,
                         (1 << 12)              /* priority */ |
                         (node << 4)            /* dest node */ |
                         IIC_UNIT_THREAD_0      /* route them to thread 0 */);
                /* Flush pending (make sure it triggers if there is
                 * anything pending)
                 */
                out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
        }

        if (found)
                return 0;
        else
                return -ENODEV;
}

void __init iic_init_IRQ(void)
{
        /* Setup an irq host data structure */
        iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
                                         NULL);
        BUG_ON(iic_host == NULL);
        irq_set_default_host(iic_host);

        /* Discover and initialize iics */
        if (setup_iic() < 0)
                panic("IIC: Failed to initialize !\n");

        /* Set master interrupt handling function */
        ppc_md.get_irq = iic_get_irq;

        /* Enable on current CPU */
        iic_setup_cpu();
}
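
/*
 * iic_set_interrupt_routing() builds the iic_ir value from the
 * CBE_IIC_IR_* field macros: a priority, the destination node (cpu >> 1)
 * and the destination hw thread, then writes it to the node's routing
 * register.
 */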
void iic_set_interrupt_routing(int cpu, int thread, int priority)
{
        struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
        u64 iic_ir = 0;
        int node = cpu >> 1;

        /* Set which node and thread will handle the next interrupt */
        iic_ir |= CBE_IIC_IR_PRIO(priority) |
                  CBE_IIC_IR_DEST_NODE(node);
        if (thread == 0)
                iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
        else
                iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
        out_be64(&iic_regs->iic_ir, iic_ir);
}