// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 */
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <asm/io.h>

#include <soc/fsl/qe/qe.h>
/* Size of the linear IRQ domain; hwirq numbers index qe_ic_info[] below */
#define NR_QE_IC_INTS		64

/* QE IC registers offset */
#define QEIC_CICR		0x00	/* global config; cleared in qe_ic_init() */
#define QEIC_CIVEC		0x04	/* low-priority vector; read in qe_ic_get_low_irq() */
#define QEIC_CIPXCC		0x10	/* group X priority (see qe_ic_info[] .pri_reg) */
#define QEIC_CIPYCC		0x14	/* group Y priority */
#define QEIC_CIPWCC		0x18	/* group W priority */
#define QEIC_CIPZCC		0x1c	/* group Z priority */
#define QEIC_CIMR		0x20	/* interrupt mask (internal sources) */
#define QEIC_CRIMR		0x24	/* risc interrupt mask (see .mask_reg users) */
#define QEIC_CIPRTA		0x30	/* risc A priority */
#define QEIC_CIPRTB		0x34	/* risc B priority */
#define QEIC_CHIVEC		0x60	/* high-priority vector; read in qe_ic_get_high_irq() */
46 /* Control registers offset */
49 /* The remapper for this QEIC */
50 struct irq_domain
*irqhost
;
52 /* The "linux" controller struct */
53 struct irq_chip hc_irq
;
55 /* VIRQ numbers of QE high/low irqs */
56 unsigned int virq_high
;
57 unsigned int virq_low
;
61 * QE interrupt controller internal structure
64 /* Location of this source at the QIMR register */
67 /* Mask register offset */
71 * For grouped interrupts sources - the interrupt code as
72 * appears at the group priority register
76 /* Group priority register offset */
/* Serializes read-modify-write access to the QEIC mask registers */
static DEFINE_RAW_SPINLOCK(qe_ic_lock);
82 static struct qe_ic_info qe_ic_info
[] = {
85 .mask_reg
= QEIC_CIMR
,
87 .pri_reg
= QEIC_CIPWCC
,
91 .mask_reg
= QEIC_CIMR
,
93 .pri_reg
= QEIC_CIPWCC
,
97 .mask_reg
= QEIC_CIMR
,
99 .pri_reg
= QEIC_CIPWCC
,
103 .mask_reg
= QEIC_CIMR
,
105 .pri_reg
= QEIC_CIPZCC
,
109 .mask_reg
= QEIC_CIMR
,
111 .pri_reg
= QEIC_CIPZCC
,
115 .mask_reg
= QEIC_CIMR
,
117 .pri_reg
= QEIC_CIPZCC
,
121 .mask_reg
= QEIC_CIMR
,
123 .pri_reg
= QEIC_CIPZCC
,
127 .mask_reg
= QEIC_CIMR
,
129 .pri_reg
= QEIC_CIPZCC
,
133 .mask_reg
= QEIC_CIMR
,
135 .pri_reg
= QEIC_CIPZCC
,
139 .mask_reg
= QEIC_CRIMR
,
141 .pri_reg
= QEIC_CIPRTA
,
145 .mask_reg
= QEIC_CRIMR
,
147 .pri_reg
= QEIC_CIPRTB
,
151 .mask_reg
= QEIC_CRIMR
,
153 .pri_reg
= QEIC_CIPRTB
,
157 .mask_reg
= QEIC_CRIMR
,
159 .pri_reg
= QEIC_CIPRTB
,
163 .mask_reg
= QEIC_CRIMR
,
165 .pri_reg
= QEIC_CIPRTB
,
169 .mask_reg
= QEIC_CIMR
,
171 .pri_reg
= QEIC_CIPXCC
,
175 .mask_reg
= QEIC_CIMR
,
177 .pri_reg
= QEIC_CIPXCC
,
181 .mask_reg
= QEIC_CIMR
,
183 .pri_reg
= QEIC_CIPXCC
,
187 .mask_reg
= QEIC_CIMR
,
189 .pri_reg
= QEIC_CIPXCC
,
193 .mask_reg
= QEIC_CIMR
,
195 .pri_reg
= QEIC_CIPXCC
,
199 .mask_reg
= QEIC_CIMR
,
201 .pri_reg
= QEIC_CIPYCC
,
205 .mask_reg
= QEIC_CIMR
,
207 .pri_reg
= QEIC_CIPYCC
,
211 .mask_reg
= QEIC_CIMR
,
213 .pri_reg
= QEIC_CIPYCC
,
217 .mask_reg
= QEIC_CIMR
,
219 .pri_reg
= QEIC_CIPYCC
,
223 static inline u32
qe_ic_read(__be32 __iomem
*base
, unsigned int reg
)
225 return qe_ioread32be(base
+ (reg
>> 2));
228 static inline void qe_ic_write(__be32 __iomem
*base
, unsigned int reg
,
231 qe_iowrite32be(value
, base
+ (reg
>> 2));
/* Recover the owning qe_ic from a virq (stored via irq_set_chip_data()) */
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
	return irq_get_chip_data(virq);
}
/* Recover the owning qe_ic from irq_data (same chip data as above) */
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
244 static void qe_ic_unmask_irq(struct irq_data
*d
)
246 struct qe_ic
*qe_ic
= qe_ic_from_irq_data(d
);
247 unsigned int src
= irqd_to_hwirq(d
);
251 raw_spin_lock_irqsave(&qe_ic_lock
, flags
);
253 temp
= qe_ic_read(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
);
254 qe_ic_write(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
,
255 temp
| qe_ic_info
[src
].mask
);
257 raw_spin_unlock_irqrestore(&qe_ic_lock
, flags
);
260 static void qe_ic_mask_irq(struct irq_data
*d
)
262 struct qe_ic
*qe_ic
= qe_ic_from_irq_data(d
);
263 unsigned int src
= irqd_to_hwirq(d
);
267 raw_spin_lock_irqsave(&qe_ic_lock
, flags
);
269 temp
= qe_ic_read(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
);
270 qe_ic_write(qe_ic
->regs
, qe_ic_info
[src
].mask_reg
,
271 temp
& ~qe_ic_info
[src
].mask
);
273 /* Flush the above write before enabling interrupts; otherwise,
274 * spurious interrupts will sometimes happen. To be 100% sure
275 * that the write has reached the device before interrupts are
276 * enabled, the mask register would have to be read back; however,
277 * this is not required for correctness, only to avoid wasting
278 * time on a large number of spurious interrupts. In testing,
279 * a sync reduced the observed spurious interrupts to zero.
283 raw_spin_unlock_irqrestore(&qe_ic_lock
, flags
);
286 static struct irq_chip qe_ic_irq_chip
= {
288 .irq_unmask
= qe_ic_unmask_irq
,
289 .irq_mask
= qe_ic_mask_irq
,
290 .irq_mask_ack
= qe_ic_mask_irq
,
293 static int qe_ic_host_match(struct irq_domain
*h
, struct device_node
*node
,
294 enum irq_domain_bus_token bus_token
)
296 /* Exact match, unless qe_ic node is NULL */
297 struct device_node
*of_node
= irq_domain_get_of_node(h
);
298 return of_node
== NULL
|| of_node
== node
;
301 static int qe_ic_host_map(struct irq_domain
*h
, unsigned int virq
,
304 struct qe_ic
*qe_ic
= h
->host_data
;
305 struct irq_chip
*chip
;
307 if (hw
>= ARRAY_SIZE(qe_ic_info
)) {
308 pr_err("%s: Invalid hw irq number for QEIC\n", __func__
);
312 if (qe_ic_info
[hw
].mask
== 0) {
313 printk(KERN_ERR
"Can't map reserved IRQ\n");
317 chip
= &qe_ic
->hc_irq
;
319 irq_set_chip_data(virq
, qe_ic
);
320 irq_set_status_flags(virq
, IRQ_LEVEL
);
322 irq_set_chip_and_handler(virq
, chip
, handle_level_irq
);
327 static const struct irq_domain_ops qe_ic_host_ops
= {
328 .match
= qe_ic_host_match
,
329 .map
= qe_ic_host_map
,
330 .xlate
= irq_domain_xlate_onetwocell
,
333 /* Return an interrupt vector or 0 if no interrupt is pending. */
334 static unsigned int qe_ic_get_low_irq(struct qe_ic
*qe_ic
)
338 BUG_ON(qe_ic
== NULL
);
340 /* get the interrupt source vector. */
341 irq
= qe_ic_read(qe_ic
->regs
, QEIC_CIVEC
) >> 26;
346 return irq_linear_revmap(qe_ic
->irqhost
, irq
);
349 /* Return an interrupt vector or 0 if no interrupt is pending. */
350 static unsigned int qe_ic_get_high_irq(struct qe_ic
*qe_ic
)
354 BUG_ON(qe_ic
== NULL
);
356 /* get the interrupt source vector. */
357 irq
= qe_ic_read(qe_ic
->regs
, QEIC_CHIVEC
) >> 26;
362 return irq_linear_revmap(qe_ic
->irqhost
, irq
);
365 static void qe_ic_cascade_low(struct irq_desc
*desc
)
367 struct qe_ic
*qe_ic
= irq_desc_get_handler_data(desc
);
368 unsigned int cascade_irq
= qe_ic_get_low_irq(qe_ic
);
369 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
371 if (cascade_irq
!= 0)
372 generic_handle_irq(cascade_irq
);
375 chip
->irq_eoi(&desc
->irq_data
);
378 static void qe_ic_cascade_high(struct irq_desc
*desc
)
380 struct qe_ic
*qe_ic
= irq_desc_get_handler_data(desc
);
381 unsigned int cascade_irq
= qe_ic_get_high_irq(qe_ic
);
382 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
384 if (cascade_irq
!= 0)
385 generic_handle_irq(cascade_irq
);
388 chip
->irq_eoi(&desc
->irq_data
);
391 static void qe_ic_cascade_muxed_mpic(struct irq_desc
*desc
)
393 struct qe_ic
*qe_ic
= irq_desc_get_handler_data(desc
);
394 unsigned int cascade_irq
;
395 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
397 cascade_irq
= qe_ic_get_high_irq(qe_ic
);
398 if (cascade_irq
== 0)
399 cascade_irq
= qe_ic_get_low_irq(qe_ic
);
401 if (cascade_irq
!= 0)
402 generic_handle_irq(cascade_irq
);
404 chip
->irq_eoi(&desc
->irq_data
);
407 static void __init
qe_ic_init(struct device_node
*node
)
409 void (*low_handler
)(struct irq_desc
*desc
);
410 void (*high_handler
)(struct irq_desc
*desc
);
415 ret
= of_address_to_resource(node
, 0, &res
);
419 qe_ic
= kzalloc(sizeof(*qe_ic
), GFP_KERNEL
);
423 qe_ic
->irqhost
= irq_domain_add_linear(node
, NR_QE_IC_INTS
,
424 &qe_ic_host_ops
, qe_ic
);
425 if (qe_ic
->irqhost
== NULL
) {
430 qe_ic
->regs
= ioremap(res
.start
, resource_size(&res
));
432 qe_ic
->hc_irq
= qe_ic_irq_chip
;
434 qe_ic
->virq_high
= irq_of_parse_and_map(node
, 0);
435 qe_ic
->virq_low
= irq_of_parse_and_map(node
, 1);
437 if (!qe_ic
->virq_low
) {
438 printk(KERN_ERR
"Failed to map QE_IC low IRQ\n");
442 if (qe_ic
->virq_high
!= qe_ic
->virq_low
) {
443 low_handler
= qe_ic_cascade_low
;
444 high_handler
= qe_ic_cascade_high
;
446 low_handler
= qe_ic_cascade_muxed_mpic
;
450 qe_ic_write(qe_ic
->regs
, QEIC_CICR
, 0);
452 irq_set_handler_data(qe_ic
->virq_low
, qe_ic
);
453 irq_set_chained_handler(qe_ic
->virq_low
, low_handler
);
455 if (qe_ic
->virq_high
&& qe_ic
->virq_high
!= qe_ic
->virq_low
) {
456 irq_set_handler_data(qe_ic
->virq_high
, qe_ic
);
457 irq_set_chained_handler(qe_ic
->virq_high
, high_handler
);
461 static int __init
qe_ic_of_init(void)
463 struct device_node
*np
;
465 np
= of_find_compatible_node(NULL
, NULL
, "fsl,qe-ic");
467 np
= of_find_node_by_type(NULL
, "qeic");
475 subsys_initcall(qe_ic_of_init
);