// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 */
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/qe_ic.h>

#include "qe_ic.h"
static DEFINE_RAW_SPINLOCK(qe_ic_lock);
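
/*
 * Per-source configuration table for the QE interrupt controller.  Each
 * entry records which mask register (CIMR or CRIMR) and which group
 * priority register (CIPWCC/CIPXCC/CIPYCC/CIPZCC/CIPRTA/CIPRTB) control
 * the source; the mask/unmask and priority helpers below also use the
 * entry's per-source mask bit (.mask) and priority code (.pri_code).
 */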
static struct qe_ic_info qe_ic_info[] = {
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPWCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPWCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPWCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTA, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
};
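
/*
 * QEIC registers are big-endian 32-bit quantities.  'reg' is a byte offset
 * into the register block, so it is divided by four before being added to
 * the __be32 pointer.
 */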
static inline u32 qe_ic_read(volatile __be32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}
static inline void qe_ic_write(volatile __be32 __iomem *base, unsigned int reg,
			       u32 value)
{
	out_be32(base + (reg >> 2), value);
}
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
	return irq_get_chip_data(virq);
}
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
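
/*
 * Masking and unmasking is a read-modify-write of the source's mask
 * register (CIMR or CRIMR), serialized by qe_ic_lock since several sources
 * share the same register.
 */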
static void qe_ic_unmask_irq(struct irq_data *d)
{
	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp | qe_ic_info[src].mask);

	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}
static void qe_ic_mask_irq(struct irq_data *d)
{
	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp & ~qe_ic_info[src].mask);

	/* Flush the above write before enabling interrupts; otherwise,
	 * spurious interrupts will sometimes happen.  To be 100% sure
	 * that the write has reached the device before interrupts are
	 * enabled, the mask register would have to be read back; however,
	 * this is not required for correctness, only to avoid wasting
	 * time on a large number of spurious interrupts.  In testing,
	 * a sync reduced the observed spurious interrupts to zero.
	 */
	mb();

	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}
static struct irq_chip qe_ic_irq_chip = {
	.name = "QEIC",
	.irq_unmask = qe_ic_unmask_irq,
	.irq_mask = qe_ic_mask_irq,
	.irq_mask_ack = qe_ic_mask_irq,
};
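
/*
 * irq_domain callbacks: match the QEIC device-tree node and set up each
 * virtual interrupt as a level IRQ handled through qe_ic_irq_chip.
 */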
static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
			    enum irq_domain_bus_token bus_token)
{
	/* Exact match, unless qe_ic node is NULL */
	struct device_node *of_node = irq_domain_get_of_node(h);
	return of_node == NULL || of_node == node;
}
static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct qe_ic *qe_ic = h->host_data;
	struct irq_chip *chip;

	if (hw >= ARRAY_SIZE(qe_ic_info)) {
		pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
		return -EINVAL;
	}

	if (qe_ic_info[hw].mask == 0) {
		printk(KERN_ERR "Can't map reserved IRQ\n");
		return -EINVAL;
	}

	/* Default chip */
	chip = &qe_ic->hc_irq;

	irq_set_chip_data(virq, qe_ic);
	irq_set_status_flags(virq, IRQ_LEVEL);

	irq_set_chip_and_handler(virq, chip, handle_level_irq);

	return 0;
}
static const struct irq_domain_ops qe_ic_host_ops = {
	.match = qe_ic_host_match,
	.map = qe_ic_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};
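
/*
 * CIVEC/CHIVEC report the pending interrupt source for the low and high
 * interrupt output signals respectively; the source number sits in the
 * upper six bits of the register, hence the shift by 26 below.
 */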
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}
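
/*
 * Set up the QEIC from its device-tree node and chain it behind the parent
 * PIC.  Board code typically calls this with the cascade handlers declared
 * in <soc/fsl/qe/qe_ic.h>, for example:
 *
 *	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
 */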
void __init qe_ic_init(struct device_node *node, unsigned int flags,
		       void (*low_handler)(struct irq_desc *desc),
		       void (*high_handler)(struct irq_desc *desc))
{
	struct qe_ic *qe_ic;
	struct resource res;
	u32 temp = 0, ret, high_active = 0;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return;

	qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
	if (qe_ic == NULL)
		return;

	qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
					       &qe_ic_host_ops, qe_ic);
	if (qe_ic->irqhost == NULL) {
		kfree(qe_ic);
		return;
	}

	qe_ic->regs = ioremap(res.start, resource_size(&res));

	qe_ic->hc_irq = qe_ic_irq_chip;

	qe_ic->virq_high = irq_of_parse_and_map(node, 0);
	qe_ic->virq_low = irq_of_parse_and_map(node, 1);

	if (qe_ic->virq_low == NO_IRQ) {
		printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
		kfree(qe_ic);
		return;
	}

	/* default priority scheme is grouped. If spread mode is */
	/* required, configure cicr accordingly. */
	if (flags & QE_IC_SPREADMODE_GRP_W)
		temp |= CICR_GWCC;
	if (flags & QE_IC_SPREADMODE_GRP_X)
		temp |= CICR_GXCC;
	if (flags & QE_IC_SPREADMODE_GRP_Y)
		temp |= CICR_GYCC;
	if (flags & QE_IC_SPREADMODE_GRP_Z)
		temp |= CICR_GZCC;
	if (flags & QE_IC_SPREADMODE_GRP_RISCA)
		temp |= CICR_GRTA;
	if (flags & QE_IC_SPREADMODE_GRP_RISCB)
		temp |= CICR_GRTB;

	/* choose destination signal for highest priority interrupt */
	if (flags & QE_IC_HIGH_SIGNAL) {
		temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
		high_active = 1;
	}

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);

	irq_set_handler_data(qe_ic->virq_low, qe_ic);
	irq_set_chained_handler(qe_ic->virq_low, low_handler);

	if (qe_ic->virq_high != NO_IRQ &&
	    qe_ic->virq_high != qe_ic->virq_low) {
		irq_set_handler_data(qe_ic->virq_high, qe_ic);
		irq_set_chained_handler(qe_ic->virq_high, high_handler);
	}
}
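
/*
 * Route 'virq' to the programmable highest-priority slot in CICR and steer
 * it to the high or low interrupt output signal.
 */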
void qe_ic_set_highest_priority(unsigned int virq, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	temp = qe_ic_read(qe_ic->regs, QEIC_CICR);

	temp &= ~CICR_HP_MASK;
	temp |= src << CICR_HP_SHIFT;

	temp &= ~CICR_HPIT_MASK;
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
}
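
/*
 * Group priority registers pack one 3-bit priority code per slot; the slot
 * offset is computed from either the top or the bottom half of the register
 * depending on the priority level, hence the two shift formulas below.
 */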
/* Set Priority level within its group, from 1 to 8 */
int qe_ic_set_priority(unsigned int virq, unsigned int priority)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	if (priority > 8 || priority == 0)
		return -EINVAL;
	if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
		      "%s: Invalid hw irq number for QEIC\n", __func__))
		return -EINVAL;
	if (qe_ic_info[src].pri_reg == 0)
		return -EINVAL;

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);

	if (priority < 4) {
		temp &= ~(0x7 << (32 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
	} else {
		temp &= ~(0x7 << (24 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
	}

	qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);

	return 0;
}
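
/*
 * The CICNR/CRICR field selected below determines whether priority slot 1
 * or 2 of the source's group is driven onto the high or low interrupt
 * output signal.
 */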
/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp, control_reg = QEIC_CICNR, shift = 0;

	if (priority > 2 || priority == 0)
		return -EINVAL;
	if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
		      "%s: Invalid hw irq number for QEIC\n", __func__))
		return -EINVAL;

	switch (qe_ic_info[src].pri_reg) {
	case QEIC_CIPZCC:
		shift = CICNR_ZCC1T_SHIFT;
		break;
	case QEIC_CIPWCC:
		shift = CICNR_WCC1T_SHIFT;
		break;
	case QEIC_CIPYCC:
		shift = CICNR_YCC1T_SHIFT;
		break;
	case QEIC_CIPXCC:
		shift = CICNR_XCC1T_SHIFT;
		break;
	case QEIC_CIPRTA:
		shift = CRICR_RTA1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	case QEIC_CIPRTB:
		shift = CRICR_RTB1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	default:
		return -EINVAL;
	}

	shift += (2 - priority) * 2;
	temp = qe_ic_read(qe_ic->regs, control_reg);
	temp &= ~(SIGNAL_MASK << shift);
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
	qe_ic_write(qe_ic->regs, control_reg, temp);

	return 0;
}
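
/*
 * Register a minimal subsystem and device so the interrupt controller is
 * visible under /sys/devices/system/.
 */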
static struct bus_type qe_ic_subsys = {
	.name = "qe_ic",
	.dev_name = "qe_ic",
};

static struct device device_qe_ic = {
	.id = 0,
	.bus = &qe_ic_subsys,
};

static int __init init_qe_ic_sysfs(void)
{
	int rc;

	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");

	rc = subsys_system_register(&qe_ic_subsys, NULL);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys class\n");
		return -ENODEV;
	}
	rc = device_register(&device_qe_ic);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys device\n");
		return -ENODEV;
	}

	return 0;
}

subsys_initcall(init_qe_ic_sysfs);