/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/qe_ic.h>

#include "qe_ic.h"
static DEFINE_SPINLOCK(qe_ic_lock);
static struct qe_ic_info qe_ic_info[] = {
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPWCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPWCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPWCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPZCC, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTA, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CRIMR, .pri_reg = QEIC_CIPRTB, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPXCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
	{ .mask_reg = QEIC_CIMR,  .pri_reg = QEIC_CIPYCC, },
};
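/*
 * Each element of this table also carries a per-source .mask bit and a
 * 3-bit .pri_code in the full driver (both are used by qe_ic_host_map()
 * and qe_ic_set_priority() below), and the entries sit at their hardware
 * interrupt numbers via designated initializers; those details did not
 * survive here.  The disabled block below only illustrates that shape --
 * the index and the numeric values are made-up examples.
 */
#if 0
static struct qe_ic_info example_qe_ic_info[] = {
	[1] = {
		.mask = 0x00008000,	/* example: bit that masks this source in CIMR */
		.mask_reg = QEIC_CIMR,
		.pri_code = 0,		/* example: code written into the priority field */
		.pri_reg = QEIC_CIPWCC,
	},
};
#endif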
static inline u32 qe_ic_read(volatile __be32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

static inline void qe_ic_write(volatile __be32 __iomem *base, unsigned int reg,
			       u32 value)
{
	out_be32(base + (reg >> 2), value);
}

static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
	return irq_desc[virq].chip_data;
}

#define virq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq)
static void qe_ic_unmask_irq(unsigned int virq)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	unsigned long flags;
	u32 temp;

	spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp | qe_ic_info[src].mask);

	spin_unlock_irqrestore(&qe_ic_lock, flags);
}
static void qe_ic_mask_irq(unsigned int virq)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	unsigned long flags;
	u32 temp;

	spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp & ~qe_ic_info[src].mask);

	/* Flush the above write before enabling interrupts; otherwise,
	 * spurious interrupts will sometimes happen.  To be 100% sure
	 * that the write has reached the device before interrupts are
	 * enabled, the mask register would have to be read back; however,
	 * this is not required for correctness, only to avoid wasting
	 * time on a large number of spurious interrupts.  In testing,
	 * a sync reduced the observed spurious interrupts to zero.
	 */
	mb();

	spin_unlock_irqrestore(&qe_ic_lock, flags);
}
static struct irq_chip qe_ic_irq_chip = {
	.typename = " QEIC ",
	.unmask = qe_ic_unmask_irq,
	.mask = qe_ic_mask_irq,
	.mask_ack = qe_ic_mask_irq,
};
static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
{
	/* Exact match, unless qe_ic node is NULL */
	return h->of_node == NULL || h->of_node == node;
}
static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct qe_ic *qe_ic = h->host_data;
	struct irq_chip *chip;

	if (qe_ic_info[hw].mask == 0) {
		printk(KERN_ERR "Can't map reserved IRQ\n");
		return -EINVAL;
	}

	/* Default chip */
	chip = &qe_ic->hc_irq;

	set_irq_chip_data(virq, qe_ic);
	get_irq_desc(virq)->status |= IRQ_LEVEL;

	set_irq_chip_and_handler(virq, chip, handle_level_irq);

	return 0;
}
static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
			    u32 *intspec, unsigned int intsize,
			    irq_hw_number_t *out_hwirq,
			    unsigned int *out_flags)
{
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_flags = intspec[1];
	else
		*out_flags = IRQ_TYPE_NONE;
	return 0;
}
static struct irq_host_ops qe_ic_host_ops = {
	.match = qe_ic_host_match,
	.map = qe_ic_host_map,
	.xlate = qe_ic_host_xlate,
};
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}
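/*
 * These two lookup routines are meant to be called from the chained
 * handlers that board code hands to qe_ic_init() below.  The disabled
 * sketch that follows shows the usual cascade pattern; the function name
 * is hypothetical, and real board code typically also acks/eois the
 * parent interrupt controller.
 */
#if 0
static void qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
{
	struct qe_ic *qe_ic = get_irq_data(irq);
	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
}
#endif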
void __init qe_ic_init(struct device_node *node, unsigned int flags,
		       void (*low_handler)(unsigned int irq, struct irq_desc *desc),
		       void (*high_handler)(unsigned int irq, struct irq_desc *desc))
{
	struct qe_ic *qe_ic;
	struct resource res;
	u32 temp = 0, ret, high_active = 0;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return;

	qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
	if (qe_ic == NULL)
		return;

	qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
					NR_QE_IC_INTS, &qe_ic_host_ops, 0);
	if (qe_ic->irqhost == NULL) {
		kfree(qe_ic);
		return;
	}

	qe_ic->regs = ioremap(res.start, res.end - res.start + 1);

	qe_ic->irqhost->host_data = qe_ic;
	qe_ic->hc_irq = qe_ic_irq_chip;

	qe_ic->virq_high = irq_of_parse_and_map(node, 0);
	qe_ic->virq_low = irq_of_parse_and_map(node, 1);

	if (qe_ic->virq_low == NO_IRQ) {
		printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
		kfree(qe_ic);
		return;
	}

	/* default priority scheme is grouped. If spread mode is */
	/* required, configure cicr accordingly.                  */
	if (flags & QE_IC_SPREADMODE_GRP_W)
		temp |= CICR_GWCC;
	if (flags & QE_IC_SPREADMODE_GRP_X)
		temp |= CICR_GXCC;
	if (flags & QE_IC_SPREADMODE_GRP_Y)
		temp |= CICR_GYCC;
	if (flags & QE_IC_SPREADMODE_GRP_Z)
		temp |= CICR_GZCC;
	if (flags & QE_IC_SPREADMODE_GRP_RISCA)
		temp |= CICR_GRTA;
	if (flags & QE_IC_SPREADMODE_GRP_RISCB)
		temp |= CICR_GRTB;

	/* choose destination signal for highest priority interrupt */
	if (flags & QE_IC_HIGH_SIGNAL) {
		temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
		high_active = 1;
	}

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);

	set_irq_data(qe_ic->virq_low, qe_ic);
	set_irq_chained_handler(qe_ic->virq_low, low_handler);

	if (qe_ic->virq_high != NO_IRQ &&
	    qe_ic->virq_high != qe_ic->virq_low) {
		set_irq_data(qe_ic->virq_high, qe_ic);
		set_irq_chained_handler(qe_ic->virq_high, high_handler);
	}
}
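/*
 * qe_ic_init() is called from platform setup code rather than through a
 * driver-model probe.  The disabled sketch below shows one plausible
 * caller; the compatible string, the function name, and the cascade
 * handler names are assumptions for illustration only.
 */
#if 0
static void __init example_qe_ic_setup(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
	if (np == NULL)
		return;

	qe_ic_init(np, 0, qe_ic_cascade_low, qe_ic_cascade_high);
	of_node_put(np);
}
#endif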
void qe_ic_set_highest_priority(unsigned int virq, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	temp = qe_ic_read(qe_ic->regs, QEIC_CICR);

	temp &= ~CICR_HP_MASK;
	temp |= src << CICR_HP_SHIFT;

	temp &= ~CICR_HPIT_MASK;
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
}
/* Set Priority level within its group, from 1 to 8 */
int qe_ic_set_priority(unsigned int virq, unsigned int priority)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	if (priority > 8 || priority == 0)
		return -EINVAL;
	if (qe_ic_info[src].pri_reg == 0)
		return -EINVAL;

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);

	if (priority < 4) {
		temp &= ~(0x7 << (32 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
	} else {
		temp &= ~(0x7 << (24 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
	}

	qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);

	return 0;
}
/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp, control_reg = QEIC_CICNR, shift = 0;

	if (priority > 2 || priority == 0)
		return -EINVAL;

	switch (qe_ic_info[src].pri_reg) {
	case QEIC_CIPZCC:
		shift = CICNR_ZCC1T_SHIFT;
		break;
	case QEIC_CIPWCC:
		shift = CICNR_WCC1T_SHIFT;
		break;
	case QEIC_CIPYCC:
		shift = CICNR_YCC1T_SHIFT;
		break;
	case QEIC_CIPXCC:
		shift = CICNR_XCC1T_SHIFT;
		break;
	case QEIC_CIPRTA:
		shift = CRICR_RTA1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	case QEIC_CIPRTB:
		shift = CRICR_RTB1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	default:
		return -EINVAL;
	}

	shift += (2 - priority) * 2;
	temp = qe_ic_read(qe_ic->regs, control_reg);
	temp &= ~(SIGNAL_MASK << shift);
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
	qe_ic_write(qe_ic->regs, control_reg, temp);

	return 0;
}
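/*
 * Illustrative use of the priority helpers from a driver or board file
 * (disabled sketch; 'virq' would come from irq_of_parse_and_map() and
 * the chosen values are arbitrary examples).
 */
#if 0
static void example_tune_qe_irq(unsigned int virq)
{
	/* Priority 1 within the source's group... */
	if (qe_ic_set_priority(virq, 1))
		printk(KERN_WARNING "qe_ic: could not set group priority\n");

	/* ...and steer that priority slot to the high-priority output. */
	if (qe_ic_set_high_priority(virq, 1, 1))
		printk(KERN_WARNING "qe_ic: could not route to high IRQ\n");
}
#endif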
static struct sysdev_class qe_ic_sysclass = {
	set_kset_name("qe_ic"),
};

static struct sys_device device_qe_ic = {
	.cls = &qe_ic_sysclass,
};

static int __init init_qe_ic_sysfs(void)
{
	int rc;

	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");

	rc = sysdev_class_register(&qe_ic_sysclass);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys class\n");
		return -ENODEV;
	}
	rc = sysdev_register(&device_qe_ic);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys device\n");
		return -ENODEV;
	}
	return 0;
}

subsys_initcall(init_qe_ic_sysfs);