/*
 * linux/arch/arm/mach-mmp/irq.c
 *
 * Generic IRQ handling, GPIO IRQ demultiplexing, etc.
 * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
 *
 * Author:	Bin Yang <bin.yang@marvell.com>
 *		Haojian Zhuang <haojian.zhuang@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/exception.h>
#include <asm/hardirq.h>
/* Offsets of the per-core "interrupt select" register in the ICU block. */
#define PJ1_INT_SEL		0x10c
#define PJ4_INT_SEL		0x104

/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
#define SEL_INT_PENDING		(1 << 6)	/* an interrupt is pending */
#define SEL_INT_NUM_MASK	0x3f		/* pending hwirq number */

/* ICU configuration bits routing an interrupt to the PJ4 IRQ/FIQ inputs. */
#define MMP2_ICU_INT_ROUTE_PJ4_IRQ	(1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ	(1 << 6)
40 struct icu_chip_data
{
42 unsigned int virq_base
;
43 unsigned int cascade_irq
;
44 void __iomem
*reg_status
;
45 void __iomem
*reg_mask
;
46 unsigned int conf_enable
;
47 unsigned int conf_disable
;
48 unsigned int conf_mask
;
49 unsigned int clr_mfp_irq_base
;
50 unsigned int clr_mfp_hwirq
;
51 struct irq_domain
*domain
;
/* SoC-specific routing/enable configuration for the main ICU. */
struct mmp_intc_conf {
	unsigned int	conf_enable;
	unsigned int	conf_disable;
	unsigned int	conf_mask;
};
60 static void __iomem
*mmp_icu_base
;
61 static struct icu_chip_data icu_data
[MAX_ICU_NR
];
62 static int max_icu_nr
;
64 extern void mmp2_clear_pmic_int(void);
66 static void icu_mask_ack_irq(struct irq_data
*d
)
68 struct irq_domain
*domain
= d
->domain
;
69 struct icu_chip_data
*data
= (struct icu_chip_data
*)domain
->host_data
;
73 hwirq
= d
->irq
- data
->virq_base
;
74 if (data
== &icu_data
[0]) {
75 r
= readl_relaxed(mmp_icu_base
+ (hwirq
<< 2));
76 r
&= ~data
->conf_mask
;
77 r
|= data
->conf_disable
;
78 writel_relaxed(r
, mmp_icu_base
+ (hwirq
<< 2));
80 #ifdef CONFIG_CPU_MMP2
81 if ((data
->virq_base
== data
->clr_mfp_irq_base
)
82 && (hwirq
== data
->clr_mfp_hwirq
))
83 mmp2_clear_pmic_int();
85 r
= readl_relaxed(data
->reg_mask
) | (1 << hwirq
);
86 writel_relaxed(r
, data
->reg_mask
);
90 static void icu_mask_irq(struct irq_data
*d
)
92 struct irq_domain
*domain
= d
->domain
;
93 struct icu_chip_data
*data
= (struct icu_chip_data
*)domain
->host_data
;
97 hwirq
= d
->irq
- data
->virq_base
;
98 if (data
== &icu_data
[0]) {
99 r
= readl_relaxed(mmp_icu_base
+ (hwirq
<< 2));
100 r
&= ~data
->conf_mask
;
101 r
|= data
->conf_disable
;
102 writel_relaxed(r
, mmp_icu_base
+ (hwirq
<< 2));
104 r
= readl_relaxed(data
->reg_mask
) | (1 << hwirq
);
105 writel_relaxed(r
, data
->reg_mask
);
109 static void icu_unmask_irq(struct irq_data
*d
)
111 struct irq_domain
*domain
= d
->domain
;
112 struct icu_chip_data
*data
= (struct icu_chip_data
*)domain
->host_data
;
116 hwirq
= d
->irq
- data
->virq_base
;
117 if (data
== &icu_data
[0]) {
118 r
= readl_relaxed(mmp_icu_base
+ (hwirq
<< 2));
119 r
&= ~data
->conf_mask
;
120 r
|= data
->conf_enable
;
121 writel_relaxed(r
, mmp_icu_base
+ (hwirq
<< 2));
123 r
= readl_relaxed(data
->reg_mask
) & ~(1 << hwirq
);
124 writel_relaxed(r
, data
->reg_mask
);
128 struct irq_chip icu_irq_chip
= {
130 .irq_mask
= icu_mask_irq
,
131 .irq_mask_ack
= icu_mask_ack_irq
,
132 .irq_unmask
= icu_unmask_irq
,
135 static void icu_mux_irq_demux(struct irq_desc
*desc
)
137 unsigned int irq
= irq_desc_get_irq(desc
);
138 struct irq_domain
*domain
;
139 struct icu_chip_data
*data
;
141 unsigned long mask
, status
, n
;
143 for (i
= 1; i
< max_icu_nr
; i
++) {
144 if (irq
== icu_data
[i
].cascade_irq
) {
145 domain
= icu_data
[i
].domain
;
146 data
= (struct icu_chip_data
*)domain
->host_data
;
150 if (i
>= max_icu_nr
) {
151 pr_err("Spurious irq %d in MMP INTC\n", irq
);
155 mask
= readl_relaxed(data
->reg_mask
);
157 status
= readl_relaxed(data
->reg_status
) & ~mask
;
160 for_each_set_bit(n
, &status
, BITS_PER_LONG
) {
161 generic_handle_irq(icu_data
[i
].virq_base
+ n
);
166 static int mmp_irq_domain_map(struct irq_domain
*d
, unsigned int irq
,
169 irq_set_chip_and_handler(irq
, &icu_irq_chip
, handle_level_irq
);
173 static int mmp_irq_domain_xlate(struct irq_domain
*d
, struct device_node
*node
,
174 const u32
*intspec
, unsigned int intsize
,
175 unsigned long *out_hwirq
,
176 unsigned int *out_type
)
178 *out_hwirq
= intspec
[0];
182 const struct irq_domain_ops mmp_irq_domain_ops
= {
183 .map
= mmp_irq_domain_map
,
184 .xlate
= mmp_irq_domain_xlate
,
187 static const struct mmp_intc_conf mmp_conf
= {
193 static const struct mmp_intc_conf mmp2_conf
= {
196 .conf_mask
= MMP2_ICU_INT_ROUTE_PJ4_IRQ
|
197 MMP2_ICU_INT_ROUTE_PJ4_FIQ
,
200 static void __exception_irq_entry
mmp_handle_irq(struct pt_regs
*regs
)
204 hwirq
= readl_relaxed(mmp_icu_base
+ PJ1_INT_SEL
);
205 if (!(hwirq
& SEL_INT_PENDING
))
207 hwirq
&= SEL_INT_NUM_MASK
;
208 handle_domain_irq(icu_data
[0].domain
, hwirq
, regs
);
211 static void __exception_irq_entry
mmp2_handle_irq(struct pt_regs
*regs
)
215 hwirq
= readl_relaxed(mmp_icu_base
+ PJ4_INT_SEL
);
216 if (!(hwirq
& SEL_INT_PENDING
))
218 hwirq
&= SEL_INT_NUM_MASK
;
219 handle_domain_irq(icu_data
[0].domain
, hwirq
, regs
);
223 void __init
icu_init_irq(void)
228 mmp_icu_base
= ioremap(0xd4282000, 0x1000);
229 icu_data
[0].conf_enable
= mmp_conf
.conf_enable
;
230 icu_data
[0].conf_disable
= mmp_conf
.conf_disable
;
231 icu_data
[0].conf_mask
= mmp_conf
.conf_mask
;
232 icu_data
[0].nr_irqs
= 64;
233 icu_data
[0].virq_base
= 0;
234 icu_data
[0].domain
= irq_domain_add_legacy(NULL
, 64, 0, 0,
235 &irq_domain_simple_ops
,
237 for (irq
= 0; irq
< 64; irq
++) {
238 icu_mask_irq(irq_get_irq_data(irq
));
239 irq_set_chip_and_handler(irq
, &icu_irq_chip
, handle_level_irq
);
241 irq_set_default_host(icu_data
[0].domain
);
242 set_handle_irq(mmp_handle_irq
);
246 void __init
mmp2_init_icu(void)
251 mmp_icu_base
= ioremap(0xd4282000, 0x1000);
252 icu_data
[0].conf_enable
= mmp2_conf
.conf_enable
;
253 icu_data
[0].conf_disable
= mmp2_conf
.conf_disable
;
254 icu_data
[0].conf_mask
= mmp2_conf
.conf_mask
;
255 icu_data
[0].nr_irqs
= 64;
256 icu_data
[0].virq_base
= 0;
257 icu_data
[0].domain
= irq_domain_add_legacy(NULL
, 64, 0, 0,
258 &irq_domain_simple_ops
,
260 icu_data
[1].reg_status
= mmp_icu_base
+ 0x150;
261 icu_data
[1].reg_mask
= mmp_icu_base
+ 0x168;
262 icu_data
[1].clr_mfp_irq_base
= icu_data
[0].virq_base
+
264 icu_data
[1].clr_mfp_hwirq
= 1; /* offset to IRQ_MMP2_PMIC_BASE */
265 icu_data
[1].nr_irqs
= 2;
266 icu_data
[1].cascade_irq
= 4;
267 icu_data
[1].virq_base
= icu_data
[0].virq_base
+ icu_data
[0].nr_irqs
;
268 icu_data
[1].domain
= irq_domain_add_legacy(NULL
, icu_data
[1].nr_irqs
,
269 icu_data
[1].virq_base
, 0,
270 &irq_domain_simple_ops
,
272 icu_data
[2].reg_status
= mmp_icu_base
+ 0x154;
273 icu_data
[2].reg_mask
= mmp_icu_base
+ 0x16c;
274 icu_data
[2].nr_irqs
= 2;
275 icu_data
[2].cascade_irq
= 5;
276 icu_data
[2].virq_base
= icu_data
[1].virq_base
+ icu_data
[1].nr_irqs
;
277 icu_data
[2].domain
= irq_domain_add_legacy(NULL
, icu_data
[2].nr_irqs
,
278 icu_data
[2].virq_base
, 0,
279 &irq_domain_simple_ops
,
281 icu_data
[3].reg_status
= mmp_icu_base
+ 0x180;
282 icu_data
[3].reg_mask
= mmp_icu_base
+ 0x17c;
283 icu_data
[3].nr_irqs
= 3;
284 icu_data
[3].cascade_irq
= 9;
285 icu_data
[3].virq_base
= icu_data
[2].virq_base
+ icu_data
[2].nr_irqs
;
286 icu_data
[3].domain
= irq_domain_add_legacy(NULL
, icu_data
[3].nr_irqs
,
287 icu_data
[3].virq_base
, 0,
288 &irq_domain_simple_ops
,
290 icu_data
[4].reg_status
= mmp_icu_base
+ 0x158;
291 icu_data
[4].reg_mask
= mmp_icu_base
+ 0x170;
292 icu_data
[4].nr_irqs
= 5;
293 icu_data
[4].cascade_irq
= 17;
294 icu_data
[4].virq_base
= icu_data
[3].virq_base
+ icu_data
[3].nr_irqs
;
295 icu_data
[4].domain
= irq_domain_add_legacy(NULL
, icu_data
[4].nr_irqs
,
296 icu_data
[4].virq_base
, 0,
297 &irq_domain_simple_ops
,
299 icu_data
[5].reg_status
= mmp_icu_base
+ 0x15c;
300 icu_data
[5].reg_mask
= mmp_icu_base
+ 0x174;
301 icu_data
[5].nr_irqs
= 15;
302 icu_data
[5].cascade_irq
= 35;
303 icu_data
[5].virq_base
= icu_data
[4].virq_base
+ icu_data
[4].nr_irqs
;
304 icu_data
[5].domain
= irq_domain_add_legacy(NULL
, icu_data
[5].nr_irqs
,
305 icu_data
[5].virq_base
, 0,
306 &irq_domain_simple_ops
,
308 icu_data
[6].reg_status
= mmp_icu_base
+ 0x160;
309 icu_data
[6].reg_mask
= mmp_icu_base
+ 0x178;
310 icu_data
[6].nr_irqs
= 2;
311 icu_data
[6].cascade_irq
= 51;
312 icu_data
[6].virq_base
= icu_data
[5].virq_base
+ icu_data
[5].nr_irqs
;
313 icu_data
[6].domain
= irq_domain_add_legacy(NULL
, icu_data
[6].nr_irqs
,
314 icu_data
[6].virq_base
, 0,
315 &irq_domain_simple_ops
,
317 icu_data
[7].reg_status
= mmp_icu_base
+ 0x188;
318 icu_data
[7].reg_mask
= mmp_icu_base
+ 0x184;
319 icu_data
[7].nr_irqs
= 2;
320 icu_data
[7].cascade_irq
= 55;
321 icu_data
[7].virq_base
= icu_data
[6].virq_base
+ icu_data
[6].nr_irqs
;
322 icu_data
[7].domain
= irq_domain_add_legacy(NULL
, icu_data
[7].nr_irqs
,
323 icu_data
[7].virq_base
, 0,
324 &irq_domain_simple_ops
,
326 end
= icu_data
[7].virq_base
+ icu_data
[7].nr_irqs
;
327 for (irq
= 0; irq
< end
; irq
++) {
328 icu_mask_irq(irq_get_irq_data(irq
));
329 if (irq
== icu_data
[1].cascade_irq
||
330 irq
== icu_data
[2].cascade_irq
||
331 irq
== icu_data
[3].cascade_irq
||
332 irq
== icu_data
[4].cascade_irq
||
333 irq
== icu_data
[5].cascade_irq
||
334 irq
== icu_data
[6].cascade_irq
||
335 irq
== icu_data
[7].cascade_irq
) {
336 irq_set_chip(irq
, &icu_irq_chip
);
337 irq_set_chained_handler(irq
, icu_mux_irq_demux
);
339 irq_set_chip_and_handler(irq
, &icu_irq_chip
,
343 irq_set_default_host(icu_data
[0].domain
);
344 set_handle_irq(mmp2_handle_irq
);
348 static int __init
mmp_init_bases(struct device_node
*node
)
350 int ret
, nr_irqs
, irq
, i
= 0;
352 ret
= of_property_read_u32(node
, "mrvl,intc-nr-irqs", &nr_irqs
);
354 pr_err("Not found mrvl,intc-nr-irqs property\n");
358 mmp_icu_base
= of_iomap(node
, 0);
360 pr_err("Failed to get interrupt controller register\n");
364 icu_data
[0].virq_base
= 0;
365 icu_data
[0].domain
= irq_domain_add_linear(node
, nr_irqs
,
368 for (irq
= 0; irq
< nr_irqs
; irq
++) {
369 ret
= irq_create_mapping(icu_data
[0].domain
, irq
);
371 pr_err("Failed to mapping hwirq\n");
375 icu_data
[0].virq_base
= ret
;
377 icu_data
[0].nr_irqs
= nr_irqs
;
380 if (icu_data
[0].virq_base
) {
381 for (i
= 0; i
< irq
; i
++)
382 irq_dispose_mapping(icu_data
[0].virq_base
+ i
);
384 irq_domain_remove(icu_data
[0].domain
);
385 iounmap(mmp_icu_base
);
389 static int __init
mmp_of_init(struct device_node
*node
,
390 struct device_node
*parent
)
394 ret
= mmp_init_bases(node
);
398 icu_data
[0].conf_enable
= mmp_conf
.conf_enable
;
399 icu_data
[0].conf_disable
= mmp_conf
.conf_disable
;
400 icu_data
[0].conf_mask
= mmp_conf
.conf_mask
;
401 irq_set_default_host(icu_data
[0].domain
);
402 set_handle_irq(mmp_handle_irq
);
406 IRQCHIP_DECLARE(mmp_intc
, "mrvl,mmp-intc", mmp_of_init
);
408 static int __init
mmp2_of_init(struct device_node
*node
,
409 struct device_node
*parent
)
413 ret
= mmp_init_bases(node
);
417 icu_data
[0].conf_enable
= mmp2_conf
.conf_enable
;
418 icu_data
[0].conf_disable
= mmp2_conf
.conf_disable
;
419 icu_data
[0].conf_mask
= mmp2_conf
.conf_mask
;
420 irq_set_default_host(icu_data
[0].domain
);
421 set_handle_irq(mmp2_handle_irq
);
425 IRQCHIP_DECLARE(mmp2_intc
, "mrvl,mmp2-intc", mmp2_of_init
);
427 static int __init
mmp2_mux_of_init(struct device_node
*node
,
428 struct device_node
*parent
)
431 int i
, ret
, irq
, j
= 0;
432 u32 nr_irqs
, mfp_irq
;
438 ret
= of_property_read_u32(node
, "mrvl,intc-nr-irqs",
441 pr_err("Not found mrvl,intc-nr-irqs property\n");
444 ret
= of_address_to_resource(node
, 0, &res
);
446 pr_err("Not found reg property\n");
449 icu_data
[i
].reg_status
= mmp_icu_base
+ res
.start
;
450 ret
= of_address_to_resource(node
, 1, &res
);
452 pr_err("Not found reg property\n");
455 icu_data
[i
].reg_mask
= mmp_icu_base
+ res
.start
;
456 icu_data
[i
].cascade_irq
= irq_of_parse_and_map(node
, 0);
457 if (!icu_data
[i
].cascade_irq
)
460 icu_data
[i
].virq_base
= 0;
461 icu_data
[i
].domain
= irq_domain_add_linear(node
, nr_irqs
,
464 for (irq
= 0; irq
< nr_irqs
; irq
++) {
465 ret
= irq_create_mapping(icu_data
[i
].domain
, irq
);
467 pr_err("Failed to mapping hwirq\n");
471 icu_data
[i
].virq_base
= ret
;
473 icu_data
[i
].nr_irqs
= nr_irqs
;
474 if (!of_property_read_u32(node
, "mrvl,clr-mfp-irq",
476 icu_data
[i
].clr_mfp_irq_base
= icu_data
[i
].virq_base
;
477 icu_data
[i
].clr_mfp_hwirq
= mfp_irq
;
479 irq_set_chained_handler(icu_data
[i
].cascade_irq
,
484 if (icu_data
[i
].virq_base
) {
485 for (j
= 0; j
< irq
; j
++)
486 irq_dispose_mapping(icu_data
[i
].virq_base
+ j
);
488 irq_domain_remove(icu_data
[i
].domain
);
491 IRQCHIP_DECLARE(mmp2_mux_intc
, "mrvl,mmp2-mux-intc", mmp2_mux_of_init
);