/*
 * linux/arch/arm/mach-mmp/irq.c
 *
 * Generic IRQ handling, GPIO IRQ demultiplexing, etc.
 * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
 *
 * Author:	Bin Yang <bin.yang@marvell.com>
 *		Haojian Zhuang <haojian.zhuang@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <mach/irqs.h>

#ifdef CONFIG_CPU_MMP2
#include <mach/pm-mmp2.h>
#endif
#ifdef CONFIG_CPU_PXA910
#include <mach/pm-pxa910.h>
#endif

#define MAX_ICU_NR	16

struct icu_chip_data {
	int			nr_irqs;
	unsigned int		virq_base;
	unsigned int		cascade_irq;
	void __iomem		*reg_status;
	void __iomem		*reg_mask;
	unsigned int		conf_enable;
	unsigned int		conf_disable;
	unsigned int		conf_mask;
	unsigned int		clr_mfp_irq_base;
	unsigned int		clr_mfp_hwirq;
	struct irq_domain	*domain;
};

struct mmp_intc_conf {
	unsigned int	conf_enable;
	unsigned int	conf_disable;
	unsigned int	conf_mask;
};

void __iomem *mmp_icu_base;
static struct icu_chip_data icu_data[MAX_ICU_NR];
static int max_icu_nr;

extern void mmp2_clear_pmic_int(void);
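
/*
 * Mask-and-ack: interrupts on the main ICU (icu_data[0]) are disabled by
 * rewriting their per-interrupt configuration register, while interrupts
 * behind a secondary mux are disabled by setting the corresponding bit in
 * the mux mask register.  On MMP2 the PMIC mux line additionally needs a
 * workaround write to clear the latched PMIC interrupt.
 */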
static void icu_mask_ack_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
#ifdef CONFIG_CPU_MMP2
		if ((data->virq_base == data->clr_mfp_irq_base)
			&& (hwirq == data->clr_mfp_hwirq))
			mmp2_clear_pmic_int();
#endif
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

static void icu_mask_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

static void icu_unmask_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_enable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
		r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

static struct irq_chip icu_irq_chip = {
	.name		= "icu_irq",
	.irq_mask	= icu_mask_irq,
	.irq_mask_ack	= icu_mask_ack_irq,
	.irq_unmask	= icu_unmask_irq,
};
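
/*
 * Chained handler for the secondary (mux) interrupt controllers: find the
 * mux that owns the cascade interrupt, then dispatch every bit that is set
 * in its status register and not masked, until the status reads back empty.
 */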
static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain;
	struct icu_chip_data *data;
	int i;
	unsigned long mask, status, n;

	for (i = 1; i < max_icu_nr; i++) {
		if (irq == icu_data[i].cascade_irq) {
			domain = icu_data[i].domain;
			data = (struct icu_chip_data *)domain->host_data;
			break;
		}
	}
	if (i >= max_icu_nr) {
		pr_err("Spurious irq %d in MMP INTC\n", irq);
		return;
	}

	mask = readl_relaxed(data->reg_mask);
	while (1) {
		status = readl_relaxed(data->reg_status) & ~mask;
		if (status == 0)
			break;
		n = find_first_bit(&status, BITS_PER_LONG);
		while (n < BITS_PER_LONG) {
			generic_handle_irq(icu_data[i].virq_base + n);
			n = find_next_bit(&status, BITS_PER_LONG, n + 1);
		}
	}
}
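
/*
 * Domain callbacks used by the device tree probe path: hardware IRQ numbers
 * map 1:1 onto Linux IRQ numbers, and the interrupt specifier is a single
 * cell holding the hardware IRQ number.
 */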
static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	set_irq_flags(irq, IRQF_VALID);
	return 0;
}

static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = intspec[0];
	return 0;
}

const struct irq_domain_ops mmp_irq_domain_ops = {
	.map		= mmp_irq_domain_map,
	.xlate		= mmp_irq_domain_xlate,
};
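
/*
 * Per-SoC values written into the per-interrupt ICU configuration registers
 * by the mask/unmask helpers above; the interrupt routing configuration
 * differs between MMP and MMP2.
 */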
static struct mmp_intc_conf mmp_conf = {
	.conf_enable	= 0x51,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,
};

static struct mmp_intc_conf mmp2_conf = {
	.conf_enable	= 0x20,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,
};
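
/* Legacy (non-DT) init for PXA168/PXA910: a single ICU with 64 interrupts. */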
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	for (irq = 0; irq < 64; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_PXA910
	icu_irq_chip.irq_set_wake = pxa910_set_wake;
#endif
}
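
/*
 * Legacy (non-DT) init for MMP2: the main ICU plus seven secondary mux
 * controllers (PMIC, RTC, keypad, TWSI, misc, MIPI HSI1/HSI0) cascaded
 * from fixed ICU lines.
 */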
void __init mmp2_init_icu(void)
{
	int irq;

	max_icu_nr = 8;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp2_conf.conf_enable;
	icu_data[0].conf_disable = mmp2_conf.conf_disable;
	icu_data[0].conf_mask = mmp2_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	icu_data[1].reg_status = mmp_icu_base + 0x150;
	icu_data[1].reg_mask = mmp_icu_base + 0x168;
	icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
	icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
	icu_data[1].nr_irqs = 2;
	icu_data[1].cascade_irq = 4;
	icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
						   icu_data[1].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[1]);
	icu_data[2].reg_status = mmp_icu_base + 0x154;
	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
	icu_data[2].nr_irqs = 2;
	icu_data[2].cascade_irq = 5;
	icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
						   icu_data[2].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[2]);
	icu_data[3].reg_status = mmp_icu_base + 0x180;
	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
	icu_data[3].nr_irqs = 3;
	icu_data[3].cascade_irq = 9;
	icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
						   icu_data[3].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[3]);
	icu_data[4].reg_status = mmp_icu_base + 0x158;
	icu_data[4].reg_mask = mmp_icu_base + 0x170;
	icu_data[4].nr_irqs = 5;
	icu_data[4].cascade_irq = 17;
	icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
						   icu_data[4].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[4]);
	icu_data[5].reg_status = mmp_icu_base + 0x15c;
	icu_data[5].reg_mask = mmp_icu_base + 0x174;
	icu_data[5].nr_irqs = 15;
	icu_data[5].cascade_irq = 35;
	icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
						   icu_data[5].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[5]);
	icu_data[6].reg_status = mmp_icu_base + 0x160;
	icu_data[6].reg_mask = mmp_icu_base + 0x178;
	icu_data[6].nr_irqs = 2;
	icu_data[6].cascade_irq = 51;
	icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
						   icu_data[6].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[6]);
	icu_data[7].reg_status = mmp_icu_base + 0x188;
	icu_data[7].reg_mask = mmp_icu_base + 0x184;
	icu_data[7].nr_irqs = 2;
	icu_data[7].cascade_irq = 55;
	icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
						   icu_data[7].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[7]);
	for (irq = 0; irq < IRQ_MMP2_MUX_END; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		switch (irq) {
		case IRQ_MMP2_PMIC_MUX:
		case IRQ_MMP2_RTC_MUX:
		case IRQ_MMP2_KEYPAD_MUX:
		case IRQ_MMP2_TWSI_MUX:
		case IRQ_MMP2_MISC_MUX:
		case IRQ_MMP2_MIPI_HSI1_MUX:
		case IRQ_MMP2_MIPI_HSI0_MUX:
			/* mux lines are handled by the chained demux */
			irq_set_chip(irq, &icu_irq_chip);
			irq_set_chained_handler(irq, icu_mux_irq_demux);
			break;
		default:
			irq_set_chip_and_handler(irq, &icu_irq_chip,
						 handle_level_irq);
			break;
		}
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_MMP2
	icu_irq_chip.irq_set_wake = mmp2_set_wake;
#endif
}
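
/*
 * Device tree probing: the main controller matches "mrvl,mmp-intc" or
 * "mrvl,mmp2-intc" (selecting the per-SoC conf values above), and each
 * secondary mux controller matches "mrvl,mmp2-mux-intc".
 */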
static const struct of_device_id intc_ids[] __initconst = {
	{ .compatible = "mrvl,mmp-intc", .data = &mmp_conf },
	{ .compatible = "mrvl,mmp2-intc", .data = &mmp2_conf },
	{}
};

static const struct of_device_id mmp_mux_irq_match[] __initconst = {
	{ .compatible = "mrvl,mmp2-mux-intc" },
	{}
};
int __init mmp2_mux_init(struct device_node *parent)
{
	struct device_node *node;
	const struct of_device_id *of_id;
	struct resource res;
	int i, irq_base, ret, irq;
	u32 nr_irqs, mfp_irq;

	node = parent;
	max_icu_nr = 1;
	for (i = 1; i < MAX_ICU_NR; i++) {
		node = of_find_matching_node(node, mmp_mux_irq_match);
		if (!node)
			break;
		of_id = of_match_node(&mmp_mux_irq_match[0], node);
		ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
					   &nr_irqs);
		if (ret) {
			pr_err("Not found mrvl,intc-nr-irqs property\n");
			ret = -EINVAL;
			goto err;
		}
		ret = of_address_to_resource(node, 0, &res);
		if (ret < 0) {
			pr_err("Not found reg property\n");
			ret = -EINVAL;
			goto err;
		}
		icu_data[i].reg_status = mmp_icu_base + res.start;
		ret = of_address_to_resource(node, 1, &res);
		if (ret < 0) {
			pr_err("Not found reg property\n");
			ret = -EINVAL;
			goto err;
		}
		icu_data[i].reg_mask = mmp_icu_base + res.start;
		icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
		if (!icu_data[i].cascade_irq) {
			ret = -EINVAL;
			goto err;
		}

		irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
		if (irq_base < 0) {
			pr_err("Failed to allocate IRQ numbers for mux intc\n");
			ret = irq_base;
			goto err;
		}
		if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
					  &mfp_irq)) {
			icu_data[i].clr_mfp_irq_base = irq_base;
			icu_data[i].clr_mfp_hwirq = mfp_irq;
		}
		irq_set_chained_handler(icu_data[i].cascade_irq,
					icu_mux_irq_demux);
		icu_data[i].nr_irqs = nr_irqs;
		icu_data[i].virq_base = irq_base;
		icu_data[i].domain = irq_domain_add_legacy(node, nr_irqs,
							   irq_base, 0,
							   &irq_domain_simple_ops,
							   &icu_data[i]);
		for (irq = irq_base; irq < irq_base + nr_irqs; irq++)
			icu_mask_irq(irq_get_irq_data(irq));
	}
	max_icu_nr = i;
	return 0;
err:
	of_node_put(node);
	max_icu_nr = i;
	return ret;
}
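
/*
 * Main DT entry point: match the top-level interrupt controller, map its
 * registers, set up a legacy domain starting at Linux IRQ 0, mask every
 * interrupt, then probe the cascaded mux controllers.
 */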
void __init mmp_dt_irq_init(void)
{
	struct device_node *node;
	const struct of_device_id *of_id;
	struct mmp_intc_conf *conf;
	int nr_irqs, irq_base, ret, irq;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node) {
		pr_err("Failed to find interrupt controller in arch-mmp\n");
		return;
	}
	of_id = of_match_node(intc_ids, node);
	conf = of_id->data;

	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return;
	}

	mmp_icu_base = of_iomap(node, 0);
	if (!mmp_icu_base) {
		pr_err("Failed to get interrupt controller register\n");
		return;
	}

	irq_base = irq_alloc_descs(-1, 0, nr_irqs - NR_IRQS_LEGACY, 0);
	if (irq_base < 0) {
		pr_err("Failed to allocate IRQ numbers\n");
		goto err;
	} else if (irq_base != NR_IRQS_LEGACY) {
		pr_err("ICU's irqbase should be started from 0\n");
		goto err;
	}

	icu_data[0].conf_enable = conf->conf_enable;
	icu_data[0].conf_disable = conf->conf_disable;
	icu_data[0].conf_mask = conf->conf_mask;
	icu_data[0].nr_irqs = nr_irqs;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(node, nr_irqs, 0, 0,
						   &mmp_irq_domain_ops,
						   &icu_data[0]);
	irq_set_default_host(icu_data[0].domain);
	for (irq = 0; irq < nr_irqs; irq++)
		icu_mask_irq(irq_get_irq_data(irq));
	mmp2_mux_init(node);
	return;
err:
	iounmap(mmp_icu_base);
}