/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_IM0_ISR		0x0000
#define LTQ_ICU_IM0_IER		0x0008
#define LTQ_ICU_IM0_IOSR	0x0010
#define LTQ_ICU_IM0_IRSR	0x0018
#define LTQ_ICU_IM0_IMR		0x0020
#define LTQ_ICU_IM1_ISR		0x0028
#define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INC	0x0008
#define LTQ_EIU_EXIN_INEN	0x000C

/* number of external interrupts */
#define MAX_EIU			6

/* the performance counter */
#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22

#define ltq_icu_w32(m, x, y)	ltq_w32((x), ltq_icu_membase[m] + (y))
#define ltq_icu_r32(m, x)	ltq_r32(ltq_icu_membase[m] + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

/* our 2 ipi interrupts for VSMP */
#define MIPS_CPU_IPI_RESCHED_IRQ	0
#define MIPS_CPU_IPI_CALL_IRQ		1

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE		8

static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static int ltq_perfcount_irq;

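/* map an EXIN index to the hwirq listed for it in the devicetree */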
int ltq_eiu_get_irq(int exin)
{
	if (exin < exin_avail)
		return ltq_eiu_irq[exin];
	return -1;
}

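/*
 * The ICU helpers below translate a linear hwirq into an interrupt
 * module (IM) index and a bit offset inside that module: hwirqs below
 * MIPS_CPU_IRQ_CASCADE belong to the MIPS CPU itself, everything above
 * is spread across the IMs in blocks of INT_NUM_IM_OFFSET lines.
 */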
void ltq_disable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	u32 isr = LTQ_ICU_IM0_ISR;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
	ltq_icu_w32(im, BIT(offset), isr);
}

static void ltq_ack_irq(struct irq_data *d)
{
	u32 isr = LTQ_ICU_IM0_ISR;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, BIT(offset), isr);
}

void ltq_enable_irq(struct irq_data *d)
{
	u32 ier = LTQ_ICU_IM0_IER;
	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	int im = offset / INT_NUM_IM_OFFSET;

	offset %= INT_NUM_IM_OFFSET;
	ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
}

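/*
 * Each EXIN line owns a 4-bit trigger field in LTQ_EIU_EXIN_C;
 * ltq_eiu_settype() programs that field and switches edge triggered
 * lines over to the edge flow handler.
 */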
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
	int i;

	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			int val = 0;
			int edge = 0;

			switch (type) {
			case IRQF_TRIGGER_NONE:
				break;
			case IRQF_TRIGGER_RISING:
				val = 1;
				edge = 1;
				break;
			case IRQF_TRIGGER_FALLING:
				val = 2;
				edge = 1;
				break;
			case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
				val = 3;
				edge = 1;
				break;
			case IRQF_TRIGGER_HIGH:
				val = 5;
				break;
			case IRQF_TRIGGER_LOW:
				val = 6;
				break;
			default:
				pr_err("invalid type %d for irq %ld\n",
					type, d->hwirq);
				return -EINVAL;
			}

			if (edge)
				irq_set_handler(d->hwirq, handle_edge_irq);

			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
				(val << (i * 4)), LTQ_EIU_EXIN_C);

			return 0;
		}
	}

	return -EINVAL;
}

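/*
 * irq_startup/irq_shutdown for EIU lines additionally gate the line in
 * the EIU's INEN register and clear stale pending bits in INC.
 */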
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* by default we are low level triggered */
			ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
				LTQ_EIU_EXIN_INC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}

static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}

static struct irq_chip ltq_irq_type = {
	.name = "icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
};

static struct irq_chip ltq_eiu_type = {
	.name = "eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
	.irq_set_type = ltq_eiu_settype,
};

static void ltq_hw_irqdispatch(int module)
{
	u32 irq;

	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
	if (irq == 0)
		return;

	/*
	 * silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));

	/* if this is a EBU irq, we need to ack it or get a deadlock */
	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}

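/*
 * Generate one small dispatch stub per interrupt module; icu_of_init()
 * installs these as vectored interrupt handlers when the CPU supports
 * vectored interrupt mode.
 */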
#define DEFINE_HWx_IRQDISPATCH(x)				\
static void ltq_hw ## x ## _irqdispatch(void)			\
{								\
	ltq_hw_irqdispatch(x);					\
}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)

#if MIPS_CPU_TIMER_IRQ == 7
static void ltq_hw5_irqdispatch(void)
{
	do_IRQ(MIPS_CPU_TIMER_IRQ);
}
#else
DEFINE_HWx_IRQDISPATCH(5)
#endif

static void ltq_hw_irq_handler(struct irq_desc *desc)
{
	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}

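/*
 * Non-vectored entry point: walk the pending CAUSE/STATUS bits from
 * highest to lowest priority and hand each one to do_IRQ().
 */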
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	int irq;

	if (!pending) {
		spurious_interrupt();
		return;
	}

	pending >>= CAUSEB_IP;
	while (pending) {
		irq = fls(pending) - 1;
		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
		pending &= ~BIT(irq);
	}
}

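/*
 * Domain map callback: hwirqs below the cascade are left to the MIPS CPU
 * interrupt controller and get no ICU chip; EIU lines get the EIU chip so
 * their trigger type can be configured, everything else uses the plain
 * ICU chip.
 */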
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	struct irq_chip *chip = &ltq_irq_type;
	int i;

	if (hw < MIPS_CPU_IRQ_CASCADE)
		return 0;

	for (i = 0; i < exin_avail; i++)
		if (hw == ltq_eiu_irq[i])
			chip = &ltq_eiu_type;

	irq_set_chip_and_handler(irq, chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
	.xlate = irq_domain_xlate_onetwocell,
	.map = icu_map,
};

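/*
 * icu_of_init() maps the ICU register ranges from the devicetree, masks
 * and clears all module interrupts, registers the linear irq domain and
 * optionally sets up the XWAY external interrupt unit (EIU).
 */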
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
	struct device_node *eiu_node;
	struct resource res;
	int i, ret;

	for (i = 0; i < MAX_IM; i++) {
		if (of_address_to_resource(node, i, &res))
			panic("Failed to get icu memory range");

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request icu memory");

		ltq_icu_membase[i] = ioremap_nocache(res.start,
					resource_size(&res));
		if (!ltq_icu_membase[i])
			panic("Failed to remap icu memory");
	}

	/* turn off all irqs by default */
	for (i = 0; i < MAX_IM; i++) {
		/* make sure all irqs are turned off by default */
		ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
		/* clear all possibly pending interrupts */
		ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
	}

	mips_cpu_irq_init();

	for (i = 0; i < MAX_IM; i++)
		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	ltq_domain = irq_domain_add_linear(node,
			(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
			&irq_domain_ops, 0);

#ifndef CONFIG_MIPS_MT_SMP
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif

	/* tell oprofile which irq to use */
	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

	/*
	 * if the timer irq is not one of the mips irqs we need to
	 * create a mapping
	 */
	if (MIPS_CPU_TIMER_IRQ != 7)
		irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);

	/* the external interrupts are optional and xway only */
	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
		/* find out how many external irq sources we have */
		exin_avail = of_property_count_u32_elems(eiu_node,
							 "lantiq,eiu-irqs");

		if (exin_avail > MAX_EIU)
			exin_avail = MAX_EIU;

		ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
						 ltq_eiu_irq, exin_avail);
		if (ret)
			panic("failed to load external irq resources");

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request eiu memory");

		ltq_eiu_membase = ioremap_nocache(res.start,
						  resource_size(&res));
		if (!ltq_eiu_membase)
			panic("Failed to remap eiu memory");
	}

	return 0;
}

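/*
 * Generic MIPS code uses these hooks to pick up the performance counter
 * and CP0 compare (timer) interrupt numbers configured above.
 */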
int get_c0_perfcount_int(void)
{
	return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
	return MIPS_CPU_TIMER_IRQ;
}

static struct of_device_id __initdata of_irq_ids[] = {
	{ .compatible = "lantiq,icu", .data = icu_of_init },
	{},
};

void __init arch_init_irq(void)
{
	of_irq_init(of_irq_ids);
}