/*
 * linux/arch/arm/mach-omap2/irq.c
 *
 * Interrupt handler for OMAP2 boards.
 *
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <paul.mundt@nokia.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/irq-omap-intc.h>
/* selected INTC register offsets */

#define INTC_REVISION		0x0000
#define INTC_SYSCONFIG		0x0010
#define INTC_SYSSTATUS		0x0014
#define INTC_SIR		0x0040
#define INTC_CONTROL		0x0048
#define INTC_PROTECTION		0x004C
#define INTC_IDLE		0x0050
#define INTC_THRESHOLD		0x0068
#define INTC_MIR0		0x0084
#define INTC_MIR_CLEAR0		0x0088
#define INTC_MIR_SET0		0x008c
#define INTC_PENDING_IRQ0	0x0098
#define INTC_PENDING_IRQ1	0x00b8
#define INTC_PENDING_IRQ2	0x00d8
#define INTC_PENDING_IRQ3	0x00f8
#define INTC_ILR0		0x0100

#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
#define INTCPS_NR_ILR_REGS	128
#define INTCPS_NR_MIR_REGS	4

#define INTC_IDLE_FUNCIDLE	(1 << 0)
#define INTC_IDLE_TURBO		(1 << 1)

#define INTC_PROTECTION_ENABLE	(1 << 0)
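
/*
 * Register layout note: the MIR/MIR_CLEAR/MIR_SET and PENDING registers
 * repeat every 0x20 bytes for each bank of 32 interrupts, while the
 * per-interrupt ILR priority registers are spaced 0x4 bytes apart.
 */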
struct omap_intc_regs {
	u32 sysconfig;
	u32 protection;
	u32 idle;
	u32 threshold;
	u32 ilr[INTCPS_NR_ILR_REGS];
	u32 mir[INTCPS_NR_MIR_REGS];
};
static struct omap_intc_regs intc_context;
static struct irq_domain *domain;
static void __iomem *omap_irq_base;
static int omap_nr_pending;
static int omap_nr_irqs;
static void intc_writel(u32 reg, u32 val)
{
	writel_relaxed(val, omap_irq_base + reg);
}

static u32 intc_readl(u32 reg)
{
	return readl_relaxed(omap_irq_base + reg);
}
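
/*
 * Context save/restore, called from the platform power-management code
 * around low-power states where the INTC loses its register context.
 */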
void omap_intc_save_context(void)
{
	int i;

	intc_context.sysconfig = intc_readl(INTC_SYSCONFIG);
	intc_context.protection = intc_readl(INTC_PROTECTION);
	intc_context.idle = intc_readl(INTC_IDLE);
	intc_context.threshold = intc_readl(INTC_THRESHOLD);

	for (i = 0; i < omap_nr_irqs; i++)
		intc_context.ilr[i] = intc_readl(INTC_ILR0 + 0x4 * i);
	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_context.mir[i] = intc_readl(INTC_MIR0 + (0x20 * i));
}
void omap_intc_restore_context(void)
{
	int i;

	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
	intc_writel(INTC_PROTECTION, intc_context.protection);
	intc_writel(INTC_IDLE, intc_context.idle);
	intc_writel(INTC_THRESHOLD, intc_context.threshold);

	for (i = 0; i < omap_nr_irqs; i++)
		intc_writel(INTC_ILR0 + 0x4 * i, intc_context.ilr[i]);

	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_writel(INTC_MIR0 + 0x20 * i, intc_context.mir[i]);
	/* MIRs are saved and restored with the other PRCM registers */
}
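
/*
 * Hypothetical call pattern for the save/restore helpers above, from
 * platform suspend code, assuming the INTC loses context in the target
 * power state:
 *
 *	omap_intc_save_context();
 *	...enter the low-power state...
 *	omap_intc_restore_context();
 */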
void omap3_intc_prepare_idle(void)
{
	/*
	 * Disable autoidle as it can stall the interrupt controller,
	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
	 */
	intc_writel(INTC_SYSCONFIG, 0);
	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
}
void omap3_intc_resume_idle(void)
{
	/* Re-enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1);
	intc_writel(INTC_IDLE, 0);
}
/* XXX: FIQ and additional INTC support (only MPU at the moment) */
static void omap_ack_irq(struct irq_data *d)
{
	/* NEWIRQAGR: acknowledge the active IRQ and allow a new one to be sorted */
	intc_writel(INTC_CONTROL, 0x1);
}

static void omap_mask_ack_irq(struct irq_data *d)
{
	irq_gc_mask_disable_reg(d);
	omap_ack_irq(d);
}
static void __init omap_irq_soft_reset(void)
{
	unsigned long tmp;

	tmp = intc_readl(INTC_REVISION) & 0xff;

	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);

	tmp = intc_readl(INTC_SYSCONFIG);
	tmp |= 1 << 1;	/* soft reset */
	intc_writel(INTC_SYSCONFIG, tmp);

	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
		/* Wait for reset to complete */;

	/* Enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1 << 0);
}
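
/*
 * Helper for the platform idle/suspend paths: reports whether any interrupt
 * is currently pending in the INTC.
 */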
int omap_irq_pending(void)
{
	int i;

	for (i = 0; i < omap_nr_pending; i++)
		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
			return 1;
	return 0;
}
void omap3_intc_suspend(void)
{
	/* A pending interrupt would prevent OMAP from entering suspend */
	omap_ack_irq(NULL);
}
static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
{
	int ret;
	int i;

	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
			IRQ_LEVEL, 0);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		return ret;
	}

	for (i = 0; i < omap_nr_pending; i++) {
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_get_domain_generic_chip(d, 32 * i);
		gc->reg_base = base;
		ct = gc->chip_types;

		ct->type = IRQ_TYPE_LEVEL_MASK;

		ct->chip.irq_ack = omap_mask_ack_irq;
		ct->chip.irq_mask = irq_gc_mask_disable_reg;
		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;

		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
	}

	return 0;
}
static void __init omap_alloc_gc_legacy(void __iomem *base,
					unsigned int irq_start, unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
				    handle_level_irq);
	ct = gc->chip_types;
	ct->chip.irq_ack = omap_mask_ack_irq;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

	ct->regs.enable = INTC_MIR_CLEAR0;
	ct->regs.disable = INTC_MIR_SET0;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
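
/*
 * Both generic-chip setups above point regs.enable at INTC_MIR_CLEAR0 and
 * regs.disable at INTC_MIR_SET0; these registers use write-1-to-clear/set
 * semantics, so masking and unmasking never needs a read-modify-write.
 */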
static int __init omap_init_irq_of(struct device_node *node)
{
	int ret;

	omap_irq_base = of_iomap(node, 0);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	domain = irq_domain_add_linear(node, omap_nr_irqs,
				       &irq_generic_chip_ops, NULL);

	omap_irq_soft_reset();

	ret = omap_alloc_gc_of(domain, omap_irq_base);
	if (ret < 0)
		irq_domain_remove(domain);

	return ret;
}
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
	int j, irq_base;

	omap_irq_base = ioremap(base, SZ_4K);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
	if (irq_base < 0) {
		pr_warn("Couldn't allocate IRQ numbers\n");
		irq_base = 0;
	}

	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
				       &irq_domain_simple_ops, NULL);

	omap_irq_soft_reset();

	for (j = 0; j < omap_nr_irqs; j += 32)
		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);

	return 0;
}
static void __init omap_irq_enable_protection(void)
{
	u32 reg;

	reg = intc_readl(INTC_PROTECTION);
	reg |= INTC_PROTECTION_ENABLE;
	intc_writel(INTC_PROTECTION, reg);
}
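
/*
 * With INTC_PROTECTION_ENABLE set above, the INTC registers can only be
 * accessed from a privileged (supervisor) mode, per the OMAP TRM.
 */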
static int __init omap_init_irq(u32 base, struct device_node *node)
{
	int ret;

	/*
	 * FIXME the legacy OMAP DMA driver sitting under
	 * arch/arm/plat-omap/dma.c is still not ready for linear IRQ domains;
	 * because of that we need to temporarily "blacklist" OMAP2 and OMAP3
	 * devices from using the linear IRQ domain until that driver is
	 * finally fixed.
	 */
	if (of_device_is_compatible(node, "ti,omap2-intc") ||
	    of_device_is_compatible(node, "ti,omap3-intc")) {
		struct resource res;

		if (of_address_to_resource(node, 0, &res))
			return -ENOMEM;

		base = res.start;
		ret = omap_init_irq_legacy(base, node);
	} else if (node) {
		ret = omap_init_irq_of(node);
	} else {
		ret = omap_init_irq_legacy(base, NULL);
	}

	if (ret == 0)
		omap_irq_enable_protection();

	return ret;
}
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
	extern unsigned long irq_err_count;
	u32 irqnr;

	irqnr = intc_readl(INTC_SIR);

	/*
	 * A spurious IRQ can result if the interrupt that triggered the
	 * sorting is no longer active during the sorting (10 INTC
	 * functional clock cycles after interrupt assertion), or if a
	 * change in the interrupt mask affected the result during sorting
	 * time. There is no special handling required except ignoring
	 * the SIR register value just read and retrying.
	 * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
	 *
	 * Many times, a spurious interrupt situation has been fixed
	 * by adding a flush for the posted write acking the IRQ in
	 * the device driver. Typically, this is going to be the device
	 * driver whose interrupt was handled just before the spurious
	 * IRQ occurred. Pay attention to those device drivers if you
	 * run into the spurious IRQ condition below.
	 */
	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
		pr_err_once("%s: spurious irq!\n", __func__);
		irq_err_count++;
		omap_ack_irq(NULL);
		return;
	}

	irqnr &= ACTIVEIRQ_MASK;
	handle_domain_irq(domain, irqnr, regs);
}
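
/*
 * The spurious check above relies on the SIR SPURIOUSIRQ field (bits [31:7],
 * covered by SPURIOUSIRQ_MASK) reading back as all ones when the sorted
 * result in the ACTIVEIRQ field is not valid.
 */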
static int __init intc_of_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	omap_nr_pending = 3;
	omap_nr_irqs = 96;

	if (WARN_ON(!node))
		return -ENODEV;

	if (of_device_is_compatible(node, "ti,dm814-intc") ||
	    of_device_is_compatible(node, "ti,dm816-intc") ||
	    of_device_is_compatible(node, "ti,am33xx-intc")) {
		omap_nr_irqs = 128;
		omap_nr_pending = 4;
	}

	ret = omap_init_irq(-1, of_node_get(node));
	if (ret < 0)
		return ret;

	set_handle_irq(omap_intc_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);