drivers/irqchip/irq-omap-intc.c
/*
 * linux/arch/arm/mach-omap2/irq.c
 *
 * Interrupt handler for OMAP2 boards.
 *
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <paul.mundt@nokia.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/irq-omap-intc.h>
/* Define these here for now until we drop all board-files */
#define OMAP24XX_IC_BASE	0x480fe000
#define OMAP34XX_IC_BASE	0x48200000
/* selected INTC register offsets */

#define INTC_REVISION		0x0000
#define INTC_SYSCONFIG		0x0010
#define INTC_SYSSTATUS		0x0014
#define INTC_SIR		0x0040
#define INTC_CONTROL		0x0048
#define INTC_PROTECTION		0x004C
#define INTC_IDLE		0x0050
#define INTC_THRESHOLD		0x0068
#define INTC_MIR0		0x0084
#define INTC_MIR_CLEAR0		0x0088
#define INTC_MIR_SET0		0x008c
#define INTC_PENDING_IRQ0	0x0098
#define INTC_PENDING_IRQ1	0x00b8
#define INTC_PENDING_IRQ2	0x00d8
#define INTC_PENDING_IRQ3	0x00f8
#define INTC_ILR0		0x0100
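/*
 * INTC_SIR reports the number of the currently active interrupt in bits
 * [6:0]; omap_intc_handle_irq() below uses the remaining bits [31:7] to
 * detect the spurious IRQ condition.
 */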
#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
#define INTCPS_NR_ILR_REGS	128
#define INTCPS_NR_MIR_REGS	4

#define INTC_IDLE_FUNCIDLE	(1 << 0)
#define INTC_IDLE_TURBO		(1 << 1)

#define INTC_PROTECTION_ENABLE	(1 << 0)
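/*
 * Shadow copy of the INTC state, filled by omap_intc_save_context() and
 * written back by omap_intc_restore_context() across power transitions
 * where the INTC can lose context.
 */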
struct omap_intc_regs {
        u32 sysconfig;
        u32 protection;
        u32 idle;
        u32 threshold;
        u32 ilr[INTCPS_NR_ILR_REGS];
        u32 mir[INTCPS_NR_MIR_REGS];
};
static struct omap_intc_regs intc_context;

static struct irq_domain *domain;
static void __iomem *omap_irq_base;
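/*
 * Defaults for OMAP2/3: 96 interrupt lines in three banks of 32.
 * intc_of_init() raises these to 128 lines / 4 banks for am33xx and dm81xx.
 */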
static int omap_nr_pending = 3;
static int omap_nr_irqs = 96;

static void intc_writel(u32 reg, u32 val)
{
        writel_relaxed(val, omap_irq_base + reg);
}

static u32 intc_readl(u32 reg)
{
        return readl_relaxed(omap_irq_base + reg);
}
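/*
 * Context save/restore helpers, expected to be called by the platform PM
 * code around power states in which the INTC loses its register contents.
 */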
void omap_intc_save_context(void)
{
        int i;

        intc_context.sysconfig =
                intc_readl(INTC_SYSCONFIG);
        intc_context.protection =
                intc_readl(INTC_PROTECTION);
        intc_context.idle =
                intc_readl(INTC_IDLE);
        intc_context.threshold =
                intc_readl(INTC_THRESHOLD);

        for (i = 0; i < omap_nr_irqs; i++)
                intc_context.ilr[i] =
                        intc_readl((INTC_ILR0 + 0x4 * i));
        for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
                intc_context.mir[i] =
                        intc_readl(INTC_MIR0 + (0x20 * i));
}

void omap_intc_restore_context(void)
{
        int i;

        intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
        intc_writel(INTC_PROTECTION, intc_context.protection);
        intc_writel(INTC_IDLE, intc_context.idle);
        intc_writel(INTC_THRESHOLD, intc_context.threshold);

        for (i = 0; i < omap_nr_irqs; i++)
                intc_writel(INTC_ILR0 + 0x4 * i,
                                intc_context.ilr[i]);

        for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
                intc_writel(INTC_MIR0 + 0x20 * i,
                                intc_context.mir[i]);
        /* MIRs are saved and restored with other PRCM registers */
}
void omap3_intc_prepare_idle(void)
{
        /*
         * Disable autoidle as it can stall the interrupt controller,
         * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
         */
        intc_writel(INTC_SYSCONFIG, 0);
        intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
}

void omap3_intc_resume_idle(void)
{
        /* Re-enable autoidle */
        intc_writel(INTC_SYSCONFIG, 1);
        intc_writel(INTC_IDLE, 0);
}
/* XXX: FIQ and additional INTC support (only MPU at the moment) */
static void omap_ack_irq(struct irq_data *d)
{
        intc_writel(INTC_CONTROL, 0x1);
}

static void omap_mask_ack_irq(struct irq_data *d)
{
        irq_gc_mask_disable_reg(d);
        omap_ack_irq(d);
}
static void __init omap_irq_soft_reset(void)
{
        unsigned long tmp;

        tmp = intc_readl(INTC_REVISION) & 0xff;

        pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
                omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);

        tmp = intc_readl(INTC_SYSCONFIG);
        tmp |= 1 << 1;  /* soft reset */
        intc_writel(INTC_SYSCONFIG, tmp);

        while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
                /* Wait for reset to complete */;

        /* Enable autoidle */
        intc_writel(INTC_SYSCONFIG, 1 << 0);
}
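/*
 * Used by the OMAP PM/idle code to check whether any interrupt is pending
 * before entering a low-power state.
 */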
int omap_irq_pending(void)
{
        int i;

        for (i = 0; i < omap_nr_pending; i++)
                if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
                        return 1;
        return 0;
}

void omap3_intc_suspend(void)
{
        /* A pending interrupt would prevent OMAP from entering suspend */
        omap_ack_irq(NULL);
}
static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
{
        int ret;
        int i;

        ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
                        handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
                        IRQ_LEVEL, 0);
        if (ret) {
                pr_warn("Failed to allocate irq chips\n");
                return ret;
        }

        for (i = 0; i < omap_nr_pending; i++) {
                struct irq_chip_generic *gc;
                struct irq_chip_type *ct;

                gc = irq_get_domain_generic_chip(d, 32 * i);
                gc->reg_base = base;
                ct = gc->chip_types;

                ct->type = IRQ_TYPE_LEVEL_MASK;

                ct->chip.irq_ack = omap_mask_ack_irq;
                ct->chip.irq_mask = irq_gc_mask_disable_reg;
                ct->chip.irq_unmask = irq_gc_unmask_enable_reg;

                ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

                ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
                ct->regs.disable = INTC_MIR_SET0 + 32 * i;
        }

        return 0;
}
static void __init omap_alloc_gc_legacy(void __iomem *base,
                                unsigned int irq_start, unsigned int num)
{
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;

        gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
                        handle_level_irq);
        ct = gc->chip_types;
        ct->chip.irq_ack = omap_mask_ack_irq;
        ct->chip.irq_mask = irq_gc_mask_disable_reg;
        ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
        ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

        ct->regs.enable = INTC_MIR_CLEAR0;
        ct->regs.disable = INTC_MIR_SET0;
        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
                        IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
static int __init omap_init_irq_of(struct device_node *node)
{
        int ret;

        omap_irq_base = of_iomap(node, 0);
        if (WARN_ON(!omap_irq_base))
                return -ENOMEM;

        domain = irq_domain_add_linear(node, omap_nr_irqs,
                        &irq_generic_chip_ops, NULL);

        omap_irq_soft_reset();

        ret = omap_alloc_gc_of(domain, omap_irq_base);
        if (ret < 0)
                irq_domain_remove(domain);

        return ret;
}
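/*
 * Legacy (non-linear-domain) setup: map the INTC at a fixed physical
 * address and pre-allocate a contiguous range of IRQ descriptors.
 */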
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
        int j, irq_base;

        omap_irq_base = ioremap(base, SZ_4K);
        if (WARN_ON(!omap_irq_base))
                return -ENOMEM;

        irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
        if (irq_base < 0) {
                pr_warn("Couldn't allocate IRQ numbers\n");
                irq_base = 0;
        }

        domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
                        &irq_domain_simple_ops, NULL);

        omap_irq_soft_reset();

        for (j = 0; j < omap_nr_irqs; j += 32)
                omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);

        return 0;
}
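/*
 * Setting INTC_PROTECTION_ENABLE restricts INTC register accesses to
 * privileged CPU modes.
 */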
static void __init omap_irq_enable_protection(void)
{
        u32 reg;

        reg = intc_readl(INTC_PROTECTION);
        reg |= INTC_PROTECTION_ENABLE;
        intc_writel(INTC_PROTECTION, reg);
}
static int __init omap_init_irq(u32 base, struct device_node *node)
{
        int ret;

        /*
         * FIXME the legacy OMAP DMA driver sitting under
         * arch/arm/plat-omap/dma.c is still not ready for linear IRQ
         * domains; because of that we need to temporarily "blacklist"
         * OMAP2 and OMAP3 devices from using the linear IRQ domain
         * until that driver is finally fixed.
         */
        if (of_device_is_compatible(node, "ti,omap2-intc") ||
                        of_device_is_compatible(node, "ti,omap3-intc")) {
                struct resource res;

                if (of_address_to_resource(node, 0, &res))
                        return -ENOMEM;

                base = res.start;
                ret = omap_init_irq_legacy(base, node);
        } else if (node) {
                ret = omap_init_irq_of(node);
        } else {
                ret = omap_init_irq_legacy(base, NULL);
        }

        if (ret == 0)
                omap_irq_enable_protection();

        return ret;
}
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
        extern unsigned long irq_err_count;
        u32 irqnr;

        irqnr = intc_readl(INTC_SIR);

        /*
         * A spurious IRQ can result if the interrupt that triggered the
         * sorting is no longer active during the sorting (10 INTC
         * functional clock cycles after interrupt assertion), or if a
         * change in the interrupt mask affected the result during sorting
         * time. There is no special handling required except ignoring
         * the SIR register value just read and retrying.
         * See section 6.2.5 of the AM335x TRM, Literature Number: SPRUH73K.
         *
         * In many cases a spurious interrupt situation has been fixed
         * by adding a flush for the posted write acking the IRQ in
         * the device driver. Typically, this is going to be the device
         * driver whose interrupt was handled just before the spurious
         * IRQ occurred. Pay attention to those device drivers if you
         * run into hitting the spurious IRQ condition below.
         */
        if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
                pr_err_once("%s: spurious irq!\n", __func__);
                irq_err_count++;
                omap_ack_irq(NULL);
                return;
        }

        irqnr &= ACTIVEIRQ_MASK;
        handle_domain_irq(domain, irqnr, regs);
}
void __init omap3_init_irq(void)
{
        omap_nr_irqs = 96;
        omap_nr_pending = 3;
        omap_init_irq(OMAP34XX_IC_BASE, NULL);
        set_handle_irq(omap_intc_handle_irq);
}
static int __init intc_of_init(struct device_node *node,
                             struct device_node *parent)
{
        int ret;

        omap_nr_pending = 3;
        omap_nr_irqs = 96;

        if (WARN_ON(!node))
                return -ENODEV;

        if (of_device_is_compatible(node, "ti,dm814-intc") ||
            of_device_is_compatible(node, "ti,dm816-intc") ||
            of_device_is_compatible(node, "ti,am33xx-intc")) {
                omap_nr_irqs = 128;
                omap_nr_pending = 4;
        }

        ret = omap_init_irq(-1, of_node_get(node));
        if (ret < 0)
                return ret;

        set_handle_irq(omap_intc_handle_irq);

        return 0;
}
IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);