drivers/irqchip/irq-atmel-aic5.c
/*
 * Atmel AT91 AIC5 (Advanced Interrupt Controller) driver
 *
 *  Copyright (C) 2004 SAN People
 *  Copyright (C) 2004 ATMEL
 *  Copyright (C) Rick Bronson
 *  Copyright (C) 2014 Free Electrons
 *
 *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <asm/mach/irq.h>

#include "irq-atmel-aic-common.h"
#include "irqchip.h"
/* Number of irq lines managed by AIC */
#define NR_AIC5_IRQS		128

#define AT91_AIC5_SSR		0x0
#define AT91_AIC5_INTSEL_MSK	(0x7f << 0)

#define AT91_AIC5_SMR		0x4

#define AT91_AIC5_SVR		0x8
#define AT91_AIC5_IVR		0x10
#define AT91_AIC5_FVR		0x14
#define AT91_AIC5_ISR		0x18

#define AT91_AIC5_IPR0		0x20
#define AT91_AIC5_IPR1		0x24
#define AT91_AIC5_IPR2		0x28
#define AT91_AIC5_IPR3		0x2c
#define AT91_AIC5_IMR		0x30
#define AT91_AIC5_CISR		0x34

#define AT91_AIC5_IECR		0x40
#define AT91_AIC5_IDCR		0x44
#define AT91_AIC5_ICCR		0x48
#define AT91_AIC5_ISCR		0x4c
#define AT91_AIC5_EOICR		0x38
#define AT91_AIC5_SPU		0x3c
#define AT91_AIC5_DCR		0x6c

#define AT91_AIC5_FFER		0x50
#define AT91_AIC5_FFDR		0x54
#define AT91_AIC5_FFSR		0x58
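
/*
 * Most per-source registers (SMR, SVR, IECR, IDCR, ICCR, ISCR) are banked:
 * the driver first writes the interrupt line number to SSR to select the
 * source, then accesses the banked register for that source.  Every such
 * select/access pair below is done under irq_gc_lock() so it cannot be
 * interleaved with a selection made on another CPU.
 */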
static struct irq_domain *aic5_domain;
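
/*
 * Top-level interrupt handler: IVR reports the hwirq number of the
 * highest-priority pending interrupt, and ISR tells whether one is really
 * pending.  A spurious interrupt (ISR == 0) is simply acknowledged with an
 * End Of Interrupt command; anything else is dispatched through the aic5
 * irq domain.
 */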
static asmlinkage void __exception_irq_entry
aic5_handle(struct pt_regs *regs)
{
        struct irq_domain_chip_generic *dgc = aic5_domain->gc;
        struct irq_chip_generic *gc = dgc->gc[0];
        u32 irqnr;
        u32 irqstat;

        irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR);
        irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR);

        if (!irqstat)
                irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);
        else
                handle_domain_irq(aic5_domain, irqnr, regs);
}
static void aic5_mask(struct irq_data *d)
{
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
        struct irq_chip_generic *gc = dgc->gc[0];

        /* Disable interrupt on AIC5 */
        irq_gc_lock(gc);
        irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
        irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
        gc->mask_cache &= ~d->mask;
        irq_gc_unlock(gc);
}

static void aic5_unmask(struct irq_data *d)
{
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
        struct irq_chip_generic *gc = dgc->gc[0];

        /* Enable interrupt on AIC5 */
        irq_gc_lock(gc);
        irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
        irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR);
        gc->mask_cache |= d->mask;
        irq_gc_unlock(gc);
}
static int aic5_retrigger(struct irq_data *d)
{
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
        struct irq_chip_generic *gc = dgc->gc[0];

        /* Retrigger the interrupt on AIC5 (force it pending via ISCR) */
        irq_gc_lock(gc);
        irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
        irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR);
        irq_gc_unlock(gc);

        return 0;
}
static int aic5_set_type(struct irq_data *d, unsigned type)
{
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
        struct irq_chip_generic *gc = dgc->gc[0];
        unsigned int smr;
        int ret;

        irq_gc_lock(gc);
        irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
        smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
        ret = aic_common_set_type(d, type, &smr);
        if (!ret)
                irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR);
        irq_gc_unlock(gc);

        return ret;
}
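
/*
 * Power-management callbacks: on suspend only the wake-up sources
 * (gc->wake_active) are left enabled, on resume the enable state cached in
 * gc->mask_cache is restored, and on power-off every source is disabled and
 * its pending state cleared.  All register accesses go through bgc, the
 * first generic chip, so that the SSR select/access sequences stay
 * serialized on a single lock.
 */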
#ifdef CONFIG_PM
static void aic5_suspend(struct irq_data *d)
{
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
        struct irq_chip_generic *bgc = dgc->gc[0];
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int i;
        u32 mask;

        irq_gc_lock(bgc);
        for (i = 0; i < dgc->irqs_per_chip; i++) {
                mask = 1 << i;
                if ((mask & gc->mask_cache) == (mask & gc->wake_active))
                        continue;

                irq_reg_writel(i + gc->irq_base,
                               bgc->reg_base + AT91_AIC5_SSR);
                if (mask & gc->wake_active)
                        irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
                else
                        irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
        }
        irq_gc_unlock(bgc);
}

static void aic5_resume(struct irq_data *d)
{
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
        struct irq_chip_generic *bgc = dgc->gc[0];
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int i;
        u32 mask;

        irq_gc_lock(bgc);
        for (i = 0; i < dgc->irqs_per_chip; i++) {
                mask = 1 << i;
                if ((mask & gc->mask_cache) == (mask & gc->wake_active))
                        continue;

                irq_reg_writel(i + gc->irq_base,
                               bgc->reg_base + AT91_AIC5_SSR);
                if (mask & gc->mask_cache)
                        irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
                else
                        irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
        }
        irq_gc_unlock(bgc);
}

static void aic5_pm_shutdown(struct irq_data *d)
{
        struct irq_domain *domain = d->domain;
        struct irq_domain_chip_generic *dgc = domain->gc;
        struct irq_chip_generic *bgc = dgc->gc[0];
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int i;

        irq_gc_lock(bgc);
        for (i = 0; i < dgc->irqs_per_chip; i++) {
                irq_reg_writel(i + gc->irq_base,
                               bgc->reg_base + AT91_AIC5_SSR);
                irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
                irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR);
        }
        irq_gc_unlock(bgc);
}
#else
#define aic5_suspend		NULL
#define aic5_resume		NULL
#define aic5_pm_shutdown	NULL
#endif /* CONFIG_PM */
static void __init aic5_hw_init(struct irq_domain *domain)
{
        struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
        int i;

        /*
         * Perform 8 End Of Interrupt commands to make sure the AIC
         * will not lock out nIRQ.
         */
        for (i = 0; i < 8; i++)
                irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);

        /*
         * Spurious Interrupt ID in Spurious Vector Register.
         * When there is no current interrupt, the IRQ Vector Register
         * reads the value stored in AIC_SPU.
         */
        irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU);

        /* No debugging in AIC: Debug (Protect) Control Register */
        irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR);

        /* Disable and clear all interrupts initially */
        for (i = 0; i < domain->revmap_size; i++) {
                irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR);
                irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR);
                irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
                irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR);
        }
}
static int aic5_irq_domain_xlate(struct irq_domain *d,
                                 struct device_node *ctrlr,
                                 const u32 *intspec, unsigned int intsize,
                                 irq_hw_number_t *out_hwirq,
                                 unsigned int *out_type)
{
        struct irq_domain_chip_generic *dgc = d->gc;
        struct irq_chip_generic *gc;
        unsigned smr;
        int ret;

        if (!dgc)
                return -EINVAL;

        ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
                                          out_hwirq, out_type);
        if (ret)
                return ret;

        gc = dgc->gc[0];

        irq_gc_lock(gc);
        irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR);
        smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
        ret = aic_common_set_priority(intspec[2], &smr);
        if (!ret)
                irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR);
        irq_gc_unlock(gc);

        return ret;
}
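
/*
 * The xlate hook above decodes the devicetree interrupt specifier: the hwirq
 * number and trigger flags are handled by aic_common_irq_domain_xlate(),
 * while the priority carried in the third specifier cell (intspec[2]) is
 * programmed into the selected source's SMR register.
 */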
static const struct irq_domain_ops aic5_irq_ops = {
        .map	= irq_map_generic_chip,
        .xlate	= aic5_irq_domain_xlate,
};
static void __init sama5d3_aic_irq_fixup(struct device_node *root)
{
        aic_common_rtc_irq_fixup(root);
}

static const struct of_device_id __initdata aic5_irq_fixups[] = {
        { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
        { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
        { /* sentinel */ },
};
static int __init aic5_of_init(struct device_node *node,
                               struct device_node *parent,
                               int nirqs)
{
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
        int nchips;
        int i;

        if (nirqs > NR_AIC5_IRQS)
                return -EINVAL;

        if (aic5_domain)
                return -EEXIST;

        domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
                                    nirqs);
        if (IS_ERR(domain))
                return PTR_ERR(domain);

        aic_common_irq_fixup(aic5_irq_fixups);

        aic5_domain = domain;
        nchips = aic5_domain->revmap_size / 32;
        for (i = 0; i < nchips; i++) {
                gc = irq_get_domain_generic_chip(domain, i * 32);

                gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
                gc->chip_types[0].chip.irq_mask = aic5_mask;
                gc->chip_types[0].chip.irq_unmask = aic5_unmask;
                gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
                gc->chip_types[0].chip.irq_set_type = aic5_set_type;
                gc->chip_types[0].chip.irq_suspend = aic5_suspend;
                gc->chip_types[0].chip.irq_resume = aic5_resume;
                gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
        }

        aic5_hw_init(domain);
        set_handle_irq(aic5_handle);

        return 0;
}
#define NR_SAMA5D3_IRQS		48

static int __init sama5d3_aic5_of_init(struct device_node *node,
                                       struct device_node *parent)
{
        return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
}
IRQCHIP_DECLARE(sama5d3_aic5, "atmel,sama5d3-aic", sama5d3_aic5_of_init);

#define NR_SAMA5D4_IRQS		68

static int __init sama5d4_aic5_of_init(struct device_node *node,
                                       struct device_node *parent)
{
        return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
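
/*
 * Hypothetical devicetree usage, for illustration only; the register
 * address, map size and external IRQ list below are assumptions that
 * depend on the actual SoC:
 *
 *	aic: interrupt-controller@fffff000 {
 *		compatible = "atmel,sama5d3-aic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0xfffff000 0x200>;
 *		atmel,external-irqs = <47>;
 *	};
 *
 * Consumers would then reference it with a three-cell specifier:
 * <hwirq IRQ_TYPE_* priority>, matching what aic5_irq_domain_xlate()
 * decodes above.
 */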