/*
 * linux/arch/arm/mach-at91/irq.c
 *
 * Copyright (C) 2004 SAN People
 * Copyright (C) 2004 ATMEL
 * Copyright (C) Rick Bronson
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/setup.h>

#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>

#include "at91_aic.h"

void __iomem *at91_aic_base;
static struct irq_domain *at91_aic_domain;
static struct device_node *at91_aic_np;
static unsigned int n_irqs = NR_AIC_IRQS;
static unsigned long at91_aic_caps = 0;

/* AIC5 introduces a Source Select Register */
#define AT91_AIC_CAP_AIC5	(1 << 0)
#define has_aic5()		(at91_aic_caps & AT91_AIC_CAP_AIC5)
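
/*
 * The original AIC exposes one SMR/SVR register and one mask bit per
 * interrupt line, whereas AIC5 banks those registers behind the Source
 * Select Register: the interrupt number is written to AT91_AIC5_SSR
 * first, then a single SMR/SVR/IECR/IDCR slot is accessed for that line.
 */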

#ifdef CONFIG_PM
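
/*
 * "backups" caches which interrupts are currently unmasked (maintained by
 * the mask/unmask callbacks through set_backup()/clear_backup());
 * "wakeups" records the lines configured as wakeup sources through
 * at91_aic_set_wake(). Suspend masks everything in "backups" and enables
 * only "wakeups"; resume reverses the operation.
 */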
static unsigned long *wakeups;
static unsigned long *backups;

#define set_backup(bit) set_bit(bit, backups)
#define clear_backup(bit) clear_bit(bit, backups)

static int at91_aic_pm_init(void)
{
	backups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*backups), GFP_KERNEL);
	if (!backups)
		return -ENOMEM;

	wakeups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*backups), GFP_KERNEL);
	if (!wakeups) {
		kfree(backups);
		return -ENOMEM;
	}

	return 0;
}

static int at91_aic_set_wake(struct irq_data *d, unsigned value)
{
	if (unlikely(d->hwirq >= n_irqs))
		return -EINVAL;

	if (value)
		set_bit(d->hwirq, wakeups);
	else
		clear_bit(d->hwirq, wakeups);

	return 0;
}

void at91_irq_suspend(void)
{
	int i = 0, bit;

	if (has_aic5()) {
		/* disable enabled irqs */
		while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IDCR, 1);
			i = bit + 1;
		}
		/* enable wakeup irqs */
		i = 0;
		while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IECR, 1);
			i = bit + 1;
		}
	} else {
		at91_aic_write(AT91_AIC_IDCR, *backups);
		at91_aic_write(AT91_AIC_IECR, *wakeups);
	}
}

void at91_irq_resume(void)
{
	int i = 0, bit;

	if (has_aic5()) {
		/* disable wakeup irqs */
		while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IDCR, 1);
			i = bit + 1;
		}
		/* enable irqs disabled for suspend */
		i = 0;
		while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IECR, 1);
			i = bit + 1;
		}
	} else {
		at91_aic_write(AT91_AIC_IDCR, *wakeups);
		at91_aic_write(AT91_AIC_IECR, *backups);
	}
}

#else
static inline int at91_aic_pm_init(void)
{
	return 0;
}

#define set_backup(bit)
#define clear_backup(bit)
#define at91_aic_set_wake	NULL

#endif /* CONFIG_PM */

asmlinkage void __exception_irq_entry
at91_aic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;
	u32 irqstat;

	irqnr = at91_aic_read(AT91_AIC_IVR);
	irqstat = at91_aic_read(AT91_AIC_ISR);

	/*
	 * ISR value is 0 when there is no current interrupt or when there is
	 * a spurious interrupt
	 */
	if (!irqstat)
		at91_aic_write(AT91_AIC_EOICR, 0);
	else
		handle_IRQ(irqnr, regs);
}
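
/*
 * Same flow for AIC5: reading AIC5_IVR returns the Linux irq number that
 * was programmed into the Source Vector Register for the highest-priority
 * pending interrupt, while an AIC5_ISR value of 0 denotes a spurious
 * interrupt that is simply acknowledged with an end-of-interrupt command.
 */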
asmlinkage void __exception_irq_entry
at91_aic5_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;
	u32 irqstat;

	irqnr = at91_aic_read(AT91_AIC5_IVR);
	irqstat = at91_aic_read(AT91_AIC5_ISR);

	if (!irqstat)
		at91_aic_write(AT91_AIC5_EOICR, 0);
	else
		handle_IRQ(irqnr, regs);
}

static void at91_aic_mask_irq(struct irq_data *d)
{
	/* Disable interrupt on AIC */
	at91_aic_write(AT91_AIC_IDCR, 1 << d->hwirq);
	/* Update ISR cache */
	clear_backup(d->hwirq);
}

static void __maybe_unused at91_aic5_mask_irq(struct irq_data *d)
{
	/* Disable interrupt on AIC5 */
	at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
	at91_aic_write(AT91_AIC5_IDCR, 1);
	/* Update ISR cache */
	clear_backup(d->hwirq);
}

static void at91_aic_unmask_irq(struct irq_data *d)
{
	/* Enable interrupt on AIC */
	at91_aic_write(AT91_AIC_IECR, 1 << d->hwirq);
	/* Update ISR cache */
	set_backup(d->hwirq);
}

static void __maybe_unused at91_aic5_unmask_irq(struct irq_data *d)
{
	/* Enable interrupt on AIC5 */
	at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
	at91_aic_write(AT91_AIC5_IECR, 1);
	/* Update ISR cache */
	set_backup(d->hwirq);
}

static void at91_aic_eoi(struct irq_data *d)
{
	/*
	 * Mark end-of-interrupt on AIC, the controller doesn't care about
	 * the value written. Moreover it's a write-only register.
	 */
	at91_aic_write(AT91_AIC_EOICR, 0);
}

static void __maybe_unused at91_aic5_eoi(struct irq_data *d)
{
	at91_aic_write(AT91_AIC5_EOICR, 0);
}
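
/*
 * Bitmap of the SoC's external interrupt lines. Together with the FIQ
 * line these are the only sources for which low-level or falling-edge
 * triggers are accepted in at91_aic_compute_srctype(). It is filled
 * either from the "atmel,external-irqs" DT property or from the
 * ext_irq_mask passed to at91_aic_init().
 */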
unsigned long *at91_extern_irq;

#define is_extern_irq(hwirq) test_bit(hwirq, at91_extern_irq)

static int at91_aic_compute_srctype(struct irq_data *d, unsigned type)
{
	int srctype;

	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
		srctype = AT91_AIC_SRCTYPE_HIGH;
		break;
	case IRQ_TYPE_EDGE_RISING:
		srctype = AT91_AIC_SRCTYPE_RISING;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq))	/* only supported on external interrupts */
			srctype = AT91_AIC_SRCTYPE_LOW;
		else
			srctype = -EINVAL;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq))	/* only supported on external interrupts */
			srctype = AT91_AIC_SRCTYPE_FALLING;
		else
			srctype = -EINVAL;
		break;
	default:
		srctype = -EINVAL;
	}

	return srctype;
}

static int at91_aic_set_type(struct irq_data *d, unsigned type)
{
	unsigned int smr;
	int srctype;

	srctype = at91_aic_compute_srctype(d, type);
	if (srctype < 0)
		return srctype;

	if (has_aic5()) {
		at91_aic_write(AT91_AIC5_SSR,
			       d->hwirq & AT91_AIC5_INTSEL_MSK);
		smr = at91_aic_read(AT91_AIC5_SMR) & ~AT91_AIC_SRCTYPE;
		at91_aic_write(AT91_AIC5_SMR, smr | srctype);
	} else {
		smr = at91_aic_read(AT91_AIC_SMR(d->hwirq))
		      & ~AT91_AIC_SRCTYPE;
		at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype);
	}

	return 0;
}
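
/*
 * irq_chip callbacks for the original AIC; at91_aic5_of_init() below
 * replaces the mask/unmask/eoi (and ack) handlers with their AIC5
 * variants before the irq domain is registered.
 */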
static struct irq_chip at91_aic_chip = {
	.name		= "AIC",
	.irq_mask	= at91_aic_mask_irq,
	.irq_unmask	= at91_aic_unmask_irq,
	.irq_set_type	= at91_aic_set_type,
	.irq_set_wake	= at91_aic_set_wake,
	.irq_eoi	= at91_aic_eoi,
};

static void __init at91_aic_hw_init(unsigned int spu_vector)
{
	int i;

	/*
	 * Perform 8 End Of Interrupt Command to make sure AIC
	 * will not Lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		at91_aic_write(AT91_AIC_EOICR, 0);

	/*
	 * Spurious Interrupt ID in Spurious Vector Register.
	 * When there is no current interrupt, the IRQ Vector Register
	 * reads the value stored in AIC_SPU
	 */
	at91_aic_write(AT91_AIC_SPU, spu_vector);

	/* No debugging in AIC: Debug (Protect) Control Register */
	at91_aic_write(AT91_AIC_DCR, 0);

	/* Disable and clear all interrupts initially */
	at91_aic_write(AT91_AIC_IDCR, 0xFFFFFFFF);
	at91_aic_write(AT91_AIC_ICCR, 0xFFFFFFFF);
}

static void __init __maybe_unused at91_aic5_hw_init(unsigned int spu_vector)
{
	int i;

	/*
	 * Perform 8 End Of Interrupt Command to make sure AIC
	 * will not Lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		at91_aic_write(AT91_AIC5_EOICR, 0);

	/*
	 * Spurious Interrupt ID in Spurious Vector Register.
	 * When there is no current interrupt, the IRQ Vector Register
	 * reads the value stored in AIC_SPU
	 */
	at91_aic_write(AT91_AIC5_SPU, spu_vector);

	/* No debugging in AIC: Debug (Protect) Control Register */
	at91_aic_write(AT91_AIC5_DCR, 0);

	/* Disable and clear all interrupts initially */
	for (i = 0; i < n_irqs; i++) {
		at91_aic_write(AT91_AIC5_SSR, i & AT91_AIC5_INTSEL_MSK);
		at91_aic_write(AT91_AIC5_IDCR, 1);
		at91_aic_write(AT91_AIC5_ICCR, 1);
	}
}

#if defined(CONFIG_OF)
static unsigned int *at91_aic_irq_priorities;

static int at91_aic_irq_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	/* Put virq number in Source Vector Register */
	at91_aic_write(AT91_AIC_SVR(hw), virq);

	/* Active Low interrupt, with priority */
	at91_aic_write(AT91_AIC_SMR(hw),
		       AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);

	irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

static int at91_aic5_irq_map(struct irq_domain *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	at91_aic_write(AT91_AIC5_SSR, hw & AT91_AIC5_INTSEL_MSK);

	/* Put virq number in Source Vector Register */
	at91_aic_write(AT91_AIC5_SVR, virq);

	/* Active Low interrupt, with priority */
	at91_aic_write(AT91_AIC5_SMR,
		       AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);

	irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}
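
/*
 * Interrupt specifiers handled by the xlate callback below use three
 * cells: the hardware irq number, the trigger type and the priority
 * (between AT91_AIC_IRQ_MIN_PRIORITY and AT91_AIC_IRQ_MAX_PRIORITY).
 * A consumer node would therefore look roughly like this (illustrative
 * sketch only, values made up):
 *
 *	interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
 *
 * External lines are listed separately in the controller node's
 * "atmel,external-irqs" property, parsed in at91_aic_of_common_init().
 */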
static int at91_aic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				     const u32 *intspec, unsigned int intsize,
				     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 3))
		return -EINVAL;
	if (WARN_ON(intspec[0] >= n_irqs))
		return -EINVAL;
	if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY)
		    || (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
		return -EINVAL;

	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	at91_aic_irq_priorities[*out_hwirq] = intspec[2];

	return 0;
}

static struct irq_domain_ops at91_aic_irq_ops = {
	.map	= at91_aic_irq_map,
	.xlate	= at91_aic_irq_domain_xlate,
};

int __init at91_aic_of_common_init(struct device_node *node,
				   struct device_node *parent)
{
	struct property *prop;
	const __be32 *p;
	u32 val;

	at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
				  * sizeof(*at91_extern_irq), GFP_KERNEL);
	if (!at91_extern_irq)
		return -ENOMEM;

	if (at91_aic_pm_init()) {
		kfree(at91_extern_irq);
		return -ENOMEM;
	}

	at91_aic_irq_priorities = kzalloc(n_irqs
					  * sizeof(*at91_aic_irq_priorities),
					  GFP_KERNEL);
	if (!at91_aic_irq_priorities)
		return -ENOMEM;

	at91_aic_base = of_iomap(node, 0);
	at91_aic_np = node;

	at91_aic_domain = irq_domain_add_linear(at91_aic_np, n_irqs,
						&at91_aic_irq_ops, NULL);
	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain (DT)\n");

	of_property_for_each_u32(node, "atmel,external-irqs", prop, p, val) {
		if (val >= n_irqs)
			pr_warn("AIC: external irq %d >= %d skip it\n",
				val, n_irqs);
		else
			set_bit(val, at91_extern_irq);
	}

	irq_set_default_host(at91_aic_domain);

	return 0;
}

int __init at91_aic_of_init(struct device_node *node,
			    struct device_node *parent)
{
	int err;

	err = at91_aic_of_common_init(node, parent);
	if (err)
		return err;

	at91_aic_hw_init(n_irqs);

	return 0;
}

int __init at91_aic5_of_init(struct device_node *node,
			     struct device_node *parent)
{
	int err;

	at91_aic_caps |= AT91_AIC_CAP_AIC5;
	n_irqs = NR_AIC5_IRQS;
	at91_aic_chip.irq_ack = at91_aic5_mask_irq;
	at91_aic_chip.irq_mask = at91_aic5_mask_irq;
	at91_aic_chip.irq_unmask = at91_aic5_unmask_irq;
	at91_aic_chip.irq_eoi = at91_aic5_eoi;
	at91_aic_irq_ops.map = at91_aic5_irq_map;

	err = at91_aic_of_common_init(node, parent);
	if (err)
		return err;

	at91_aic5_hw_init(n_irqs);

	return 0;
}
#endif

/*
 * Initialize the AIC interrupt controller.
 */
void __init at91_aic_init(unsigned int *priority, unsigned int ext_irq_mask)
{
	unsigned int i;
	int irq_base;

	at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
				  * sizeof(*at91_extern_irq), GFP_KERNEL);

	if (at91_aic_pm_init() || at91_extern_irq == NULL)
		panic("Unable to allocate bit maps\n");

	*at91_extern_irq = ext_irq_mask;

	at91_aic_base = ioremap(AT91_AIC, 512);
	if (!at91_aic_base)
		panic("Unable to ioremap AIC registers\n");

	/* Add irq domain for AIC */
	irq_base = irq_alloc_descs(-1, 0, n_irqs, 0);
	if (irq_base < 0) {
		WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n");
		irq_base = 0;
	}
	at91_aic_domain = irq_domain_add_legacy(at91_aic_np, n_irqs,
						irq_base, 0,
						&irq_domain_simple_ops, NULL);

	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain\n");

	irq_set_default_host(at91_aic_domain);

	/*
	 * The IVR is used by macro get_irqnr_and_base to read and verify.
	 * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
	 */
	for (i = 0; i < n_irqs; i++) {
		/* Put hardware irq number in Source Vector Register: */
		at91_aic_write(AT91_AIC_SVR(i), NR_IRQS_LEGACY + i);
		/* Active Low interrupt, with the specified priority */
		at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
		irq_set_chip_and_handler(NR_IRQS_LEGACY + i, &at91_aic_chip, handle_fasteoi_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	at91_aic_hw_init(n_irqs);
}