/*
 * linux/arch/arm/mach-at91/irq.c
 *
 *  Copyright (C) 2004 SAN People
 *  Copyright (C) 2004 ATMEL
 *  Copyright (C) Rick Bronson
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/setup.h>

#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
void __iomem *at91_aic_base;
static struct irq_domain *at91_aic_domain;
static struct device_node *at91_aic_np;
static unsigned int n_irqs = NR_AIC_IRQS;
static unsigned long at91_aic_caps = 0;

/* AIC5 introduces a Source Select Register */
#define AT91_AIC_CAP_AIC5	(1 << 0)
#define has_aic5()		(at91_aic_caps & AT91_AIC_CAP_AIC5)
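
/*
 * On AIC5 the per-line registers are reached indirectly: the line is first
 * selected through the Source Select Register (SSR), then the shared
 * IDCR/IECR/ICCR/SMR/SVR registers act on that selected line.
 */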
#ifdef CONFIG_PM

static unsigned long *wakeups;
static unsigned long *backups;

#define set_backup(bit) set_bit(bit, backups)
#define clear_backup(bit) clear_bit(bit, backups)
static int at91_aic_pm_init(void)
{
	backups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*backups), GFP_KERNEL);
	if (!backups)
		return -ENOMEM;

	wakeups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*wakeups), GFP_KERNEL);
	if (!wakeups) {
		kfree(backups);
		return -ENOMEM;
	}

	return 0;
}
static int at91_aic_set_wake(struct irq_data *d, unsigned value)
{
	if (unlikely(d->hwirq >= n_irqs))
		return -EINVAL;

	if (value)
		set_bit(d->hwirq, wakeups);
	else
		clear_bit(d->hwirq, wakeups);

	return 0;
}
void at91_irq_suspend(void)
{
	int bit = -1;

	if (has_aic5()) {
		/* disable enabled irqs */
		while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IDCR, 1);
		}
		/* enable wakeup irqs */
		bit = -1;
		while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IECR, 1);
		}
	} else {
		at91_aic_write(AT91_AIC_IDCR, *backups);
		at91_aic_write(AT91_AIC_IECR, *wakeups);
	}
}
void at91_irq_resume(void)
{
	int bit = -1;

	if (has_aic5()) {
		/* disable wakeup irqs */
		while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IDCR, 1);
		}
		/* enable irqs disabled for suspend */
		bit = -1;
		while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
			at91_aic_write(AT91_AIC5_SSR,
				       bit & AT91_AIC5_INTSEL_MSK);
			at91_aic_write(AT91_AIC5_IECR, 1);
		}
	} else {
		at91_aic_write(AT91_AIC_IDCR, *wakeups);
		at91_aic_write(AT91_AIC_IECR, *backups);
	}
}
#else
static inline int at91_aic_pm_init(void)
{
	return 0;
}

#define set_backup(bit)
#define clear_backup(bit)
#define at91_aic_set_wake	NULL

#endif /* CONFIG_PM */
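
/*
 * Exception entry points: reading the Interrupt Vector Register returns the
 * value programmed in the matching Source Vector Register (the Linux irq
 * number) and starts the interrupt treatment in the controller.
 */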
asmlinkage void __exception_irq_entry
at91_aic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;
	u32 irqstat;

	irqnr = at91_aic_read(AT91_AIC_IVR);
	irqstat = at91_aic_read(AT91_AIC_ISR);

	/*
	 * ISR value is 0 when there is no current interrupt or when there is
	 * a spurious interrupt
	 */
	if (!irqstat)
		at91_aic_write(AT91_AIC_EOICR, 0);
	else
		handle_IRQ(irqnr, regs);
}
asmlinkage void __exception_irq_entry
at91_aic5_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;
	u32 irqstat;

	irqnr = at91_aic_read(AT91_AIC5_IVR);
	irqstat = at91_aic_read(AT91_AIC5_ISR);

	if (!irqstat)
		at91_aic_write(AT91_AIC5_EOICR, 0);
	else
		handle_IRQ(irqnr, regs);
}
static void at91_aic_mask_irq(struct irq_data *d)
{
	/* Disable interrupt on AIC */
	at91_aic_write(AT91_AIC_IDCR, 1 << d->hwirq);
	/* Update ISR cache */
	clear_backup(d->hwirq);
}
static void __maybe_unused at91_aic5_mask_irq(struct irq_data *d)
{
	/* Disable interrupt on AIC5 */
	at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
	at91_aic_write(AT91_AIC5_IDCR, 1);
	/* Update ISR cache */
	clear_backup(d->hwirq);
}
static void at91_aic_unmask_irq(struct irq_data *d)
{
	/* Enable interrupt on AIC */
	at91_aic_write(AT91_AIC_IECR, 1 << d->hwirq);
	/* Update ISR cache */
	set_backup(d->hwirq);
}
static void __maybe_unused at91_aic5_unmask_irq(struct irq_data *d)
{
	/* Enable interrupt on AIC5 */
	at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
	at91_aic_write(AT91_AIC5_IECR, 1);
	/* Update ISR cache */
	set_backup(d->hwirq);
}
static void at91_aic_eoi(struct irq_data *d)
{
	/*
	 * Mark end-of-interrupt on AIC, the controller doesn't care about
	 * the value written. Moreover it's a write-only register.
	 */
	at91_aic_write(AT91_AIC_EOICR, 0);
}

static void __maybe_unused at91_aic5_eoi(struct irq_data *d)
{
	at91_aic_write(AT91_AIC5_EOICR, 0);
}
static unsigned long *at91_extern_irq;

u32 at91_get_extern_irq(void)
{
	if (!at91_extern_irq)
		return 0;

	return *at91_extern_irq;
}

#define is_extern_irq(hwirq) test_bit(hwirq, at91_extern_irq)
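
/*
 * at91_extern_irq is a bitmap of the hardware irq lines wired to external
 * pins; only these lines (and the FIQ) accept the low-level or falling-edge
 * trigger types handled below.
 */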
static int at91_aic_compute_srctype(struct irq_data *d, unsigned type)
{
	int srctype;

	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
		srctype = AT91_AIC_SRCTYPE_HIGH;
		break;
	case IRQ_TYPE_EDGE_RISING:
		srctype = AT91_AIC_SRCTYPE_RISING;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq))	/* only supported on external interrupts */
			srctype = AT91_AIC_SRCTYPE_LOW;
		else
			srctype = -EINVAL;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq))	/* only supported on external interrupts */
			srctype = AT91_AIC_SRCTYPE_FALLING;
		else
			srctype = -EINVAL;
		break;
	default:
		srctype = -EINVAL;
		break;
	}

	return srctype;
}
static int at91_aic_set_type(struct irq_data *d, unsigned type)
{
	unsigned int smr;
	int srctype;

	srctype = at91_aic_compute_srctype(d, type);
	if (srctype < 0)
		return srctype;

	if (has_aic5()) {
		at91_aic_write(AT91_AIC5_SSR,
			       d->hwirq & AT91_AIC5_INTSEL_MSK);
		smr = at91_aic_read(AT91_AIC5_SMR) & ~AT91_AIC_SRCTYPE;
		at91_aic_write(AT91_AIC5_SMR, smr | srctype);
	} else {
		smr = at91_aic_read(AT91_AIC_SMR(d->hwirq))
		      & ~AT91_AIC_SRCTYPE;
		at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype);
	}

	return 0;
}
static struct irq_chip at91_aic_chip = {
	.irq_mask	= at91_aic_mask_irq,
	.irq_unmask	= at91_aic_unmask_irq,
	.irq_set_type	= at91_aic_set_type,
	.irq_set_wake	= at91_aic_set_wake,
	.irq_eoi	= at91_aic_eoi,
};
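
/*
 * The chip defaults to the plain AIC callbacks; at91_aic5_of_init() below
 * overrides the mask/unmask/eoi hooks (and the domain map op) when probing
 * an AIC5 from the device tree.
 */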
static void __init at91_aic_hw_init(unsigned int spu_vector)
{
	int i;

	/*
	 * Perform 8 End Of Interrupt Command to make sure AIC
	 * will not Lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		at91_aic_write(AT91_AIC_EOICR, 0);

	/*
	 * Spurious Interrupt ID in Spurious Vector Register.
	 * When there is no current interrupt, the IRQ Vector Register
	 * reads the value stored in AIC_SPU
	 */
	at91_aic_write(AT91_AIC_SPU, spu_vector);

	/* No debugging in AIC: Debug (Protect) Control Register */
	at91_aic_write(AT91_AIC_DCR, 0);

	/* Disable and clear all interrupts initially */
	at91_aic_write(AT91_AIC_IDCR, 0xFFFFFFFF);
	at91_aic_write(AT91_AIC_ICCR, 0xFFFFFFFF);
}
static void __init __maybe_unused at91_aic5_hw_init(unsigned int spu_vector)
{
	int i;

	/*
	 * Perform 8 End Of Interrupt Command to make sure AIC
	 * will not Lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		at91_aic_write(AT91_AIC5_EOICR, 0);

	/*
	 * Spurious Interrupt ID in Spurious Vector Register.
	 * When there is no current interrupt, the IRQ Vector Register
	 * reads the value stored in AIC_SPU
	 */
	at91_aic_write(AT91_AIC5_SPU, spu_vector);

	/* No debugging in AIC: Debug (Protect) Control Register */
	at91_aic_write(AT91_AIC5_DCR, 0);

	/* Disable and clear all interrupts initially */
	for (i = 0; i < n_irqs; i++) {
		at91_aic_write(AT91_AIC5_SSR, i & AT91_AIC5_INTSEL_MSK);
		at91_aic_write(AT91_AIC5_IDCR, 1);
		at91_aic_write(AT91_AIC5_ICCR, 1);
	}
}
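
/*
 * Device tree probing: map hardware irq lines into an irq domain, record
 * per-line priorities from the interrupt specifier and collect the
 * "atmel,external-irqs" lines into at91_extern_irq.
 */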
#if defined(CONFIG_OF)
static unsigned int *at91_aic_irq_priorities;

static int at91_aic_irq_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	/* Put virq number in Source Vector Register */
	at91_aic_write(AT91_AIC_SVR(hw), virq);

	/* Active Low interrupt, with priority */
	at91_aic_write(AT91_AIC_SMR(hw),
		       AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);

	irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}
static int at91_aic5_irq_map(struct irq_domain *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	at91_aic_write(AT91_AIC5_SSR, hw & AT91_AIC5_INTSEL_MSK);

	/* Put virq number in Source Vector Register */
	at91_aic_write(AT91_AIC5_SVR, virq);

	/* Active Low interrupt, with priority */
	at91_aic_write(AT91_AIC5_SMR,
		       AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);

	irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}
static int at91_aic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				     const u32 *intspec, unsigned int intsize,
				     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 3))
		return -EINVAL;
	if (WARN_ON(intspec[0] >= n_irqs))
		return -EINVAL;
	if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY)
		    || (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
		return -EINVAL;

	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	at91_aic_irq_priorities[*out_hwirq] = intspec[2];

	return 0;
}
static struct irq_domain_ops at91_aic_irq_ops = {
	.map	= at91_aic_irq_map,
	.xlate	= at91_aic_irq_domain_xlate,
};
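
/*
 * Common part of the DT init: allocate the bitmaps and the priority table,
 * map the controller registers and register a linear irq domain.
 * at91_aic5_of_init() patches the chip callbacks and the domain map op
 * before calling it.
 */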
int __init at91_aic_of_common_init(struct device_node *node,
				   struct device_node *parent)
{
	struct property *prop;
	const __be32 *p;
	u32 val;

	at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
				  * sizeof(*at91_extern_irq), GFP_KERNEL);
	if (!at91_extern_irq)
		return -ENOMEM;

	if (at91_aic_pm_init()) {
		kfree(at91_extern_irq);
		return -ENOMEM;
	}

	at91_aic_irq_priorities = kzalloc(n_irqs
					  * sizeof(*at91_aic_irq_priorities),
					  GFP_KERNEL);
	if (!at91_aic_irq_priorities)
		return -ENOMEM;

	at91_aic_base = of_iomap(node, 0);
	at91_aic_np = of_node_get(node);

	at91_aic_domain = irq_domain_add_linear(at91_aic_np, n_irqs,
						&at91_aic_irq_ops, NULL);
	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain (DT)\n");

	of_property_for_each_u32(node, "atmel,external-irqs", prop, p, val) {
		if (val >= n_irqs)
			pr_warn("AIC: external irq %d >= %d skip it\n",
				val, n_irqs);
		else
			set_bit(val, at91_extern_irq);
	}

	irq_set_default_host(at91_aic_domain);

	return 0;
}
int __init at91_aic_of_init(struct device_node *node,
			    struct device_node *parent)
{
	int err;

	err = at91_aic_of_common_init(node, parent);
	if (err)
		return err;

	at91_aic_hw_init(n_irqs);

	return 0;
}
int __init at91_aic5_of_init(struct device_node *node,
			     struct device_node *parent)
{
	int err;

	at91_aic_caps |= AT91_AIC_CAP_AIC5;
	n_irqs = NR_AIC5_IRQS;
	at91_aic_chip.irq_ack = at91_aic5_mask_irq;
	at91_aic_chip.irq_mask = at91_aic5_mask_irq;
	at91_aic_chip.irq_unmask = at91_aic5_unmask_irq;
	at91_aic_chip.irq_eoi = at91_aic5_eoi;
	at91_aic_irq_ops.map = at91_aic5_irq_map;

	err = at91_aic_of_common_init(node, parent);
	if (err)
		return err;

	at91_aic5_hw_init(n_irqs);

	return 0;
}
#endif /* CONFIG_OF */
/*
 * Initialize the AIC interrupt controller.
 */
void __init at91_aic_init(unsigned int *priority, unsigned int ext_irq_mask)
{
	unsigned int i;
	int irq_base;

	at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
				  * sizeof(*at91_extern_irq), GFP_KERNEL);

	if (at91_aic_pm_init() || at91_extern_irq == NULL)
		panic("Unable to allocate bit maps\n");

	*at91_extern_irq = ext_irq_mask;

	at91_aic_base = ioremap(AT91_AIC, 512);
	if (!at91_aic_base)
		panic("Unable to ioremap AIC registers\n");

	/* Add irq domain for AIC */
	irq_base = irq_alloc_descs(-1, 0, n_irqs, 0);
	if (irq_base < 0) {
		WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n");
		irq_base = 0;
	}

	at91_aic_domain = irq_domain_add_legacy(at91_aic_np, n_irqs,
						irq_base, 0,
						&irq_domain_simple_ops, NULL);

	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain\n");

	irq_set_default_host(at91_aic_domain);

	/*
	 * The IVR is used by macro get_irqnr_and_base to read and verify.
	 * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
	 */
	for (i = 0; i < n_irqs; i++) {
		/* Put hardware irq number in Source Vector Register: */
		at91_aic_write(AT91_AIC_SVR(i), NR_IRQS_LEGACY + i);
		/* Active Low interrupt, with the specified priority */
		at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
		irq_set_chip_and_handler(NR_IRQS_LEGACY + i, &at91_aic_chip, handle_fasteoi_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	at91_aic_hw_init(n_irqs);
}