/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>

static DEFINE_SPINLOCK(irq_controller_lock);

/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;

struct gic_chip_data {
        unsigned int irq_offset;
        void __iomem *dist_base;
        void __iomem *cpu_base;
};

/*
 * Supported arch specific GIC irq extension.
 * The callbacks default to NULL; platforms may override them.
 */
struct irq_chip gic_arch_extn = {
        .irq_ack        = NULL,
        .irq_mask       = NULL,
        .irq_unmask     = NULL,
        .irq_retrigger  = NULL,
        .irq_set_type   = NULL,
        .irq_set_wake   = NULL,
};
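
/*
 * Example (illustrative only): a platform that needs extra wake-up
 * handling can override individual hooks from its own init code.  The
 * function name below is made up for the sake of the example and is not
 * part of this file:
 *
 *     static int plat_gic_set_wake(struct irq_data *d, unsigned int on)
 *     {
 *             // program the platform wake-up controller for d->irq
 *             return 0;
 *     }
 *
 *     gic_arch_extn.irq_set_wake = plat_gic_set_wake;
 *
 * The hooks are invoked from the corresponding gic_* handlers below.
 */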

/* Platforms may define MAX_GIC_NR; default to a single GIC. */
#ifndef MAX_GIC_NR
#define MAX_GIC_NR      1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data->dist_base;
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data->cpu_base;
}
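
/*
 * Translate a Linux IRQ number into the GIC hardware interrupt ID.
 * irq_offset is the Linux IRQ number corresponding to hardware
 * interrupt 0 on this GIC (see gic_init(), which derives it from
 * irq_start).
 */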
static inline unsigned int gic_irq(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return d->irq - gic_data->irq_offset;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_ack_irq(struct irq_data *d)
{
        spin_lock(&irq_controller_lock);
        if (gic_arch_extn.irq_ack)
                gic_arch_extn.irq_ack(d);
        writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
        spin_unlock(&irq_controller_lock);
}

static void gic_mask_irq(struct irq_data *d)
{
        u32 mask = 1 << (d->irq % 32);

        spin_lock(&irq_controller_lock);
        writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
        if (gic_arch_extn.irq_mask)
                gic_arch_extn.irq_mask(d);
        spin_unlock(&irq_controller_lock);
}

static void gic_unmask_irq(struct irq_data *d)
{
        u32 mask = 1 << (d->irq % 32);

        spin_lock(&irq_controller_lock);
        if (gic_arch_extn.irq_unmask)
                gic_arch_extn.irq_unmask(d);
        writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
        spin_unlock(&irq_controller_lock);
}
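
/*
 * The distributor packs 32 interrupts into each 32-bit ENABLE_SET/
 * ENABLE_CLEAR register and 16 interrupts (2 configuration bits each)
 * into each CONFIG register, which is where the offset arithmetic above
 * and in gic_set_type() comes from.  Worked example for hardware
 * interrupt 61:
 *
 *     enable word:  (61 / 32) * 4 = 4          (second 32-bit register)
 *     enable bit:   1 << (61 % 32)             (bit 29)
 *     config word:  (61 / 16) * 4 = 12         (fourth 32-bit register)
 *     config bits:  0x2 << ((61 % 16) * 2)     (edge/level bit for irq 61)
 */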

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);
        u32 enablemask = 1 << (gicirq % 32);
        u32 enableoff = (gicirq / 32) * 4;
        u32 confmask = 0x2 << ((gicirq % 16) * 2);
        u32 confoff = (gicirq / 16) * 4;
        bool enabled = false;
        u32 val;

        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
                return -EINVAL;

        /* Only level-high and rising-edge triggers are supported */
        if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        spin_lock(&irq_controller_lock);

        if (gic_arch_extn.irq_set_type)
                gic_arch_extn.irq_set_type(d, type);

        val = readl(base + GIC_DIST_CONFIG + confoff);
        if (type == IRQ_TYPE_LEVEL_HIGH)
                val &= ~confmask;
        else if (type == IRQ_TYPE_EDGE_RISING)
                val |= confmask;

        /*
         * As recommended by the spec, disable the interrupt before changing
         * the configuration, and re-enable it afterwards if it was enabled.
         */
        if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
                writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
                enabled = true;
        }

        writel(val, base + GIC_DIST_CONFIG + confoff);

        if (enabled)
                writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

        spin_unlock(&irq_controller_lock);

        return 0;
}

static int gic_retrigger(struct irq_data *d)
{
        if (gic_arch_extn.irq_retrigger)
                return gic_arch_extn.irq_retrigger(d);

        /* retrigger is not supported without an arch extension */
        return -ENXIO;
}
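
/*
 * The distributor's TARGET registers hold one byte per interrupt, with
 * one bit per CPU interface in each byte (so four interrupts per 32-bit
 * word).  gic_set_affinity() rewrites just the byte belonging to the
 * interrupt, which is why it masks the register address with ~3 and
 * shifts by (irq % 4) * 8.
 */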

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int shift = (d->irq % 4) * 8;
        unsigned int cpu = cpumask_first(mask_val);
        u32 val, mask, bit;

        /* the byte-wide TARGET fields can only describe CPUs 0-7 */
        if (cpu >= 8)
                return -EINVAL;

        mask = 0xff << shift;
        bit = 1 << (cpu + shift);

        spin_lock(&irq_controller_lock);
        val = readl(reg) & ~mask;
        writel(val | bit, reg);
        spin_unlock(&irq_controller_lock);

        return 0;
}
#endif

#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
        int ret = -ENXIO;

        if (gic_arch_extn.irq_set_wake)
                ret = gic_arch_extn.irq_set_wake(d, on);

        return ret;
}

#else
#define gic_set_wake    NULL
#endif
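
/*
 * Flow of a cascaded (secondary) GIC: the parent interrupt is acked on
 * the primary controller, the pending interrupt ID is read from the
 * secondary's CPU interface INTACK register, the corresponding Linux
 * IRQ is dispatched, and finally the parent interrupt is unmasked again.
 */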
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct gic_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, gic_irq;
        unsigned long status;

        /* primary controller ack'ing */
        chip->irq_ack(&desc->irq_data);

        spin_lock(&irq_controller_lock);
        status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
        spin_unlock(&irq_controller_lock);

        gic_irq = (status & 0x3ff);
        if (gic_irq == 1023)
                goto out;

        cascade_irq = gic_irq + chip_data->irq_offset;
        if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
                do_bad_IRQ(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        /* primary controller unmasking */
        chip->irq_unmask(&desc->irq_data);
}

static struct irq_chip gic_chip = {
        .name                   = "GIC",
        .irq_ack                = gic_ack_irq,
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_set_type           = gic_set_type,
        .irq_retrigger          = gic_retrigger,
#ifdef CONFIG_SMP
        .irq_set_affinity       = gic_set_affinity,
#endif
        .irq_set_wake           = gic_set_wake,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
        if (gic_nr >= MAX_GIC_NR)
                BUG();
        if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
                BUG();
        irq_set_chained_handler(irq, gic_handle_cascade_irq);
}

static void __init gic_dist_init(struct gic_chip_data *gic,
        unsigned int irq_start)
{
        unsigned int gic_irqs, irq_limit, i;
        void __iomem *base = gic->dist_base;
        u32 cpumask = 1 << smp_processor_id();

        /* replicate the current CPU's mask into all four TARGET byte lanes */
        cpumask |= cpumask << 8;
        cpumask |= cpumask << 16;

        writel(0, base + GIC_DIST_CTRL);

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources.
         */
        gic_irqs = readl(base + GIC_DIST_CTR) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > 1020)
                gic_irqs = 1020;

        /*
         * Set all global interrupts to be level triggered, active low.
         */
        for (i = 32; i < gic_irqs; i += 16)
                writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);

        /*
         * Set all global interrupts to this CPU only.
         */
        for (i = 32; i < gic_irqs; i += 4)
                writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

        /*
         * Set priority on all global interrupts.
         */
        for (i = 32; i < gic_irqs; i += 4)
                writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

        /*
         * Disable all interrupts.  Leave the PPI and SGIs alone
         * as these enables are banked registers.
         */
        for (i = 32; i < gic_irqs; i += 32)
                writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

        /*
         * Limit number of interrupts registered to the platform maximum
         */
        irq_limit = gic->irq_offset + gic_irqs;
        if (WARN_ON(irq_limit > NR_IRQS))
                irq_limit = NR_IRQS;

        /*
         * Setup the Linux IRQ subsystem.
         */
        for (i = irq_start; i < irq_limit; i++) {
                irq_set_chip_and_handler(i, &gic_chip, handle_level_irq);
                irq_set_chip_data(i, gic);
                set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
        }

        writel(1, base + GIC_DIST_CTRL);
}
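
/*
 * Per-CPU interface setup.  The CPU interface registers (and the
 * distributor's banked enables for IRQs 0-31) are per-CPU, so this must
 * run on the CPU being brought up - see gic_secondary_init().
 */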
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
        void __iomem *dist_base = gic->dist_base;
        void __iomem *base = gic->cpu_base;
        int i;

        /*
         * Deal with the banked PPI and SGI interrupts - disable all
         * PPI interrupts, ensure all SGI interrupts are enabled.
         */
        writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
        writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

        /*
         * Set priority on PPI and SGI interrupts
         */
        for (i = 0; i < 32; i += 4)
                writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

        writel(0xf0, base + GIC_CPU_PRIMASK);
        writel(1, base + GIC_CPU_CTRL);
}

void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
        void __iomem *dist_base, void __iomem *cpu_base)
{
        struct gic_chip_data *gic;

        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic = &gic_data[gic_nr];
        gic->dist_base = dist_base;
        gic->cpu_base = cpu_base;
        gic->irq_offset = (irq_start - 1) & ~31;

        if (gic_nr == 0)
                gic_cpu_base_addr = cpu_base;

        gic_dist_init(gic, irq_start);
        gic_cpu_init(gic);
}

void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic_cpu_init(&gic_data[gic_nr]);
}
void __cpuinit gic_enable_ppi(unsigned int irq)
{
        unsigned long flags;

        local_irq_save(flags);
        irq_set_status_flags(irq, IRQ_NOPROBE);
        gic_unmask_irq(irq_get_irq_data(irq));
        local_irq_restore(flags);
}
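
/*
 * Software-generated interrupts (SGIs, IRQs 0-15) are raised by writing
 * the target CPU list into bits [23:16] and the SGI number into bits
 * [3:0] of the distributor's SOFTINT register, which is what
 * map << 16 | irq below encodes.
 */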
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        unsigned long map = *cpus_addr(*mask);

        /* this always happens on GIC0 */
        writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif