/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
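/*
 * In GIC terms, interrupt IDs 0-15 are software-generated interrupts
 * (SGIs), 16-31 are private peripheral interrupts (PPIs), and 32 upwards
 * are shared peripheral interrupts (SPIs); only the SPIs are global to
 * all CPUs, which is why IDs 0-31 are banked per CPU as described above.
 */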
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
static DEFINE_SPINLOCK(irq_controller_lock);

/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;
/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
struct irq_chip gic_arch_extn = {
        .irq_eoi        = NULL,
        .irq_mask       = NULL,
        .irq_unmask     = NULL,
        .irq_retrigger  = NULL,
        .irq_set_type   = NULL,
        .irq_set_wake   = NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR      1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data->dist_base;
}
static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data->cpu_base;
}
static inline unsigned int gic_irq(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return d->irq - gic_data->irq_offset;
}
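/*
 * Worked example of the Linux IRQ <-> GIC ID mapping above (numbers are
 * illustrative only): a GIC registered with irq_offset 32 maps Linux
 * IRQ 64 to GIC interrupt 64 - 32 = 32, i.e. its first SPI.
 */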
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
        u32 mask = 1 << (d->irq % 32);

        spin_lock(&irq_controller_lock);
        writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
        if (gic_arch_extn.irq_mask)
                gic_arch_extn.irq_mask(d);
        spin_unlock(&irq_controller_lock);
}
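/*
 * The distributor enable set/clear registers hold one bit per interrupt,
 * 32 interrupts per 32-bit word, which is why the word offset is
 * (gic_irq(d) / 32) * 4 and the bit is 1 << (irq % 32). For example,
 * interrupt 45 lives in the second word (byte offset 4), bit 13.
 */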
static void gic_unmask_irq(struct irq_data *d)
{
        u32 mask = 1 << (d->irq % 32);

        spin_lock(&irq_controller_lock);
        if (gic_arch_extn.irq_unmask)
                gic_arch_extn.irq_unmask(d);
        writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
        spin_unlock(&irq_controller_lock);
}
static void gic_eoi_irq(struct irq_data *d)
{
        if (gic_arch_extn.irq_eoi) {
                spin_lock(&irq_controller_lock);
                gic_arch_extn.irq_eoi(d);
                spin_unlock(&irq_controller_lock);
        }

        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);
        u32 enablemask = 1 << (gicirq % 32);
        u32 enableoff = (gicirq / 32) * 4;
        u32 confmask = 0x2 << ((gicirq % 16) * 2);
        u32 confoff = (gicirq / 16) * 4;
        bool enabled = false;
        u32 val;

        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
                return -EINVAL;

        if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        spin_lock(&irq_controller_lock);

        if (gic_arch_extn.irq_set_type)
                gic_arch_extn.irq_set_type(d, type);

        val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
        if (type == IRQ_TYPE_LEVEL_HIGH)
                val &= ~confmask;
        else if (type == IRQ_TYPE_EDGE_RISING)
                val |= confmask;

        /*
         * As recommended by the spec, disable the interrupt before changing
         * the configuration.
         */
        if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
                writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
                enabled = true;
        }

        writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

        if (enabled)
                writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

        spin_unlock(&irq_controller_lock);

        return 0;
}
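/*
 * Each distributor config register describes 16 interrupts with two bits
 * each; bit 1 of the field selects edge (1) versus level (0) triggering,
 * hence the 0x2 mask above. As an illustration, interrupt 34 uses the
 * register at byte offset (34 / 16) * 4 = 8 and the field mask
 * 0x2 << ((34 % 16) * 2) = 0x2 << 4.
 */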
static int gic_retrigger(struct irq_data *d)
{
        if (gic_arch_extn.irq_retrigger)
                return gic_arch_extn.irq_retrigger(d);

        return -ENXIO;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int shift = (d->irq % 4) * 8;
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        u32 val, mask, bit;

        if (cpu >= 8 || cpu >= nr_cpu_ids)
                return -EINVAL;

        mask = 0xff << shift;
        bit = 1 << (cpu + shift);

        spin_lock(&irq_controller_lock);
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        spin_unlock(&irq_controller_lock);

        return IRQ_SET_MASK_OK;
}
#endif
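/*
 * The distributor target registers hold one byte per interrupt, four
 * interrupts per 32-bit word, so the word is selected with & ~3 and the
 * byte lane with (irq % 4) * 8. Each byte is a bitmap of up to eight
 * target CPUs, which is where the "cpu >= 8" limit above comes from.
 */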
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
        int ret = -ENXIO;

        if (gic_arch_extn.irq_set_wake)
                ret = gic_arch_extn.irq_set_wake(d, on);

        return ret;
}
#else
#define gic_set_wake    NULL
#endif
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct gic_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, gic_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        spin_lock(&irq_controller_lock);
        status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
        spin_unlock(&irq_controller_lock);

        gic_irq = (status & 0x3ff);
        if (gic_irq == 1023)
                goto out;

        cascade_irq = gic_irq + chip_data->irq_offset;
        if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
                do_bad_IRQ(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}
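/*
 * Reading GIC_CPU_INTACK returns the pending interrupt ID in bits [9:0];
 * the reserved value 1023 means "no interrupt pending" (spurious), which
 * is why the cascade handler above returns without forwarding anything
 * for that value.
 */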
static struct irq_chip gic_chip = {
        .name             = "GIC",
        .irq_mask         = gic_mask_irq,
        .irq_unmask       = gic_unmask_irq,
        .irq_eoi          = gic_eoi_irq,
        .irq_set_type     = gic_set_type,
        .irq_retrigger    = gic_retrigger,
#ifdef CONFIG_SMP
        .irq_set_affinity = gic_set_affinity,
#endif
        .irq_set_wake     = gic_set_wake,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
        if (gic_nr >= MAX_GIC_NR)
                BUG();
        if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
                BUG();
        irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static void __init gic_dist_init(struct gic_chip_data *gic,
        unsigned int irq_start)
{
        unsigned int gic_irqs, irq_limit, i;
        void __iomem *base = gic->dist_base;
        u32 cpumask = 1 << smp_processor_id();

        cpumask |= cpumask << 8;
        cpumask |= cpumask << 16;

        writel_relaxed(0, base + GIC_DIST_CTRL);

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources.
         */
        gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > 1020)
                gic_irqs = 1020;

        /*
         * Set all global interrupts to be level triggered, active low.
         */
        for (i = 32; i < gic_irqs; i += 16)
                writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

        /*
         * Set all global interrupts to this CPU only.
         */
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

        /*
         * Set priority on all global interrupts.
         */
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

        /*
         * Disable all interrupts. Leave the PPI and SGIs alone
         * as these enables are banked registers.
         */
        for (i = 32; i < gic_irqs; i += 32)
                writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

        /*
         * Limit number of interrupts registered to the platform maximum
         */
        irq_limit = gic->irq_offset + gic_irqs;
        if (WARN_ON(irq_limit > NR_IRQS))
                irq_limit = NR_IRQS;

        /*
         * Setup the Linux IRQ subsystem.
         */
        for (i = irq_start; i < irq_limit; i++) {
                irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
                irq_set_chip_data(i, gic);
                set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
        }

        writel_relaxed(1, base + GIC_DIST_CTRL);
}
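/*
 * The loop strides above mirror the distributor register packing: 16
 * interrupts per config word (i * 4 / 16 bytes), 4 per target/priority
 * word (i * 4 / 4 bytes) and 32 per enable word (i * 4 / 32 bytes). For
 * instance, the first pass with i = 32 touches config offset 8, target
 * and priority offset 32, and enable-clear offset 4.
 */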
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
        void __iomem *dist_base = gic->dist_base;
        void __iomem *base = gic->cpu_base;
        int i;

        /*
         * Deal with the banked PPI and SGI interrupts - disable all
         * PPI interrupts, ensure all SGI interrupts are enabled.
         */
        writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
        writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

        /*
         * Set priority on PPI and SGI interrupts
         */
        for (i = 0; i < 32; i += 4)
                writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

        writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
        writel_relaxed(1, base + GIC_CPU_CTRL);
}
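/*
 * The GIC treats lower numerical priority values as more urgent. Setting
 * the CPU interface priority mask to 0xf0 therefore allows the interrupts
 * programmed above at priority 0xa0 to be signalled, and the final write
 * of 1 to GIC_CPU_CTRL enables the CPU interface itself.
 */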
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
        void __iomem *dist_base, void __iomem *cpu_base)
{
        struct gic_chip_data *gic;

        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic = &gic_data[gic_nr];
        gic->dist_base = dist_base;
        gic->cpu_base = cpu_base;
        gic->irq_offset = (irq_start - 1) & ~31;

        if (gic_nr == 0)
                gic_cpu_base_addr = cpu_base;

        gic_dist_init(gic, irq_start);
        gic_cpu_init(gic);
}
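/*
 * Typical call from a platform's init_irq hook (illustrative only; the
 * IRQ base and the mapped register windows are hypothetical and board
 * specific):
 *
 *      gic_init(0, 29, dist_base, cpu_base);
 *
 * where dist_base/cpu_base are the ioremapped distributor and CPU
 * interface addresses. A chained secondary GIC would additionally be set
 * up with gic_init(1, ...) followed by gic_cascade_irq(1, <cascade irq>).
 */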
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic_cpu_init(&gic_data[gic_nr]);
}
void __cpuinit gic_enable_ppi(unsigned int irq)
{
        unsigned long flags;

        local_irq_save(flags);
        irq_set_status_flags(irq, IRQ_NOPROBE);
        gic_unmask_irq(irq_get_irq_data(irq));
        local_irq_restore(flags);
}
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        unsigned long map = *cpus_addr(*mask);

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        dsb();

        /* this always happens on GIC0 */
        writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
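/*
 * The value written to GIC_DIST_SOFTINT packs the target CPU mask into
 * bits [23:16] and the SGI number (0-15) into bits [3:0], so
 * "map << 16 | irq" sends software interrupt 'irq' to every CPU set in
 * 'map'.
 */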