/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"
struct gic_chip_data {
	void __iomem		*dist_base;
	void __iomem		**redist_base;
	void __iomem * __percpu	*rdist;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			redist_regions;
	unsigned int		irq_nr;
};
static struct gic_chip_data gic_data __read_mostly;
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdist))
#define gic_data_rdist_rd_base()	(*gic_data_rdist())
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
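/*
 * Each redistributor exposes (at least) two 64K frames: RD_base for the
 * control registers (GICR_CTLR, GICR_TYPER, GICR_WAKER, ...) and, 64K
 * above it, SGI_base holding the SGI/PPI configuration registers. The
 * macros above resolve both frames for the current CPU.
 */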
/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
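/*
 * GIC priorities are "lower is more urgent": an interrupt is only
 * signalled to the CPU if its priority is numerically below the value
 * programmed in ICC_PMR_EL1, so 0xf0 leaves almost the whole priority
 * range usable.
 */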
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}
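/*
 * Hardware interrupt numbers: 0-15 are SGIs and 16-31 are PPIs (both
 * per-CPU, handled via the redistributor), 32-1019 are SPIs (handled
 * via the distributor), and 8192 upwards would be LPIs, which this
 * driver does not support yet.
 */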
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	if (d->hwirq >= 8192)
		BUG();		/* LPI Detected!!! */

	return NULL;
}
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	};
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
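/*
 * RWP (Register Write Pending) only tracks the effects of a subset of
 * register writes, notably the GICD_CTLR enable bits and the
 * ICENABLER banks, so these waits are only meaningful after such
 * writes.
 */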
/* Low level accessors */
static u64 __maybe_unused gic_read_iar(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}
static void __maybe_unused gic_write_pmr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}
static void __maybe_unused gic_write_ctlr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}
static void __maybe_unused gic_write_grpen1(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}
static void __maybe_unused gic_write_sgi1r(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}
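/*
 * mrs_s/msr_s are assembler macros that encode these ICC_* system
 * registers by number, for toolchains that don't yet know them by name.
 */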
static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (count--) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	};
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	gic_configure_irq(irq, type, base, rwp_wait);

	return 0;
}
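/*
 * GICD_IROUTER expects the target CPU's affinity fields in the
 * MPIDR_EL1 layout: Aff3 in bits [39:32], Aff2 in [23:16], Aff1 in
 * [15:8] and Aff0 in [7:0], which is exactly what the shifts below
 * produce.
 */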
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
	u64 aff;

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020)) {
			int err;

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected SPI received!\n");
				gic_write_eoir(irqnr);
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
			/*
			 * Unlike GICv2, we don't need an smp_rmb() here.
			 * The control dependency from gic_read_iar to
			 * the ISB in gic_write_eoir is enough to ensure
			 * that any shared data read by handle_IPI will
			 * be read after the ACK.
			 */
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
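/*
 * INTIDs 1020-1023 are architecturally reserved: 1023 is the spurious
 * ID returned when no interrupt is pending, which is what terminates
 * the acknowledge loop above.
 */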
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
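/*
 * A redistributor region is a contiguous array of per-CPU redistributor
 * frames. GICR_TYPER[63:32] holds the affinity of the CPU each frame
 * serves, and GICR_TYPER.Last marks the final frame of a region, which
 * is how the walk below knows when to stop.
 */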
static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_base[i];
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = readq_relaxed(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				gic_data_rdist_rd_base() = ptr;
				pr_info("CPU%d: found redistributor %llx @%p\n",
					smp_processor_id(),
					(unsigned long long)mpidr, ptr);
				return 0;
			}

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	return -ENODEV;
}
static void gic_cpu_sys_reg_init(void)
{
	/* Enable system registers */
	gic_enable_sre();

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/* EOI deactivates interrupt too (mode 0) */
	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
#ifdef CONFIG_SMP
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}
/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu == nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
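/*
 * ICC_SGI1R_EL1 packs the SGI number, the target cluster
 * (Aff3.Aff2.Aff1) and a 16-bit target list in which each bit stands
 * for a CPU with Aff0 = 0..15 inside that cluster; hence the
 * 16-CPUs-per-cluster limit in gic_compute_target_list() above.
 */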
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu_mask(cpu, *mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);

	/*
	 * If the interrupt was enabled, enabled it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
};
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	switch (intspec[0]) {
	case 0:			/* SPI */
		*out_hwirq = intspec[1] + 32;
		break;
	case 1:			/* PPI */
		*out_hwirq = intspec[1] + 16;
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
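/*
 * The translation above implements the standard three-cell GIC DT
 * binding: cell 0 selects SPI (0) or PPI (1), cell 1 is the interrupt
 * number relative to that space, and cell 2 carries the trigger flags.
 */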
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	void __iomem **redist_base;
	u64 redist_stride;
	u32 redist_regions;
	u32 reg;
	int gic_irqs;
	int err;
	int i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		err = -ENODEV;
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
		redist_regions = 1;

	redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
	if (!redist_base) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < redist_regions; i++) {
		redist_base[i] = of_iomap(node, 1 + i);
		if (!redist_base[i]) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_data.dist_base = dist_base;
	gic_data.redist_base = redist_base;
	gic_data.redist_regions = redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
					      &gic_data);
	gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdist);
out_unmap_rdist:
	for (i = 0; i < redist_regions; i++)
		if (redist_base[i])
			iounmap(redist_base[i]);
	kfree(redist_base);
out_unmap_dist:
	iounmap(dist_base);

	return err;
}
IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);