/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"
struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};
struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	unsigned int		irq_nr;
	struct partition_desc	*ppi_descs[16];
};
static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_kvm_info gic_v3_kvm_info;
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
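/*
 * Each redistributor is a pair of contiguous 64K frames: RD_base holds
 * the control and LPI registers, and SGI_base (the next 64K frame) holds
 * the SGI/PPI registers, which is why the accessor above simply adds
 * SZ_64K. GICv4 implementations add two further frames (VLPI_base and a
 * reserved page) after those.
 */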
/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}
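/*
 * The hwirq numbering above follows the GICv3 INTID space: 0-15 are
 * SGIs, 16-31 are PPIs (both per-CPU, hence "in rdist"), 32-1019 are
 * SPIs, 1020-1023 are special/spurious, and 8192 upwards are LPIs.
 */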
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	};
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
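/*
 * RWP ("Register Write Pending") stays set while the effects of an
 * earlier control register write (such as disabling the distributor or
 * clearing an enable bit) are still propagating through the GIC, so a
 * change must not be assumed visible until the bit clears.
 */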
#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	};
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
			 type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
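/*
 * This packs the MPIDR affinity fields into the GICD_IROUTER layout:
 * Aff3 in bits [39:32], then Aff2, Aff1 and Aff0 in bits [23:16],
 * [15:8] and [7:0] respectively.
 */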
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);
			else
				isb();

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			/*
			 * Unlike GICv2, we don't need an smp_rmb() here.
			 * The control dependency from gic_read_iar to
			 * the ISB in gic_write_eoir is enough to ensure
			 * that any shared data read by handle_IPI will
			 * be read after the ACK.
			 */
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < gic_data.irq_nr; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}
static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}
static int __gic_update_vlpi_properties(struct redist_region *region,
					void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
	gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);

	return 1;
}

static void gic_update_vlpi_properties(void)
{
	gic_iterate_rdists(__gic_update_vlpi_properties);
	pr_info("%sVLPI support, %sdirect LPI support\n",
		!gic_data.rdists.has_vlpis ? "no " : "",
		!gic_data.rdists.has_direct_lpi ? "no " : "");
}
static void gic_cpu_sys_reg_init(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}
static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	writel_relaxed(~0, rbase + GICR_IGROUPR0);

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
#ifdef CONFIG_SMP

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();
	return 0;
}
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
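/*
 * ICC_SGI1R_EL1 packs the destination as Aff3 in bits [55:48], Aff2 in
 * [39:32] and Aff1 in [23:16], with the SGI number in [27:24] and a
 * 16-bit target list (one bit per Aff0 value within the cluster) in
 * [15:0] - hence the 16-CPUs-per-cluster limit above.
 */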
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	wmb();

	for_each_cpu(cpu, mask) {
		unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif
#ifdef CONFIG_CPU_PM
/* Check whether it's single security state view */
static bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
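/*
 * The two flavours differ only in how an interrupt's life cycle ends:
 * gic_chip runs with EOImode 0, where the EOI both drops priority and
 * deactivates, while gic_eoimode1_chip splits the two (EOI drops
 * priority, an explicit DIR deactivates) so that a forwarded interrupt
 * can be left active for the vcpu to complete.
 */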
#define GIC_ID_NR		(1U << gic_data.rdists.id_bits)
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate))
		chip = &gic_eoimode1_chip;

	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;
	/* Off limits */
	if (hw >= GIC_ID_NR)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
	}

	return 0;
}
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];
		return 0;
	}

	return -EINVAL;
}
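/*
 * For example, the DT specifier <0 23 IRQ_TYPE_LEVEL_HIGH> decodes to
 * SPI 23 (hwirq 55, after the +32 offset above), while <1 11 ...>
 * decodes to PPI 11 (hwirq 27, after the +16 offset).
 */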
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}
static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
	if (fwspec->param_count >= 4 &&
	    fwspec->param[0] == 1 && fwspec->param[3] != 0)
		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);

	return d == gic_data.domain;
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};
static int partition_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;

	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
= {
962 .translate
= partition_domain_translate
,
963 .select
= gic_irq_domain_select
,
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int gic_irqs;
	int err;

	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	gic_update_vlpi_properties();

	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_init(handle, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}
static int get_cpu_number(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;
	int i;

	cell = of_get_property(dn, "reg", NULL);
	if (!cell)
		return -1;

	hwid = of_read_number(cell, of_n_addr_cells(dn));

	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK)
		return -1;

	for (i = 0; i < num_possible_cpus(); i++)
		if (cpu_logical_map(i) == hwid)
			return i;

	return -1;
}
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	nr_parts = of_get_child_count(parts_node);

	if (!nr_parts)
		return;

	parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
	if (WARN_ON(!parts))
		return;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %s[%d] { ",
			child_part->name, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = get_cpu_number(cpu_node);
			if (WARN_ON(cpu == -1))
				continue;

			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < 16; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode		= gic_data.fwnode,
			.param_count	= 3,
			.param		= {
				[0]	= 1,
				[1]	= i,
				[2]	= IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);
	gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
#ifdef CONFIG_ACPI

static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;
static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}
static int __init
gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
			(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}
static int __init
gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}
static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}
static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}
static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has valid gicr base address, then it means
	 * GICR base is presented via GICC
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
		return 0;

	return -ENODEV;
}
static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor description, GICR and GICC subtables have to be
	 * mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0)
		acpi_data.single_redist = true;

	return count;
}
static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}
static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}
static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}
#define ACPI_GICV3_DIST_MEM_SIZE	(SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
static int __init
gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
	gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);

#endif