// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/iopoll.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v3-prio.h>
#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/arm-smccc.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ;
static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 2)

#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)

static struct cpumask broken_rdists __read_mostly __maybe_unused;
struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	phys_addr_t		dist_phys_base;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	u64			flags;
	bool			has_rss;
	unsigned int		ppi_nr;
	struct partition_desc	**ppi_descs;
};
#define T241_CHIPS_MAX		4
static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);

static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
 * are potentially stolen by the secure side. Some code, especially code dealing
 * with hwirq IDs, is simplified by accounting for all 16.
 */
#define SGI_NR		16
/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * See GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}
static bool gic_has_group0(void)
{
	u32 val, old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}
static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static bool cpus_have_security_disabled __ro_after_init;
static bool cpus_have_group0 __ro_after_init;
static void __init gic_prio_init(void)
{
	cpus_have_security_disabled = gic_dist_security_disabled();
	cpus_have_group0 = gic_has_group0();

	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
	 * way priorities are presented in ICC_PMR_EL1 and in the distributor:
	 *
	 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
	 * -------------------------------------------------------
	 *      1       |      -      |  unchanged  |  unchanged
	 * -------------------------------------------------------
	 *      0       |      1      |  non-secure |  non-secure
	 * -------------------------------------------------------
	 *      0       |      0      |  unchanged  |  non-secure
	 *
	 * In the non-secure view reads and writes are modified:
	 *
	 * - A value written is right-shifted by one and the MSB is set,
	 *   forcing the priority into the non-secure range.
	 *
	 * - A value read is left-shifted by one.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified to
	 * be in the non-secure range, we program the non-secure values into
	 * the distributor to match the PMR values we want.
	 */
	if (cpus_have_group0 && !cpus_have_security_disabled) {
		dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
		dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
	}

	pr_info("GICD_CTRL.DS=%d, SCR_EL3.FIQ=%d\n",
		cpus_have_security_disabled,
		!cpus_have_group0);
}
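/*
 * Illustrative note (not part of the original source): assuming the
 * __gicv3_prio_to_ns() helper from <linux/irqchip/arm-gic-v3-prio.h>
 * left-shifts the priority into the non-secure range, a value such as 0xc0
 * would be programmed as (0xc0 << 1) & 0xff == 0x80; the (re)distributor's
 * non-secure write transformation ((v >> 1) | 0x80) then presents 0xc0 to
 * the CPU interface again, matching the unmodified ICC_PMR_EL1 view.
 */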
/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
static refcount_t *rdist_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
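/*
 * Illustrative note (not part of the original source): with ICC_PMR_EL1
 * programmed to DEFAULT_PMR_VALUE (0xf0), only interrupts whose priority is
 * numerically lower than 0xf0 (i.e. of higher priority) are signalled to the
 * CPU, which is sufficient since Linux only uses a single IRQ priority here.
 */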
enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}
static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}
static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
{
	if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		irq_hw_number_t hwirq = irqd_to_hwirq(d);
		u32 chip;

		/*
		 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
		 * registers are directed to the chip that owns the SPI. The
		 * alias region can also be used for writes to the GICD_In{E}
		 * registers, except GICD_ICENABLERn. Each chip has support
		 * for 320 {E}SPIs. Mappings for all 4 chips:
		 */
		switch (__get_intid_range(hwirq)) {
		case SPI_RANGE:
			chip = (hwirq - 32) / 320;
			break;
		case ESPI_RANGE:
			chip = 3;
			break;
		default:
			unreachable();
		}
		return t241_dist_base_alias[chip];
	}

	return gic_data.dist_base;
}
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;

	default:
		return NULL;
	}
}
static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT)
		pr_err_ratelimited("RWP timeout, gone fishing\n");
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 val;
	int ret;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
						enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT) {
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
	}
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_dist_base_alias(d);

	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	writel_relaxed(mask, base + offset + (index / 32) * 4);
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
	if (gic_irq_in_rdist(d))
		gic_redist_wait_for_rwp();
	else
		gic_dist_wait_for_rwp();
}
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);

	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		if (val) {
			gic_mask_irq(d);
			return 0;
		}
		reg = GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);

	/*
	 * Force read-back to guarantee that the active state has taken
	 * effect, and won't race with a guest-driven deactivation.
	 */
	if (reg == GICD_ISACTIVER)
		gic_peek_irq(d, reg);

	return 0;
}
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}
static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
	switch (__get_intid_range(hwirq)) {
	case PPI_RANGE:
		return hwirq - 16;
	case EPPI_RANGE:
		return hwirq - EPPI_BASE_INTID + 16;
	default:
		unreachable();
	}
}

static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
{
	switch (__get_intid_range(hwirq)) {
	case SGI_RANGE:
	case PPI_RANGE:
		return hwirq;
	case EPPI_RANGE:
		return hwirq - EPPI_BASE_INTID + 32;
	default:
		unreachable();
	}
}

static u32 gic_get_rdist_index(struct irq_data *d)
{
	return __gic_get_rdist_index(d->hwirq);
}
static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(irqd_to_hwirq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_rdist_index(d);

		/*
		 * Setting up a percpu interrupt as NMI, only switch handler
		 * for the first NMI
		 */
		if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
			refcount_set(&rdist_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, dist_prio_nmi);

	return 0;
}
static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(irqd_to_hwirq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_rdist_index(d);

		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}

	gic_irq_set_prio(d, dist_prio_irq);
}
static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
{
	enum gic_intid_range range;

	if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
		return false;

	range = get_intid_range(d);

	/*
	 * The workaround is needed if the IRQ is an SPI and
	 * the target cpu is different from the one we are
	 * executing on.
	 */
	return (range == SPI_RANGE || range == ESPI_RANGE) &&
		!cpumask_test_cpu(raw_smp_processor_id(),
				  irq_data_get_effective_affinity_mask(d));
}
static void gic_eoi_irq(struct irq_data *d)
{
	write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1);
	isb();

	if (gic_arm64_erratum_2941627_needed(d)) {
		/*
		 * Make sure the GIC stream deactivate packet
		 * issued by ICC_EOIR1_EL1 has completed before
		 * deactivating through GICD_IACTIVER.
		 */
		dsb(sy);
		gic_poke_irq(d, GICD_ICACTIVER);
	}
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;

	if (!gic_arm64_erratum_2941627_needed(d))
		gic_write_dir(irqd_to_hwirq(d));
	else
		gic_poke_irq(d, GICD_ICACTIVER);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	irq_hw_number_t irq = irqd_to_hwirq(d);
	enum gic_intid_range range;
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_dist_base_alias(d);

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (get_intid_range(d) == SGI_RANGE)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
static u64 gic_cpu_to_affinity(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 aff;

	/* ASR8601 needs to have its affinities shifted down... */
	if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
		mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
			 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
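/*
 * Illustrative example (not part of the original source): an MPIDR with
 * Aff3=0x01, Aff2=0x02, Aff1=0x03 and Aff0=0x04 is packed by the helper
 * above into 0x0000000100020304, the layout expected by GICD_IROUTERn and
 * by GICR_TYPER affinity matching.
 */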
static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		write_gicreg(irqnr, ICC_EOIR1_EL1);
		isb();
	}
}
/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key))
		write_gicreg(irqnr, ICC_EOIR1_EL1);

	isb();
}
static bool gic_rpr_is_nmi_prio(void)
{
	if (!gic_supports_nmi())
		return false;

	return unlikely(gic_read_rpr() == GICV3_PRIO_NMI);
}

static bool gic_irqnr_is_special(u32 irqnr)
{
	return irqnr >= 1020 && irqnr <= 1023;
}
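/*
 * Note (not part of the original source): INTIDs 1020-1023 are reserved by
 * the GICv3 architecture for special signalling (1023 in particular means
 * "no pending interrupt"), so the handlers below return early for them
 * without EOIing or dispatching anything.
 */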
static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}
/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
	u32 irqnr;

	irqnr = gic_read_iar();

	is_nmi = gic_rpr_is_nmi_prio();

	if (is_nmi) {
		nmi_enter();
		__gic_handle_nmi(irqnr, regs);
		nmi_exit();
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	if (!is_nmi)
		__gic_handle_irq(irqnr, regs);
}
/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
	u64 pmr;
	u32 irqnr;

	/*
	 * We were in a context with IRQs disabled. However, the
	 * entry code has set PMR to a value that allows any
	 * interrupt to be acknowledged, and not just NMIs. This can
	 * lead to surprising effects if the NMI has been retired in
	 * the meantime, and there is an IRQ pending. The IRQ
	 * would then be taken in NMI context, something that nobody
	 * wants to debug twice.
	 *
	 * Until we sort this, drop PMR again to a level that will
	 * actually only allow NMIs before reading IAR, and then
	 * restore it to what it was.
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq),
			       base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff */
	gic_dist_config(base, GIC_LINE_NR, dist_prio_irq);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1, and wait for it to drain */
	writel_relaxed(val, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_cpu_to_affinity(smp_processor_id());
	for (i = 32; i < GIC_LINE_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

	for (i = 0; i < GIC_ESPI_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr;
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	mpidr = gic_cpu_to_affinity(smp_processor_id());

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;

		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}
static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}
static int __gic_update_rdist_properties(struct redist_region *region,
					 void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

	/* Boot-time cleanup */
	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
		u64 val;

		/* Deactivate any present vPE */
		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
		if (val & GICR_VPENDBASER_Valid)
			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
					      ptr + SZ_128K + GICR_VPENDBASER);

		/* Mark the VPE table as invalid */
		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
		val &= ~GICR_VPROPBASER_4_1_VALID;
		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
	}

	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

	/*
	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
	 * that the ITS driver can make use of for LPIs (and not VLPIs).
	 *
	 * These are 3 different ways to express the same thing, depending
	 * on the revision of the architecture and its relaxations over
	 * time. Just group them under the 'direct_lpi' banner.
	 */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   !!(ctlr & GICR_CTLR_IR) |
					   gic_data.rdists.has_rvpeid);
	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

	/* Detect non-sensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}
static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("GICv3 features: %d PPIs%s%s\n",
		gic_data.ppi_nr,
		gic_data.has_rss ? ", RSS" : "",
		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

	if (gic_data.rdists.has_vlpis)
		pr_info("GICv4 features: %s%s%s\n",
			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
static void gic_cpu_sys_reg_enable(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = gic_cpu_to_affinity(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Check that all CPUs use the same priority space.
		 *
		 * If there's a mismatch with the boot CPU, the system is
		 * likely to die as interrupt masking will not work properly on
		 * all CPUs.
		 */
		WARN_ON(group0 != cpus_have_group0);
		WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)gic_cpu_to_affinity(i));
	}

	/*
	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 *   - The write is ignored.
	 *   - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
		pr_crit_once("RSS is required but GICD doesn't support it\n");
}
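/*
 * Illustrative note (not part of the original source): MPIDR_RS() extracts
 * Aff0[7:4]. An SGI target list only covers Aff0 values 0-15, so a CPU with,
 * say, Aff0 == 0x23 lives in range selector RS=2 and can only be targeted
 * when both the distributor (GICD_TYPER.RSS) and the CPU interface
 * (ICC_CTLR_EL1.RSS) support range selection, which is what the checks
 * above verify.
 */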
static bool gicv3_nolpi;
static int __init gicv3_nolpi_cfg(char *buf)
{
	return kstrtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
static int gic_dist_supports_lpis(void)
{
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
}
static void gic_cpu_init(void)
{
	void __iomem *rbase;
	int i;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
	     "Distributor has extended ranges, but CPU%d doesn't\n",
	     smp_processor_id());

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

	gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq);
	gic_redist_wait_for_rwp();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

/*
 * gic_starting_cpu() is called after the last point where cpuhp is allowed
 * to fail. So pre check for problems earlier.
 */
static int gic_check_rdist(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &broken_rdists))
		return -EINVAL;

	return 0;
}

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_sys_reg_enable();
	gic_cpu_init();

	if (gic_dist_supports_lpis())
		its_cpu_init();

	return 0;
}
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr;
	u16 tlist = 0;

	mpidr = gic_cpu_to_affinity(cpu);

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = gic_cpu_to_affinity(cpu);

		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       MPIDR_TO_SGI_RS(cluster_id)		|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
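/*
 * Illustrative example (not part of the original source): sending SGI 3 to
 * CPUs that share cluster 0x0100 and have Aff0 = 0, 1 and 3 yields a target
 * list of 0b1011, so the single ICC_SGI1R_EL1 write above combines that
 * tlist with the cluster's Aff3/Aff2/Aff1 fields, the range selector and
 * SGI ID 3.
 */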
static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;

	if (WARN_ON(d->hwirq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb(ishst);

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, d->hwirq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
static void __init gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data.fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
				  "irqchip/arm/gicv3:checkrdist",
				  gic_check_rdist, NULL);

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);

	/* Register all 8 non-secure SGIs */
	base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_cpu_to_affinity(cpu);

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while(0)
#endif
static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_enable();
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

	if (static_branch_likely(&supports_deactivate_key))
		chip = &gic_eoimode1_chip;

	switch (__get_intid_range(hw)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;

	case SPI_RANGE:
	case ESPI_RANGE:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;

	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;

	default:
		return -EPERM;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		case GIC_IRQ_TYPE_PARTITION:
			*hwirq = fwspec->param[1];
			if (fwspec->param[1] >= 16)
				*hwirq += EPPI_BASE_INTID - 16;
			else
				*hwirq += 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/*
		 * Make it clear that broken DTs are... broken.
		 * Partitioned PPIs are an unfortunate exception.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE &&
			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}
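/*
 * Illustrative example (not part of the original source): a DT specifier of
 * <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH> (param[0] == 0) translates to hwirq
 * 42 + 32 = 74, since SPIs start at INTID 32, while <GIC_PPI 9 ...> maps to
 * hwirq 9 + 16 = 25.
 */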
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}
static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
				      irq_hw_number_t hwirq)
{
	enum gic_intid_range range;

	if (!gic_data.ppi_descs)
		return false;

	if (!is_of_node(fwspec->fwnode))
		return false;

	if (fwspec->param_count < 4 || !fwspec->param[3])
		return false;

	range = __get_intid_range(hwirq);
	if (range != PPI_RANGE && range != EPPI_RANGE)
		return false;

	return true;
}
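/*
 * Illustrative DT fragment (not part of the original source; binding details
 * may vary): a partitioned PPI carries a fourth cell pointing at a
 * ppi-partitions child node, e.g.
 *     interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &part0>;
 * which is what the fwspec->param[3] check above detects.
 */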
static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	unsigned int type, ret, ppi_idx;
	irq_hw_number_t hwirq;

	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* Handle pure domain searches */
	if (!fwspec->param_count)
		return d->bus_token == bus_token;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
	if (WARN_ON_ONCE(ret))
		return 0;

	if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
		return d == gic_data.domain;

	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
	ppi_idx = __gic_get_ppi_index(hwirq);
	return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};
static int partition_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	unsigned long ppi_intid;
	struct device_node *np;
	unsigned int ppi_idx;
	int ret;

	if (!gic_data.ppi_descs)
		return -ENOMEM;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;

	ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
	if (WARN_ON_ONCE(ret))
		return 0;

	ppi_idx = __gic_get_ppi_index(ppi_intid);
	ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};
static bool gic_enable_quirk_msm8996(void *data)
{
	struct gic_chip_data *d = data;

	d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

	return true;
}

static bool gic_enable_quirk_cavium_38539(void *data)
{
	struct gic_chip_data *d = data;

	d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;

	return true;
}

static bool gic_enable_quirk_hip06_07(void *data)
{
	struct gic_chip_data *d = data;

	/*
	 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
	 * not being an actual ARM implementation). The saving grace is
	 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
	 * HIP07 doesn't even have a proper IIDR, and still pretends to
	 * have ESPI. In both cases, put them right.
	 */
	if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
		/* Zero both ESPI and the RES0 field next to it... */
		d->rdists.gicd_typer &= ~GENMASK(9, 8);
		return true;
	}

	return false;
}
#define T241_CHIPN_MASK		GENMASK_ULL(45, 44)
#define T241_CHIP_GICDA_OFFSET	0x1580000
#define SMCCC_SOC_ID_T241	0x036b0241

static bool gic_enable_quirk_nvidia_t241(void *data)
{
	s32 soc_id = arm_smccc_get_soc_id_version();
	unsigned long chip_bmask = 0;
	phys_addr_t phys;
	u32 i;

	/* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
	if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
		return false;

	/* Find the chips based on GICR regions PHYS addr */
	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
				  (u64)gic_data.redist_regions[i].phys_base));
	}

	if (hweight32(chip_bmask) < 3)
		return false;

	/* Setup GICD alias regions */
	for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
		if (chip_bmask & BIT(i)) {
			phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
			phys |= FIELD_PREP(T241_CHIPN_MASK, i);
			t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
			WARN_ON_ONCE(!t241_dist_base_alias[i]);
		}
	}
	static_branch_enable(&gic_nvidia_t241_erratum);
	return true;
}
static bool gic_enable_quirk_asr8601(void *data)
{
	struct gic_chip_data *d = data;

	d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;

	return true;
}

static bool gic_enable_quirk_arm64_2941627(void *data)
{
	static_branch_enable(&gic_arm64_2941627_erratum);
	return true;
}

static bool rd_set_non_coherent(void *data)
{
	struct gic_chip_data *d = data;

	d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
	return true;
}
static const struct gic_quirk gic_quirks[] = {
	{
		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
		.compatible = "qcom,msm8996-gic-v3",
		.init	= gic_enable_quirk_msm8996,
	},
	{
		.desc	= "GICv3: ASR erratum 8601001",
		.compatible = "asr,asr8601-gic-v3",
		.init	= gic_enable_quirk_asr8601,
	},
	{
		.desc	= "GICv3: HIP06 erratum 161010803",
		.init	= gic_enable_quirk_hip06_07,
	},
	{
		.desc	= "GICv3: HIP07 erratum 161010803",
		.init	= gic_enable_quirk_hip06_07,
	},
	{
		/*
		 * Reserved register accesses generate a Synchronous
		 * External Abort. This erratum applies to:
		 * - ThunderX: CN88xx
		 * - OCTEON TX: CN83xx, CN81xx
		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
		 */
		.desc	= "GICv3: Cavium erratum 38539",
		.init	= gic_enable_quirk_cavium_38539,
	},
	{
		.desc	= "GICv3: NVIDIA erratum T241-FABRIC-4",
		.init	= gic_enable_quirk_nvidia_t241,
	},
	{
		/*
		 * GIC-700: 2941627 workaround - IP variant [0,1]
		 */
		.desc	= "GICv3: ARM64 erratum 2941627",
		.init	= gic_enable_quirk_arm64_2941627,
	},
	{
		/*
		 * GIC-700: 2941627 workaround - IP variant [2]
		 */
		.desc	= "GICv3: ARM64 erratum 2941627",
		.init	= gic_enable_quirk_arm64_2941627,
	},
	{
		.desc	= "GICv3: non-coherent attribute",
		.property = "dma-noncoherent",
		.init	= rd_set_non_coherent,
	},
	{
	}
};
static void gic_enable_nmi_support(void)
{
	int i;

	if (!gic_prio_masking_enabled())
		return;

	rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
				 sizeof(*rdist_nmi_refs), GFP_KERNEL);
	if (!rdist_nmi_refs)
		return;

	for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
		refcount_set(&rdist_nmi_refs[i], 0);

	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
		gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");

	static_branch_enable(&supports_pseudo_nmis);

	if (static_branch_likely(&supports_deactivate_key))
		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
	else
		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
}
static int __init gic_init_bases(phys_addr_t dist_phys_base,
				 void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_phys_base = dist_phys_base;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		/* Only advertise GICv4.x features when the T241-FABRIC-4 erratum is absent */
		gic_data.rdists.has_rvpeid = true;
		gic_data.rdists.has_vlpis = true;
		gic_data.rdists.has_direct_lpi = true;
		gic_data.rdists.has_vpend_valid_dirty = true;
	}

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);

	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	gic_cpu_sys_reg_enable();
	gic_prio_init();
	gic_dist_init();
	gic_cpu_init();
	gic_enable_nmi_support();
	gic_smp_init();
	gic_cpu_pm_init();

	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq);
		its_cpu_init();
		its_lpi_memreserve_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
	if (!gic_data.ppi_descs)
		goto out_put_node;

	nr_parts = of_get_child_count(parts_node);
	if (!nr_parts)
		goto out_put_node;

	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %pOFn[%d] { ",
			child_part, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = of_cpu_node_to_id(cpu_node);
			if (WARN_ON(cpu < 0)) {
				of_node_put(cpu_node);
				continue;
			}

			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
			of_node_put(cpu_node);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < gic_data.ppi_nr; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode		= gic_data.fwnode,
			.param_count	= 3,
			.param		= {
				[0]	= GIC_IRQ_TYPE_PARTITION,
				[1]	= i,
				[2]	= IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}

out_put_node:
	of_node_put(parts_node);
}
static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
{
	int ret;
	struct resource r;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}
static void gic_request_region(resource_size_t base, resource_size_t size,
			       const char *name)
{
	if (!request_mem_region(base, size, name))
		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
			     name, &base);
}

static void __iomem *gic_of_iomap(struct device_node *node, int idx,
				  const char *name, struct resource *res)
{
	void __iomem *base;
	int ret;

	ret = of_address_to_resource(node, idx, res);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	gic_request_region(res->start, resource_size(res), name);
	base = of_iomap(node, idx);

	return base ?: IOMEM_ERR_PTR(-ENOMEM);
}
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	phys_addr_t dist_phys_base;
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	struct resource res;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = gic_of_iomap(node, 0, "GICD", &res);
	if (IS_ERR(dist_base)) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return PTR_ERR(dist_base);
	}

	dist_phys_base = res.start;

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
		if (IS_ERR(rdist_regs[i].redist_base)) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
			     nr_redist_regions, redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node, nr_redist_regions);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;
static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}
static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
		(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	if (acpi_get_madt_revision() >= 7 &&
	    (redist->flags & ACPI_MADT_GICR_NON_COHERENT))
		gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	gic_request_region(redist->base_address, redist->length, "GICR");

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}
static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* Neither enabled nor online capable means it doesn't exist, skip it */
	if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
		return 0;

	/*
	 * Capable but disabled CPUs can be brought online later. What about
	 * the redistributor? ACPI doesn't want to say!
	 * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
	 * Otherwise, prevent such CPUs from being brought online.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED)) {
		int cpu = get_cpu_for_acpi_id(gicc->uid);

		pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
		if (cpu >= 0)
			cpumask_set_cpu(cpu, &broken_rdists);
		return 0;
	}

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;
	gic_request_region(gicc->gicr_base_address, size, "GICR");

	if (acpi_get_madt_revision() >= 7 &&
	    (gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
		gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}
static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}
static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has a valid gicr base address, then it means
	 * GICR base is presented via GICC. The redistributor is only known to
	 * be accessible if the GICC is marked as enabled. If this bit is not
	 * set, we'd need to add the redistributor at runtime, which isn't
	 * supported.
	 */
	if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
		acpi_data.enabled_rdists++;

	return 0;
}
static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor descriptions: GICR and GICC subtables have to
	 * be mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}
static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}
static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	if (!(gicc->flags &
	      (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}
static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE	(SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}
static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}
	gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(dist->base_address, acpi_data.dist_base,
			     acpi_data.redist_regs, acpi_data.nr_redist_regions,
			     0, gsi_domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);