// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 *
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */
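
/*
 * Example interrupt specifiers (illustrative only), using the constants from
 * dt-bindings/interrupt-controller/apple-aic.h:
 *
 *   interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>;
 *       hardware IRQ 42 on die 0
 *   interrupts = <AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>;
 *       the virtual HV timer FIQ
 */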

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/apple_m1_pmu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC v1 registers (MMIO)
 */

#define AIC_INFO		0x0004
#define AIC_INFO_NR_IRQ		GENMASK(15, 0)

#define AIC_CONFIG		0x0010

#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define AIC_EVENT_DIE		GENMASK(31, 24)
#define AIC_EVENT_TYPE		GENMASK(23, 16)
#define AIC_EVENT_NUM		GENMASK(15, 0)

#define AIC_EVENT_TYPE_FIQ	0 /* Software use */
#define AIC_EVENT_TYPE_IRQ	1
#define AIC_EVENT_TYPE_IPI	4
#define AIC_EVENT_IPI_OTHER	1
#define AIC_EVENT_IPI_SELF	2

#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028

#define AIC_IPI_SEND_CPU(cpu)	BIT(cpu)

#define AIC_IPI_OTHER		BIT(0)
#define AIC_IPI_SELF		BIT(31)

#define AIC_TARGET_CPU		0x3000

#define AIC_CPU_IPI_SET(cpu)	(0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu)	(0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
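
/*
 * Note (added): the explicit per-CPU IPI register views are spaced 0x80 bytes
 * apart, e.g. AIC_CPU_IPI_SET(2) resolves to 0x5008 + (2 << 7) = 0x5108.
 */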

#define AIC_MAX_IRQ		0x400

/*
 * AIC v2 registers (MMIO)
 */

#define AIC2_VERSION		0x0000
#define AIC2_VERSION_VER	GENMASK(7, 0)

#define AIC2_INFO1		0x0004
#define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
#define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)

#define AIC2_INFO2		0x0008

#define AIC2_INFO3		0x000c
#define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
#define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)

#define AIC2_RESET		0x0010
#define AIC2_RESET_RESET	BIT(0)

#define AIC2_CONFIG		0x0014
#define AIC2_CONFIG_ENABLE	BIT(0)
#define AIC2_CONFIG_PREFER_PCPU	BIT(28)

#define AIC2_TIMEOUT		0x0028
#define AIC2_CLUSTER_PRIO	0x0030
#define AIC2_DELAY_GROUPS	0x0100

#define AIC2_IRQ_CFG		0x2000

/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
 *
 * Repeat for each die:
 *   IRQ_CFG:   u32 * MAX_IRQS
 *   SW_SET:    u32 * (MAX_IRQS / 32)
 *   SW_CLR:    u32 * (MAX_IRQS / 32)
 *   MASK_SET:  u32 * (MAX_IRQS / 32)
 *   MASK_CLR:  u32 * (MAX_IRQS / 32)
 *   HW_STATE:  u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */
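
/*
 * Illustrative arithmetic (assumption, not read from hardware): with
 * MAX_IRQS = 4096, one die block spans 4096 * 4 = 0x4000 bytes of IRQ_CFG
 * plus five bitmap arrays of (4096 / 32) * 4 = 0x200 bytes each, i.e. a die
 * stride of 0x4a00 bytes. The driver computes this value at probe time as
 * info.die_stride rather than hard-coding it.
 */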

#define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
#define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)

#define MASK_REG(x)		(4 * ((x) >> 5))
#define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
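
/*
 * Example (added for clarity): hwirq 70 lives in mask word 70 >> 5 = 2 and
 * bit 70 & 31 = 6, so MASK_REG(70) = 8 (byte offset) and MASK_BIT(70) = BIT(6).
 */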

/*
 * IMP-DEF sysregs that control FIQ sources
 */

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1	sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1	sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU			GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER			GENMASK(23, 16)
#define IPI_RR_TYPE			GENMASK(29, 28)
#define IPI_RR_IMMEDIATE		0
#define IPI_RR_RETRACT			1
#define IPI_RR_DEFERRED			2
#define IPI_RR_NOWAKE			3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1		sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING			BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2	sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V		BIT(0)
#define VM_TMR_FIQ_ENABLE_P		BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1		sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1		sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE			GENMASK(18, 16)
#define UPMCR0_IMODE_OFF		0
#define UPMCR0_IMODE_AIC		2
#define UPMCR0_IMODE_HALT		3
#define UPMCR0_IMODE_FIQ		4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1		sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT			BIT(0)

#define MPIDR_CPU(x)		MPIDR_AFFINITY_LEVEL(x, 0)
#define MPIDR_CLUSTER(x)	MPIDR_AFFINITY_LEVEL(x, 1)

#define AIC_IRQ_HWIRQ(die, irq)	(FIELD_PREP(AIC_EVENT_DIE, die) | \
				 FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, irq))
#define AIC_FIQ_HWIRQ(x)	(FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, x))
#define AIC_HWIRQ_IRQ(x)	FIELD_GET(AIC_EVENT_NUM, x)
#define AIC_HWIRQ_DIE(x)	FIELD_GET(AIC_EVENT_DIE, x)
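
/*
 * Example (added for clarity): AIC_IRQ_HWIRQ(1, 5) encodes to 0x01010005,
 * i.e. die 1 in bits 31:24, AIC_EVENT_TYPE_IRQ in bits 23:16 and IRQ number 5
 * in bits 15:0, matching the layout of the AIC_EVENT register.
 */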

#define AIC_NR_SWIPI		32

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */
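
/*
 * Example (added): at EL1, a DT request for AIC_TMR_GUEST_PHYS is remapped to
 * AIC_TMR_EL0_PHYS, since the guest's timer is then the one reached through
 * the plain _EL0 registers, while requests for the HV timers cannot be
 * satisfied in that configuration.
 */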

enum fiq_hwirq {
	/* Must be ordered as in apple-aic.h */
	AIC_TMR_EL0_PHYS	= AIC_TMR_HV_PHYS,
	AIC_TMR_EL0_VIRT	= AIC_TMR_HV_VIRT,
	AIC_TMR_EL02_PHYS	= AIC_TMR_GUEST_PHYS,
	AIC_TMR_EL02_VIRT	= AIC_TMR_GUEST_VIRT,
	AIC_CPU_PMU_Effi	= AIC_CPU_PMU_E,
	AIC_CPU_PMU_Perf	= AIC_CPU_PMU_P,
	/* No need for this to be discovered from DT */
	AIC_VGIC_MI,
	AIC_NR_FIQ
};

/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */
static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */
static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi);

struct aic_info {
	int version;

	/* Register offsets */
	u32 event;
	u32 target_cpu;
	u32 irq_cfg;
	u32 sw_set;
	u32 sw_clr;
	u32 mask_set;
	u32 mask_clr;

	u32 die_stride;

	/* Features */
	bool fast_ipi;
	bool local_fast_ipi;
};

static const struct aic_info aic1_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,
};

static const struct aic_info aic1_fipi_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,

	.fast_ipi	= true,
};

static const struct aic_info aic1_local_fipi_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,

	.fast_ipi	= true,
	.local_fast_ipi = true,
};

static const struct aic_info aic2_info __initconst = {
	.version	= 2,

	.irq_cfg	= AIC2_IRQ_CFG,

	.fast_ipi	= true,
	.local_fast_ipi = true,
};

static const struct of_device_id aic_info_match[] = {
	{
		.compatible = "apple,t8103-aic",
		.data = &aic1_local_fipi_info,
	},
	{
		.compatible = "apple,t8015-aic",
		.data = &aic1_fipi_info,
	},
	{
		.compatible = "apple,aic",
		.data = &aic1_info,
	},
	{
		.compatible = "apple,aic2",
		.data = &aic2_info,
	},
	{}
};

struct aic_irq_chip {
	void __iomem *base;
	void __iomem *event;
	struct irq_domain *hw_domain;

	struct {
		cpumask_t aff;
	} *fiq_aff[AIC_NR_FIQ];

	int nr_irq;
	int max_irq;
	int nr_die;
	int max_die;

	struct aic_info info;
};

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
	return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
	writel_relaxed(val, ic->base + reg);
}

static void aic_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_masked(d))
		aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->event + ic->info.event);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		if (type == AIC_EVENT_TYPE_IRQ)
			generic_handle_domain_irq(aic_irqc->hw_domain, event);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. It should however only trigger when NV is
	 * in use, and be cleared when coming back from the handler.
	 */
	if (is_kernel_in_hyp_mode() &&
	    (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_VGIC_MI));

		if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
			     read_sysreg_s(SYS_ICH_MISR_EL2))) {
			pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
			sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
		}
	}
}

static int aic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	int cpu;

	BUG_ON(!ic->info.target_cpu);

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
	/*
	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
	 * have a way to find out the type of any given IRQ, so just allow both.
	 */
	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};

static struct irq_chip aic2_chip = {
	.name = "AIC2",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_type = aic_irq_set_type,
};

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}

static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_mask(struct irq_data *d)
{
	aic_fiq_set_mask(d);
	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
	aic_fiq_clear_mask(d);
	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}
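
/*
 * Note (added): TIMER_FIRING() below evaluates to true when the timer is
 * enabled, its interrupt is not masked at the timer level (IT_MASK clear),
 * and its interrupt status bit (IT_STAT) is set.
 */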

#define TIMER_FIRING(x)							\
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |	\
		 ARCH_TIMER_CTRL_IT_STAT)) ==				\
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */

	if (static_branch_likely(&use_fast_ipi) &&
	    (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING))
		aic_handle_ipi(regs);

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
	}

	if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
		int irq;

		if (cpumask_test_cpu(smp_processor_id(),
				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
			irq = AIC_CPU_PMU_P;
		else
			irq = AIC_CPU_PMU_E;
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(irq));
	}

	if (static_branch_likely(&use_fast_ipi) &&
	    (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
	struct irq_chip *chip = &aic_chip;

	if (ic->info.version == 2)
		chip = &aic2_chip;

	if (type == AIC_EVENT_TYPE_IRQ) {
		irq_domain_set_info(id, irq, hw, chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		int fiq = FIELD_GET(AIC_EVENT_NUM, hw);

		switch (fiq) {
		case AIC_CPU_PMU_P:
		case AIC_CPU_PMU_E:
			irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
			break;
		default:
			irq_set_percpu_devid(irq);
			break;
		}

		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 *args;
	u32 die = 0;

	if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
	    !is_of_node(fwspec->fwnode))
		return -EINVAL;

	args = &fwspec->param[1];

	if (fwspec->param_count == 4) {
		die = args[0];
		args++;
	}

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (die >= ic->nr_die)
			return -EINVAL;
		if (args[0] >= ic->nr_irq)
			return -EINVAL;
		*hwirq = AIC_IRQ_HWIRQ(die, args[0]);
		break;
	case AIC_FIQ:
		if (die != 0)
			return -EINVAL;
		if (args[0] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = AIC_FIQ_HWIRQ(args[0]);

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (args[0]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				return -ENODEV;
			default:
				break;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	*type = args[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int i, ret;

	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate	= aic_irq_domain_translate,
	.alloc		= aic_irq_domain_alloc,
	.free		= aic_irq_domain_free,
};

static void aic_ipi_send_fast(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 my_mpidr = read_cpuid_mpidr();
	u64 cluster = MPIDR_CLUSTER(mpidr);
	u64 idx = MPIDR_CPU(mpidr);

	if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) {
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1);
	} else {
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
			       SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
	}
	isb();
}

static void aic_handle_ipi(struct pt_regs *regs)
{
	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 *
	 * For the Fast IPI case, this needs to be ordered before the vIPI
	 * handling below, so we need to isb();
	 */
	if (static_branch_likely(&use_fast_ipi)) {
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		isb();
	} else {
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
	}

	ipi_mux_process();

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	if (!static_branch_likely(&use_fast_ipi))
		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static void aic_ipi_send_single(unsigned int cpu)
{
	if (static_branch_likely(&use_fast_ipi))
		aic_ipi_send_fast(cpu);
	else
		aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
}

static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
	int base_ipi;

	base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
	if (WARN_ON(base_ipi <= 0))
		return -ENODEV;

	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

	return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	if (static_branch_likely(&use_fast_ipi))
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	if (static_branch_likely(&use_fast_ipi)) {
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}

	/* Commit all of the above */
	isb();

	if (aic_irqc->info.version == 1) {
		/*
		 * Make sure the kernel's idea of logical CPU order is the same as AIC's
		 * If we ever end up with a mismatch here, we will have to introduce
		 * a mapping table similar to what other irqchip drivers do.
		 */
		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

		/*
		 * Always keep IPIs unmasked at the hardware level (except auto-masking
		 * by AIC during processing). We manage masks at the vIPI level.
		 * These registers only exist on AICv1, AICv2 always uses fast IPIs.
		 */
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
		if (static_branch_likely(&use_fast_ipi)) {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
		} else {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
		}
	}

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}

static struct gic_kvm_info vgic_info __initdata = {
	.type			= GIC_V3,
	.no_maint_irq_mask	= true,
	.no_hw_deactivation	= true,
};

static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
{
	int i, n;
	u32 fiq;

	if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
	    WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
		return;

	n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
	if (WARN_ON(n < 0))
		return;

	ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
	if (!ic->fiq_aff[fiq])
		return;

	for (i = 0; i < n; i++) {
		struct device_node *cpu_node;
		u32 cpu_phandle;
		int cpu;

		if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
			continue;

		cpu_node = of_find_node_by_phandle(cpu_phandle);
		if (WARN_ON(!cpu_node))
			continue;

		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		if (WARN_ON(cpu < 0))
			continue;

		cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
	}
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i, die;
	u32 off, start_off;
	void __iomem *regs;
	struct aic_irq_chip *irqc;
	struct device_node *affs;
	const struct of_device_id *match;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	irqc->base = regs;

	match = of_match_node(aic_info_match, node);
	if (!match)
		goto err_unmap;

	irqc->info = *(struct aic_info *)match->data;

	aic_irqc = irqc;

	switch (irqc->info.version) {
	case 1: {
		u32 info;

		info = aic_ic_read(irqc, AIC_INFO);
		irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
		irqc->max_irq = AIC_MAX_IRQ;
		irqc->nr_die = irqc->max_die = 1;

		off = start_off = irqc->info.target_cpu;
		off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

		irqc->event = irqc->base;

		break;
	}
	case 2: {
		u32 info1, info3;

		info1 = aic_ic_read(irqc, AIC2_INFO1);
		info3 = aic_ic_read(irqc, AIC2_INFO3);

		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

		off = start_off = irqc->info.irq_cfg;
		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

		irqc->event = of_iomap(node, 1);
		if (WARN_ON(!irqc->event))
			goto err_unmap;

		break;
	}
	}

	irqc->info.sw_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
	irqc->info.sw_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
	irqc->info.mask_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
	irqc->info.mask_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
	off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */

	if (!irqc->info.fast_ipi)
		static_branch_disable(&use_fast_ipi);

	if (!irqc->info.local_fast_ipi)
		static_branch_disable(&use_local_fast_ipi);

	irqc->info.die_stride = off - start_off;

	irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
						 &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain))
		goto err_unmap;

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node))
		goto err_remove_domain;

	affs = of_get_child_by_name(node, "affinities");
	if (affs) {
		struct device_node *chld;

		for_each_child_of_node(affs, chld)
			build_fiq_affinity(irqc, chld);
	}
	of_node_put(affs);

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	off = 0;
	for (die = 0; die < irqc->nr_die; die++) {
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
		if (irqc->info.target_cpu)
			for (i = 0; i < irqc->nr_irq; i++)
				aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
		off += irqc->info.die_stride;
	}

	if (irqc->info.version == 2) {
		u32 config = aic_ic_read(irqc, AIC2_CONFIG);

		config |= AIC2_CONFIG_ENABLE;
		aic_ic_write(irqc, AIC2_CONFIG, config);
	}

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts");

	if (static_branch_likely(&use_fast_ipi))
		pr_info("Using Fast IPIs");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	if (is_kernel_in_hyp_mode()) {
		struct irq_fwspec mi = {
			.fwnode		= of_node_to_fwnode(node),
			.param_count	= 3,
			.param		= {
				[0] = AIC_FIQ, /* This is a lie */
				[1] = AIC_VGIC_MI,
				[2] = IRQ_TYPE_LEVEL_HIGH,
			},
		};

		vgic_info.maint_irq = irq_create_fwspec_mapping(&mi);
		WARN_ON(!vgic_info.maint_irq);

		vgic_set_kvm_info(&vgic_info);
	}

	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
		irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;

err_remove_domain:
	irq_domain_remove(irqc->hw_domain);
err_unmap:
	if (irqc->event && irqc->event != irqc->base)
		iounmap(irqc->event);
	iounmap(irqc->base);
	kfree(irqc);

	return -ENODEV;
}

IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
1100 IRQCHIP_DECLARE(apple_aic2
, "apple,aic2", aic_of_ic_init
);