// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
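/*
 * Per-CPU probing state: the PMU interrupt parsed from each CPU's MADT GICC
 * entry, and the arm_pmu instance that CPU has been associated with.
 */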
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}
#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags          = IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};
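/*
 * The ARMv8 SPE driver is expected to bind to this device by name
 * (ARMV8_SPE_PDEV_NAME) and to take its interrupt from spe_resources[0],
 * which is filled in below once the GSI has been registered.
 */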
/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int cpu, hetid, irq, ret;
	bool first = true;
	u16 gsi = 0;

	/*
	 * Sanity check all the GICC tables for the same interrupt number.
	 * For now, we only support homogeneous ACPI/SPE machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < ACPI_MADT_GICC_SPE)
			return;

		if (first) {
			gsi = gicc->spe_interrupt;
			if (!gsi)
				return;
			hetid = find_acpi_cpu_topology_hetero_id(cpu);
			first = false;
		} else if ((gsi != gicc->spe_interrupt) ||
			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
			pr_warn("ACPI: SPE must be homogeneous\n");
			return;
		}
	}

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
		return;
	}

	spe_resources[0].start = irq;
	ret = platform_device_register(&spe_dev);
	if (ret < 0) {
		pr_warn("ACPI: SPE: Unable to register device\n");
		acpi_unregister_gsi(gsi);
	}
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */
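/*
 * Walk every possible CPU's MADT GICC entry, map its performance interrupt
 * GSI to a Linux IRQ, and stash it in pmu_irqs for the hotplug callback to
 * pick up later. On error, unwind any GSIs registered so far, taking care to
 * unregister each GSI only once even when it is shared (e.g. PPIs).
 */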
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
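/*
 * PMU instances are shared by all CPUs with the same MIDR (read_cpuid_id()):
 * reuse an already-probed instance if one matches, otherwise allocate a new
 * one. The allocation must be atomic since this is reached from the CPU
 * starting hotplug callback, which runs with interrupts disabled.
 */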
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}
/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}
/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}
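/*
 * Called from the CPU PMU driver's own probe path (e.g. the ARMv8 PMUv3
 * driver), passing that driver's init_fn so each probed arm_pmu instance can
 * be initialised against the hardware and registered with perf.
 */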
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			return ret;
		}
	}

	return 0;
}
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)