/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
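
/*
 * Per-CPU bookkeeping: the PMU instance probed for each CPU, and the Linux
 * IRQ parsed from that CPU's MADT GICC entry.
 */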
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}
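
/*
 * Register a GSI for each possible CPU and stash the resulting Linux IRQ in
 * pmu_irqs. On failure, unwind any GSIs already registered, taking care to
 * unregister each GSI only once (PPIs share one GSI across CPUs).
 */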
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		armpmu_free_irq(irq, cpu);

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}
/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}
/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage
 * the affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			return ret;
		}
	}

	return 0;
}
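
/*
 * Parse the per-CPU PMU IRQs from the MADT, then install the CPU hotplug
 * callback that associates each CPU with its PMU as it comes online.
 */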
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)