/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
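
/*
 * Note: hw_events and used_mask provide the per-CPU backing storage that
 * cpu_pmu_init() below wires into each CPU's pmu_hw_events structure.
 */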

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!cpu_pmu)
                return NULL;

        return cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (cpu_pmu != NULL)
                max_events = cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
        return this_cpu_ptr(&cpu_hw_events);
}
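
/*
 * The two helpers below are run on every online CPU via on_each_cpu(), so
 * each invocation enables or disables the shared per-CPU PMU interrupt on
 * the local CPU only.
 */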
static void cpu_pmu_enable_percpu_irq(void *data)
{
        struct arm_pmu *cpu_pmu = data;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        int irq = platform_get_irq(pmu_device, 0);

        enable_percpu_irq(irq, IRQ_TYPE_NONE);
        cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
        struct arm_pmu *cpu_pmu = data;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        int irq = platform_get_irq(pmu_device, 0);

        cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
        disable_percpu_irq(irq);
}
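
/*
 * IRQ management must cope with two interrupt models: a single per-CPU
 * interrupt (a PPI on GIC-based systems) shared by all CPUs, or one ordinary
 * interrupt line per core. irq_is_percpu() on the first IRQ resource decides
 * which path is taken here and in cpu_pmu_request_irq().
 */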
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
        int i, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
                free_percpu_irq(irq, &percpu_pmu);
        } else {
                for (i = 0; i < irqs; ++i) {
                        if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
                                continue;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq >= 0)
                                free_irq(irq, cpu_pmu);
                }
        }
}

static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
        int i, err, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;

        if (!pmu_device)
                return -ENODEV;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
                return 0;
        }

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                irq);
                        return err;
                }
                on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
        } else {
                for (i = 0; i < irqs; ++i) {
                        err = 0;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq < 0)
                                continue;

                        /*
                         * If we have a single PMU interrupt that we can't shift,
                         * assume that we're running on a uniprocessor machine and
                         * continue. Otherwise, continue without this interrupt.
                         */
                        if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
                                pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                           irq, i);
                                continue;
                        }

                        err = request_irq(irq, handler,
                                          IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                          cpu_pmu);
                        if (err) {
                                pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                        irq);
                                return err;
                        }

                        cpumask_set_cpu(i, &cpu_pmu->active_irqs);
                }
        }

        return 0;
}
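
/*
 * cpu_pmu_init() wires the per-CPU event storage declared above into the
 * arm_pmu core and puts every CPU's PMU into a known-good initial state.
 */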
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
                events->events = per_cpu(hw_events, cpu);
                events->used_mask = per_cpu(used_mask, cpu);
                raw_spin_lock_init(&events->pmu_lock);
                per_cpu(percpu_pmu, cpu) = cpu_pmu;
        }

        cpu_pmu->get_hw_events  = cpu_pmu_get_cpu_events;
        cpu_pmu->request_irq    = cpu_pmu_request_irq;
        cpu_pmu->free_irq       = cpu_pmu_free_irq;

        /* Ensure the PMU has sane values out of reset. */
        if (cpu_pmu->reset)
                on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

        /* If no interrupts available, set the corresponding capability flag */
        if (!platform_get_irq(cpu_pmu->plat_device, 0))
                cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
                          void *hcpu)
{
        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
                return NOTIFY_DONE;

        if (cpu_pmu && cpu_pmu->reset)
                cpu_pmu->reset(cpu_pmu);
        else
                return NOTIFY_DONE;

        return NOTIFY_OK;
}

static struct notifier_block cpu_pmu_hotplug_notifier = {
        .notifier_call = cpu_pmu_notify,
};

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a17-pmu",    .data = armv7_a17_pmu_init},
        {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a12-pmu",    .data = armv7_a12_pmu_init},
        {.compatible = "arm,cortex-a9-pmu",     .data = armv7_a9_pmu_init},
        {.compatible = "arm,cortex-a8-pmu",     .data = armv7_a8_pmu_init},
        {.compatible = "arm,cortex-a7-pmu",     .data = armv7_a7_pmu_init},
        {.compatible = "arm,cortex-a5-pmu",     .data = armv7_a5_pmu_init},
        {.compatible = "arm,arm11mpcore-pmu",   .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu",       .data = armv6pmu_init},
        {.compatible = "arm,arm1136-pmu",       .data = armv6pmu_init},
        {.compatible = "qcom,krait-pmu",        .data = krait_pmu_init},
        {},
};
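
/*
 * For illustration, a devicetree node matched by the table above might look
 * like the following; the interrupt specifiers are board-specific and purely
 * an example:
 *
 *      pmu {
 *              compatible = "arm,cortex-a15-pmu";
 *              interrupts = <0 68 4>, <0 69 4>;
 *      };
 */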

static struct platform_device_id cpu_pmu_plat_device_ids[] = {
        {.name = "arm-pmu"},
        {},
};

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu)
{
        int cpu = get_cpu();
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_number = read_cpuid_part_number();
        int ret = -ENODEV;

        pr_info("probing PMU on CPU %d\n", cpu);

        /* ARM Ltd CPUs. */
        if (implementor == ARM_CPU_IMP_ARM) {
                switch (part_number) {
                case ARM_CPU_PART_ARM1136:
                case ARM_CPU_PART_ARM1156:
                case ARM_CPU_PART_ARM1176:
                        ret = armv6pmu_init(pmu);
                        break;
                case ARM_CPU_PART_ARM11MPCORE:
                        ret = armv6mpcore_pmu_init(pmu);
                        break;
                case ARM_CPU_PART_CORTEX_A8:
                        ret = armv7_a8_pmu_init(pmu);
                        break;
                case ARM_CPU_PART_CORTEX_A9:
                        ret = armv7_a9_pmu_init(pmu);
                        break;
                }
        /* Intel CPUs [xscale]. */
        } else if (implementor == ARM_CPU_IMP_INTEL) {
                switch (xscale_cpu_arch_version()) {
                case ARM_CPU_XSCALE_ARCH_V1:
                        ret = xscale1pmu_init(pmu);
                        break;
                case ARM_CPU_XSCALE_ARCH_V2:
                        ret = xscale2pmu_init(pmu);
                        break;
                }
        }

        put_cpu();
        return ret;
}
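
/*
 * Probe order: a devicetree match (cpu_pmu_of_device_ids) selects the init
 * function directly; without one we fall back to probe_current_pmu(), which
 * identifies the PMU from the CPUID registers of the running CPU.
 */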
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;
        const int (*init_fn)(struct arm_pmu *);
        struct device_node *node = pdev->dev.of_node;
        struct arm_pmu *pmu;
        int ret = -ENODEV;

        if (cpu_pmu) {
                pr_info("attempt to register multiple PMU devices!");
                return -ENOSPC;
        }

        pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
        if (!pmu) {
                pr_info("failed to allocate PMU device!");
                return -ENOMEM;
        }

        cpu_pmu = pmu;
        cpu_pmu->plat_device = pdev;

        if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
                init_fn = of_id->data;
                ret = init_fn(pmu);
        } else {
                ret = probe_current_pmu(pmu);
        }

        if (ret) {
                pr_info("failed to probe PMU!");
                goto out_free;
        }

        cpu_pmu_init(cpu_pmu);
        ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);

        if (!ret)
                return 0;

out_free:
        pr_info("failed to register PMU devices!");
        kfree(pmu);
        return ret;
}

static struct platform_driver cpu_pmu_driver = {
        .driver         = {
                .name   = "arm-pmu",
                .pm     = &armpmu_dev_pm_ops,
                .of_match_table = cpu_pmu_of_device_ids,
        },
        .probe          = cpu_pmu_device_probe,
        .id_table       = cpu_pmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
        int err;

        err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
        if (err)
                return err;

        err = platform_driver_register(&cpu_pmu_driver);
        if (err)
                unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

        return err;
}
device_initcall(register_pmu_driver);