/*
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
/*
 * ARMv8 PMUv3 Performance Events handling code.
 */
enum armv8_pmuv3_perf_types {
	/* Required events. */
	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* At least one of the following is required. */
	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,

	/* Common architectural events. */
	ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
	ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
	ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
	ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,

	/* Common microarchitectural events. */
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
	ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
	ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
	ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
	ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
};
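/*
 * Event numbers up to 0x3F are the common architectural and
 * microarchitectural events; the CPU-specific enums below use numbers
 * from the IMPLEMENTATION DEFINED range above 0x3F.
 */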
/* ARMv8 Cortex-A53 specific event types. */
enum armv8_a53_pmu_perf_types {
	ARMV8_A53_PERFCTR_PREFETCH_LINEFILL = 0xC2,
};
/* ARMv8 Cortex-A57 specific event types. */
enum armv8_a57_perf_types {
	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD = 0x40,
	ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST = 0x41,
	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD = 0x42,
	ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST = 0x43,
	ARMV8_A57_PERFCTR_DTLB_REFILL_LD = 0x4c,
	ARMV8_A57_PERFCTR_DTLB_REFILL_ST = 0x4d,
};
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};
/* ARM Cortex-A57 HW events mapping. */
static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
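/*
 * The architectural L1D access/refill events count loads and stores
 * alike, so the read and write slots above map to the same counters.
 */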
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_ST,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS	32
#define ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
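/*
 * perf reserves index 0 (ARMV8_IDX_CYCLE_COUNTER) for the dedicated cycle
 * counter and numbers the event counters from 1 (ARMV8_IDX_COUNTER0),
 * while the hardware numbers its event counters from 0; the macro below
 * converts a perf index into the hardware counter number.
 */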
#define ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK	0x1f
#define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */
/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK		0xffffffff /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK
/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK	0xc80003ff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT	0x3ff	   /* Mask for EVENT bits */
/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1	(1 << 31)
#define ARMV8_EXCLUDE_EL0	(1 << 30)
#define ARMV8_INCLUDE_EL2	(1 << 27)
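/*
 * These filter bits correspond to the P (bit 31), U (bit 30) and NSH
 * (bit 27) fields of the event type registers, so privilege filtering
 * is programmed together with the event number via PMXEVTYPER_EL0.
 */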
static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;

	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}
static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}
static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
static inline int armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}
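/*
 * PMSELR_EL0 determines which event counter the indirect PMXEVCNTR_EL0
 * and PMXEVTYPER_EL0 accessors operate on; the isb() above ensures the
 * new selection takes effect before the counter is accessed.
 */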
static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}
static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}
static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}
static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}
static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}
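/*
 * PMOVSCLR_EL0 reads as the set of overflowed counters and is
 * write-one-to-clear, so the read-then-write sequence above both samples
 * and acknowledges any pending overflow flags in one pass.
 */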
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
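/*
 * start/stop only toggle the global PMCR_EL0.E enable; the per-counter
 * enable and interrupt-enable bits programmed by enable_event are left
 * intact, so counting resumes exactly as configured.
 */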
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
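/*
 * cpuc->used_mask is a per-CPU bitmap of claimed counters;
 * test_and_set_bit() makes each claim atomic, and the dedicated cycle
 * counter is handed out only for CPU_CYCLES so the general-purpose
 * event counters stay free for other events.
 */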
/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

	/* Disable access from userspace. */
	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}
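/*
 * Writing 0 to PMUSERENR_EL0 removes EL0 access to the PMU system
 * registers, so userspace cannot read or reprogram the counters directly.
 */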
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a53_perf_map,
				&armv8_a53_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a57_perf_map,
				&armv8_a57_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}
static void armv8pmu_read_num_pmnc_events(void *info)
{
	u32 *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv8pmu_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}
static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->max_period		= (1LLU << 32) - 1;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_pmuv3";
	cpu_pmu->map_event	= armv8_pmuv3_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a53";
	cpu_pmu->map_event	= armv8_a53_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a57";
	cpu_pmu->map_event	= armv8_a57_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{},
};
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}
static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= "armv8-pmu",
		.of_match_table = armv8_pmu_of_device_ids,
	},
	.probe		= armv8_pmu_device_probe,
};
static int __init register_armv8_pmu_driver(void)
{
	return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);