// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED
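/*
 * Note: these vendor-specific event numbers lie outside the architected
 * common-event range enumerated by PMCEID0/1_EL0, so they are only wired
 * up through the per-CPU cache maps below rather than probed at runtime.
 */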
/*
 * ARMv8 architecturally defined events. Not all of these may be supported
 * on any given implementation; unsupported events will be disabled at
 * run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
        [PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
        [PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
        [PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
        [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

        [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
        [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

        [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
        [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};
static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                  [PERF_COUNT_HW_CACHE_OP_MAX]
                                                  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

        [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};
static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                 [PERF_COUNT_HW_CACHE_OP_MAX]
                                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

        [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
        [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
                           struct device_attribute *attr, char *page)
{
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

        return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config)				\
        PMU_EVENT_ATTR(name, armv8_event_attr_##name,		\
                       config, armv8pmu_events_sysfs_show)
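/*
 * Each ARMV8_EVENT_ATTR() below becomes a node in the PMU's sysfs "events"
 * directory (typically /sys/bus/event_source/devices/armv8_pmuv3*/events/),
 * whose contents ("event=0x0NN") let the perf tool resolve symbolic event
 * names; armv8pmu_event_attr_is_visible() hides events the CPU does not
 * advertise in PMCEID.
 */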
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
static struct attribute *armv8_pmuv3_event_attrs[] = {
        &armv8_event_attr_sw_incr.attr.attr,
        &armv8_event_attr_l1i_cache_refill.attr.attr,
        &armv8_event_attr_l1i_tlb_refill.attr.attr,
        &armv8_event_attr_l1d_cache_refill.attr.attr,
        &armv8_event_attr_l1d_cache.attr.attr,
        &armv8_event_attr_l1d_tlb_refill.attr.attr,
        &armv8_event_attr_ld_retired.attr.attr,
        &armv8_event_attr_st_retired.attr.attr,
        &armv8_event_attr_inst_retired.attr.attr,
        &armv8_event_attr_exc_taken.attr.attr,
        &armv8_event_attr_exc_return.attr.attr,
        &armv8_event_attr_cid_write_retired.attr.attr,
        &armv8_event_attr_pc_write_retired.attr.attr,
        &armv8_event_attr_br_immed_retired.attr.attr,
        &armv8_event_attr_br_return_retired.attr.attr,
        &armv8_event_attr_unaligned_ldst_retired.attr.attr,
        &armv8_event_attr_br_mis_pred.attr.attr,
        &armv8_event_attr_cpu_cycles.attr.attr,
        &armv8_event_attr_br_pred.attr.attr,
        &armv8_event_attr_mem_access.attr.attr,
        &armv8_event_attr_l1i_cache.attr.attr,
        &armv8_event_attr_l1d_cache_wb.attr.attr,
        &armv8_event_attr_l2d_cache.attr.attr,
        &armv8_event_attr_l2d_cache_refill.attr.attr,
        &armv8_event_attr_l2d_cache_wb.attr.attr,
        &armv8_event_attr_bus_access.attr.attr,
        &armv8_event_attr_memory_error.attr.attr,
        &armv8_event_attr_inst_spec.attr.attr,
        &armv8_event_attr_ttbr_write_retired.attr.attr,
        &armv8_event_attr_bus_cycles.attr.attr,
        &armv8_event_attr_l1d_cache_allocate.attr.attr,
        &armv8_event_attr_l2d_cache_allocate.attr.attr,
        &armv8_event_attr_br_retired.attr.attr,
        &armv8_event_attr_br_mis_pred_retired.attr.attr,
        &armv8_event_attr_stall_frontend.attr.attr,
        &armv8_event_attr_stall_backend.attr.attr,
        &armv8_event_attr_l1d_tlb.attr.attr,
        &armv8_event_attr_l1i_tlb.attr.attr,
        &armv8_event_attr_l2i_cache.attr.attr,
        &armv8_event_attr_l2i_cache_refill.attr.attr,
        &armv8_event_attr_l3d_cache_allocate.attr.attr,
        &armv8_event_attr_l3d_cache_refill.attr.attr,
        &armv8_event_attr_l3d_cache.attr.attr,
        &armv8_event_attr_l3d_cache_wb.attr.attr,
        &armv8_event_attr_l2d_tlb_refill.attr.attr,
        &armv8_event_attr_l2i_tlb_refill.attr.attr,
        &armv8_event_attr_l2d_tlb.attr.attr,
        &armv8_event_attr_l2i_tlb.attr.attr,
        &armv8_event_attr_remote_access.attr.attr,
        &armv8_event_attr_ll_cache.attr.attr,
        &armv8_event_attr_ll_cache_miss.attr.attr,
        &armv8_event_attr_dtlb_walk.attr.attr,
        &armv8_event_attr_itlb_walk.attr.attr,
        &armv8_event_attr_ll_cache_rd.attr.attr,
        &armv8_event_attr_ll_cache_miss_rd.attr.attr,
        &armv8_event_attr_remote_access_rd.attr.attr,
        &armv8_event_attr_sample_pop.attr.attr,
        &armv8_event_attr_sample_feed.attr.attr,
        &armv8_event_attr_sample_filtrate.attr.attr,
        &armv8_event_attr_sample_collision.attr.attr,
        NULL,
};
static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct pmu *pmu = dev_get_drvdata(dev);
        struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

        if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
            test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
                return attr->mode;

        pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
        if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
            test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
                return attr->mode;

        return 0;
}

static struct attribute_group armv8_pmuv3_events_attr_group = {
        .name = "events",
        .attrs = armv8_pmuv3_event_attrs,
        .is_visible = armv8pmu_event_attr_is_visible,
};
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
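/*
 * Setting the "long" format bit (attr.config1 bit 0) asks for a 64-bit
 * counter: either the dedicated cycle counter or a chained pair of 32-bit
 * event counters. From userspace this is requested with something like
 * "perf stat -e armv8_pmuv3/event=0x11,long/" (illustrative invocation).
 */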
static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
        return event->attr.config1 & 0x1;
}
static struct attribute *armv8_pmuv3_format_attrs[] = {
        &format_attr_event.attr,
        &format_attr_long.attr,
        NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
        .name = "format",
        .attrs = armv8_pmuv3_format_attrs,
};
/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
        int idx = event->hw.idx;

        return !WARN_ON(idx < 0) &&
               armv8pmu_event_is_64bit(event) &&
               (idx != ARMV8_IDX_CYCLE_COUNTER);
}
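/*
 * A chained 64-bit event is given an even perf idx N by
 * armv8pmu_get_chain_idx(): the lower counter (perf idx N - 1, an
 * even-numbered hardware counter) counts the event itself and supplies the
 * low 32 bits, while perf idx N (the odd hardware counter above it) counts
 * the CHAIN event and supplies the high 32 bits; see
 * armv8pmu_write_event_type() and armv8pmu_read_hw_counter(). E.g. perf
 * idx 4 maps to hardware counters 2 (event, low half) and 3 (CHAIN, high
 * half).
 */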
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x)	\
        (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
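/*
 * Perf idx 0 (ARMV8_IDX_CYCLE_COUNTER) is the dedicated cycle counter
 * (PMCCNTR_EL0); perf idx N >= 1 maps to hardware event counter N - 1,
 * which is the value ARMV8_IDX_TO_COUNTER() computes for the
 * PMSELR/PMCNTENSET/PMINTENSET bit positions used below.
 */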
static inline u32 armv8pmu_pmcr_read(void)
{
        return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
        val &= ARMV8_PMU_PMCR_MASK;
        isb();
        write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
        return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV8_IDX_CYCLE_COUNTER &&
                idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline void armv8pmu_select_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        write_sysreg(counter, pmselr_el0);
        isb();
}
static inline u32 armv8pmu_read_evcntr(int idx)
{
        armv8pmu_select_counter(idx);
        return read_sysreg(pmxevcntr_el0);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
        int idx = event->hw.idx;
        u64 val = 0;

        val = armv8pmu_read_evcntr(idx);
        if (armv8pmu_event_is_chained(event))
                val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
        return val;
}
static u64 armv8pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u64 value = 0;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                value = read_sysreg(pmccntr_el0);
        else
                value = armv8pmu_read_hw_counter(event);

        return value;
}
static inline void armv8pmu_write_evcntr(int idx, u32 value)
{
        armv8pmu_select_counter(idx);
        write_sysreg(value, pmxevcntr_el0);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
                                             u64 value)
{
        int idx = event->hw.idx;

        if (armv8pmu_event_is_chained(event)) {
                armv8pmu_write_evcntr(idx, upper_32_bits(value));
                armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
        } else {
                armv8pmu_write_evcntr(idx, value);
        }
}
static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
                /*
                 * The cycles counter is really a 64-bit counter.
                 * When treating it as a 32-bit counter, we only count
                 * the lower 32 bits, and set the upper 32-bits so that
                 * we get an interrupt upon 32-bit overflow.
                 */
                if (!armv8pmu_event_is_64bit(event))
                        value |= 0xffffffff00000000ULL;
                write_sysreg(value, pmccntr_el0);
        } else
                armv8pmu_write_hw_counter(event, value);
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
        armv8pmu_select_counter(idx);
        val &= ARMV8_PMU_EVTYPE_MASK;
        write_sysreg(val, pmxevtyper_el0);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        /*
         * For chained events, the low counter is programmed to count
         * the event of interest and the high counter is programmed
         * with CHAIN event code with filters set to count at all ELs.
         */
        if (armv8pmu_event_is_chained(event)) {
                u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
                                ARMV8_PMU_INCLUDE_EL2;

                armv8pmu_write_evtype(idx - 1, hwc->config_base);
                armv8pmu_write_evtype(idx, chain_evt);
        } else {
                armv8pmu_write_evtype(idx, hwc->config_base);
        }
}
static inline int armv8pmu_enable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        write_sysreg(BIT(counter), pmcntenset_el0);
        return idx;
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        int idx = event->hw.idx;
        u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));

        if (armv8pmu_event_is_chained(event))
                counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));

        kvm_set_pmu_events(counter_bits, attr);

        /* We rely on the hypervisor switch code to enable guest counters */
        if (!kvm_pmu_counter_deferred(attr)) {
                armv8pmu_enable_counter(idx);
                if (armv8pmu_event_is_chained(event))
                        armv8pmu_enable_counter(idx - 1);
        }
}
static inline int armv8pmu_disable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        write_sysreg(BIT(counter), pmcntenclr_el0);
        return idx;
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_event_attr *attr = &event->attr;
        int idx = hwc->idx;
        u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));

        if (armv8pmu_event_is_chained(event))
                counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));

        kvm_clr_pmu_events(counter_bits);

        /* We rely on the hypervisor switch code to disable guest counters */
        if (!kvm_pmu_counter_deferred(attr)) {
                if (armv8pmu_event_is_chained(event))
                        armv8pmu_disable_counter(idx - 1);
                armv8pmu_disable_counter(idx);
        }
}
static inline int armv8pmu_enable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        write_sysreg(BIT(counter), pmintenset_el1);
        return idx;
}

static inline int armv8pmu_enable_event_irq(struct perf_event *event)
{
        return armv8pmu_enable_intens(event->hw.idx);
}

static inline int armv8pmu_disable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        write_sysreg(BIT(counter), pmintenclr_el1);
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        write_sysreg(BIT(counter), pmovsclr_el0);
        isb();

        return idx;
}

static inline int armv8pmu_disable_event_irq(struct perf_event *event)
{
        return armv8pmu_disable_intens(event->hw.idx);
}
static inline u32 armv8pmu_getreset_flags(void)
{
        u32 value;

        /* Read */
        value = read_sysreg(pmovsclr_el0);

        /* Write to clear flags */
        value &= ARMV8_PMU_OVSR_MASK;
        write_sysreg(value, pmovsclr_el0);

        return value;
}
static void armv8pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_event_counter(event);

        /*
         * Set event (if destined for PMNx counters).
         */
        armv8pmu_write_event_type(event);

        /*
         * Enable interrupt for this counter
         */
        armv8pmu_enable_event_irq(event);

        /*
         * Enable counter
         */
        armv8pmu_enable_event_counter(event);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_event_counter(event);

        /*
         * Disable interrupt for this counter
         */
        armv8pmu_disable_event_irq(event);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
        u32 pmovsr;
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmovsr = armv8pmu_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv8pmu_has_overflowed(pmovsr))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        /*
         * Stop the PMU while processing the counter overflows
         * to prevent skews in group events.
         */
        armv8pmu_stop(cpu_pmu);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }
        armv8pmu_start(cpu_pmu);

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}
*cpuc
,
757 struct arm_pmu
*cpu_pmu
)
761 for (idx
= ARMV8_IDX_COUNTER0
; idx
< cpu_pmu
->num_events
; idx
++) {
762 if (!test_and_set_bit(idx
, cpuc
->used_mask
))
static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
                                  struct arm_pmu *cpu_pmu)
{
        int idx;

        /*
         * Chaining requires two consecutive event counters, where
         * the lower idx must be even.
         */
        for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
                if (!test_and_set_bit(idx, cpuc->used_mask)) {
                        /* Check if the preceding even counter is available */
                        if (!test_and_set_bit(idx - 1, cpuc->used_mask))
                                return idx;
                        /* Release the Odd counter */
                        clear_bit(idx, cpuc->used_mask);
                }
        }
        return -EAGAIN;
}
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

        /* Always prefer to place a cycle counter into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
                if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return ARMV8_IDX_CYCLE_COUNTER;
        }

        /*
         * Otherwise use events counters
         */
        if (armv8pmu_event_is_64bit(event))
                return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
        else
                return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}
static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
                                     struct perf_event *event)
{
        int idx = event->hw.idx;

        clear_bit(idx, cpuc->used_mask);
        if (armv8pmu_event_is_chained(event))
                clear_bit(idx - 1, cpuc->used_mask);
}
/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;

        /*
         * If we're running in hyp mode, then we *are* the hypervisor.
         * Therefore we ignore exclude_hv in this configuration, since
         * there's no hypervisor to sample anyway. This is consistent
         * with other architectures (x86 and Power).
         */
        if (is_kernel_in_hyp_mode()) {
                if (!attr->exclude_kernel && !attr->exclude_host)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
                if (attr->exclude_guest)
                        config_base |= ARMV8_PMU_EXCLUDE_EL1;
                if (attr->exclude_host)
                        config_base |= ARMV8_PMU_EXCLUDE_EL0;
        } else {
                if (!attr->exclude_hv && !attr->exclude_host)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
        }

        /*
         * Filter out !VHE kernels and guest kernels
         */
        if (attr->exclude_kernel)
                config_base |= ARMV8_PMU_EXCLUDE_EL1;

        if (attr->exclude_user)
                config_base |= ARMV8_PMU_EXCLUDE_EL0;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
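/*
 * Illustration (not exhaustive): counting a user-only event, e.g.
 * "perf stat -e armv8_pmuv3/event=0x08/u", sets attr->exclude_kernel and
 * attr->exclude_hv, so config_base ends up with ARMV8_PMU_EXCLUDE_EL1 and
 * the PMEVTYPER filter bits stop the counter while running at EL1.
 */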
static int armv8pmu_filter_match(struct perf_event *event)
{
        unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
        return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}
)
876 struct arm_pmu
*cpu_pmu
= (struct arm_pmu
*)info
;
877 u32 idx
, nb_cnt
= cpu_pmu
->num_events
;
879 /* The counter and interrupt enable registers are unknown at reset. */
880 for (idx
= ARMV8_IDX_CYCLE_COUNTER
; idx
< nb_cnt
; ++idx
) {
881 armv8pmu_disable_counter(idx
);
882 armv8pmu_disable_intens(idx
);
885 /* Clear the counters we flip at guest entry/exit */
886 kvm_clr_pmu_events(U32_MAX
);
889 * Initialize & Reset PMNC. Request overflow interrupt for
890 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
892 armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P
| ARMV8_PMU_PMCR_C
|
static int __armv8_pmuv3_map_event(struct perf_event *event,
                                   const unsigned (*extra_event_map)
                                                  [PERF_COUNT_HW_MAX],
                                   const unsigned (*extra_cache_map)
                                                  [PERF_COUNT_HW_CACHE_MAX]
                                                  [PERF_COUNT_HW_CACHE_OP_MAX]
                                                  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
        int hw_event_id;
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                       &armv8_pmuv3_perf_cache_map,
                                       ARMV8_PMU_EVTYPE_EVENT);

        if (armv8pmu_event_is_64bit(event))
                event->hw.flags |= ARMPMU_EVT_64BIT;

        /* Only expose micro/arch events supported by this PMU */
        if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
            && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
                return hw_event_id;
        }

        return armpmu_map_event(event, extra_event_map, extra_cache_map,
                                ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL,
                                       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL,
                                       &armv8_vulcan_perf_cache_map);
}
struct armv8pmu_probe_info {
        struct arm_pmu *pmu;
        bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
        struct armv8pmu_probe_info *probe = info;
        struct arm_pmu *cpu_pmu = probe->pmu;
        u64 dfr0;
        u64 pmceid_raw[2];
        u32 pmceid[2];
        int pmuver;

        dfr0 = read_sysreg(id_aa64dfr0_el1);
        pmuver = cpuid_feature_extract_unsigned_field(dfr0,
                        ID_AA64DFR0_PMUVER_SHIFT);
        if (pmuver == 0xf || pmuver == 0)
                return;

        probe->present = true;

        /* Read the nb of CNTx counters supported from PMNC */
        cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
                & ARMV8_PMU_PMCR_N_MASK;

        /* Add the CPU cycles counter */
        cpu_pmu->num_events += 1;

        pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
        pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

        bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
                          pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

        pmceid[0] = pmceid_raw[0] >> 32;
        pmceid[1] = pmceid_raw[1] >> 32;

        bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
                          pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
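/*
 * The low 32 bits of PMCEID0/1_EL0 describe the architected common events
 * (IDs 0x0000-0x003F); the high 32 bits describe the extended common events
 * (IDs 0x4000-0x403F), hence the shift by 32 before filling
 * pmceid_ext_bitmap above.
 */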
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
        struct armv8pmu_probe_info probe = {
                .pmu = cpu_pmu,
                .present = false,
        };
        int ret;

        ret = smp_call_function_any(&cpu_pmu->supported_cpus,
                                    __armv8pmu_probe_pmu,
                                    &probe, 1);
        if (ret)
                return ret;

        return probe.present ? 0 : -ENODEV;
}
*cpu_pmu
)
1017 int ret
= armv8pmu_probe_pmu(cpu_pmu
);
1021 cpu_pmu
->handle_irq
= armv8pmu_handle_irq
;
1022 cpu_pmu
->enable
= armv8pmu_enable_event
;
1023 cpu_pmu
->disable
= armv8pmu_disable_event
;
1024 cpu_pmu
->read_counter
= armv8pmu_read_counter
;
1025 cpu_pmu
->write_counter
= armv8pmu_write_counter
;
1026 cpu_pmu
->get_event_idx
= armv8pmu_get_event_idx
;
1027 cpu_pmu
->clear_event_idx
= armv8pmu_clear_event_idx
;
1028 cpu_pmu
->start
= armv8pmu_start
;
1029 cpu_pmu
->stop
= armv8pmu_stop
;
1030 cpu_pmu
->reset
= armv8pmu_reset
;
1031 cpu_pmu
->set_event_filter
= armv8pmu_set_event_filter
;
1032 cpu_pmu
->filter_match
= armv8pmu_filter_match
;
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_pmuv3";
        cpu_pmu->map_event		= armv8_pmuv3_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_cortex_a35";
        cpu_pmu->map_event		= armv8_a53_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_cortex_a53";
        cpu_pmu->map_event		= armv8_a53_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_cortex_a57";
        cpu_pmu->map_event		= armv8_a57_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_cortex_a72";
        cpu_pmu->map_event		= armv8_a57_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_cortex_a73";
        cpu_pmu->map_event		= armv8_a73_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_cavium_thunder";
        cpu_pmu->map_event		= armv8_thunder_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name			= "armv8_brcm_vulcan";
        cpu_pmu->map_event		= armv8_vulcan_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
        {.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
        {.compatible = "arm,cortex-a35-pmu",	.data = armv8_a35_pmu_init},
        {.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
        {.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
        {.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
        {.compatible = "arm,cortex-a73-pmu",	.data = armv8_a73_pmu_init},
        {.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
        {.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
        {},
};
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}
static struct platform_driver armv8_pmu_driver = {
        .driver		= {
                .name	= ARMV8_PMU_PDEV_NAME,
                .of_match_table = armv8_pmu_of_device_ids,
                .suppress_bind_attrs = true,
        },
        .probe		= armv8_pmu_device_probe,
};
static int __init armv8_pmu_driver_init(void)
{
        if (acpi_disabled)
                return platform_driver_register(&armv8_pmu_driver);
        else
                return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
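/*
 * Note on the two probe paths: device-tree systems bind through the
 * compatible strings in armv8_pmu_of_device_ids above, while ACPI systems
 * are expected to go through arm_pmu_acpi_probe(), which registers the
 * PMUs described by the firmware tables using armv8_pmuv3_init as the
 * init function.
 */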
void arch_perf_update_userpage(struct perf_event *event,
                               struct perf_event_mmap_page *userpg, u64 now)
{
        u32 freq;
        u32 shift;

        /*
         * Internal timekeeping for enabled/running/stopped times
         * is always computed with the sched_clock.
         */
        freq = arch_timer_get_rate();
        userpg->cap_user_time = 1;

        clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
                        NSEC_PER_SEC, 0);
        /*
         * time_shift is not expected to be greater than 31 due to
         * the original published conversion algorithm shifting a
         * 32-bit value (now specifies a 64-bit value) - refer
         * perf_event_mmap_page documentation in perf_event.h.
         */
        if (shift == 32) {
                shift = 31;
                userpg->time_mult >>= 1;
        }
        userpg->time_shift = (u16)shift;
        userpg->time_offset = -now;
}