// SPDX-License-Identifier: GPL-2.0
/*
 * CPU PMU driver for the Apple M1 and derivatives
 *
 * Copyright (C) 2021 Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Most of the information used in this driver was provided by the
 * Asahi Linux project. The rest was experimentally discovered.
 */
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

#include <asm/apple_m1_pmu.h>
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#define M1_PMU_NR_COUNTERS	10

#define M1_PMU_CFG_EVENT	GENMASK(7, 0)

#define ANY_BUT_0_1		GENMASK(9, 2)
#define ONLY_2_TO_7		GENMASK(7, 2)
#define ONLY_2_4_6		(BIT(2) | BIT(4) | BIT(6))
#define ONLY_5_6_7		(BIT(5) | BIT(6) | BIT(7))
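/*
 * Each of the ANY_*/ONLY_* masks above encodes a set of hardware counters:
 * bit N set means that counter N can count the event. They are used as
 * values in the m1_pmu_event_affinity[] table below and consumed by
 * m1_pmu_get_event_idx().
 */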
/*
 * Description of the events we actually know about, as well as those with
 * a specific counter affinity. Yes, this is a grand total of two known
 * counters, and the rest is anybody's guess.
 *
 * Not all counters can count all events. Counters #0 and #1 are wired to
 * count cycles and instructions respectively, and some events have
 * bizarre mappings (every other counter, or even *one* counter). These
 * restrictions equally apply to both P and E cores.
 *
 * It is worth noting that the PMUs attached to P and E cores are likely
 * to be different because the underlying uarches are different. At the
 * moment, we don't really need to distinguish between the two because we
 * know next to nothing about the events themselves, and we already have
 * per cpu-type PMU abstractions.
 *
 * If we eventually find out that the events are different across
 * implementations, we'll have to introduce per cpu-type tables.
 */
enum m1_pmu_events {
	M1_PMU_PERFCTR_RETIRE_UOP = 0x1,
	M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE = 0x2,
	M1_PMU_PERFCTR_L1I_TLB_FILL = 0x4,
	M1_PMU_PERFCTR_L1D_TLB_FILL = 0x5,
	M1_PMU_PERFCTR_MMU_TABLE_WALK_INSTRUCTION = 0x7,
	M1_PMU_PERFCTR_MMU_TABLE_WALK_DATA = 0x8,
	M1_PMU_PERFCTR_L2_TLB_MISS_INSTRUCTION = 0xa,
	M1_PMU_PERFCTR_L2_TLB_MISS_DATA = 0xb,
	M1_PMU_PERFCTR_MMU_VIRTUAL_MEMORY_FAULT_NONSPEC = 0xd,
	M1_PMU_PERFCTR_SCHEDULE_UOP = 0x52,
	M1_PMU_PERFCTR_INTERRUPT_PENDING = 0x6c,
	M1_PMU_PERFCTR_MAP_STALL_DISPATCH = 0x70,
	M1_PMU_PERFCTR_MAP_REWIND = 0x75,
	M1_PMU_PERFCTR_MAP_STALL = 0x76,
	M1_PMU_PERFCTR_MAP_INT_UOP = 0x7c,
	M1_PMU_PERFCTR_MAP_LDST_UOP = 0x7d,
	M1_PMU_PERFCTR_MAP_SIMD_UOP = 0x7e,
	M1_PMU_PERFCTR_FLUSH_RESTART_OTHER_NONSPEC = 0x84,
	M1_PMU_PERFCTR_INST_ALL = 0x8c,
	M1_PMU_PERFCTR_INST_BRANCH = 0x8d,
	M1_PMU_PERFCTR_INST_BRANCH_CALL = 0x8e,
	M1_PMU_PERFCTR_INST_BRANCH_RET = 0x8f,
	M1_PMU_PERFCTR_INST_BRANCH_TAKEN = 0x90,
	M1_PMU_PERFCTR_INST_BRANCH_INDIR = 0x93,
	M1_PMU_PERFCTR_INST_BRANCH_COND = 0x94,
	M1_PMU_PERFCTR_INST_INT_LD = 0x95,
	M1_PMU_PERFCTR_INST_INT_ST = 0x96,
	M1_PMU_PERFCTR_INST_INT_ALU = 0x97,
	M1_PMU_PERFCTR_INST_SIMD_LD = 0x98,
	M1_PMU_PERFCTR_INST_SIMD_ST = 0x99,
	M1_PMU_PERFCTR_INST_SIMD_ALU = 0x9a,
	M1_PMU_PERFCTR_INST_LDST = 0x9b,
	M1_PMU_PERFCTR_INST_BARRIER = 0x9c,
	M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
	M1_PMU_PERFCTR_L1D_TLB_ACCESS = 0xa0,
	M1_PMU_PERFCTR_L1D_TLB_MISS = 0xa1,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_ST = 0xa2,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_LD = 0xa3,
	M1_PMU_PERFCTR_LD_UNIT_UOP = 0xa6,
	M1_PMU_PERFCTR_ST_UNIT_UOP = 0xa7,
	M1_PMU_PERFCTR_L1D_CACHE_WRITEBACK = 0xa8,
	M1_PMU_PERFCTR_LDST_X64_UOP = 0xb1,
	M1_PMU_PERFCTR_LDST_XPG_UOP = 0xb2,
	M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_SUCC = 0xb3,
	M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_FAIL = 0xb4,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC = 0xbf,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC = 0xc0,
	M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC = 0xc1,
	M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC = 0xc4,
	M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC = 0xc5,
	M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC = 0xc6,
	M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC = 0xc8,
	M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC = 0xca,
	M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC = 0xcb,
	M1_PMU_PERFCTR_L1I_TLB_MISS_DEMAND = 0xd4,
	M1_PMU_PERFCTR_MAP_DISPATCH_BUBBLE = 0xd6,
	M1_PMU_PERFCTR_L1I_CACHE_MISS_DEMAND = 0xdb,
	M1_PMU_PERFCTR_FETCH_RESTART = 0xde,
	M1_PMU_PERFCTR_ST_NT_UOP = 0xe5,
	M1_PMU_PERFCTR_LD_NT_UOP = 0xe6,
	M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
	M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
	M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
	M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
	M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
	M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,

	/*
	 * From this point onwards, these are not actual HW events,
	 * but attributes that get stored in hw->config_base.
	 */
	M1_PMU_CFG_COUNT_USER = BIT(8),
	M1_PMU_CFG_COUNT_KERNEL = BIT(9),
};
/*
 * Per-event affinity table. Most events can be installed on counters
 * 2-9, but there are a number of exceptions. Note that this table
 * has been created experimentally, and I wouldn't be surprised if more
 * counters had strange affinities.
 */
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
	[0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
	[M1_PMU_PERFCTR_RETIRE_UOP] = BIT(7),
	[M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE] = ANY_BUT_0_1 | BIT(0),
	[M1_PMU_PERFCTR_INST_ALL] = BIT(7) | BIT(1),
	[M1_PMU_PERFCTR_INST_BRANCH] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_CALL] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_RET] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_TAKEN] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_INDIR] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_COND] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_INT_LD] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_INT_ST] = BIT(7),
	[M1_PMU_PERFCTR_INST_INT_ALU] = BIT(7),
	[M1_PMU_PERFCTR_INST_SIMD_LD] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_SIMD_ST] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_SIMD_ALU] = BIT(7),
	[M1_PMU_PERFCTR_INST_LDST] = BIT(7),
	[M1_PMU_PERFCTR_INST_BARRIER] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
	[M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC] = ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
	[M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
};
static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL,
};
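/*
 * PERF_MAP_ALL_UNSUPPORTED initialises every other generic perf event to
 * "unsupported", so only cycles and instructions are mapped above; anything
 * else has to be requested as a raw event number.
 */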
/* sysfs definitions */
static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
#define M1_PMU_EVENT_ATTR(name, config)					\
	PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)

static struct attribute *m1_pmu_event_attrs[] = {
	M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE),
	M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INST_ALL),
	NULL,
};

static const struct attribute_group m1_pmu_events_attr_group = {
	.name = "events",
	.attrs = m1_pmu_event_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *m1_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group m1_pmu_format_attr_group = {
	.name = "format",
	.attrs = m1_pmu_format_attrs,
};
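/*
 * With the "events" and "format" groups above, a raw event can be requested
 * from userspace by number, e.g. (illustrative only):
 *
 *	perf stat -e apple_firestorm_pmu/event=0x8d/ -- <workload>
 *
 * which programs M1_PMU_PERFCTR_INST_BRANCH on a P-core counter.
 */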
/* Low level accessors. No synchronisation. */
#define PMU_READ_COUNTER(_idx)						\
	case _idx: return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)

#define PMU_WRITE_COUNTER(_val, _idx)					\
	case _idx:							\
		write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1);	\
		return
static u64 m1_pmu_read_hw_counter(unsigned int index)
{
	switch (index) {
		PMU_READ_COUNTER(0);
		PMU_READ_COUNTER(1);
		PMU_READ_COUNTER(2);
		PMU_READ_COUNTER(3);
		PMU_READ_COUNTER(4);
		PMU_READ_COUNTER(5);
		PMU_READ_COUNTER(6);
		PMU_READ_COUNTER(7);
		PMU_READ_COUNTER(8);
		PMU_READ_COUNTER(9);
	}

	BUG();
}

static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
{
	switch (index) {
		PMU_WRITE_COUNTER(val, 0);
		PMU_WRITE_COUNTER(val, 1);
		PMU_WRITE_COUNTER(val, 2);
		PMU_WRITE_COUNTER(val, 3);
		PMU_WRITE_COUNTER(val, 4);
		PMU_WRITE_COUNTER(val, 5);
		PMU_WRITE_COUNTER(val, 6);
		PMU_WRITE_COUNTER(val, 7);
		PMU_WRITE_COUNTER(val, 8);
		PMU_WRITE_COUNTER(val, 9);
	}

	BUG();
}
#define get_bit_offset(index, mask)	(__ffs(mask) + (index))
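/*
 * get_bit_offset() turns a counter index relative to a multi-bit register
 * field into an absolute bit number: __ffs(mask) gives the position of the
 * field's lowest bit, to which the index is added. For example, counter 3
 * within PMCR0_CNT_ENABLE_0_7 maps to bit __ffs(PMCR0_CNT_ENABLE_0_7) + 3.
 */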
static void __m1_pmu_enable_counter(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}
static void m1_pmu_enable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, true);
}

static void m1_pmu_disable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, false);
}
static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}
static void m1_pmu_enable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, true);
}

static void m1_pmu_disable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, false);
}
static void m1_pmu_configure_counter(unsigned int index, u8 event,
				     bool user, bool kernel)
{
	u64 val, user_bit, kernel_bit;
	int shift;

	switch (index) {
	case 0 ... 7:
		user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
		kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
		break;
	case 8 ... 9:
		user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
		kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);

	if (user)
		val |= user_bit;
	else
		val &= ~user_bit;

	if (kernel)
		val |= kernel_bit;
	else
		val &= ~kernel_bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);

	/*
	 * Counters 0 and 1 have fixed events. For anything else,
	 * place the event at the expected location in the relevant
	 * register (PMESR0 holds the event configuration for counters
	 * 2-5, resp. PMESR1 for counters 6-9).
	 */
	switch (index) {
	case 0 ... 1:
		break;
	case 2 ... 5:
		shift = (index - 2) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
		break;
	case 6 ... 9:
		shift = (index - 6) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
		break;
	}
}
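/*
 * As a worked example of the layout above: placing an event on counter 7
 * computes shift = (7 - 6) * 8 = 8, so its 8-bit event number lands in
 * PMESR1[15:8], while counter 2 uses PMESR0[7:0].
 */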
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
	bool user, kernel;
	u8 evt;

	evt = event->hw.config_base & M1_PMU_CFG_EVENT;
	user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
	kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;

	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();

	m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
	m1_pmu_enable_counter(event->hw.idx);
	m1_pmu_enable_counter_interrupt(event->hw.idx);
	isb();
}
static void m1_pmu_disable_event(struct perf_event *event)
{
	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();
}
static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	u64 overflow, state;
	int idx;

	overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
	if (!overflow) {
		/* Spurious interrupt? */
		state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
		state &= ~PMCR0_IACT;
		write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
		isb();
		return IRQ_NONE;
	}

	cpu_pmu->stop(cpu_pmu);

	regs = get_irq_regs();

	for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) {
		struct perf_event *event = cpuc->events[idx];
		struct perf_sample_data data;

		if (!event)
			continue;

		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, event->hw.last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			m1_pmu_disable_event(event);
	}

	cpu_pmu->start(cpu_pmu);

	return IRQ_HANDLED;
}
static u64 m1_pmu_read_counter(struct perf_event *event)
{
	return m1_pmu_read_hw_counter(event->hw.idx);
}

static void m1_pmu_write_counter(struct perf_event *event, u64 value)
{
	m1_pmu_write_hw_counter(value, event->hw.idx);
	isb();
}
static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
	unsigned long affinity = m1_pmu_event_affinity[evtype];
	int idx;

	/*
	 * Place the event on the first free counter that can count
	 * this event.
	 *
	 * We could do a better job if we had a view of all the events
	 * counting on the PMU at any given time, and by placing the
	 * most constraining events first.
	 */
	for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	return -EAGAIN;
}
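/*
 * For instance, an event with affinity ONLY_5_6_7 (such as
 * M1_PMU_PERFCTR_INST_BRANCH) can only ever be placed on counters 5, 6 or 7
 * by the loop above, whichever is free first.
 */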
static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}
static void __m1_pmu_set_mode(u8 mode)
{
	u64 val;

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
	val &= ~(PMCR0_IMODE | PMCR0_IACT);
	val |= FIELD_PREP(PMCR0_IMODE, mode);
	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
	isb();
}
static void m1_pmu_start(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_FIQ);
}

static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_OFF);
}
static int m1_pmu_map_event(struct perf_event *event)
{
	/*
	 * Although the counters are 48bit wide, bit 47 is what
	 * triggers the overflow interrupt. Advertise the counters
	 * being 47bit wide to mimic the behaviour of the ARM PMU.
	 */
	event->hw.flags |= ARMPMU_EVT_47BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
static int m2_pmu_map_event(struct perf_event *event)
{
	/*
	 * Same deal as the above, except that M2 has 64bit counters.
	 * Which, as far as we're concerned, actually means 63 bits.
	 * Yes, this is getting awkward.
	 */
	event->hw.flags |= ARMPMU_EVT_63BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
static void m1_pmu_reset(void *info)
{
	int i;

	__m1_pmu_set_mode(PMCR0_IMODE_OFF);

	for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
		m1_pmu_disable_counter(i);
		m1_pmu_disable_counter_interrupt(i);
		m1_pmu_write_hw_counter(0, i);
	}

	isb();
}
static int m1_pmu_set_event_filter(struct hw_perf_event *event,
				   struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (!attr->exclude_guest) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}
	if (!attr->exclude_kernel)
		config_base |= M1_PMU_CFG_COUNT_KERNEL;
	if (!attr->exclude_user)
		config_base |= M1_PMU_CFG_COUNT_USER;

	event->config_base = config_base;

	return 0;
}
static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
{
	cpu_pmu->handle_irq	  = m1_pmu_handle_irq;
	cpu_pmu->enable		  = m1_pmu_enable_event;
	cpu_pmu->disable	  = m1_pmu_disable_event;
	cpu_pmu->read_counter	  = m1_pmu_read_counter;
	cpu_pmu->write_counter	  = m1_pmu_write_counter;
	cpu_pmu->get_event_idx	  = m1_pmu_get_event_idx;
	cpu_pmu->clear_event_idx  = m1_pmu_clear_event_idx;
	cpu_pmu->start		  = m1_pmu_start;
	cpu_pmu->stop		  = m1_pmu_stop;

	if (flags & ARMPMU_EVT_47BIT)
		cpu_pmu->map_event = m1_pmu_map_event;
	else if (flags & ARMPMU_EVT_63BIT)
		cpu_pmu->map_event = m2_pmu_map_event;
	else
		return WARN_ON(-EINVAL);

	cpu_pmu->reset		  = m1_pmu_reset;
	cpu_pmu->set_event_filter = m1_pmu_set_event_filter;

	bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;

	return 0;
}
/* Device driver gunk */
static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_icestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}

static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_firestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}

static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_avalanche_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}

static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_blizzard_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
static const struct of_device_id m1_pmu_of_device_ids[] = {
	{ .compatible = "apple,avalanche-pmu", .data = m2_pmu_avalanche_init, },
	{ .compatible = "apple,blizzard-pmu", .data = m2_pmu_blizzard_init, },
	{ .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, },
	{ .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, },
	{ },
};
MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
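/*
 * The driver binds against one devicetree node per core type. An
 * illustrative (not authoritative) fragment would look like:
 *
 *	pmu-e {
 *		compatible = "apple,icestorm-pmu";
 *		interrupts = <...>;
 *	};
 */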
static int m1_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
}

static struct platform_driver m1_pmu_driver = {
	.driver = {
		.name			= "apple-m1-cpu-pmu",
		.of_match_table		= m1_pmu_of_device_ids,
		.suppress_bind_attrs	= true,
	},
	.probe = m1_pmu_device_probe,
};

module_platform_driver(m1_pmu_driver);