// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>
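
/*
 * Expose selected free-running, read-only MSRs (TSC, APERF, MPERF, PPERF,
 * SMI count, PTSC, IRPERF) as perf events. These counters cannot raise an
 * interrupt on overflow, so the PMU below is count-only
 * (PERF_PMU_CAP_NO_INTERRUPT) and msr_event_init() rejects sampling.
 */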
enum perf_msr_id {
	PERF_MSR_TSC		= 0,
	PERF_MSR_APERF		= 1,
	PERF_MSR_MPERF		= 2,
	PERF_MSR_PPERF		= 3,
	PERF_MSR_SMI		= 4,
	PERF_MSR_PTSC		= 5,
	PERF_MSR_IRPERF		= 6,

	PERF_MSR_EVENT_MAX,
};

static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}
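
/*
 * PPERF and SMI_COUNT are Intel model-specific registers without a CPUID
 * feature bit, so their availability is decided by an explicit list of
 * family-6 models rather than a feature check.
 */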
static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:

	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_X:
	case INTEL_FAM6_ATOM_AIRMONT:

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_X:

	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}
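
/*
 * One probe descriptor per event: the MSR address to read, the sysfs
 * event attribute, and an optional availability test. A NULL test means
 * the event is always present (the TSC slot relies on this).
 */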
struct perf_msr {
	u64				msr;
	struct perf_pmu_events_attr	*attr;
	bool				(*test)(int idx);
};
PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,	NULL,		 },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,	test_intel,	 },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,	test_ptsc,	 },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf,	test_irperf,	 },
};
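
/*
 * Filled in by msr_init() with only the MSRs that probe successfully;
 * the +1 keeps room for the NULL terminator sysfs requires.
 */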
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
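
/*
 * attr.config is user-controlled, so after the range check below it is
 * clamped with array_index_nospec() to stop speculative out-of-bounds
 * indexing of msr[] (Spectre v1 hardening, hence <linux/nospec.h>).
 */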
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);

	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}
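
/*
 * event_base 0 denotes the TSC slot: the TSC is read with its dedicated
 * instruction rather than through rdmsrl().
 */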
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		rdtscll(now);

	return now;
}
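
/*
 * Lockless accumulation: if an NMI updates prev_count between the read
 * and the cmpxchg, retry. MSR_SMI_COUNT is only 32 bits wide, so its
 * delta is sign-extended from bit 31 to handle wraparound.
 */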
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}
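
/* Starting a count just records the current value as the baseline. */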
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}
static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}
static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}
static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}
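
/*
 * Events live in the software context; ->stop and ->del both funnel into
 * msr_event_update(), so stopping an event folds the final delta in.
 */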
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};
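
/*
 * Boot-time probe: an MSR is kept only if its test() hook passes and a
 * fault-safe rdmsrl_safe() read succeeds, since feature bits alone (in
 * particular under virtualization) cannot prove a R/O MSR exists.
 */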
static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);
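
/*
 * Usage sketch (not part of this driver; a minimal userspace example,
 * assuming the standard perf_event_open() interface): the dynamic PMU
 * type is published in /sys/bus/event_source/devices/msr/type, and
 * attr.config is the event index from the strings above (0x04 = smi).
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr = { 0 };
 *		long long count;
 *		unsigned int type;
 *		int fd;
 *
 *		FILE *f = fopen("/sys/bus/event_source/devices/msr/type", "r");
 *		if (!f || fscanf(f, "%u", &type) != 1)
 *			return 1;
 *		fclose(f);
 *
 *		attr.size = sizeof(attr);
 *		attr.type = type;	// the "msr" PMU registered above
 *		attr.config = 0x04;	// smi, per the event strings
 *		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *		if (fd < 0)
 *			return 1;
 *		sleep(1);
 *		read(fd, &count, sizeof(count));
 *		printf("smi delta: %lld\n", count);
 *		return 0;
 *	}
 *
 * "perf stat -e msr/smi/ -a sleep 1" exercises the same path from the
 * perf tool.
 */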