// SPDX-License-Identifier: GPL-2.0
#include <linux/nospec.h>
#include <linux/perf_event.h>
#include <asm/intel-family.h>
/*
 * Logical event ids for the MSR PMU.  The numeric value doubles as the
 * "event=0x.." encoding advertised via sysfs and as the index into the
 * msr[] probe table below, so the two must stay in sync.
 */
enum perf_msr_id {
	PERF_MSR_TSC			= 0,
	PERF_MSR_APERF			= 1,
	PERF_MSR_MPERF			= 2,
	PERF_MSR_PPERF			= 3,
	PERF_MSR_SMI			= 4,
	PERF_MSR_PTSC			= 5,
	PERF_MSR_IRPERF			= 6,
	PERF_MSR_THERM			= 7,
	PERF_MSR_THERM_SNAP		= 8,
	PERF_MSR_THERM_UNIT		= 9,
	PERF_MSR_EVENT_MAX,
};
19 static bool test_aperfmperf(int idx
)
21 return boot_cpu_has(X86_FEATURE_APERFMPERF
);
24 static bool test_ptsc(int idx
)
26 return boot_cpu_has(X86_FEATURE_PTSC
);
29 static bool test_irperf(int idx
)
31 return boot_cpu_has(X86_FEATURE_IRPERF
);
34 static bool test_therm_status(int idx
)
36 return boot_cpu_has(X86_FEATURE_DTHERM
);
/*
 * MSR_SMI_COUNT and MSR_PPERF are Intel family-6 only; report whether
 * this model supports the requested event.  The long runs of case labels
 * are intentional fallthroughs: each group shares one capability check.
 */
static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:

	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:

	case INTEL_FAM6_ATOM_SILVERMONT1:
	case INTEL_FAM6_ATOM_SILVERMONT2:
	case INTEL_FAM6_ATOM_AIRMONT:

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_DENVERTON:

	case INTEL_FAM6_ATOM_GEMINI_LAKE:

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		/* Pre-Skylake (and Atom/Phi) models: SMI count only. */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		/* Skylake/Kabylake additionally expose PPERF. */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}
/*
 * One probe-table entry: the MSR address to read, its sysfs event
 * attribute (cleared at init when the MSR is unavailable), and an
 * optional boot-time availability test (NULL => always present).
 */
struct perf_msr {
	u64 msr;
	struct perf_pmu_events_attr *attr;
	bool (*test)(int idx);
};
/*
 * sysfs event strings.  The "event=0x.." values must match the
 * perf_msr_id enum.  cpu_thermal_margin is additionally marked as a
 * snapshot (non-accumulating) value with unit "C".
 */
PMU_EVENT_ATTR_STRING(tsc,				evattr_tsc,		"event=0x00");
PMU_EVENT_ATTR_STRING(aperf,				evattr_aperf,		"event=0x01");
PMU_EVENT_ATTR_STRING(mperf,				evattr_mperf,		"event=0x02");
PMU_EVENT_ATTR_STRING(pperf,				evattr_pperf,		"event=0x03");
PMU_EVENT_ATTR_STRING(smi,				evattr_smi,		"event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,				evattr_ptsc,		"event=0x05");
PMU_EVENT_ATTR_STRING(irperf,				evattr_irperf,		"event=0x06");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin,		evattr_therm,		"event=0x07");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot,	evattr_therm_snap,	"1");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit,		evattr_therm_unit,	"C");
/*
 * Probe table, indexed by perf_msr_id.  A .msr of 0 (the tsc entry) is
 * handled specially by msr_read_counter(); a NULL .test means the event
 * needs no feature probe.
 */
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]		= { 0,				&evattr_tsc,		NULL,			},
	[PERF_MSR_APERF]	= { MSR_IA32_APERF,		&evattr_aperf,		test_aperfmperf,	},
	[PERF_MSR_MPERF]	= { MSR_IA32_MPERF,		&evattr_mperf,		test_aperfmperf,	},
	[PERF_MSR_PPERF]	= { MSR_PPERF,			&evattr_pperf,		test_intel,		},
	[PERF_MSR_SMI]		= { MSR_SMI_COUNT,		&evattr_smi,		test_intel,		},
	[PERF_MSR_PTSC]		= { MSR_F15H_PTSC,		&evattr_ptsc,		test_ptsc,		},
	[PERF_MSR_IRPERF]	= { MSR_F17H_IRPERF,		&evattr_irperf,		test_irperf,		},
	[PERF_MSR_THERM]	= { MSR_IA32_THERM_STATUS,	&evattr_therm,		test_therm_status,	},
	[PERF_MSR_THERM_SNAP]	= { MSR_IA32_THERM_STATUS,	&evattr_therm_snap,	test_therm_status,	},
	[PERF_MSR_THERM_UNIT]	= { MSR_IA32_THERM_STATUS,	&evattr_therm_unit,	test_therm_status,	},
};
/*
 * sysfs "events" directory.  Only the always-present tsc attribute is
 * listed statically; msr_init() appends the attrs of every MSR that
 * probes successfully and NULL-terminates the array.
 */
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	&evattr_tsc.attr.attr,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};
/* sysfs "format" directory: the whole 64-bit config selects the event. */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};
/* All sysfs groups, hooked up via pmu_msr.attr_groups below. */
static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
154 static int msr_event_init(struct perf_event
*event
)
156 u64 cfg
= event
->attr
.config
;
158 if (event
->attr
.type
!= event
->pmu
->type
)
161 if (cfg
>= PERF_MSR_EVENT_MAX
)
164 /* unsupported modes and filters */
165 if (event
->attr
.exclude_user
||
166 event
->attr
.exclude_kernel
||
167 event
->attr
.exclude_hv
||
168 event
->attr
.exclude_idle
||
169 event
->attr
.exclude_host
||
170 event
->attr
.exclude_guest
||
171 event
->attr
.sample_period
) /* no sampling */
178 event
->hw
.event_base
= msr
[cfg
].msr
;
179 event
->hw
.config
= cfg
;
/*
 * Read the raw counter for this event.  event_base == 0 identifies the
 * tsc event (see the msr[] table), which has no MSR to read.
 * NOTE(review): the non-MSR branch was elided in this chunk and is
 * reconstructed here as a TSC read -- confirm against the original.
 */
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		rdtscll(now);

	return now;
}
/*
 * Fold the current counter value into event->count.  prev_count may be
 * concurrently updated from NMI context, hence the cmpxchg retry loop.
 */
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		/* SMI count is 32 bits wide; re-sign-extend the wrapped delta. */
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}
221 static void msr_event_start(struct perf_event
*event
, int flags
)
223 u64 now
= msr_read_counter(event
);
225 local64_set(&event
->hw
.prev_count
, now
);
/* .stop callback: fold in the final delta; no hardware to actually stop. */
static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}
/* .del callback: same as stop with a forced final update. */
static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}
238 static int msr_event_add(struct perf_event
*event
, int flags
)
240 if (flags
& PERF_EF_START
)
241 msr_event_start(event
, flags
);
/*
 * The MSR PMU.  PERF_PMU_CAP_NO_INTERRUPT: these counters cannot raise
 * overflow interrupts, so sampling is rejected in msr_event_init().
 */
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};
/*
 * Boot-time init: probe each optional MSR once, drop unavailable ones
 * from the sysfs event list, then register the PMU.
 */
static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs; starts past PERF_MSR_TSC, whose .test is NULL. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;	/* hide it from sysfs */
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);