#include <linux/perf_event.h>

enum perf_msr_id {
	PERF_MSR_TSC			= 0,
	PERF_MSR_APERF			= 1,
	PERF_MSR_MPERF			= 2,
	PERF_MSR_PPERF			= 3,
	PERF_MSR_SMI			= 4,

	PERF_MSR_EVENT_MAX,
};
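
/*
 * The enum values above double as the perf config values advertised via
 * sysfs (see the PMU_EVENT_ATTR_STRING() definitions below).  Once the PMU
 * is registered as "msr" in msr_init(), the surviving counters can be
 * counted with e.g.:
 *
 *	perf stat -e msr/tsc/ -e msr/aperf/ -a sleep 1
 *
 * (illustrative invocation; which events actually exist depends on the CPU
 * probes in test_aperfmperf() and test_intel() below).
 */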

static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */

	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core   */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT    */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */

	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
	case 76: /* 14nm Atom "Airmont"                   */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case 78: /* 14nm Skylake Mobile  */
	case 94: /* 14nm Skylake Desktop */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}
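
/*
 * Note that the switch above is an explicit allow-list of Intel model
 * numbers: MSR_PPERF and MSR_SMI_COUNT are only offered on models known to
 * implement them, whereas APERF/MPERF availability is taken from the CPUID
 * feature bit checked in test_aperfmperf().
 */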

struct perf_msr {
	u64				msr;
	struct perf_pmu_events_attr	*attr;
	bool				(*test)(int idx);
};

PMU_EVENT_ATTR_STRING(tsc,   evattr_tsc,   "event=0x00");
PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
PMU_EVENT_ATTR_STRING(smi,   evattr_smi,   "event=0x04");

static struct perf_msr msr[] = {
	[PERF_MSR_TSC]   = { 0,			&evattr_tsc,	NULL,		 },
	[PERF_MSR_APERF] = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
	[PERF_MSR_MPERF] = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
	[PERF_MSR_PPERF] = { MSR_PPERF,		&evattr_pperf,	test_intel,	 },
	[PERF_MSR_SMI]   = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
};
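
/*
 * The PMU_EVENT_ATTR_STRING() aliases above end up in the "events" sysfs
 * group (/sys/bus/event_source/devices/msr/events/) once the PMU is
 * registered.  msr_init() probes this table at boot and clears ->attr for
 * counters whose test fails or whose MSR cannot be read, so unavailable
 * events are neither listed in sysfs nor accepted by msr_event_init().
 */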

static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	/* filled in and NULL-terminated by msr_init() */
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* Counters that failed the boot-time probe have a NULL attr. */
	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx		= -1;
	event->hw.event_base	= msr[cfg].msr;
	event->hw.config	= cfg;

	return 0;
}

static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered();	/* event_base == 0 is the TSC pseudo-event */

	return now;
}

static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	/* The SMI count (MSR_SMI_COUNT) is a 32-bit counter; sign-extend deltas. */
	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}

static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}
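
/*
 * These are free-running counters with no overflow interrupt, hence
 * PERF_PMU_CAP_NO_INTERRUPT: only counting is supported, and sampling is
 * rejected in msr_event_init() via the sample_period check.
 */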

static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,

	.attr_groups	= attr_groups,

	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,

	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);