/*
 * perf_event_intel_cstate.c: support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * This file exports cstate-related free running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events and use them from tools without special MSR support.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *  MSR_CORE_C1_RES: CORE C1 Residency Counter
 *                   Available model: SLM,AMT
 *                   Scope: Core (each processor core has a MSR)
 *  MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *                         Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                         Scope: Core
 *  MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *                         Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                         Scope: Core
 *  MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *                         Available model: SNB,IVB,HSW,BDW,SKL
 *                         Scope: Core
 *  MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *                        Available model: SNB,IVB,HSW,BDW,SKL
 *                        Scope: Package (physical package)
 *  MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *                        Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                        Scope: Package (physical package)
 *  MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *                        Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                        Scope: Package (physical package)
 *  MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *                        Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                        Scope: Package (physical package)
 *  MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *                        Available model: HSW ULT only
 *                        Scope: Package (physical package)
 *  MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *                        Available model: HSW ULT only
 *                        Scope: Package (physical package)
 *  MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *                         Available model: HSW ULT only
 *                         Scope: Package (physical package)
 */
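
/*
 * Example usage (a sketch): once registered, the PMUs show up under
 * /sys/bus/event_source/devices/cstate_core/ and
 * /sys/bus/event_source/devices/cstate_pkg/, and the counters can be
 * read system-wide with, e.g.:
 *
 *      perf stat -a -e cstate_core/c6-residency/,cstate_pkg/c6-residency/ sleep 1
 *
 * The event names and "event=0xNN" config values are defined by the
 * PMU_EVENT_ATTR_STRING() entries below; sampling requests are rejected
 * in cstate_pmu_event_init().
 */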

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include "perf_event.h"
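
/*
 * DEFINE_CSTATE_FORMAT_ATTR() generates a read-only sysfs attribute whose
 * contents describe the layout of the event config field (e.g. the
 * "config:0-63" string used for both PMUs below).
 */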
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)                \
static ssize_t __cstate_##_var##_show(struct kobject *kobj,            \
                                struct kobj_attribute *attr,           \
                                char *page)                            \
{                                                                      \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                    \
        return sprintf(page, _format "\n");                            \
}                                                                      \
static struct kobj_attribute format_attr_##_var =                      \
        __ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf);
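
/*
 * One entry per residency event: the MSR backing it, the sysfs event
 * attribute that advertises it, and a callback checking whether the
 * current CPU model supports it.
 */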
struct perf_cstate_msr {
        u64                             msr;
        struct perf_pmu_events_attr     *attr;
        bool                            (*test)(int idx);
};

/* cstate_core PMU */

static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_id {
        PERF_CSTATE_CORE_C1_RES = 0,
        PERF_CSTATE_CORE_C3_RES,
        PERF_CSTATE_CORE_C6_RES,
        PERF_CSTATE_CORE_C7_RES,

        PERF_CSTATE_CORE_EVENT_MAX,
};

bool test_core(int idx)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
                return false;

        switch (boot_cpu_data.x86_model) {
        case 30: /* 45nm Nehalem    */
        case 26: /* 45nm Nehalem-EP */
        case 46: /* 45nm Nehalem-EX */

        case 37: /* 32nm Westmere    */
        case 44: /* 32nm Westmere-EP */
        case 47: /* 32nm Westmere-EX */
                if (idx == PERF_CSTATE_CORE_C3_RES ||
                    idx == PERF_CSTATE_CORE_C6_RES)
                        return true;
                break;
        case 42: /* 32nm SandyBridge         */
        case 45: /* 32nm SandyBridge-E/EN/EP */

        case 58: /* 22nm IvyBridge       */
        case 62: /* 22nm IvyBridge-EP/EX */

        case 60: /* 22nm Haswell Core   */
        case 63: /* 22nm Haswell Server */
        case 69: /* 22nm Haswell ULT    */
        case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

        case 61: /* 14nm Broadwell Core-M */
        case 86: /* 14nm Broadwell Xeon D */
        case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
        case 79: /* 14nm Broadwell Server */

        case 78: /* 14nm Skylake Mobile  */
        case 94: /* 14nm Skylake Desktop */
                if (idx == PERF_CSTATE_CORE_C3_RES ||
                    idx == PERF_CSTATE_CORE_C6_RES ||
                    idx == PERF_CSTATE_CORE_C7_RES)
                        return true;
                break;
        case 55: /* 22nm Atom "Silvermont"                */
        case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
        case 76: /* 14nm Atom "Airmont"                   */
                if (idx == PERF_CSTATE_CORE_C1_RES ||
                    idx == PERF_CSTATE_CORE_C6_RES)
                        return true;
                break;
        }

        return false;
}

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

static struct perf_cstate_msr core_msr[] = {
        [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,        &evattr_cstate_core_c1, test_core, },
        [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,  &evattr_cstate_core_c3, test_core, },
        [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,  &evattr_cstate_core_c6, test_core, },
        [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,  &evattr_cstate_core_c7, test_core, },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
        NULL, /* filled in at runtime by cstate_probe_msr() */
};

static struct attribute_group core_events_attr_group = {
        .name = "events",
        .attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
        &format_attr_core_event.attr,
        NULL,
};

static struct attribute_group core_format_attr_group = {
        .name = "format",
        .attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group cpumask_attr_group = {
        .attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
        &core_events_attr_group,
        &core_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_core PMU end */

/* cstate_pkg PMU */

static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_id {
        PERF_CSTATE_PKG_C2_RES = 0,
        PERF_CSTATE_PKG_C3_RES,
        PERF_CSTATE_PKG_C6_RES,
        PERF_CSTATE_PKG_C7_RES,
        PERF_CSTATE_PKG_C8_RES,
        PERF_CSTATE_PKG_C9_RES,
        PERF_CSTATE_PKG_C10_RES,

        PERF_CSTATE_PKG_EVENT_MAX,
};

bool test_pkg(int idx)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
                return false;

        switch (boot_cpu_data.x86_model) {
        case 30: /* 45nm Nehalem    */
        case 26: /* 45nm Nehalem-EP */
        case 46: /* 45nm Nehalem-EX */

        case 37: /* 32nm Westmere    */
        case 44: /* 32nm Westmere-EP */
        case 47: /* 32nm Westmere-EX */
                if (idx == PERF_CSTATE_PKG_C3_RES ||
                    idx == PERF_CSTATE_PKG_C6_RES ||
                    idx == PERF_CSTATE_PKG_C7_RES)
                        return true;
                break;
        case 42: /* 32nm SandyBridge         */
        case 45: /* 32nm SandyBridge-E/EN/EP */

        case 58: /* 22nm IvyBridge       */
        case 62: /* 22nm IvyBridge-EP/EX */

        case 60: /* 22nm Haswell Core   */
        case 63: /* 22nm Haswell Server */
        case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

        case 61: /* 14nm Broadwell Core-M */
        case 86: /* 14nm Broadwell Xeon D */
        case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
        case 79: /* 14nm Broadwell Server */

        case 78: /* 14nm Skylake Mobile  */
        case 94: /* 14nm Skylake Desktop */
                if (idx == PERF_CSTATE_PKG_C2_RES ||
                    idx == PERF_CSTATE_PKG_C3_RES ||
                    idx == PERF_CSTATE_PKG_C6_RES ||
                    idx == PERF_CSTATE_PKG_C7_RES)
                        return true;
                break;
        case 55: /* 22nm Atom "Silvermont"                */
        case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
        case 76: /* 14nm Atom "Airmont"                   */
                if (idx == PERF_CSTATE_PKG_C6_RES)
                        return true;
                break;
        case 69: /* 22nm Haswell ULT */
                if (idx == PERF_CSTATE_PKG_C2_RES ||
                    idx == PERF_CSTATE_PKG_C3_RES ||
                    idx == PERF_CSTATE_PKG_C6_RES ||
                    idx == PERF_CSTATE_PKG_C7_RES ||
                    idx == PERF_CSTATE_PKG_C8_RES ||
                    idx == PERF_CSTATE_PKG_C9_RES ||
                    idx == PERF_CSTATE_PKG_C10_RES)
                        return true;
                break;
        }

        return false;
}

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
        [PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,  &evattr_cstate_pkg_c2,  test_pkg, },
        [PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,  &evattr_cstate_pkg_c3,  test_pkg, },
        [PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,  &evattr_cstate_pkg_c6,  test_pkg, },
        [PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,  &evattr_cstate_pkg_c7,  test_pkg, },
        [PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,  &evattr_cstate_pkg_c8,  test_pkg, },
        [PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,  &evattr_cstate_pkg_c9,  test_pkg, },
        [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &evattr_cstate_pkg_c10, test_pkg, },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
        NULL, /* filled in at runtime by cstate_probe_msr() */
};

static struct attribute_group pkg_events_attr_group = {
        .name = "events",
        .attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
        &format_attr_pkg_event.attr,
        NULL,
};

static struct attribute_group pkg_format_attr_group = {
        .name = "format",
        .attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
        &pkg_events_attr_group,
        &pkg_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_pkg PMU end */

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu == &cstate_core_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
        else if (pmu == &cstate_pkg_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
        else
                return 0;
}
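
/*
 * event->attr.config is used directly as the index into core_msr[] or
 * pkg_msr[]; events whose MSR failed the probe have a NULL attr and are
 * rejected here.
 */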

static int cstate_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (event->pmu == &cstate_core_pmu) {
                if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
                        return -EINVAL;
                if (!core_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = core_msr[cfg].msr;
        } else if (event->pmu == &cstate_pkg_pmu) {
                if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
                        return -EINVAL;
                if (!pkg_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = pkg_msr[cfg].msr;
        } else {
                return -ENOENT;
        }

        /* must be done before validate_group */
        event->hw.config = cfg;

        return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
        u64 val;

        rdmsrl(event->hw.event_base, val);
        return val;
}
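
/*
 * Fold the delta since the last read into event->count. The cmpxchg loop
 * retries if prev_count was updated concurrently by another reader.
 */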

static void cstate_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = cstate_pmu_read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
        local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
        cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
        cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
        if (mode & PERF_EF_START)
                cstate_pmu_event_start(event, mode);

        return 0;
}
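
/*
 * When the designated reader CPU of a core/package goes offline, hand the
 * cpumask slot and any active events over to another online CPU of the
 * same core/package, if one exists.
 */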

static void cstate_cpu_exit(int cpu)
{
        int i, id, target;

        /* cpu exit for cstate core */
        if (has_cstate_core) {
                id = topology_core_id(cpu);
                target = -1;

                /* Find another online CPU on the same core */
                for_each_online_cpu(i) {
                        if (i == cpu)
                                continue;
                        if (id == topology_core_id(i)) {
                                target = i;
                                break;
                        }
                }
                if (cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask) && target >= 0)
                        cpumask_set_cpu(target, &cstate_core_cpu_mask);
                WARN_ON(cpumask_empty(&cstate_core_cpu_mask));
                if (target >= 0)
                        perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
        }

        /* cpu exit for cstate pkg */
        if (has_cstate_pkg) {
                id = topology_physical_package_id(cpu);
                target = -1;

                /* Find another online CPU in the same package */
                for_each_online_cpu(i) {
                        if (i == cpu)
                                continue;
                        if (id == topology_physical_package_id(i)) {
                                target = i;
                                break;
                        }
                }
                if (cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask) && target >= 0)
                        cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
                WARN_ON(cpumask_empty(&cstate_pkg_cpu_mask));
                if (target >= 0)
                        perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
        }
}
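
/*
 * Nominate the first CPU that comes up in each core/package as the
 * designated reader for that core/package.
 */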

static void cstate_cpu_init(int cpu)
{
        int i, id;

        /* cpu init for cstate core */
        if (has_cstate_core) {
                id = topology_core_id(cpu);
                for_each_cpu(i, &cstate_core_cpu_mask) {
                        if (id == topology_core_id(i))
                                break;
                }
                if (i >= nr_cpu_ids)
                        cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
        }

        /* cpu init for cstate pkg */
        if (has_cstate_pkg) {
                id = topology_physical_package_id(cpu);
                for_each_cpu(i, &cstate_pkg_cpu_mask) {
                        if (id == topology_physical_package_id(i))
                                break;
                }
                if (i >= nr_cpu_ids)
                        cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
        }
}
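
/*
 * CPU hotplug callback: pick a reader when a CPU starts up, hand over
 * before a reader goes down.
 */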

static int cstate_cpu_notifier(struct notifier_block *self,
                               unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
                cstate_cpu_init(cpu);
                break;
        case CPU_UP_CANCELED:
        case CPU_DYING:
                break;
        case CPU_DOWN_PREPARE:
                cstate_cpu_exit(cpu);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

/*
 * Probe the cstate events and insert the available ones into the sysfs attrs.
 * Return false if there are no available events.
 */
static bool cstate_probe_msr(struct perf_cstate_msr *msr,
                             struct attribute **events_attrs,
                             int max_event_nr)
{
        int i, j = 0;
        u64 val;

        /* Probe the cstate events. */
        for (i = 0; i < max_event_nr; i++) {
                if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
                        msr[i].attr = NULL;
        }

        /* List remaining events in the sysfs attrs. */
        for (i = 0; i < max_event_nr; i++) {
                if (msr[i].attr)
                        events_attrs[j++] = &msr[i].attr->attr.attr;
        }
        events_attrs[j] = NULL;

        return (j > 0) ? true : false;
}

static int __init cstate_init(void)
{
        /* SLM has different MSR for PKG C6 */
        switch (boot_cpu_data.x86_model) {
        case 55:
        case 76:
        case 77:
                pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
        }

        if (cstate_probe_msr(core_msr, core_events_attrs, PERF_CSTATE_CORE_EVENT_MAX))
                has_cstate_core = true;

        if (cstate_probe_msr(pkg_msr, pkg_events_attrs, PERF_CSTATE_PKG_EVENT_MAX))
                has_cstate_pkg = true;

        return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static void __init cstate_cpumask_init(void)
{
        int cpu;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu)
                cstate_cpu_init(cpu);

        __perf_cpu_notifier(cstate_cpu_notifier);

        cpu_notifier_register_done();
}
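
/*
 * Both PMUs use perf_invalid_context (system-wide counting only) and
 * advertise PERF_PMU_CAP_NO_INTERRUPT, since the residency MSRs are free
 * running and cannot raise an overflow interrupt.
 */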

static struct pmu cstate_core_pmu = {
        .attr_groups    = core_attr_groups,
        .name           = "cstate_core",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add, /* must have */
        .del            = cstate_pmu_event_del, /* must have */
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu cstate_pkg_pmu = {
        .attr_groups    = pkg_attr_groups,
        .name           = "cstate_pkg",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add, /* must have */
        .del            = cstate_pmu_event_del, /* must have */
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
};

static void __init cstate_pmus_register(void)
{
        int err;

        if (has_cstate_core) {
                err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
                if (WARN_ON(err))
                        pr_info("Failed to register PMU %s error %d\n",
                                cstate_core_pmu.name, err);
        }

        if (has_cstate_pkg) {
                err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
                if (WARN_ON(err))
                        pr_info("Failed to register PMU %s error %d\n",
                                cstate_pkg_pmu.name, err);
        }
}
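
/*
 * The residency MSRs are typically not available to guests, so bail out
 * early when running under a hypervisor.
 */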

static int __init cstate_pmu_init(void)
{
        int err;

        if (cpu_has_hypervisor)
                return -ENODEV;

        err = cstate_init();
        if (err)
                return err;

        cstate_cpumask_init();
        cstate_pmus_register();
        return 0;
}
device_initcall(cstate_pmu_init);