/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and tools can use them without special MSR support.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT,GLM,CNL,TNT
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *						CNL,KBL,CML,TNT
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
 *						TNT,RKL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *						ICL,TGL,RKL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *			      perf code: 0x00
 *			      Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *					       KBL,CML,ICL,TGL,TNT,RKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *			      perf code: 0x01
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *					       GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *			      perf code: 0x02
 *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *					       SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
 *					       TNT,RKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *			      perf code: 0x03
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
 *					       KBL,CML,ICL,TGL,RKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *			      perf code: 0x04
 *			      Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *			      perf code: 0x05
 *			      Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
 *						TNT,RKL
 *			       Scope: Package (physical package)
 */
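/*
 * Example usage (a sketch, assuming the PMUs below probed successfully
 * on the running model; the event strings come from the
 * PMU_EVENT_ATTR_STRING() definitions in this file):
 *
 *	# count core C6 and package C6 residency system-wide for 1s
 *	perf stat -a -e cstate_core/c6-residency/,cstate_pkg/c6-residency/ -- sleep 1
 */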
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct device *dev,	\
				struct device_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct device_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
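/*
 * For instance, DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63")
 * (used below) defines format_attr_core_event, a read-only sysfs
 * attribute that should show up as
 * /sys/bus/event_source/devices/cstate_core/format/event and print
 * "config:0-63".
 */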
static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);
/* Model -> events mapping */
struct cstate_model {
	unsigned long		core_events;
	unsigned long		pkg_events;
	unsigned long		quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};
/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}
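/*
 * The probe data handed to test_msr() is the model's event bitmap
 * (&cm->core_events or &cm->pkg_events, see cstate_probe() below), so
 * an MSR is only probed when the current model advertises its event.
 */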
static struct perf_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
};
static struct attribute *attrs_empty[] = {
	NULL,
};

/*
 * There are no default events, but we need to create an "events"
 * group (with empty attrs) before updating it with the detected
 * events.
 */
static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};
/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
};
static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}
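/*
 * Event initialization: validate the config value, reject sampling and
 * per-task use, map the (nospec-sanitized) config index to its backing
 * MSR, and redirect event->cpu to the designated reader of the
 * core/package.
 */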
static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!(core_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!(pkg_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_die_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}
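/*
 * Accumulate the delta since the last read into event->count. The
 * cmpxchg loop retries when another updater raced in between; only the
 * winner of the exchange applies its delta, so no increment is lost or
 * counted twice.
 */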
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}
static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if the exiting cpu is the designated reader. If so, migrate the
 * events to a valid target, if one is available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_die_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}
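/*
 * The cpumask sysfs attribute defined above exposes these designated
 * readers to userspace; the perf tool is expected to open the events on
 * those CPUs, and cstate_cpu_exit() migrates them if a reader goes
 * offline.
 */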
static const struct attribute_group *core_attr_update[] = {
	&group_cstate_core_c1,
	&group_cstate_core_c3,
	&group_cstate_core_c6,
	&group_cstate_core_c7,
	NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
	&group_cstate_pkg_c2,
	&group_cstate_pkg_c3,
	&group_cstate_pkg_c6,
	&group_cstate_pkg_c7,
	&group_cstate_pkg_c8,
	&group_cstate_pkg_c9,
	&group_cstate_pkg_c10,
	NULL,
};
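/*
 * Both PMUs are count-only: PERF_PMU_CAP_NO_INTERRUPT matches the lack
 * of sampling support noted at the top of this file, and setting
 * task_ctx_nr to perf_invalid_context restricts the events to
 * system-wide (CPU context) counting.
 */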
static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.attr_update	= core_attr_update,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.attr_update	= pkg_attr_update,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};

static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};

static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&slm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&glm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&icl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
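/*
 * perf_msr_probe() (see ../probe.c) returns a mask with a bit set for
 * each event that the model advertises (checked via test_msr()) and
 * whose MSR is actually readable; passing 'true' keeps counters that
 * currently read zero. Events absent from the mask remain hidden in
 * sysfs.
 */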
static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg  = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}
static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		if (topology_max_die_per_package() > 1) {
			err = perf_pmu_register(&cstate_pkg_pmu,
						"cstate_die", -1);
		} else {
			err = perf_pmu_register(&cstate_pkg_pmu,
						cstate_pkg_pmu.name, -1);
		}
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);