/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and allow tools to use them without special MSR
 * permissions.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol. 3B.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *		Available model: SLM,AMT,GLM,CNL
 *		Scope: Core (each processor core has an MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *		Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *		Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *		Available model: SNB,IVB,HSW,BDW,SKL,CNL
 *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *		Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL
 *		Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *		Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *		Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *		Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
 *		Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *		Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL
 *		Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *		Available model: HSW ULT,KBL,CNL
 *		Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *		Available model: HSW ULT,KBL,CNL
 *		Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *		Available model: HSW ULT,KBL,GLM,CNL
 *		Scope: Package (physical package)
 */
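
/*
 * A minimal userspace sketch (not part of this driver) of how one of
 * these counters can be read through the perf syscall. The PMU type
 * below is a placeholder: it must be read from
 * /sys/bus/event_source/devices/cstate_core/type at run time, and the
 * config value 0x02 corresponds to the c6-residency event defined
 * further down in this file:
 *
 *	struct perf_event_attr attr = {
 *		.size   = sizeof(attr),
 *		.type   = cstate_core_type,	// PMU type read from sysfs
 *		.config = 0x02,			// c6-residency
 *	};
 *	// counting only, system-wide on one CPU: pid = -1, no sampling
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	uint64_t count;
 *	read(fd, &count, sizeof(count));
 */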

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");
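
/*
 * Helper that generates the sysfs "format" show routines used below;
 * the format string (e.g. "config:0-63") is emitted verbatim and must
 * fit within a page.
 */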
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long	core_events;
	unsigned long	pkg_events;
	unsigned long	quirks;
};

#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};

/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

static struct perf_cstate_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");

static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");

static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};

static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}
static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		if (!core_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!pkg_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_core_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}
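
/*
 * Accumulate the delta since the last snapshot into event->count; the
 * cmpxchg on prev_count detects a concurrent update of the snapshot
 * and retries in that case.
 */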
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if the exiting cpu is the designated reader. If so, migrate the
 * events when there is a valid target available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_core_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};

static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};

static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

#define X86_CSTATES_MODEL(model, states)				\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
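
/*
 * CPU model table: each entry binds an Intel family-6 model to the
 * cstate_model describing its available residency counters; matched
 * via x86_match_cpu() at module init.
 */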
static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,    snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT,   slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,      slm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X,       snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT,   glm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

/*
 * Probe the cstate events and insert the available ones into the sysfs attrs.
 * Return false if there are no available events.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
				    struct perf_cstate_msr *msr,
				    struct attribute **attrs)
{
	bool found = false;
	unsigned int bit;
	u64 val;

	for (bit = 0; bit < max; bit++) {
		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
			*attrs++ = &msr[bit].attr->attr.attr;
			found = true;
		} else {
			msr[bit].attr = NULL;
		}
	}
	*attrs = NULL;

	return found;
}

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	has_cstate_core = cstate_probe_msr(cm->core_events,
					   PERF_CSTATE_CORE_EVENT_MAX,
					   core_msr, core_events_attrs);

	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
					  PERF_CSTATE_PKG_EVENT_MAX,
					  pkg_msr, pkg_events_attrs);

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);