/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events and allow tools to use them without special MSR support.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
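 *
 * For example (illustrative usage; assumes the module is loaded and the
 * counter is available on the running model), a counter can be read
 * system-wide with:
 *
 *	perf stat -e cstate_core/c6-residency/ -a sleep 1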
 *
 * According to counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
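 *
 * Both PMUs show up in the usual event_source sysfs locations, e.g.
 * (paths shown for illustration):
 *
 *	/sys/bus/event_source/devices/cstate_core/events/
 *	/sys/bus/event_source/devices/cstate_pkg/events/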
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT,GLM
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *			       perf code: 0x00
 *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
 *			       Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *						GLM
 *			       Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM
 *			       Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *			       perf code: 0x03
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *			       perf code: 0x04
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *			       perf code: 0x05
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT, GLM
 *			       Scope: Package (physical package)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
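
/*
 * For illustration: DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63")
 * below generates a read-only sysfs attribute named "event" whose show()
 * handler prints "config:0-63", describing how the event selector is encoded
 * in perf_event_attr::config.
 */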

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long		core_events;
	unsigned long		pkg_events;
	unsigned long		quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};

/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

static struct perf_cstate_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
	NULL, /* filled in at probe time by cstate_probe_msr() */
};

static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};
static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
	NULL, /* filled in at probe time by cstate_probe_msr() */
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};

static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}
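
/*
 * Validate the event and bind it to the designated reader CPU for its scope:
 * attr.config indexes the core_msr/pkg_msr tables, and the event is moved to
 * the CPU that owns the counter for the target core/package.
 */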
static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		if (!core_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		if (!pkg_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_core_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}
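
/*
 * Deltas of the free-running counter are folded in with a lockless cmpxchg
 * retry loop: if prev_count changed underneath us, re-read the counter and
 * try again before adding the delta to event->count.
 */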
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}
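
/*
 * Note: since the hardware counters are free running and cannot be stopped,
 * start() above only snapshots the current MSR value into prev_count;
 * stop()/read() then fold the delta since that snapshot into event->count.
 */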

/*
 * Check if the exiting cpu is the designated reader. If so, migrate the
 * events when there is a valid target available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_core_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};

static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};

static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

#define X86_CSTATES_MODEL(model, states)				\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
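
/*
 * For illustration, X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM, nhm_cstates)
 * expands to an x86_cpu_id entry matching vendor Intel, family 6, model
 * Nehalem, with driver_data pointing at nhm_cstates.
 */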
static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,    snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

/*
 * Probe the cstate events and insert the available ones into sysfs attrs.
 * Return false if there are no available events.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
				    struct perf_cstate_msr *msr,
				    struct attribute **attrs)
{
	bool found = false;
	unsigned int bit;
	u64 val;

	for (bit = 0; bit < max; bit++) {
		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
			*attrs++ = &msr[bit].attr->attr.attr;
			found = true;
		} else {
			msr[bit].attr = NULL;
		}
	}
	*attrs = NULL;

	return found;
}

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	has_cstate_core = cstate_probe_msr(cm->core_events,
					   PERF_CSTATE_CORE_EVENT_MAX,
					   core_msr, core_events_attrs);

	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
					  PERF_CSTATE_PKG_EVENT_MAX,
					  pkg_msr, pkg_events_attrs);

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);