/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */
/*
 * This file exports cstate-related free running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and they can be used from tools without special MSR
 * support.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual, Vol. 3B.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 Available model: SLM,AMT,GLM
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       Available model: SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *			      Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
 *			      Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
 *			      Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *			      Available model: HSW ULT only
 *			      Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *			      Available model: HSW ULT only
 *			      Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       Available model: HSW ULT, GLM
 *			       Scope: Package (physical package)
 */
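/*
 * Rough usage sketch (not part of this driver): once the PMUs below are
 * registered, the counters can be read system-wide with perf, e.g.:
 *
 *	perf stat -e cstate_core/c6-residency/ -a sleep 1
 *	perf stat -e cstate_pkg/c2-residency/ -a sleep 1
 *
 * The event names match the sysfs event attributes defined further down.
 */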
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");
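/*
 * Helper to define a read-only sysfs "format" attribute; each PMU below
 * uses one of these to describe how the event config field is laid out.
 */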
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);
/* Model -> events mapping */
struct cstate_model {
	unsigned long	core_events;
	unsigned long	pkg_events;
	unsigned long	quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};
/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;
enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};
PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
static struct perf_cstate_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
};
static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = core_events_attrs,
};
DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};
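/*
 * Only one CPU per core/package actually reads the residency MSRs; that
 * designated reader is exported to user space via the "cpumask" sysfs
 * attribute below so tools know which CPU to open the events on.
 */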
static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};
static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};
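/*
 * Taken together, the attribute groups above surface each PMU roughly as
 * (illustrative layout, paths as created by the perf core):
 *
 *	/sys/bus/event_source/devices/cstate_core/events/c6-residency -> "event=0x02"
 *	/sys/bus/event_source/devices/cstate_core/format/event        -> "config:0-63"
 *	/sys/bus/event_source/devices/cstate_core/cpumask
 */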
/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;
enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};
PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
static struct perf_cstate_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
};
static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = pkg_events_attrs,
};
DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};

static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};
static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};
static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}
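/*
 * Validate the requested config, remember which MSR backs it and retarget
 * the event to the designated reader CPU of the core/package.
 */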
static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		if (!core_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!pkg_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_core_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;

	return 0;
}
static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}
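/*
 * The counters are free running and cannot be written, so an update is a
 * lockless read-and-accumulate: publish the new raw value with cmpxchg and
 * retry if another reader got there first.
 */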
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}
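/*
 * start/stop/add/del are thin wrappers: there is nothing to program in
 * hardware, so they only snapshot and fold in the free running counter.
 */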
static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}
/*
 * Check if exiting cpu is the designated reader. If so migrate the
 * events when there is a valid target available
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}
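/* On CPU online: claim the designated-reader role if nobody holds it yet. */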
static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_core_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}
static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};
static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};
static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};
static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};
static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};
static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};
static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};
static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};
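/*
 * The match table below binds each supported Intel (family 6) model to the
 * cstate_model describing which residency events it provides.
 */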
#define X86_CSTATES_MODEL(model, states)				\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,    snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT,   slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,      slm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X,       snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT,   glm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
/*
 * Probe the cstate events and insert the available ones into the sysfs attrs.
 * Return false if there are no available events.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
				    struct perf_cstate_msr *msr,
				    struct attribute **attrs)
{
	bool found = false;
	unsigned int bit;
	u64 val;

	for (bit = 0; bit < max; bit++) {
		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
			*attrs++ = &msr[bit].attr->attr.attr;
			found = true;
		} else {
			msr[bit].attr = NULL;
		}
	}
	*attrs = NULL;

	return found;
}
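/* Apply model specific MSR quirks, then probe which events actually exist. */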
static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	has_cstate_core = cstate_probe_msr(cm->core_events,
					   PERF_CSTATE_CORE_EVENT_MAX,
					   core_msr, core_events_attrs);

	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
					  PERF_CSTATE_PKG_EVENT_MAX,
					  pkg_msr, pkg_events_attrs);

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}
static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}
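/* Register the hotplug states and whichever PMUs the probe found. */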
static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}
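/* Module entry: bail out on hypervisors and on CPUs not in the match table. */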
static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);