// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
 *
 * Based on SandyBridge monitor. Implements the new package C-states
 * (PC8, PC9, PC10) coming with a specific Haswell (model 0x45) CPU.
 */

#if defined(__i386__) || defined(__x86_64__)

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"

#define MSR_PKG_C8_RESIDENCY	0x00000630
#define MSR_PKG_C9_RESIDENCY	0x00000631
#define MSR_PKG_C10_RESIDENCY	0x00000632
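
/*
 * These MSRs accumulate the package C8/C9/C10 residency. The monitor
 * below samples them (together with the TSC) at the start and the end
 * of a measurement interval and reports each delta as a percentage of
 * the TSC delta.
 */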
#define MSR_TSC			0x10

enum intel_hsw_ext_id { PC8 = 0, PC9, PC10, HSW_EXT_CSTATE_COUNT,
			TSC = 0xFFFF };

static int hsw_ext_get_count_percent(unsigned int self_id, double *percent,
				     unsigned int cpu);

static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
	{
		.name			= "PC8",
		.desc			= N_("Processor Package C8"),
		.id			= PC8,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= hsw_ext_get_count_percent,
	},
	{
		.name			= "PC9",
		.desc			= N_("Processor Package C9"),
		.id			= PC9,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= hsw_ext_get_count_percent,
	},
	{
		.name			= "PC10",
		.desc			= N_("Processor Package C10"),
		.id			= PC10,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= hsw_ext_get_count_percent,
	},
};
static unsigned long long tsc_at_measure_start;
static unsigned long long tsc_at_measure_end;
static unsigned long long *previous_count[HSW_EXT_CSTATE_COUNT];
static unsigned long long *current_count[HSW_EXT_CSTATE_COUNT];
/* valid flag for all CPUs. If a MSR read failed it will be zero */
static int *is_valid;
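
/*
 * previous_count/current_count hold one counter snapshot per C-state and
 * per CPU: previous_count is filled in hsw_ext_start(), current_count and
 * the per-CPU is_valid flag in hsw_ext_stop().
 */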
static int hsw_ext_get_count(enum intel_hsw_ext_id id, unsigned long long *val,
			     unsigned int cpu)
{
	int msr;

	switch (id) {
	case PC8:
		msr = MSR_PKG_C8_RESIDENCY;
		break;
	case PC9:
		msr = MSR_PKG_C9_RESIDENCY;
		break;
	case PC10:
		msr = MSR_PKG_C10_RESIDENCY;
		break;
	case TSC:
		msr = MSR_TSC;
		break;
	default:
		return -1;
	}
	if (read_msr(cpu, msr, val))
		return -1;
	return 0;
}
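
/*
 * The calculation below assumes the residency counters advance at the
 * same rate as the TSC, so the share of the interval spent in a state is
 * (count delta) / (TSC delta). Illustrative numbers: a count delta of
 * 1500000 against a TSC delta of 2000000 reports 75 percent.
 */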
static int hsw_ext_get_count_percent(unsigned int id, double *percent,
				     unsigned int cpu)
{
	*percent = 0.0;

	if (!is_valid[cpu])
		return -1;

	*percent = (100.0 *
		(current_count[id][cpu] - previous_count[id][cpu])) /
		(tsc_at_measure_end - tsc_at_measure_start);

	dprint("%s: previous: %llu - current: %llu - (%u)\n",
		hsw_ext_cstates[id].name, previous_count[id][cpu],
		current_count[id][cpu], cpu);

	dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
	       hsw_ext_cstates[id].name,
	       (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
	       current_count[id][cpu] - previous_count[id][cpu],
	       *percent, cpu);

	return 0;
}
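
/*
 * start/stop bracket one measurement interval: start records the per-CPU
 * baseline counts and the TSC on base_cpu, stop re-reads both and updates
 * the per-CPU is_valid flag from the result of the MSR reads.
 */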
static int hsw_ext_start(void)
{
	int num, cpu;
	unsigned long long val;

	for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			hsw_ext_get_count(num, &val, cpu);
			previous_count[num][cpu] = val;
		}
	}
	hsw_ext_get_count(TSC, &tsc_at_measure_start, base_cpu);
	return 0;
}
static int hsw_ext_stop(void)
{
	unsigned long long val;
	int num, cpu;

	hsw_ext_get_count(TSC, &tsc_at_measure_end, base_cpu);

	for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu);
			current_count[num][cpu] = val;
		}
	}
	return 0;
}
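
/*
 * Forward declaration: hsw_ext_register() fills in name_len from the
 * monitor's .name, which is set in the definition at the end of the file.
 */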
struct cpuidle_monitor intel_hsw_ext_monitor;

static struct cpuidle_monitor *hsw_ext_register(void)
{
	int num;

	if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL
	    || cpupower_cpu_info.family != 6)
		return NULL;

	switch (cpupower_cpu_info.model) {
	case 0x45: /* HSW */
		break;
	default:
		return NULL;
	}

	is_valid = calloc(cpu_count, sizeof(int));
	for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
		previous_count[num] = calloc(cpu_count,
					     sizeof(unsigned long long));
		current_count[num] = calloc(cpu_count,
					    sizeof(unsigned long long));
	}
	intel_hsw_ext_monitor.name_len = strlen(intel_hsw_ext_monitor.name);
	return &intel_hsw_ext_monitor;
}
void hsw_ext_unregister(void)
{
	int num;

	free(is_valid);
	for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
		free(previous_count[num]);
		free(current_count[num]);
	}
}
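
/*
 * The monitor is selected by the name below, e.g. (assuming root, since
 * flags.needs_root is set, and a cpupower build that includes this
 * monitor; the exact output columns depend on the cpupower version):
 *
 *	cpupower monitor -m HaswellExtended
 */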
struct cpuidle_monitor intel_hsw_ext_monitor = {
	.name			= "HaswellExtended",
	.hw_states		= hsw_ext_cstates,
	.hw_states_num		= HSW_EXT_CSTATE_COUNT,
	.start			= hsw_ext_start,
	.stop			= hsw_ext_stop,
	.do_register		= hsw_ext_register,
	.unregister		= hsw_ext_unregister,
	.flags.needs_root	= 1,
	.overflow_s		= 922000000 /* 922337203 seconds TSC overflow
					       at 20GHz */
};
#endif /* defined(__i386__) || defined(__x86_64__) */