/*
 * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
 *
 * Licensed under the terms of the GNU GPL License version 2.
 *
 * Based on SandyBridge monitor. Implements the new package C-states
 * (PC8, PC9, PC10) coming with a specific Haswell (family 0x45) CPU.
 */
#if defined(__i386__) || defined(__x86_64__)

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"

#define MSR_PKG_C8_RESIDENCY	0x00000630
#define MSR_PKG_C9_RESIDENCY	0x00000631
#define MSR_PKG_C10_RESIDENCY	0x00000632

#define MSR_TSC	0x10
/*
 * Indices into the per-cstate counter tables. TSC is an out-of-band id
 * used only to select the time stamp counter MSR in hsw_ext_get_count().
 */
enum intel_hsw_ext_id { PC8 = 0, PC9, PC10, HSW_EXT_CSTATE_COUNT,
			TSC = 0xFFFF };

static int hsw_ext_get_count_percent(unsigned int self_id, double *percent,
				     unsigned int cpu);
32 static cstate_t hsw_ext_cstates
[HSW_EXT_CSTATE_COUNT
] = {
35 .desc
= N_("Processor Package C8"),
37 .range
= RANGE_PACKAGE
,
38 .get_count_percent
= hsw_ext_get_count_percent
,
42 .desc
= N_("Processor Package C9"),
43 .desc
= N_("Processor Package C2"),
45 .range
= RANGE_PACKAGE
,
46 .get_count_percent
= hsw_ext_get_count_percent
,
50 .desc
= N_("Processor Package C10"),
52 .range
= RANGE_PACKAGE
,
53 .get_count_percent
= hsw_ext_get_count_percent
,
57 static unsigned long long tsc_at_measure_start
;
58 static unsigned long long tsc_at_measure_end
;
59 static unsigned long long *previous_count
[HSW_EXT_CSTATE_COUNT
];
60 static unsigned long long *current_count
[HSW_EXT_CSTATE_COUNT
];
61 /* valid flag for all CPUs. If a MSR read failed it will be zero */
64 static int hsw_ext_get_count(enum intel_hsw_ext_id id
, unsigned long long *val
,
71 msr
= MSR_PKG_C8_RESIDENCY
;
74 msr
= MSR_PKG_C9_RESIDENCY
;
77 msr
= MSR_PKG_C10_RESIDENCY
;
85 if (read_msr(cpu
, msr
, val
))
90 static int hsw_ext_get_count_percent(unsigned int id
, double *percent
,
99 (current_count
[id
][cpu
] - previous_count
[id
][cpu
])) /
100 (tsc_at_measure_end
- tsc_at_measure_start
);
102 dprint("%s: previous: %llu - current: %llu - (%u)\n",
103 hsw_ext_cstates
[id
].name
, previous_count
[id
][cpu
],
104 current_count
[id
][cpu
], cpu
);
106 dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
107 hsw_ext_cstates
[id
].name
,
108 (unsigned long long) tsc_at_measure_end
- tsc_at_measure_start
,
109 current_count
[id
][cpu
] - previous_count
[id
][cpu
],
115 static int hsw_ext_start(void)
118 unsigned long long val
;
120 for (num
= 0; num
< HSW_EXT_CSTATE_COUNT
; num
++) {
121 for (cpu
= 0; cpu
< cpu_count
; cpu
++) {
122 hsw_ext_get_count(num
, &val
, cpu
);
123 previous_count
[num
][cpu
] = val
;
126 hsw_ext_get_count(TSC
, &tsc_at_measure_start
, base_cpu
);
130 static int hsw_ext_stop(void)
132 unsigned long long val
;
135 hsw_ext_get_count(TSC
, &tsc_at_measure_end
, base_cpu
);
137 for (num
= 0; num
< HSW_EXT_CSTATE_COUNT
; num
++) {
138 for (cpu
= 0; cpu
< cpu_count
; cpu
++) {
139 is_valid
[cpu
] = !hsw_ext_get_count(num
, &val
, cpu
);
140 current_count
[num
][cpu
] = val
;
146 struct cpuidle_monitor intel_hsw_ext_monitor
;
148 static struct cpuidle_monitor
*hsw_ext_register(void)
152 if (cpupower_cpu_info
.vendor
!= X86_VENDOR_INTEL
153 || cpupower_cpu_info
.family
!= 6)
156 switch (cpupower_cpu_info
.model
) {
163 is_valid
= calloc(cpu_count
, sizeof(int));
164 for (num
= 0; num
< HSW_EXT_CSTATE_COUNT
; num
++) {
165 previous_count
[num
] = calloc(cpu_count
,
166 sizeof(unsigned long long));
167 current_count
[num
] = calloc(cpu_count
,
168 sizeof(unsigned long long));
170 intel_hsw_ext_monitor
.name_len
= strlen(intel_hsw_ext_monitor
.name
);
171 return &intel_hsw_ext_monitor
;
174 void hsw_ext_unregister(void)
178 for (num
= 0; num
< HSW_EXT_CSTATE_COUNT
; num
++) {
179 free(previous_count
[num
]);
180 free(current_count
[num
]);
184 struct cpuidle_monitor intel_hsw_ext_monitor
= {
185 .name
= "HaswellExtended",
186 .hw_states
= hsw_ext_cstates
,
187 .hw_states_num
= HSW_EXT_CSTATE_COUNT
,
188 .start
= hsw_ext_start
,
189 .stop
= hsw_ext_stop
,
190 .do_register
= hsw_ext_register
,
191 .unregister
= hsw_ext_unregister
,
193 .overflow_s
= 922000000 /* 922337203 seconds TSC overflow
196 #endif /* defined(__i386__) || defined(__x86_64__) */