/*
 *  (C) 2010,2011       Thomas Renninger <trenn@suse.de>, Novell Inc.
 *
 *  Licensed under the terms of the GNU GPL License version 2.
 *
 *  Based on Len Brown's <lenb@kernel.org> turbostat tool.
 */

#if defined(__i386__) || defined(__x86_64__)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"

#define MSR_PKG_C2_RESIDENCY	0x60D
#define MSR_PKG_C7_RESIDENCY	0x3FA
#define MSR_CORE_C7_RESIDENCY	0x3FE
#define MSR_TSC			0x10
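
/*
 * Each residency MSR counts the cycles the core or package has spent in the
 * corresponding C-state; MSR_TSC (IA32_TIME_STAMP_COUNTER) provides the time
 * base the residency deltas are compared against.
 */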

enum intel_snb_id { C7 = 0, PC2, PC7, SNB_CSTATE_COUNT, TSC = 0xFFFF };
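
/*
 * TSC deliberately sits outside SNB_CSTATE_COUNT: it is not a C-state of its
 * own, only a request id that makes snb_get_count() read the TSC instead of a
 * residency counter.
 */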

static int snb_get_count_percent(unsigned int self_id, double *percent,
				 unsigned int cpu);

static cstate_t snb_cstates[SNB_CSTATE_COUNT] = {
	{
		.name			= "C7",
		.desc			= N_("Processor Core C7"),
		.id			= C7,
		.range			= RANGE_CORE,
		.get_count_percent	= snb_get_count_percent,
	},
	{
		.name			= "PC2",
		.desc			= N_("Processor Package C2"),
		.id			= PC2,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= snb_get_count_percent,
	},
	{
		.name			= "PC7",
		.desc			= N_("Processor Package C7"),
		.id			= PC7,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= snb_get_count_percent,
	},
};

static unsigned long long tsc_at_measure_start;
static unsigned long long tsc_at_measure_end;
static unsigned long long *previous_count[SNB_CSTATE_COUNT];
static unsigned long long *current_count[SNB_CSTATE_COUNT];
/* valid flag for all CPUs. If an MSR read failed it will be zero */
static int *is_valid;
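
/*
 * previous_count/current_count hold one residency snapshot per CPU and
 * C-state; the reported percentages are the difference between the two
 * snapshots divided by the TSC delta of the same interval.
 */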

static int snb_get_count(enum intel_snb_id id, unsigned long long *val,
			 unsigned int cpu)
{
	int msr;

	switch (id) {
	case C7:
		msr = MSR_CORE_C7_RESIDENCY;
		break;
	case PC2:
		msr = MSR_PKG_C2_RESIDENCY;
		break;
	case PC7:
		msr = MSR_PKG_C7_RESIDENCY;
		break;
	case TSC:
		msr = MSR_TSC;
		break;
	default:
		return -1;
	}
	if (read_msr(cpu, msr, val))
		return -1;
	return 0;
}

static int snb_get_count_percent(unsigned int id, double *percent,
				 unsigned int cpu)
{
	*percent = 0.0;

	if (!is_valid[cpu])
		return -1;

	*percent = (100.0 *
		(current_count[id][cpu] - previous_count[id][cpu])) /
		(tsc_at_measure_end - tsc_at_measure_start);

	dprint("%s: previous: %llu - current: %llu - (%u)\n",
	       snb_cstates[id].name, previous_count[id][cpu],
	       current_count[id][cpu], cpu);

	dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
	       snb_cstates[id].name,
	       (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
	       current_count[id][cpu] - previous_count[id][cpu],
	       *percent, cpu);

	return 0;
}
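
/*
 * Measurement protocol: snb_start() snapshots all residency counters and the
 * TSC, snb_stop() snapshots them again and records per-CPU validity, and
 * snb_get_count_percent() reports the ratio of the two deltas.
 */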

static int snb_start(void)
{
	int num, cpu;
	unsigned long long val;

	for (num = 0; num < SNB_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			snb_get_count(num, &val, cpu);
			previous_count[num][cpu] = val;
		}
	}
	snb_get_count(TSC, &tsc_at_measure_start, base_cpu);
	return 0;
}

static int snb_stop(void)
{
	unsigned long long val;
	int num, cpu;

	snb_get_count(TSC, &tsc_at_measure_end, base_cpu);

	for (num = 0; num < SNB_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			is_valid[cpu] = !snb_get_count(num, &val, cpu);
			current_count[num][cpu] = val;
		}
	}
	return 0;
}

struct cpuidle_monitor intel_snb_monitor;
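
/*
 * Register only on Intel family 6 CPUs whose model is known to expose the
 * residency MSRs used above; all per-CPU bookkeeping is allocated here and
 * released again in snb_unregister().
 */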
static struct cpuidle_monitor *snb_register(void)
{
	int num;

	if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL
	    || cpupower_cpu_info.family != 6)
		return NULL;

	switch (cpupower_cpu_info.model) {
	case 0x2D: /* SNB Xeon */
	case 0x3E: /* IVB Xeon */
		break;
	default:
		return NULL;
	}

	is_valid = calloc(cpu_count, sizeof(int));
	for (num = 0; num < SNB_CSTATE_COUNT; num++) {
		previous_count[num] = calloc(cpu_count,
					     sizeof(unsigned long long));
		current_count[num] = calloc(cpu_count,
					    sizeof(unsigned long long));
	}
	intel_snb_monitor.name_len = strlen(intel_snb_monitor.name);
	return &intel_snb_monitor;
}

void snb_unregister(void)
{
	int num;

	for (num = 0; num < SNB_CSTATE_COUNT; num++) {
		free(previous_count[num]);
		free(current_count[num]);
	}
	free(is_valid);
}

struct cpuidle_monitor intel_snb_monitor = {
	.name			= "SandyBridge",
	.hw_states		= snb_cstates,
	.hw_states_num		= SNB_CSTATE_COUNT,
	.start			= snb_start,
	.stop			= snb_stop,
	.do_register		= snb_register,
	.unregister		= snb_unregister,
	.overflow_s		= 922000000 /* 922337203 seconds TSC overflow */
};
#endif /* defined(__i386__) || defined(__x86_64__) */