/*
 * turbostat -- show CPU frequency and C-state residency
 * on modern Intel turbo-capable processors.
 *
 * Copyright (c) 2013 Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/types.h>
#include <sys/resource.h>
#include <linux/capability.h>
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5;
unsigned int debug;
unsigned int rapl_joules;
unsigned int summary_only;
unsigned int dump_only;
unsigned int skip_c0;
unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
unsigned int do_knl_cstates;
unsigned int do_c8_c9_c10;
unsigned int do_skl_residency;
unsigned int do_slm_cstates;
unsigned int use_c1_residency_msr;
unsigned int has_aperf;
unsigned int units = 1000000;	/* MHz etc */
unsigned int genuine_intel;
unsigned int has_invariant_tsc;
unsigned int do_nhm_platform_info;
unsigned int extra_msr_offset32;
unsigned int extra_msr_offset64;
unsigned int extra_delta_offset32;
unsigned int extra_delta_offset64;
unsigned int show_pkg;
unsigned int show_core;
unsigned int show_cpu;
unsigned int show_pkg_only;
unsigned int show_core_only;
char *output_buffer, *outp;
unsigned int do_rapl;
unsigned int do_dts;
unsigned int do_ptm;
unsigned int tcc_activation_temp;
unsigned int tcc_activation_temp_override;
double rapl_power_units, rapl_time_units;
double rapl_dram_energy_units, rapl_energy_units;
double rapl_joule_counter_range;
unsigned int do_core_perf_limit_reasons;
unsigned int do_gfx_perf_limit_reasons;
unsigned int do_ring_perf_limit_reasons;
unsigned int crystal_hz;
unsigned long long tsc_hz;
#define RAPL_PKG		(1 << 0)
				/* 0x610 MSR_PKG_POWER_LIMIT */
				/* 0x611 MSR_PKG_ENERGY_STATUS */
#define RAPL_PKG_PERF_STATUS	(1 << 1)
				/* 0x613 MSR_PKG_PERF_STATUS */
#define RAPL_PKG_POWER_INFO	(1 << 2)
				/* 0x614 MSR_PKG_POWER_INFO */

#define RAPL_DRAM		(1 << 3)
				/* 0x618 MSR_DRAM_POWER_LIMIT */
				/* 0x619 MSR_DRAM_ENERGY_STATUS */
#define RAPL_DRAM_PERF_STATUS	(1 << 4)
				/* 0x61b MSR_DRAM_PERF_STATUS */
#define RAPL_DRAM_POWER_INFO	(1 << 5)
				/* 0x61c MSR_DRAM_POWER_INFO */

#define RAPL_CORES		(1 << 6)
				/* 0x638 MSR_PP0_POWER_LIMIT */
				/* 0x639 MSR_PP0_ENERGY_STATUS */
#define RAPL_CORE_POLICY	(1 << 7)
				/* 0x63a MSR_PP0_POLICY */

#define RAPL_GFX		(1 << 8)
				/* 0x640 MSR_PP1_POWER_LIMIT */
				/* 0x641 MSR_PP1_ENERGY_STATUS */
				/* 0x642 MSR_PP1_POLICY */

#define TJMAX_DEFAULT	100

#define MAX(a, b) ((a) > (b) ? (a) : (b))
int aperf_mperf_unstable;
char *progname;

cpu_set_t *cpu_present_set, *cpu_affinity_set;
size_t cpu_present_setsize, cpu_affinity_setsize;

struct thread_data {
	unsigned long long tsc;
	unsigned long long aperf;
	unsigned long long mperf;
	unsigned long long c1;
	unsigned long long extra_msr64;
	unsigned long long extra_delta64;
	unsigned long long extra_msr32;
	unsigned long long extra_delta32;
	unsigned int smi_count;
	unsigned int cpu_id;
	unsigned int flags;
#define CPU_IS_FIRST_THREAD_IN_CORE	0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE	0x4
} *thread_even, *thread_odd;
struct core_data {
	unsigned long long c3;
	unsigned long long c6;
	unsigned long long c7;
	unsigned int core_temp_c;
	unsigned int core_id;
} *core_even, *core_odd;
struct pkg_data {
	unsigned long long pc2;
	unsigned long long pc3;
	unsigned long long pc6;
	unsigned long long pc7;
	unsigned long long pc8;
	unsigned long long pc9;
	unsigned long long pc10;
	unsigned long long pkg_wtd_core_c0;
	unsigned long long pkg_any_core_c0;
	unsigned long long pkg_any_gfxe_c0;
	unsigned long long pkg_both_core_gfxe_c0;
	unsigned int package_id;
	unsigned int energy_pkg;		/* MSR_PKG_ENERGY_STATUS */
	unsigned int energy_dram;		/* MSR_DRAM_ENERGY_STATUS */
	unsigned int energy_cores;		/* MSR_PP0_ENERGY_STATUS */
	unsigned int energy_gfx;		/* MSR_PP1_ENERGY_STATUS */
	unsigned int rapl_pkg_perf_status;	/* MSR_PKG_PERF_STATUS */
	unsigned int rapl_dram_perf_status;	/* MSR_DRAM_PERF_STATUS */
	unsigned int pkg_temp_c;
} *package_even, *package_odd;
#define ODD_COUNTERS thread_odd, core_odd, package_odd
#define EVEN_COUNTERS thread_even, core_even, package_even

#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
		topo.num_threads_per_core + \
		(core_no) * topo.num_threads_per_core + (thread_no))
#define GET_CORE(core_base, core_no, pkg_no) \
	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
struct system_summary {
	struct thread_data threads;
	struct core_data cores;
	struct pkg_data packages;
} average;

struct topo_params {
	int num_packages;
	int num_cpus;
	int num_cores;
	int max_cpu_num;
	int num_cores_per_pkg;
	int num_threads_per_core;
} topo;

struct timeval tv_even, tv_odd, tv_delta;
void setup_all_buffers(void);

int cpu_is_not_present(int cpu)
{
	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
}
/*
 * run func(thread, core, package) in topology order
 * skip non-present cpus
 */
int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
	struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
	int retval, pkg_no, core_no, thread_no;

	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
			for (thread_no = 0; thread_no <
				topo.num_threads_per_core; ++thread_no) {
				struct thread_data *t;
				struct core_data *c;
				struct pkg_data *p;

				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);

				if (cpu_is_not_present(t->cpu_id))
					continue;

				c = GET_CORE(core_base, core_no, pkg_no);
				p = GET_PKG(pkg_base, pkg_no);

				retval = func(t, c, p);
				if (retval)
					return retval;
			}
		}
	}
	return 0;
}
int cpu_migrate(int cpu)
{
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
		return -1;
	else
		return 0;
}
int get_msr(int cpu, off_t offset, unsigned long long *msr)
{
	ssize_t retval;
	char pathname[32];
	int fd;

	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
	fd = open(pathname, O_RDONLY);
	if (fd < 0)
		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);

	retval = pread(fd, msr, sizeof *msr, offset);
	close(fd);

	if (retval != sizeof *msr)
		err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);

	return 0;
}
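/*
 * Illustrative note (added, not part of the original source): the msr
 * driver exposes each logical CPU's model-specific registers as a
 * seekable pseudo-file, so a 64-bit MSR read is simply an 8-byte pread()
 * at the MSR address.  For example, reading IA32_MPERF (0xE7) on CPU 0
 * looks roughly like:
 *
 *	unsigned long long val;
 *	int fd = open("/dev/cpu/0/msr", O_RDONLY);
 *	if (fd >= 0 && pread(fd, &val, sizeof(val), 0xE7) == sizeof(val))
 *		printf("MPERF: %llx\n", val);
 */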
/*
 * Example Format w/ field column widths:
 *
 *  Package    Core     CPU Avg_MHz Bzy_MHz TSC_MHz     SMI   %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
 * 123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678
 */
void print_header(void)
{
	outp += sprintf(outp, " Package");
	outp += sprintf(outp, " Core");
	outp += sprintf(outp, " CPU");
	outp += sprintf(outp, " Avg_MHz");
	outp += sprintf(outp, " %%Busy");
	outp += sprintf(outp, " Bzy_MHz");
	outp += sprintf(outp, " TSC_MHz");

	if (extra_delta_offset32)
		outp += sprintf(outp, " count 0x%03X", extra_delta_offset32);
	if (extra_delta_offset64)
		outp += sprintf(outp, " COUNT 0x%03X", extra_delta_offset64);
	if (extra_msr_offset32)
		outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32);
	if (extra_msr_offset64)
		outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);

	outp += sprintf(outp, " SMI");

	outp += sprintf(outp, " CPU%%c1");
	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
		outp += sprintf(outp, " CPU%%c3");
	outp += sprintf(outp, " CPU%%c6");
	outp += sprintf(outp, " CPU%%c7");

	outp += sprintf(outp, " CoreTmp");
	outp += sprintf(outp, " PkgTmp");

	if (do_skl_residency) {
		outp += sprintf(outp, " Totl%%C0");
		outp += sprintf(outp, " Any%%C0");
		outp += sprintf(outp, " GFX%%C0");
		outp += sprintf(outp, " CPUGFX%%");
	}

	outp += sprintf(outp, " Pkg%%pc2");
	outp += sprintf(outp, " Pkg%%pc3");
	outp += sprintf(outp, " Pkg%%pc6");
	outp += sprintf(outp, " Pkg%%pc7");
	outp += sprintf(outp, " Pkg%%pc8");
	outp += sprintf(outp, " Pkg%%pc9");
	outp += sprintf(outp, " Pk%%pc10");

	if (do_rapl && !rapl_joules) {
		if (do_rapl & RAPL_PKG)
			outp += sprintf(outp, " PkgWatt");
		if (do_rapl & RAPL_CORES)
			outp += sprintf(outp, " CorWatt");
		if (do_rapl & RAPL_GFX)
			outp += sprintf(outp, " GFXWatt");
		if (do_rapl & RAPL_DRAM)
			outp += sprintf(outp, " RAMWatt");
		if (do_rapl & RAPL_PKG_PERF_STATUS)
			outp += sprintf(outp, " PKG_%%");
		if (do_rapl & RAPL_DRAM_PERF_STATUS)
			outp += sprintf(outp, " RAM_%%");
	} else if (do_rapl && rapl_joules) {
		if (do_rapl & RAPL_PKG)
			outp += sprintf(outp, " Pkg_J");
		if (do_rapl & RAPL_CORES)
			outp += sprintf(outp, " Cor_J");
		if (do_rapl & RAPL_GFX)
			outp += sprintf(outp, " GFX_J");
		if (do_rapl & RAPL_DRAM)
			outp += sprintf(outp, " RAM_J");
		if (do_rapl & RAPL_PKG_PERF_STATUS)
			outp += sprintf(outp, " PKG_%%");
		if (do_rapl & RAPL_DRAM_PERF_STATUS)
			outp += sprintf(outp, " RAM_%%");
		outp += sprintf(outp, " time");
	}
	outp += sprintf(outp, "\n");
}
int dump_counters(struct thread_data *t, struct core_data *c,
	struct pkg_data *p)
{
	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);

	if (t) {
		outp += sprintf(outp, "CPU: %d flags 0x%x\n",
			t->cpu_id, t->flags);
		outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
		outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
		outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
		outp += sprintf(outp, "c1: %016llX\n", t->c1);
		outp += sprintf(outp, "msr0x%x: %08llX\n",
			extra_delta_offset32, t->extra_delta32);
		outp += sprintf(outp, "msr0x%x: %016llX\n",
			extra_delta_offset64, t->extra_delta64);
		outp += sprintf(outp, "msr0x%x: %08llX\n",
			extra_msr_offset32, t->extra_msr32);
		outp += sprintf(outp, "msr0x%x: %016llX\n",
			extra_msr_offset64, t->extra_msr64);
		outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
	}

	if (c) {
		outp += sprintf(outp, "core: %d\n", c->core_id);
		outp += sprintf(outp, "c3: %016llX\n", c->c3);
		outp += sprintf(outp, "c6: %016llX\n", c->c6);
		outp += sprintf(outp, "c7: %016llX\n", c->c7);
		outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
	}

	if (p) {
		outp += sprintf(outp, "package: %d\n", p->package_id);
		outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
		outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
		outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
		outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
		outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
		outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
		outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
		outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
		outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
		outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
		outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
		outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
		outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
		outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
		outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
		outp += sprintf(outp, "Throttle PKG: %0X\n",
			p->rapl_pkg_perf_status);
		outp += sprintf(outp, "Throttle RAM: %0X\n",
			p->rapl_dram_perf_status);
		outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
	}

	outp += sprintf(outp, "\n");

	return 0;
}
/*
 * column formatting convention & formats
 */
int format_counters(struct thread_data *t, struct core_data *c,
	struct pkg_data *p)
{
	double interval_float;
	char *fmt8;

	/* if showing only 1st thread in core and this isn't one, bail out */
	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	/* if showing only 1st thread in pkg and this isn't one, bail out */
	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;

	/* topo columns, print blanks on 1st (average) line */
	if (t == &average.threads) {
		if (show_pkg)
			outp += sprintf(outp, "       -");
		if (show_core)
			outp += sprintf(outp, "       -");
		if (show_cpu)
			outp += sprintf(outp, "       -");
	} else {
		if (show_pkg) {
			if (p)
				outp += sprintf(outp, "%8d", p->package_id);
			else
				outp += sprintf(outp, "       -");
		}
		if (show_core) {
			if (c)
				outp += sprintf(outp, "%8d", c->core_id);
			else
				outp += sprintf(outp, "       -");
		}
		if (show_cpu)
			outp += sprintf(outp, "%8d", t->cpu_id);
	}

	/* Avg_MHz */
	if (has_aperf)
		outp += sprintf(outp, "%8.0f",
			1.0 / units * t->aperf / interval_float);

	/* %Busy */
	if (do_nhm_cstates) {
		if (!skip_c0)
			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
		else
			outp += sprintf(outp, "********");
	}

	/* Bzy_MHz */
	if (has_aperf)
		outp += sprintf(outp, "%8.0f",
			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);

	/* TSC_MHz */
	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);

	/* delta */
	if (extra_delta_offset32)
		outp += sprintf(outp, " %11llu", t->extra_delta32);

	/* DELTA */
	if (extra_delta_offset64)
		outp += sprintf(outp, " %11llu", t->extra_delta64);

	/* msr */
	if (extra_msr_offset32)
		outp += sprintf(outp, " 0x%08llx", t->extra_msr32);

	/* MSR */
	if (extra_msr_offset64)
		outp += sprintf(outp, " 0x%016llx", t->extra_msr64);

	/* SMI */
	outp += sprintf(outp, "%8d", t->smi_count);

	/* %c1 */
	if (do_nhm_cstates) {
		if (!skip_c1)
			outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
		else
			outp += sprintf(outp, "********");
	}

	/* print per-core data only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		goto done;

	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
		outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);

	outp += sprintf(outp, "%8d", c->core_temp_c);

	/* print per-package data only for 1st core in package */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		goto done;

	/* PkgTmp */
	outp += sprintf(outp, "%8d", p->pkg_temp_c);

	/* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
	if (do_skl_residency) {
		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
		outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
	}

	outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
	outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);

	/*
	 * If measurement interval exceeds minimum RAPL Joule Counter range,
	 * indicate that results are suspect by printing "**" in fraction place.
	 */
	if (interval_float < rapl_joule_counter_range)
		fmt8 = "%8.2f";
	else
		fmt8 = " %6.0f**";

	if (do_rapl && !rapl_joules) {
		if (do_rapl & RAPL_PKG)
			outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
		if (do_rapl & RAPL_CORES)
			outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
		if (do_rapl & RAPL_GFX)
			outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
		if (do_rapl & RAPL_DRAM)
			outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
		if (do_rapl & RAPL_PKG_PERF_STATUS)
			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
		if (do_rapl & RAPL_DRAM_PERF_STATUS)
			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
	} else if (do_rapl && rapl_joules) {
		if (do_rapl & RAPL_PKG)
			outp += sprintf(outp, fmt8,
					p->energy_pkg * rapl_energy_units);
		if (do_rapl & RAPL_CORES)
			outp += sprintf(outp, fmt8,
					p->energy_cores * rapl_energy_units);
		if (do_rapl & RAPL_GFX)
			outp += sprintf(outp, fmt8,
					p->energy_gfx * rapl_energy_units);
		if (do_rapl & RAPL_DRAM)
			outp += sprintf(outp, fmt8,
					p->energy_dram * rapl_dram_energy_units);
		if (do_rapl & RAPL_PKG_PERF_STATUS)
			outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
		if (do_rapl & RAPL_DRAM_PERF_STATUS)
			outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);

		outp += sprintf(outp, fmt8, interval_float);
	}
done:
	outp += sprintf(outp, "\n");

	return 0;
}
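/*
 * Worked example (added, not from the original source) of the frequency
 * math above, assuming units = 1000000 (MHz) and a 5 second interval:
 * with aperf = 10e9, mperf = 12.5e9 and tsc = 15e9 cycles,
 *	Avg_MHz = aperf / units / interval        = 10e9/1e6/5  = 2000
 *	%Busy   = 100 * mperf / tsc               = 100*12.5/15 = 83.3
 *	Bzy_MHz = tsc/units * aperf/mperf / interval
 *	        = 15e9/1e6 * (10/12.5) / 5        = 2400
 * i.e. the CPU ran at about 2400 MHz while busy, and was busy 83% of
 * the time, which averages to 2000 MHz over the interval.
 */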
void flush_stdout()
{
	fputs(output_buffer, stdout);
	fflush(stdout);
	outp = output_buffer;
}
void flush_stderr()
{
	fputs(output_buffer, stderr);
	outp = output_buffer;
}
void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	static int printed;

	if (!printed || !summary_only)
		print_header();

	if (topo.num_cpus > 1)
		format_counters(&average.threads, &average.cores,
			&average.packages);

	printed = 1;

	if (summary_only)
		return;

	for_all_cpus(format_counters, t, c, p);
}
#define DELTA_WRAP32(new, old)			\
	if (new > old) {			\
		old = new - old;		\
	} else {				\
		old = 0x100000000 + new - old;	\
	}
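/*
 * Illustrative note (added, not from the original source): the RAPL
 * energy status registers are 32-bit counters that wrap, so
 * DELTA_WRAP32() re-adds 2^32 whenever the new sample is smaller than
 * the old one.  E.g. old = 0xFFFFFFF0, new = 0x00000010:
 *	delta = 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20 counts.
 */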
void
delta_package(struct pkg_data *new, struct pkg_data *old)
{
	if (do_skl_residency) {
		old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
		old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
		old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
		old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
	}
	old->pc2 = new->pc2 - old->pc2;
	old->pc3 = new->pc3 - old->pc3;
	old->pc6 = new->pc6 - old->pc6;
	old->pc7 = new->pc7 - old->pc7;
	old->pc8 = new->pc8 - old->pc8;
	old->pc9 = new->pc9 - old->pc9;
	old->pc10 = new->pc10 - old->pc10;
	old->pkg_temp_c = new->pkg_temp_c;

	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
	DELTA_WRAP32(new->energy_cores, old->energy_cores);
	DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
	DELTA_WRAP32(new->energy_dram, old->energy_dram);
	DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
	DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
}
void
delta_core(struct core_data *new, struct core_data *old)
{
	old->c3 = new->c3 - old->c3;
	old->c6 = new->c6 - old->c6;
	old->c7 = new->c7 - old->c7;
	old->core_temp_c = new->core_temp_c;
}
void
delta_thread(struct thread_data *new, struct thread_data *old,
	struct core_data *core_delta)
{
	old->tsc = new->tsc - old->tsc;

	/* check for TSC < 1 Mcycles over interval */
	if (old->tsc < (1000 * 1000))
		errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
		     "You can disable all c-states by booting with \"idle=poll\"\n"
		     "or just the deep ones with \"processor.max_cstate=1\"");

	old->c1 = new->c1 - old->c1;

	if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
		old->aperf = new->aperf - old->aperf;
		old->mperf = new->mperf - old->mperf;
	} else {
		if (!aperf_mperf_unstable) {
			fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
			fprintf(stderr, "* Frequency results do not cover entire interval *\n");
			fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");

			aperf_mperf_unstable = 1;
		}
		/*
		 * mperf delta is likely a huge "positive" number
		 * can not use it for calculating c0 time
		 */
		skip_c0 = 1;
		skip_c1 = 1;
	}

	if (use_c1_residency_msr) {
		/*
		 * Some models have a dedicated C1 residency MSR,
		 * which should be more accurate than the derivation below.
		 */
	} else {
		/*
		 * As counter collection is not atomic,
		 * it is possible for mperf's non-halted cycles + idle states
		 * to exceed TSC's all cycles: show c1 = 0% in that case.
		 */
		if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
			old->c1 = 0;
		else {
			/* normal case, derive c1 */
			old->c1 = old->tsc - old->mperf - core_delta->c3
				- core_delta->c6 - core_delta->c7;
		}
	}

	if (old->mperf == 0) {
		if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
		old->mperf = 1;	/* divide by 0 protection */
	}

	old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
	old->extra_delta32 &= 0xFFFFFFFF;

	old->extra_delta64 = new->extra_delta64 - old->extra_delta64;

	/*
	 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
	 */
	old->extra_msr32 = new->extra_msr32;
	old->extra_msr64 = new->extra_msr64;

	old->smi_count = new->smi_count - old->smi_count;
}
int delta_cpu(struct thread_data *t, struct core_data *c,
	struct pkg_data *p, struct thread_data *t2,
	struct core_data *c2, struct pkg_data *p2)
{
	/* calculate core delta only for 1st thread in core */
	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
		delta_core(c, c2);

	/* always calculate thread delta */
	delta_thread(t, t2, c2);	/* c2 is core delta */

	/* calculate package delta only for 1st core in package */
	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
		delta_package(p, p2);

	return 0;
}
void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	t->extra_delta32 = 0;
	t->extra_delta64 = 0;

	/* tells format_counters to dump all fields from this set */
	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;

	p->pkg_wtd_core_c0 = 0;
	p->pkg_any_core_c0 = 0;
	p->pkg_any_gfxe_c0 = 0;
	p->pkg_both_core_gfxe_c0 = 0;

	p->rapl_pkg_perf_status = 0;
	p->rapl_dram_perf_status = 0;
}
int sum_counters(struct thread_data *t, struct core_data *c,
	struct pkg_data *p)
{
	average.threads.tsc += t->tsc;
	average.threads.aperf += t->aperf;
	average.threads.mperf += t->mperf;
	average.threads.c1 += t->c1;

	average.threads.extra_delta32 += t->extra_delta32;
	average.threads.extra_delta64 += t->extra_delta64;

	/* sum per-core values only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	average.cores.c3 += c->c3;
	average.cores.c6 += c->c6;
	average.cores.c7 += c->c7;

	average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);

	/* sum per-pkg values only for 1st core in pkg */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	if (do_skl_residency) {
		average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
		average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
		average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
		average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
	}

	average.packages.pc2 += p->pc2;
	average.packages.pc3 += p->pc3;
	average.packages.pc6 += p->pc6;
	average.packages.pc7 += p->pc7;
	average.packages.pc8 += p->pc8;
	average.packages.pc9 += p->pc9;
	average.packages.pc10 += p->pc10;

	average.packages.energy_pkg += p->energy_pkg;
	average.packages.energy_dram += p->energy_dram;
	average.packages.energy_cores += p->energy_cores;
	average.packages.energy_gfx += p->energy_gfx;

	average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);

	average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;

	return 0;
}
/*
 * sum the counters for all cpus in the system
 * compute the weighted average
 */
void compute_average(struct thread_data *t, struct core_data *c,
	struct pkg_data *p)
{
	clear_counters(&average.threads, &average.cores, &average.packages);

	for_all_cpus(sum_counters, t, c, p);

	average.threads.tsc /= topo.num_cpus;
	average.threads.aperf /= topo.num_cpus;
	average.threads.mperf /= topo.num_cpus;
	average.threads.c1 /= topo.num_cpus;

	average.threads.extra_delta32 /= topo.num_cpus;
	average.threads.extra_delta32 &= 0xFFFFFFFF;

	average.threads.extra_delta64 /= topo.num_cpus;

	average.cores.c3 /= topo.num_cores;
	average.cores.c6 /= topo.num_cores;
	average.cores.c7 /= topo.num_cores;

	if (do_skl_residency) {
		average.packages.pkg_wtd_core_c0 /= topo.num_packages;
		average.packages.pkg_any_core_c0 /= topo.num_packages;
		average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
		average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
	}

	average.packages.pc2 /= topo.num_packages;
	average.packages.pc3 /= topo.num_packages;
	average.packages.pc6 /= topo.num_packages;
	average.packages.pc7 /= topo.num_packages;
	average.packages.pc8 /= topo.num_packages;
	average.packages.pc9 /= topo.num_packages;
	average.packages.pc10 /= topo.num_packages;
}
static unsigned long long rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((unsigned long long)high) << 32;
}
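/*
 * Note (added, not from the original source): RDTSC returns the 64-bit
 * time-stamp counter split across EDX:EAX, so the two 32-bit halves are
 * recombined as low | (high << 32); e.g. high = 0x2, low = 0x80000000
 * yields 0x280000000.
 */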
/*
 * get_counters(...)
 * migrate to cpu
 * acquire and record local counters for that cpu
 */
int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	int cpu = t->cpu_id;
	unsigned long long msr;

	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	t->tsc = rdtsc();	/* we are running on local CPU of interest */

	if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
		return -3;
	if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
		return -4;

	if (get_msr(cpu, MSR_SMI_COUNT, &msr))
		return -5;
	t->smi_count = msr & 0xFFFFFFFF;

	if (extra_delta_offset32) {
		if (get_msr(cpu, extra_delta_offset32, &msr))
			return -5;
		t->extra_delta32 = msr & 0xFFFFFFFF;
	}

	if (extra_delta_offset64)
		if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
			return -5;

	if (extra_msr_offset32) {
		if (get_msr(cpu, extra_msr_offset32, &msr))
			return -5;
		t->extra_msr32 = msr & 0xFFFFFFFF;
	}

	if (extra_msr_offset64)
		if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
			return -5;

	if (use_c1_residency_msr) {
		if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
			return -6;
	}

	/* collect core counters only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
			return -6;
	}

	if (do_nhm_cstates && !do_knl_cstates) {
		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
			return -7;
	} else if (do_knl_cstates) {
		if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
			return -7;
	}

	if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
		return -8;

	if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
		return -9;
	c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);

	/* collect package counters only for 1st core in package */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	if (do_skl_residency) {
		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
			return -10;
		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
			return -11;
		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
			return -12;
		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
			return -13;
	}

	if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
		return -9;
	if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
		return -10;
	if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
		return -11;
	if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
		return -12;
	if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
		return -13;
	if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
		return -13;
	if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
		return -13;

	if (do_rapl & RAPL_PKG) {
		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
			return -13;
		p->energy_pkg = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_CORES) {
		if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
			return -14;
		p->energy_cores = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_DRAM) {
		if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
			return -15;
		p->energy_dram = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_GFX) {
		if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
			return -16;
		p->energy_gfx = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_PKG_PERF_STATUS) {
		if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
			return -16;
		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
	}
	if (do_rapl & RAPL_DRAM_PERF_STATUS) {
		if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
			return -16;
		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
	}

	if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
		return -17;
	p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);

	return 0;
}
/*
 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
 * If you change the values, note they are used both in comparisons
 * (>= PCL__7) and to index pkg_cstate_limit_strings[].
 */
#define PCLUKN 0 /* Unknown */
#define PCLRSV 1 /* Reserved */
#define PCL__0 2 /* PC0 */
#define PCL__1 3 /* PC1 */
#define PCL__2 4 /* PC2 */
#define PCL__3 5 /* PC3 */
#define PCL__4 6 /* PC4 */
#define PCL__6 7 /* PC6 */
#define PCL_6N 8 /* PC6 No Retention */
#define PCL_6R 9 /* PC6 Retention */
#define PCL__7 10 /* PC7 */
#define PCL_7S 11 /* PC7 Shrink */
#define PCL__8 12 /* PC8 */
#define PCL__9 13 /* PC9 */
#define PCLUNL 14 /* Unlimited */

int pkg_cstate_limit = PCLUKN;
char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
	"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};

int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
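/*
 * Illustrative decode (added, not from the original source): the low
 * nibble of MSR_PKG_CST_CONFIG_CONTROL indexes the table matching the
 * running model, and that entry in turn indexes
 * pkg_cstate_limit_strings[].  E.g. on a SNB/IVB part with
 * (msr & 0xF) == 2, snb_pkg_cstate_limits[2] == PCL_6N, which prints
 * as "pc6n":
 *
 *	pkg_cstate_limit = snb_pkg_cstate_limits[msr & 0xF];
 *	printf("%s\n", pkg_cstate_limit_strings[pkg_cstate_limit]);
 */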
static void
dump_nhm_platform_info(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);

	fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);

	ratio = (msr >> 40) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
		ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
		ratio, bclk, ratio * bclk);

	get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
	fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
		msr, msr & 0x2 ? "EN" : "DIS");

	return;
}
static void
dump_hsw_turbo_ratio_limits(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);

	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
			ratio, bclk, ratio * bclk);
	return;
}
static void
dump_ivt_turbo_ratio_limits(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);

	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);

	ratio = (msr >> 56) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 48) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 40) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 32) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 24) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
			ratio, bclk, ratio * bclk);
	return;
}
static void
dump_nhm_turbo_ratio_limits(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);

	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);

	ratio = (msr >> 56) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 48) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 40) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 32) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 24) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
			ratio, bclk, ratio * bclk);
	return;
}
static void
dump_knl_turbo_ratio_limits(void)
{
	int cores;
	unsigned int ratio;
	unsigned long long msr;
	int delta_cores;
	int delta_ratio;
	int i;

	get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);

	fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
		msr);

	/**
	 * Turbo encoding in KNL is as follows:
	 * [7:0] -- Base value of number of active cores of bucket 1.
	 * [15:8] -- Base value of freq ratio of bucket 1.
	 * [20:16] -- +ve delta of number of active cores of bucket 2.
	 * i.e. active cores of bucket 2 =
	 * active cores of bucket 1 + delta
	 * [23:21] -- Negative delta of freq ratio of bucket 2.
	 * i.e. freq ratio of bucket 2 =
	 * freq ratio of bucket 1 - delta
	 * [28:24]-- +ve delta of number of active cores of bucket 3.
	 * [31:29]-- -ve delta of freq ratio of bucket 3.
	 * [36:32]-- +ve delta of number of active cores of bucket 4.
	 * [39:37]-- -ve delta of freq ratio of bucket 4.
	 * [44:40]-- +ve delta of number of active cores of bucket 5.
	 * [47:45]-- -ve delta of freq ratio of bucket 5.
	 * [52:48]-- +ve delta of number of active cores of bucket 6.
	 * [55:53]-- -ve delta of freq ratio of bucket 6.
	 * [60:56]-- +ve delta of number of active cores of bucket 7.
	 * [63:61]-- -ve delta of freq ratio of bucket 7.
	 */
	cores = msr & 0xFF;
	ratio = (msr >> 8) & 0xFF;
	if (ratio > 0)
		fprintf(stderr,
			"%d * %.0f = %.0f MHz max turbo %d active cores\n",
			ratio, bclk, ratio * bclk, cores);

	for (i = 16; i < 64; i = i + 8) {
		delta_cores = (msr >> i) & 0x1F;
		delta_ratio = (msr >> (i + 5)) & 0x7;
		if (!delta_cores || !delta_ratio)
			return;
		cores = cores + delta_cores;
		ratio = ratio - delta_ratio;

		/** -ve ratios will make successive ratio calculations
		 * negative. Hence return instead of carrying on.
		 */
		if (ratio <= 0)
			return;

		fprintf(stderr,
			"%d * %.0f = %.0f MHz max turbo %d active cores\n",
			ratio, bclk, ratio * bclk, cores);
	}
}
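/*
 * Worked example of the KNL bucket decode above (added, not from the
 * original source): for msr = 0x182C, bucket 1 has
 *	cores = msr & 0xFF        = 0x2C = 44 active cores
 *	ratio = (msr >> 8) & 0xFF = 0x18 = 24
 * so with a 100 MHz bclk the first line reads
 * "24 * 100 = 2400 MHz max turbo 44 active cores".  Each subsequent
 * 8-bit field adds delta_cores and subtracts delta_ratio to form the
 * next bucket.
 */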
static void
dump_nhm_cst_cfg(void)
{
	unsigned long long msr;

	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);

#define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)

	fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);

	fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
		(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
		(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
		(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
		(msr & (1 << 15)) ? "" : "UN",
		(unsigned int)msr & 7,
		pkg_cstate_limit_strings[pkg_cstate_limit]);

	return;
}
void free_all_buffers(void)
{
	CPU_FREE(cpu_present_set);
	cpu_present_set = NULL;
	cpu_present_setsize = 0;

	CPU_FREE(cpu_affinity_set);
	cpu_affinity_set = NULL;
	cpu_affinity_setsize = 0;

	package_even = NULL;

	free(output_buffer);
	output_buffer = NULL;
}
/*
 * Open a file, and exit on failure
 */
FILE *fopen_or_die(const char *path, const char *mode)
{
	FILE *filep = fopen(path, mode);
	if (!filep)
		err(1, "%s: open failed", path);
	return filep;
}
/*
 * Parse a file containing a single int.
 */
int parse_int_file(const char *fmt, ...)
{
	va_list args;
	char path[PATH_MAX];
	FILE *filep;
	int value;

	va_start(args, fmt);
	vsnprintf(path, sizeof(path), fmt, args);
	va_end(args);
	filep = fopen_or_die(path, "r");
	if (fscanf(filep, "%d", &value) != 1)
		err(1, "%s: failed to parse number from file", path);
	fclose(filep);
	return value;
}
/*
 * get_cpu_position_in_core(cpu)
 * return the position of the CPU among its HT siblings in the core
 * return -1 if the sibling is not in list
 */
int get_cpu_position_in_core(int cpu)
{
	char path[64];
	FILE *filep;
	int this_cpu;
	char character;
	int i;

	snprintf(path, 64,
		 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
		 cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}

	for (i = 0; i < topo.num_threads_per_core; i++) {
		fscanf(filep, "%d", &this_cpu);
		if (this_cpu == cpu) {
			fclose(filep);
			return i;
		}

		/* Account for no separator after last thread*/
		if (i != (topo.num_threads_per_core - 1))
			fscanf(filep, "%c", &character);
	}

	fclose(filep);
	return -1;
}
/*
 * cpu_is_first_core_in_package(cpu)
 * return 1 if given CPU is 1st core in package
 */
int cpu_is_first_core_in_package(int cpu)
{
	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
}

int get_physical_package_id(int cpu)
{
	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
}

int get_core_id(int cpu)
{
	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
}

int get_num_ht_siblings(int cpu)
{
	char path[80];
	FILE *filep;
	int sib1;
	int matches = 0;
	char character;
	char str[100];
	char *ch;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen_or_die(path, "r");

	/*
	 * file format:
	 * A ',' separated or '-' separated set of numbers
	 * (eg 1-2 or 1,3,4,5)
	 */
	fscanf(filep, "%d%c\n", &sib1, &character);
	fseek(filep, 0, SEEK_SET);
	fgets(str, 100, filep);
	ch = strchr(str, character);
	while (ch != NULL) {
		matches++;
		ch = strchr(ch+1, character);
	}

	fclose(filep);
	return matches+1;
}
/*
 * run func(thread, core, package) in topology order
 * skip non-present cpus
 */
int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
	struct pkg_data *, struct thread_data *, struct core_data *,
	struct pkg_data *), struct thread_data *thread_base,
	struct core_data *core_base, struct pkg_data *pkg_base,
	struct thread_data *thread_base2, struct core_data *core_base2,
	struct pkg_data *pkg_base2)
{
	int retval, pkg_no, core_no, thread_no;

	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
			for (thread_no = 0; thread_no <
				topo.num_threads_per_core; ++thread_no) {
				struct thread_data *t, *t2;
				struct core_data *c, *c2;
				struct pkg_data *p, *p2;

				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);

				if (cpu_is_not_present(t->cpu_id))
					continue;

				t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);

				c = GET_CORE(core_base, core_no, pkg_no);
				c2 = GET_CORE(core_base2, core_no, pkg_no);

				p = GET_PKG(pkg_base, pkg_no);
				p2 = GET_PKG(pkg_base2, pkg_no);

				retval = func(t, c, p, t2, c2, p2);
				if (retval)
					return retval;
			}
		}
	}
	return 0;
}
/*
 * run func(cpu) on every cpu in /proc/stat
 * return max_cpu number
 */
int for_all_proc_cpus(int (func)(int))
{
	FILE *fp;
	int cpu_num;
	int retval;

	fp = fopen_or_die(proc_stat, "r");

	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
	if (retval != 0)
		err(1, "%s: failed to parse format", proc_stat);

	while (1) {
		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
		if (retval != 1)
			break;

		retval = func(cpu_num);
		if (retval) {
			fclose(fp);
			return retval;
		}
	}
	fclose(fp);
	return 0;
}
void re_initialize(void)
{
	free_all_buffers();
	setup_all_buffers();
	printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
}
/*
 * count_cpus()
 * remember the last one seen, it will be the max
 */
int count_cpus(int cpu)
{
	if (topo.max_cpu_num < cpu)
		topo.max_cpu_num = cpu;

	topo.num_cpus += 1;
	return 0;
}
int mark_cpu_present(int cpu)
{
	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
	return 0;
}
void turbostat_loop()
{
	int retval;
	int restarted = 0;

restart:
	restarted++;

	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
	if (retval < -1) {
		exit(retval);
	} else if (retval == -1) {
		if (restarted > 1) {
			exit(retval);
		}
		re_initialize();
		goto restart;
	}
	restarted = 0;
	gettimeofday(&tv_even, (struct timezone *)NULL);

	while (1) {
		if (for_all_proc_cpus(cpu_is_not_present)) {
			re_initialize();
			goto restart;
		}
		sleep(interval_sec);
		retval = for_all_cpus(get_counters, ODD_COUNTERS);
		if (retval < -1) {
			exit(retval);
		} else if (retval == -1) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_odd, (struct timezone *)NULL);
		timersub(&tv_odd, &tv_even, &tv_delta);
		for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
		compute_average(EVEN_COUNTERS);
		format_all_counters(EVEN_COUNTERS);
		flush_stdout();
		sleep(interval_sec);
		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
		if (retval < -1) {
			exit(retval);
		} else if (retval == -1) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_even, (struct timezone *)NULL);
		timersub(&tv_even, &tv_odd, &tv_delta);
		for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
		compute_average(ODD_COUNTERS);
		format_all_counters(ODD_COUNTERS);
		flush_stdout();
	}
}
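/*
 * Note (added, not from the original source): the loop alternates
 * between the EVEN and ODD counter sets.  Each iteration subtracts the
 * previous snapshot in place (delta_cpu) and formats the result, while
 * the other set already holds the newer raw counters, so no per-CPU
 * state needs to be copied between intervals.
 */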
void check_dev_msr()
{
	struct stat sb;
	char pathname[32];

	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
	if (stat(pathname, &sb))
		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
			err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
}
void check_permissions()
{
	struct __user_cap_header_struct cap_header_data;
	cap_user_header_t cap_header = &cap_header_data;
	struct __user_cap_data_struct cap_data_data;
	cap_user_data_t cap_data = &cap_data_data;
	extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
	int do_exit = 0;
	char pathname[32];

	/* check for CAP_SYS_RAWIO */
	cap_header->pid = getpid();
	cap_header->version = _LINUX_CAPABILITY_VERSION;
	if (capget(cap_header, cap_data) < 0)
		err(-6, "capget(2) failed");

	if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
		do_exit++;
		warnx("capget(CAP_SYS_RAWIO) failed,"
			" try \"# setcap cap_sys_rawio=ep %s\"", progname);
	}

	/* test file permissions */
	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
	if (euidaccess(pathname, R_OK)) {
		do_exit++;
		warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
	}

	/* if all else fails, tell them to be root */
	if (do_exit)
		warnx("... or simply run as root");
}
/*
 * NHM adds support for additional MSRs:
 *
 * MSR_SMI_COUNT                   0x00000034
 *
 * MSR_NHM_PLATFORM_INFO           0x000000ce
 * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
 *
 * MSR_PKG_C3_RESIDENCY            0x000003f8
 * MSR_PKG_C6_RESIDENCY            0x000003f9
 * MSR_CORE_C3_RESIDENCY           0x000003fc
 * MSR_CORE_C6_RESIDENCY           0x000003fd
 *
 * Side effect:
 * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
 */
int probe_nhm_msrs(unsigned int family, unsigned int model)
{
	unsigned long long msr;
	int *pkg_cstate_limits;

	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
	case 0x2C:	/* Westmere EP - Gulftown */
	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
		pkg_cstate_limits = nhm_pkg_cstate_limits;
		break;
	case 0x2A:	/* SNB */
	case 0x2D:	/* SNB Xeon */
	case 0x3A:	/* IVB */
	case 0x3E:	/* IVB Xeon */
		pkg_cstate_limits = snb_pkg_cstate_limits;
		break;
	case 0x3C:	/* HSW */
	case 0x3F:	/* HSX */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x47:	/* BDW */
	case 0x4F:	/* BDX */
	case 0x56:	/* BDX-DE */
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		pkg_cstate_limits = hsw_pkg_cstate_limits;
		break;
	case 0x37:	/* BYT */
	case 0x4D:	/* AVN */
		pkg_cstate_limits = slv_pkg_cstate_limits;
		break;
	case 0x4C:	/* AMT */
		pkg_cstate_limits = amt_pkg_cstate_limits;
		break;
	case 0x57:	/* PHI */
		pkg_cstate_limits = phi_pkg_cstate_limits;
		break;
	default:
		return 0;
	}
	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);

	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];

	return 1;
}
int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	switch (model) {
	/* Nehalem compatible, but do not include turbo-ratio limit support */
	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
		return 0;
	default:
		return 1;
	}
}

int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	switch (model) {
	case 0x3E:	/* IVB Xeon */
	case 0x3F:	/* HSW Xeon */
		return 1;
	default:
		return 0;
	}
}

int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	switch (model) {
	case 0x3F:	/* HSW Xeon */
		return 1;
	default:
		return 0;
	}
}

int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	switch (model) {
	case 0x57:	/* Knights Landing */
		return 1;
	default:
		return 0;
	}
}

static void
dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
{
	if (!do_nhm_platform_info)
		return;

	dump_nhm_platform_info();

	if (has_hsw_turbo_ratio_limit(family, model))
		dump_hsw_turbo_ratio_limits();

	if (has_ivt_turbo_ratio_limit(family, model))
		dump_ivt_turbo_ratio_limits();

	if (has_nhm_turbo_ratio_limit(family, model))
		dump_nhm_turbo_ratio_limits();

	if (has_knl_turbo_ratio_limit(family, model))
		dump_knl_turbo_ratio_limits();
}
/*
 * print_epb()
 * Decode the ENERGY_PERF_BIAS MSR
 */
int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	char *epb_string;
	int cpu;

	cpu = t->cpu_id;

	/* EPB is per-package */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
		return 0;

	switch (msr & 0xF) {
	case ENERGY_PERF_BIAS_PERFORMANCE:
		epb_string = "performance";
		break;
	case ENERGY_PERF_BIAS_NORMAL:
		epb_string = "balanced";
		break;
	case ENERGY_PERF_BIAS_POWERSAVE:
		epb_string = "powersave";
		break;
	default:
		epb_string = "custom";
		break;
	}
	fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);

	return 0;
}
/*
 * print_perf_limit()
 */
int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	int cpu;

	cpu = t->cpu_id;

	/* per-package counters, print only for 1st thread/package */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (do_core_perf_limit_reasons) {
		get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
		fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
			(msr & 1 << 15) ? "bit15, " : "",
			(msr & 1 << 14) ? "bit14, " : "",
			(msr & 1 << 13) ? "Transitions, " : "",
			(msr & 1 << 12) ? "MultiCoreTurbo, " : "",
			(msr & 1 << 11) ? "PkgPwrL2, " : "",
			(msr & 1 << 10) ? "PkgPwrL1, " : "",
			(msr & 1 << 9) ? "CorePwr, " : "",
			(msr & 1 << 8) ? "Amps, " : "",
			(msr & 1 << 6) ? "VR-Therm, " : "",
			(msr & 1 << 5) ? "Auto-HWP, " : "",
			(msr & 1 << 4) ? "Graphics, " : "",
			(msr & 1 << 2) ? "bit2, " : "",
			(msr & 1 << 1) ? "ThermStatus, " : "",
			(msr & 1 << 0) ? "PROCHOT, " : "");
		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
			(msr & 1 << 31) ? "bit31, " : "",
			(msr & 1 << 30) ? "bit30, " : "",
			(msr & 1 << 29) ? "Transitions, " : "",
			(msr & 1 << 28) ? "MultiCoreTurbo, " : "",
			(msr & 1 << 27) ? "PkgPwrL2, " : "",
			(msr & 1 << 26) ? "PkgPwrL1, " : "",
			(msr & 1 << 25) ? "CorePwr, " : "",
			(msr & 1 << 24) ? "Amps, " : "",
			(msr & 1 << 22) ? "VR-Therm, " : "",
			(msr & 1 << 21) ? "Auto-HWP, " : "",
			(msr & 1 << 20) ? "Graphics, " : "",
			(msr & 1 << 18) ? "bit18, " : "",
			(msr & 1 << 17) ? "ThermStatus, " : "",
			(msr & 1 << 16) ? "PROCHOT, " : "");
	}
	if (do_gfx_perf_limit_reasons) {
		get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
		fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
		fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
			(msr & 1 << 0) ? "PROCHOT, " : "",
			(msr & 1 << 1) ? "ThermStatus, " : "",
			(msr & 1 << 4) ? "Graphics, " : "",
			(msr & 1 << 6) ? "VR-Therm, " : "",
			(msr & 1 << 8) ? "Amps, " : "",
			(msr & 1 << 9) ? "GFXPwr, " : "",
			(msr & 1 << 10) ? "PkgPwrL1, " : "",
			(msr & 1 << 11) ? "PkgPwrL2, " : "");
		fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
			(msr & 1 << 16) ? "PROCHOT, " : "",
			(msr & 1 << 17) ? "ThermStatus, " : "",
			(msr & 1 << 20) ? "Graphics, " : "",
			(msr & 1 << 22) ? "VR-Therm, " : "",
			(msr & 1 << 24) ? "Amps, " : "",
			(msr & 1 << 25) ? "GFXPwr, " : "",
			(msr & 1 << 26) ? "PkgPwrL1, " : "",
			(msr & 1 << 27) ? "PkgPwrL2, " : "");
	}
	if (do_ring_perf_limit_reasons) {
		get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
		fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
		fprintf(stderr, " (Active: %s%s%s%s%s%s)",
			(msr & 1 << 0) ? "PROCHOT, " : "",
			(msr & 1 << 1) ? "ThermStatus, " : "",
			(msr & 1 << 6) ? "VR-Therm, " : "",
			(msr & 1 << 8) ? "Amps, " : "",
			(msr & 1 << 10) ? "PkgPwrL1, " : "",
			(msr & 1 << 11) ? "PkgPwrL2, " : "");
		fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
			(msr & 1 << 16) ? "PROCHOT, " : "",
			(msr & 1 << 17) ? "ThermStatus, " : "",
			(msr & 1 << 22) ? "VR-Therm, " : "",
			(msr & 1 << 24) ? "Amps, " : "",
			(msr & 1 << 26) ? "PkgPwrL1, " : "",
			(msr & 1 << 27) ? "PkgPwrL2, " : "");
	}
	return 0;
}
#define	RAPL_POWER_GRANULARITY	0x7FFF	/* 15 bit power granularity */
#define	RAPL_TIME_GRANULARITY	0x3F	/* 6 bit time granularity */

double get_tdp(unsigned int model)
{
	unsigned long long msr;

	if (do_rapl & RAPL_PKG_POWER_INFO)
		if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
			return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;

	switch (model) {
	case 0x37:	/* BYT */
	case 0x4D:	/* AVN */
		return 30.0;
	default:
		return 135.0;
	}
}
/*
 * rapl_dram_energy_units_probe()
 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
 */
static double
rapl_dram_energy_units_probe(int model, double rapl_energy_units)
{
	/* only called for genuine_intel, family 6 */

	switch (model) {
	case 0x3F:	/* HSX */
	case 0x4F:	/* BDX */
	case 0x56:	/* BDX-DE */
	case 0x57:	/* KNL */
		return (rapl_dram_energy_units = 15.3 / 1000000);
	default:
		return (rapl_energy_units);
	}
}
/*
 * rapl_probe()
 *
 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
 */
void rapl_probe(unsigned int family, unsigned int model)
{
	unsigned long long msr;
	unsigned int time_unit;
	double tdp;

	switch (model) {
	case 0x2A:	/* SNB */
	case 0x3A:	/* IVB */
	case 0x3C:	/* HSW */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x47:	/* BDW */
		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
		break;
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
		break;
	case 0x3F:	/* HSX */
	case 0x4F:	/* BDX */
	case 0x56:	/* BDX-DE */
	case 0x57:	/* KNL */
		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
		break;
	case 0x2D:	/* SNB Xeon */
	case 0x3E:	/* IVB Xeon */
		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
		break;
	case 0x37:	/* BYT */
	case 0x4D:	/* AVN */
		do_rapl = RAPL_PKG | RAPL_CORES;
		break;
	default:
		return;
	}

	/* units on package 0, verify later other packages match */
	if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
		return;

	rapl_power_units = 1.0 / (1 << (msr & 0xF));
	if (model == 0x37)
		rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
	else
		rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));

	rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);

	time_unit = msr >> 16 & 0xF;
	if (time_unit == 0)
		time_unit = 0xA;

	rapl_time_units = 1.0 / (1 << (time_unit));

	tdp = get_tdp(model);

	rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
	if (debug)
		fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);

	return;
}
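/*
 * Worked example of the RAPL unit decode above (added, not from the
 * original source): a common MSR_RAPL_POWER_UNIT value is 0xA0E03,
 * which gives
 *	rapl_power_units  = 1 / 2^0x3  = 0.125 W   (bits 3:0)
 *	rapl_energy_units = 1 / 2^0xE  = 61 uJ     (bits 12:8)
 *	rapl_time_units   = 1 / 2^0xA  = 977 us    (bits 19:16)
 * so an energy-status delta of 1,000,000 counts over a 1 second
 * interval corresponds to roughly 61 Watts.
 */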
void perf_limit_reasons_probe(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return;

	if (family != 6)
		return;

	switch (model) {
	case 0x3C:	/* HSW */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
		do_gfx_perf_limit_reasons = 1;
		/* fall through: HSW client parts also have core/ring limits */
	case 0x3F:	/* HSX */
		do_core_perf_limit_reasons = 1;
		do_ring_perf_limit_reasons = 1;
	default:
		return;
	}
}
int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int dts, dts2;
	int cpu;

	if (!(do_dts || do_ptm))
		return 0;

	cpu = t->cpu_id;

	/* DTS is per-core, no need to print for each thread */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
			cpu, msr, tcc_activation_temp - dts);

		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		dts2 = (msr >> 8) & 0x7F;
		fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
	}

	if (do_dts) {
		unsigned int resolution;

		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		resolution = (msr >> 27) & 0xF;
		fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
			cpu, msr, tcc_activation_temp - dts, resolution);

		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		dts2 = (msr >> 8) & 0x7F;
		fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
	}

	return 0;
}
void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
{
	fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
		cpu, label,
		((msr >> 15) & 1) ? "EN" : "DIS",
		((msr >> 0) & 0x7FFF) * rapl_power_units,
		(1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
		(((msr >> 16) & 1) ? "EN" : "DIS"));

	return;
}
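/*
 * Worked example of the power-limit fields above (added, not from the
 * original source): with rapl_power_units = 0.125 W and
 * rapl_time_units = 1/1024 sec, a 15-bit limit field of 0x168 (= 360)
 * decodes to 360 * 0.125 = 45 W, and a time window with Y = 0x0A and
 * Z = 0x3 decodes to (1 + 3/4) * 2^10 * (1/1024) = 1.75 sec.
 */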
int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	int cpu;

	if (!do_rapl)
		return 0;

	/* RAPL counters are per package, so print only for 1st thread/package */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
		return -1;

	fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
		"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
		rapl_power_units, rapl_energy_units, rapl_time_units);

	if (do_rapl & RAPL_PKG_POWER_INFO) {

		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
			return -5;

		fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
			cpu, msr,
			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
	}
	if (do_rapl & RAPL_PKG) {

		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
			return -9;

		fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 63) & 1 ? "": "UN");

		print_power_limit_msr(cpu, msr, "PKG Limit #1");
		fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
			cpu,
			((msr >> 47) & 1) ? "EN" : "DIS",
			((msr >> 32) & 0x7FFF) * rapl_power_units,
			(1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
			((msr >> 48) & 1) ? "EN" : "DIS");
	}

	if (do_rapl & RAPL_DRAM_POWER_INFO) {
		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
			return -6;

		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
			cpu, msr,
			((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
	}
	if (do_rapl & RAPL_DRAM) {
		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
			return -9;
		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 31) & 1 ? "": "UN");

		print_power_limit_msr(cpu, msr, "DRAM Limit");
	}
	if (do_rapl & RAPL_CORE_POLICY) {

		if (get_msr(cpu, MSR_PP0_POLICY, &msr))
			return -7;

		fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
	}
	if (do_rapl & RAPL_CORES) {

		if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
			return -9;
		fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 31) & 1 ? "": "UN");
		print_power_limit_msr(cpu, msr, "Cores Limit");
	}
	if (do_rapl & RAPL_GFX) {

		if (get_msr(cpu, MSR_PP1_POLICY, &msr))
			return -8;

		fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);

		if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
			return -9;
		fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 31) & 1 ? "": "UN");
		print_power_limit_msr(cpu, msr, "GFX Limit");
	}
	return 0;
}
/*
 * SNB adds support for additional MSRs:
 *
 * MSR_PKG_C7_RESIDENCY		0x000003fa
 * MSR_CORE_C7_RESIDENCY	0x000003fe
 * MSR_PKG_C2_RESIDENCY		0x0000060d
 */
int has_snb_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x2A:	/* SNB */
	case 0x2D:	/* SNB Xeon */
	case 0x3A:	/* IVB */
	case 0x3E:	/* IVB Xeon */
	case 0x3C:	/* HSW */
	case 0x3F:	/* HSW */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x47:	/* BDW */
	case 0x4F:	/* BDX */
	case 0x56:	/* BDX-DE */
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}
/*
 * HSW adds support for additional MSRs:
 *
 * MSR_PKG_C8_RESIDENCY		0x00000630
 * MSR_PKG_C9_RESIDENCY		0x00000631
 * MSR_PKG_C10_RESIDENCY	0x00000632
 */
int has_hsw_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x45:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}
/*
 * SKL adds support for additional MSRs:
 *
 * MSR_PKG_WEIGHTED_CORE_C0_RES		0x00000658
 * MSR_PKG_ANY_CORE_C0_RES		0x00000659
 * MSR_PKG_ANY_GFXE_C0_RES		0x0000065A
 * MSR_PKG_BOTH_CORE_GFXE_C0_RES	0x0000065B
 */
int has_skl_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}
int is_slm(unsigned int family, unsigned int model)
{
	switch (model) {
	case 0x37:	/* BYT */
	case 0x4D:	/* AVN */
		return 1;
	}
	return 0;
}

int is_knl(unsigned int family, unsigned int model)
{
	switch (model) {
	case 0x57:	/* KNL */
		return 1;
	}
	return 0;
}
#define SLM_BCLK_FREQS 5
double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};

double slm_bclk(void)
{
	unsigned long long msr = 3;
	unsigned int i;
	double freq;

	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
		fprintf(stderr, "SLM BCLK: unknown\n");

	i = msr & 0xf;
	if (i >= SLM_BCLK_FREQS) {
		fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
		i = 3;	/* fall back to the 116.7 MHz table entry */
	}
	freq = slm_freq_table[i];

	fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);

	return freq;
}
double discover_bclk(unsigned int family, unsigned int model)
{
	if (has_snb_msrs(family, model))
		return 100.00;
	else if (is_slm(family, model))
		return slm_bclk();
	else
		return 133.33;
}
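/*
 * Illustrative sketch (editor's note): the bclk value returned above is what
 * turbostat multiplies MSR ratio fields by to report MHz; e.g. a non-turbo
 * ratio of 24 with a 100.0 MHz bclk corresponds to 2400 MHz.  The helper
 * below is hypothetical.
 */
static inline double ratio_to_mhz(unsigned int ratio, double bclk_mhz)
{
	return ratio * bclk_mhz;
}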
/*
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 *
 * Older processors do not have this MSR, so there we guess,
 * but also allow a cmdline over-ride with -T.
 *
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
 */
int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int target_c_local;
	int cpu;

	/* tcc_activation_temp is used only for dts or ptm */
	if (!(do_dts || do_ptm))
		return 0;

	/* this is a per-package concept */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) ||
	    !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (tcc_activation_temp_override != 0) {
		tcc_activation_temp = tcc_activation_temp_override;
		fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
			cpu, tcc_activation_temp);
		return 0;
	}

	/* Temperature Target MSR is Nehalem and newer only */
	if (!do_nhm_platform_info)
		goto guess;

	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
		goto guess;

	target_c_local = (msr >> 16) & 0xFF;

	if (debug)
		fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
			cpu, msr, target_c_local);

	if (!target_c_local)
		goto guess;

	tcc_activation_temp = target_c_local;

	return 0;

guess:
	tcc_activation_temp = TJMAX_DEFAULT;
	fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
		cpu, tcc_activation_temp);

	return 0;
}
void process_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);

	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (debug)
		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (debug)
		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);

	if (!(edx & (1 << 5)))
		errx(1, "CPUID: no MSR");

	/*
	 * check max extended function levels of CPUID.
	 * This is needed to check for invariant TSC.
	 * This check is valid for both Intel and AMD.
	 */
	ebx = ecx = edx = 0;
	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);

	if (max_level >= 0x80000007) {
		/*
		 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
		 * this check is valid for both Intel and AMD
		 */
		__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
		has_invariant_tsc = edx & (1 << 8);
	}

	/*
	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
	 * this check is valid for both Intel and AMD
	 */
	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
	has_aperf = ecx & (1 << 0);
	do_dts = eax & (1 << 0);
	do_ptm = eax & (1 << 6);
	has_epb = ecx & (1 << 3);

	if (debug)
		fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
			has_aperf ? "" : "No ",
			do_dts ? "" : "No ",
			do_ptm ? "" : "No ",
			has_epb ? "" : "No ");

	if (max_level > 0x15) {
		unsigned int eax_crystal;
		unsigned int ebx_tsc;

		/*
		 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
		 */
		eax_crystal = ebx_tsc = crystal_hz = edx = 0;
		__get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx);

		if (ebx_tsc != 0) {
			if (debug && (ebx != 0))
				fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
					eax_crystal, ebx_tsc, crystal_hz);

			if (crystal_hz == 0)
				switch (model) {
				case 0x4E:	/* SKL */
				case 0x5E:	/* SKL */
					crystal_hz = 24000000;	/* 24 MHz */
					break;
				default:
					crystal_hz = 0;
				}

			if (crystal_hz) {
				tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
				if (debug)
					fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
						tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal);
			}
		}
	}

	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
	do_snb_cstates = has_snb_msrs(family, model);
	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
	do_pc3 = (pkg_cstate_limit >= PCL__3);
	do_pc6 = (pkg_cstate_limit >= PCL__6);
	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
	do_c8_c9_c10 = has_hsw_msrs(family, model);
	do_skl_residency = has_skl_msrs(family, model);
	do_slm_cstates = is_slm(family, model);
	do_knl_cstates = is_knl(family, model);
	bclk = discover_bclk(family, model);

	rapl_probe(family, model);
	perf_limit_reasons_probe(family, model);

	if (debug)
		dump_cstate_pstate_config_info();

	return;
}
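/*
 * Worked example (editor's note): CPUID.1 EAX (fms) = 0x000306C3 decodes
 * with the shifts above as:
 *	stepping       =  fms        & 0xf = 0x3
 *	base model     = (fms >>  4) & 0xf = 0xC
 *	base family    = (fms >>  8) & 0xf = 0x6
 *	extended model = (fms >> 16) & 0xf = 0x3
 * Since family == 6, the displayed model becomes 0xC + (0x3 << 4) = 0x3C,
 * the HSW model number used by the probe routines above.
 */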
2695 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
2697 "Turbostat forks the specified COMMAND and prints statistics\n"
2698 "when COMMAND completes.\n"
2699 "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
2700 "to print statistics, until interrupted.\n"
2701 "--debug run in \"debug\" mode\n"
2702 "--interval sec Override default 5-second measurement interval\n"
2703 "--help print this help message\n"
2704 "--counter msr print 32-bit counter at address \"msr\"\n"
2705 "--Counter msr print 64-bit Counter at address \"msr\"\n"
2706 "--msr msr print 32-bit value at address \"msr\"\n"
2707 "--MSR msr print 64-bit Value at address \"msr\"\n"
2708 "--version print version information\n"
2710 "For more help, run \"man turbostat\"\n");
/*
 * in /dev/cpu/ return success for names that are numbers
 * ie. filter out ".", "..", "microcode".
 */
int dir_filter(const struct dirent *dirp)
{
	if (isdigit(dirp->d_name[0]))
		return 1;
	else
		return 0;
}

int open_dev_cpu_msr(int dummy1)
{
	return 0;
}
void topology_probe()
{
	int i;
	int max_core_id = 0;
	int max_package_id = 0;
	int max_siblings = 0;
	struct cpu_topology {
		int core_id;
		int physical_package_id;
	} *cpus;

	/* Initialize num_cpus, max_cpu_num */
	topo.num_cpus = 0;
	topo.max_cpu_num = 0;
	for_all_proc_cpus(count_cpus);
	if (!summary_only && topo.num_cpus > 1)
		show_cpu = 1;

	if (debug)
		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);

	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
	if (cpus == NULL)
		err(1, "calloc cpus");

	/*
	 * Allocate and initialize cpu_present_set
	 */
	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_present_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
	for_all_proc_cpus(mark_cpu_present);

	/*
	 * Allocate and initialize cpu_affinity_set
	 */
	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_affinity_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);

	/*
	 * For online cpus,
	 * find max_core_id, max_package_id
	 */
	for (i = 0; i <= topo.max_cpu_num; ++i) {
		int siblings;

		if (cpu_is_not_present(i)) {
			if (debug)
				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
			continue;
		}
		cpus[i].core_id = get_core_id(i);
		if (cpus[i].core_id > max_core_id)
			max_core_id = cpus[i].core_id;

		cpus[i].physical_package_id = get_physical_package_id(i);
		if (cpus[i].physical_package_id > max_package_id)
			max_package_id = cpus[i].physical_package_id;

		siblings = get_num_ht_siblings(i);
		if (siblings > max_siblings)
			max_siblings = siblings;
		if (debug)
			fprintf(stderr, "cpu %d pkg %d core %d\n",
				i, cpus[i].physical_package_id, cpus[i].core_id);
	}
	topo.num_cores_per_pkg = max_core_id + 1;
	if (debug)
		fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
			max_core_id, topo.num_cores_per_pkg);
	if (debug && !summary_only && topo.num_cores_per_pkg > 1)
		show_core = 1;

	topo.num_packages = max_package_id + 1;
	if (debug)
		fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
			max_package_id, topo.num_packages);
	if (debug && !summary_only && topo.num_packages > 1)
		show_pkg = 1;

	topo.num_threads_per_core = max_siblings;
	if (debug)
		fprintf(stderr, "max_siblings %d\n", max_siblings);

	free(cpus);
}
/*
 * Allocate the per-thread, per-core and per-package counter arrays,
 * sized from the topology discovered above.
 */
void
allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
{
	int i;

	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
		topo.num_packages, sizeof(struct thread_data));
	if (*t == NULL)
		goto error;

	for (i = 0; i < topo.num_threads_per_core *
		topo.num_cores_per_pkg * topo.num_packages; i++)
		(*t)[i].cpu_id = -1;

	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
		sizeof(struct core_data));
	if (*c == NULL)
		goto error;

	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
		(*c)[i].core_id = -1;

	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
	if (*p == NULL)
		goto error;

	for (i = 0; i < topo.num_packages; i++)
		(*p)[i].package_id = i;

	return;
error:
	err(1, "calloc counters");
}
/*
 * set cpu_id, core_num, pkg_num
 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
 *
 * increment topo.num_cores when 1st core in pkg seen
 */
void init_counter(struct thread_data *thread_base, struct core_data *core_base,
	struct pkg_data *pkg_base, int thread_num, int core_num,
	int pkg_num, int cpu_id)
{
	struct thread_data *t;
	struct core_data *c;
	struct pkg_data *p;

	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
	c = GET_CORE(core_base, core_num, pkg_num);
	p = GET_PKG(pkg_base, pkg_num);

	t->cpu_id = cpu_id;
	if (thread_num == 0) {
		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
		if (cpu_is_first_core_in_package(cpu_id))
			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
	}

	c->core_id = core_num;
	p->package_id = pkg_num;
}
int initialize_counters(int cpu_id)
{
	int my_thread_id, my_core_id, my_package_id;

	my_package_id = get_physical_package_id(cpu_id);
	my_core_id = get_core_id(cpu_id);
	my_thread_id = get_cpu_position_in_core(cpu_id);
	if (!my_thread_id)
		topo.num_cores++;

	init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	return 0;
}
void allocate_output_buffer()
{
	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
	outp = output_buffer;
	if (outp == NULL)
		err(-1, "calloc output buffer");
}
void setup_all_buffers(void)
{
	topology_probe();
	allocate_counters(&thread_even, &core_even, &package_even);
	allocate_counters(&thread_odd, &core_odd, &package_odd);
	allocate_output_buffer();
	for_all_proc_cpus(initialize_counters);
}
void set_base_cpu(void)
{
	base_cpu = sched_getcpu();
	if (base_cpu < 0)
		err(-ENODEV, "No valid cpus found");

	if (debug)
		fprintf(stderr, "base_cpu = %d\n", base_cpu);
}
void turbostat_init()
{
	setup_all_buffers();
	set_base_cpu();
	check_dev_msr();
	check_permissions();
	process_cpuid();

	if (debug)
		for_all_cpus(print_epb, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_perf_limit, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_rapl, ODD_COUNTERS);

	for_all_cpus(set_temperature_target, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_thermal, ODD_COUNTERS);
}
int fork_it(char **argv)
{
	pid_t child_pid;
	int status;

	status = for_all_cpus(get_counters, EVEN_COUNTERS);
	if (status)
		exit(status);
	/* clear affinity side-effect of get_counters() */
	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
	gettimeofday(&tv_even, (struct timezone *)NULL);

	child_pid = fork();
	if (!child_pid) {
		/* child */
		execvp(argv[0], argv);
	} else {
		/* parent */
		if (child_pid == -1)
			err(1, "fork");

		signal(SIGINT, SIG_IGN);
		signal(SIGQUIT, SIG_IGN);
		if (waitpid(child_pid, &status, 0) == -1)
			err(status, "waitpid");
	}
	/*
	 * n.b. fork_it() does not check for errors from for_all_cpus()
	 * because re-starting is problematic when forking
	 */
	for_all_cpus(get_counters, ODD_COUNTERS);
	gettimeofday(&tv_odd, (struct timezone *)NULL);
	timersub(&tv_odd, &tv_even, &tv_delta);
	for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
	compute_average(EVEN_COUNTERS);
	format_all_counters(EVEN_COUNTERS);
	flush_stderr();

	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);

	return status;
}
int get_and_dump_counters(void)
{
	int status;

	status = for_all_cpus(get_counters, ODD_COUNTERS);
	if (status)
		return status;

	status = for_all_cpus(dump_counters, ODD_COUNTERS);
	if (status)
		return status;

	flush_stdout();

	return status;
}
void print_version() {
	fprintf(stderr, "turbostat version 4.7 27-May, 2015"
		" - Len Brown <lenb@kernel.org>\n");
}
void cmdline(int argc, char **argv)
{
	int opt;
	int option_index = 0;

	static struct option long_options[] = {
		{"Counter",	required_argument,	0, 'C'},
		{"counter",	required_argument,	0, 'c'},
		{"Dump",	no_argument,		0, 'D'},
		{"debug",	no_argument,		0, 'd'},
		{"interval",	required_argument,	0, 'i'},
		{"help",	no_argument,		0, 'h'},
		{"Joules",	no_argument,		0, 'J'},
		{"MSR",		required_argument,	0, 'M'},
		{"msr",		required_argument,	0, 'm'},
		{"Package",	no_argument,		0, 'p'},
		{"processor",	no_argument,		0, 'p'},
		{"Summary",	no_argument,		0, 'S'},
		{"TCC",		required_argument,	0, 'T'},
		{"version",	no_argument,		0, 'v'},
		{0,		0,			0,  0}
	};

	while ((opt = getopt_long_only(argc, argv, "C:c:Ddhi:JM:m:PpST:v",
				long_options, &option_index)) != -1) {
		switch (opt) {
		case 'C':
			sscanf(optarg, "%x", &extra_delta_offset64);
			break;
		case 'c':
			sscanf(optarg, "%x", &extra_delta_offset32);
			break;
		case 'D':
			dump_only++;
			break;
		case 'd':
			debug++;
			break;
		case 'i':
			interval_sec = atoi(optarg);
			break;
		case 'J':
			rapl_joules++;
			break;
		case 'M':
			sscanf(optarg, "%x", &extra_msr_offset64);
			break;
		case 'm':
			sscanf(optarg, "%x", &extra_msr_offset32);
			break;
		case 'P':
			show_pkg_only++;
			break;
		case 'p':
			show_core_only++;
			break;
		case 'S':
			summary_only++;
			break;
		case 'T':
			tcc_activation_temp_override = atoi(optarg);
			break;
		case 'v':
			print_version();
			exit(0);
			break;
		case 'h':
		default:
			help();
			exit(1);
		}
	}
}
int main(int argc, char **argv)
{
	cmdline(argc, argv);

	if (debug)
		print_version();

	turbostat_init();

	/* dump counters and exit */
	if (dump_only)
		return get_and_dump_counters();

	/*
	 * if any params left, it must be a command to fork
	 */
	if (argc - optind)
		return fork_it(argv + optind);