tools/power/x86/turbostat/turbostat.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * turbostat -- show CPU frequency and C-state residency
4 * on modern Intel and AMD processors.
6 * Copyright (c) 2013 Intel Corporation.
7 * Len Brown <len.brown@intel.com>
8 */
10 #define _GNU_SOURCE
11 #include MSRHEADER
12 #include INTEL_FAMILY_HEADER
13 #include <stdarg.h>
14 #include <stdio.h>
15 #include <err.h>
16 #include <unistd.h>
17 #include <sys/types.h>
18 #include <sys/wait.h>
19 #include <sys/stat.h>
20 #include <sys/select.h>
21 #include <sys/resource.h>
22 #include <fcntl.h>
23 #include <signal.h>
24 #include <sys/time.h>
25 #include <stdlib.h>
26 #include <getopt.h>
27 #include <dirent.h>
28 #include <string.h>
29 #include <ctype.h>
30 #include <sched.h>
31 #include <time.h>
32 #include <cpuid.h>
33 #include <linux/capability.h>
34 #include <errno.h>
35 #include <math.h>
37 char *proc_stat = "/proc/stat";
38 FILE *outf;
39 int *fd_percpu;
40 struct timeval interval_tv = {5, 0};
41 struct timespec interval_ts = {5, 0};
42 unsigned int num_iterations;
43 unsigned int debug;
44 unsigned int quiet;
45 unsigned int shown;
46 unsigned int sums_need_wide_columns;
47 unsigned int rapl_joules;
48 unsigned int summary_only;
49 unsigned int list_header_only;
50 unsigned int dump_only;
51 unsigned int do_snb_cstates;
52 unsigned int do_knl_cstates;
53 unsigned int do_slm_cstates;
54 unsigned int use_c1_residency_msr;
55 unsigned int has_aperf;
56 unsigned int has_epb;
57 unsigned int do_irtl_snb;
58 unsigned int do_irtl_hsw;
59 unsigned int units = 1000000; /* MHz etc */
60 unsigned int genuine_intel;
61 unsigned int authentic_amd;
62 unsigned int hygon_genuine;
63 unsigned int max_level, max_extended_level;
64 unsigned int has_invariant_tsc;
65 unsigned int do_nhm_platform_info;
66 unsigned int no_MSR_MISC_PWR_MGMT;
67 unsigned int aperf_mperf_multiplier = 1;
68 double bclk;
69 double base_hz;
70 unsigned int has_base_hz;
71 double tsc_tweak = 1.0;
72 unsigned int show_pkg_only;
73 unsigned int show_core_only;
74 char *output_buffer, *outp;
75 unsigned int do_rapl;
76 unsigned int do_dts;
77 unsigned int do_ptm;
78 unsigned long long gfx_cur_rc6_ms;
79 unsigned long long cpuidle_cur_cpu_lpi_us;
80 unsigned long long cpuidle_cur_sys_lpi_us;
81 unsigned int gfx_cur_mhz;
82 unsigned int tcc_activation_temp;
83 unsigned int tcc_activation_temp_override;
84 double rapl_power_units, rapl_time_units;
85 double rapl_dram_energy_units, rapl_energy_units;
86 double rapl_joule_counter_range;
87 unsigned int do_core_perf_limit_reasons;
88 unsigned int has_automatic_cstate_conversion;
89 unsigned int do_gfx_perf_limit_reasons;
90 unsigned int do_ring_perf_limit_reasons;
91 unsigned int crystal_hz;
92 unsigned long long tsc_hz;
93 int base_cpu;
94 double discover_bclk(unsigned int family, unsigned int model);
95 unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
96 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
97 unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */
98 unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */
99 unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
100 unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
101 unsigned int has_misc_feature_control;
102 unsigned int first_counter_read = 1;
103 int ignore_stdin;
105 #define RAPL_PKG (1 << 0)
106 /* 0x610 MSR_PKG_POWER_LIMIT */
107 /* 0x611 MSR_PKG_ENERGY_STATUS */
108 #define RAPL_PKG_PERF_STATUS (1 << 1)
109 /* 0x613 MSR_PKG_PERF_STATUS */
110 #define RAPL_PKG_POWER_INFO (1 << 2)
111 /* 0x614 MSR_PKG_POWER_INFO */
113 #define RAPL_DRAM (1 << 3)
114 /* 0x618 MSR_DRAM_POWER_LIMIT */
115 /* 0x619 MSR_DRAM_ENERGY_STATUS */
116 #define RAPL_DRAM_PERF_STATUS (1 << 4)
117 /* 0x61b MSR_DRAM_PERF_STATUS */
118 #define RAPL_DRAM_POWER_INFO (1 << 5)
119 /* 0x61c MSR_DRAM_POWER_INFO */
121 #define RAPL_CORES_POWER_LIMIT (1 << 6)
122 /* 0x638 MSR_PP0_POWER_LIMIT */
123 #define RAPL_CORE_POLICY (1 << 7)
124 /* 0x63a MSR_PP0_POLICY */
126 #define RAPL_GFX (1 << 8)
127 /* 0x640 MSR_PP1_POWER_LIMIT */
128 /* 0x641 MSR_PP1_ENERGY_STATUS */
129 /* 0x642 MSR_PP1_POLICY */
131 #define RAPL_CORES_ENERGY_STATUS (1 << 9)
132 /* 0x639 MSR_PP0_ENERGY_STATUS */
133 #define RAPL_PER_CORE_ENERGY (1 << 10)
134 /* Indicates cores energy collection is per-core,
135 * not per-package. */
136 #define RAPL_AMD_F17H (1 << 11)
137 /* 0xc0010299 MSR_RAPL_PWR_UNIT */
138 /* 0xc001029a MSR_CORE_ENERGY_STAT */
139 /* 0xc001029b MSR_PKG_ENERGY_STAT */
140 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
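/*
 * Illustrative note (not in the original source): do_rapl is a bitmask
 * built from the RAPL_* flags above, so feature tests throughout the file
 * are simple ANDs.  A minimal sketch, assuming a hypothetical platform
 * with package, package-info and DRAM RAPL support:
 *
 *	do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO | RAPL_DRAM;
 *
 *	if (do_rapl & RAPL_DRAM)
 *		... read MSR_DRAM_ENERGY_STATUS once per package ...
 *	if (do_rapl & RAPL_PER_CORE_ENERGY)
 *		... not set here, so per-core energy is skipped ...
 */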
141 #define TJMAX_DEFAULT 100
143 /* MSRs that are not yet in the kernel-provided header. */
144 #define MSR_RAPL_PWR_UNIT 0xc0010299
145 #define MSR_CORE_ENERGY_STAT 0xc001029a
146 #define MSR_PKG_ENERGY_STAT 0xc001029b
148 #define MAX(a, b) ((a) > (b) ? (a) : (b))
150 /*
151 * buffer size used by sscanf() for added column names
152 * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters
153 */
154 #define NAME_BYTES 20
155 #define PATH_BYTES 128
157 int backwards_count;
158 char *progname;
160 #define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... */
161 cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset;
162 size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
163 #define MAX_ADDED_COUNTERS 8
164 #define MAX_ADDED_THREAD_COUNTERS 24
165 #define BITMASK_SIZE 32
167 struct thread_data {
168 struct timeval tv_begin;
169 struct timeval tv_end;
170 struct timeval tv_delta;
171 unsigned long long tsc;
172 unsigned long long aperf;
173 unsigned long long mperf;
174 unsigned long long c1;
175 unsigned long long irq_count;
176 unsigned int smi_count;
177 unsigned int cpu_id;
178 unsigned int apic_id;
179 unsigned int x2apic_id;
180 unsigned int flags;
181 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
182 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
183 unsigned long long counter[MAX_ADDED_THREAD_COUNTERS];
184 } *thread_even, *thread_odd;
186 struct core_data {
187 unsigned long long c3;
188 unsigned long long c6;
189 unsigned long long c7;
190 unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */
191 unsigned int core_temp_c;
192 unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */
193 unsigned int core_id;
194 unsigned long long counter[MAX_ADDED_COUNTERS];
195 } *core_even, *core_odd;
197 struct pkg_data {
198 unsigned long long pc2;
199 unsigned long long pc3;
200 unsigned long long pc6;
201 unsigned long long pc7;
202 unsigned long long pc8;
203 unsigned long long pc9;
204 unsigned long long pc10;
205 unsigned long long cpu_lpi;
206 unsigned long long sys_lpi;
207 unsigned long long pkg_wtd_core_c0;
208 unsigned long long pkg_any_core_c0;
209 unsigned long long pkg_any_gfxe_c0;
210 unsigned long long pkg_both_core_gfxe_c0;
211 long long gfx_rc6_ms;
212 unsigned int gfx_mhz;
213 unsigned int package_id;
214 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
215 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
216 unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
217 unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
218 unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
219 unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
220 unsigned int pkg_temp_c;
221 unsigned long long counter[MAX_ADDED_COUNTERS];
222 } *package_even, *package_odd;
224 #define ODD_COUNTERS thread_odd, core_odd, package_odd
225 #define EVEN_COUNTERS thread_even, core_even, package_even
227 #define GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no) \
228 ((thread_base) + \
229 ((pkg_no) * \
230 topo.nodes_per_pkg * topo.cores_per_node * topo.threads_per_core) + \
231 ((node_no) * topo.cores_per_node * topo.threads_per_core) + \
232 ((core_no) * topo.threads_per_core) + \
233 (thread_no))
235 #define GET_CORE(core_base, core_no, node_no, pkg_no) \
236 ((core_base) + \
237 ((pkg_no) * topo.nodes_per_pkg * topo.cores_per_node) + \
238 ((node_no) * topo.cores_per_node) + \
239 (core_no))
242 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
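/*
 * Worked example (illustrative, not from the original source): the
 * per-thread, per-core and per-package data live in flat arrays laid out
 * in topology order, and the GET_* macros above compute the flat index.
 * Assuming a hypothetical box with topo.nodes_per_pkg = 1,
 * topo.cores_per_node = 4 and topo.threads_per_core = 2, the thread_data
 * for pkg 1, node 0, core 2, thread 1 is at index
 *
 *	(1 * 1 * 4 * 2) + (0 * 4 * 2) + (2 * 2) + 1 = 13
 *
 * i.e. GET_THREAD(thread_base, 1, 2, 0, 1) == thread_base + 13.
 */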
244 enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE};
245 enum counter_type {COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC};
246 enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT};
248 struct msr_counter {
249 unsigned int msr_num;
250 char name[NAME_BYTES];
251 char path[PATH_BYTES];
252 unsigned int width;
253 enum counter_type type;
254 enum counter_format format;
255 struct msr_counter *next;
256 unsigned int flags;
257 #define FLAGS_HIDE (1 << 0)
258 #define FLAGS_SHOW (1 << 1)
259 #define SYSFS_PERCPU (1 << 1)
260 };
262 struct sys_counters {
263 unsigned int added_thread_counters;
264 unsigned int added_core_counters;
265 unsigned int added_package_counters;
266 struct msr_counter *tp;
267 struct msr_counter *cp;
268 struct msr_counter *pp;
269 } sys;
271 struct system_summary {
272 struct thread_data threads;
273 struct core_data cores;
274 struct pkg_data packages;
275 } average;
277 struct cpu_topology {
278 int physical_package_id;
279 int die_id;
280 int logical_cpu_id;
281 int physical_node_id;
282 int logical_node_id; /* 0-based count within the package */
283 int physical_core_id;
284 int thread_id;
285 cpu_set_t *put_ids; /* Processing Unit/Thread IDs */
286 } *cpus;
288 struct topo_params {
289 int num_packages;
290 int num_die;
291 int num_cpus;
292 int num_cores;
293 int max_cpu_num;
294 int max_node_num;
295 int nodes_per_pkg;
296 int cores_per_node;
297 int threads_per_core;
298 } topo;
300 struct timeval tv_even, tv_odd, tv_delta;
302 int *irq_column_2_cpu; /* /proc/interrupts column numbers */
303 int *irqs_per_cpu; /* indexed by cpu_num */
305 void setup_all_buffers(void);
307 int cpu_is_not_present(int cpu)
309 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
311 /*
312 * run func(thread, core, package) in topology order
313 * skip non-present cpus
314 */
316 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
317 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
319 int retval, pkg_no, core_no, thread_no, node_no;
321 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
322 for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
323 for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
324 for (thread_no = 0; thread_no <
325 topo.threads_per_core; ++thread_no) {
326 struct thread_data *t;
327 struct core_data *c;
328 struct pkg_data *p;
330 t = GET_THREAD(thread_base, thread_no,
331 core_no, node_no,
332 pkg_no);
334 if (cpu_is_not_present(t->cpu_id))
335 continue;
337 c = GET_CORE(core_base, core_no,
338 node_no, pkg_no);
339 p = GET_PKG(pkg_base, pkg_no);
341 retval = func(t, c, p);
342 if (retval)
343 return retval;
348 return 0;
351 int cpu_migrate(int cpu)
353 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
354 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
355 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
356 return -1;
357 else
358 return 0;
359 }
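/*
 * Illustrative sketch (not in the original source): callers pin the
 * current thread to the target CPU before sampling, so that rdtsc() and
 * the MSR reads that follow execute on that CPU.  get_counters() below
 * uses exactly this pattern:
 *
 *	if (cpu_migrate(cpu)) {
 *		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
 *		return -1;
 *	}
 *	... TSC and MSR reads now run on "cpu" ...
 */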
360 int get_msr_fd(int cpu)
362 char pathname[32];
363 int fd;
365 fd = fd_percpu[cpu];
367 if (fd)
368 return fd;
370 sprintf(pathname, "/dev/cpu/%d/msr", cpu);
371 fd = open(pathname, O_RDONLY);
372 if (fd < 0)
373 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
375 fd_percpu[cpu] = fd;
377 return fd;
380 int get_msr(int cpu, off_t offset, unsigned long long *msr)
382 ssize_t retval;
384 retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);
386 if (retval != sizeof *msr)
387 err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);
389 return 0;
390 }
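/*
 * Illustrative use (not in the original source), assuming MSRHEADER
 * provides MSR_IA32_MPERF: read one MSR from CPU 0 and print it.
 *
 *	unsigned long long mperf;
 *
 *	get_msr(0, MSR_IA32_MPERF, &mperf);
 *	fprintf(outf, "cpu0: MPERF: %016llX\n", mperf);
 *
 * get_msr() exits via err() on a short read, so if it returns at all the
 * value was read successfully.
 */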
392 /*
393 * This list matches the column headers, except
394 * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time
395 * 2. Core and CPU are moved to the end, we can't have strings that contain them
396 * matching on them for --show and --hide.
397 */
398 struct msr_counter bic[] = {
399 { 0x0, "usec" },
400 { 0x0, "Time_Of_Day_Seconds" },
401 { 0x0, "Package" },
402 { 0x0, "Node" },
403 { 0x0, "Avg_MHz" },
404 { 0x0, "Busy%" },
405 { 0x0, "Bzy_MHz" },
406 { 0x0, "TSC_MHz" },
407 { 0x0, "IRQ" },
408 { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
409 { 0x0, "sysfs" },
410 { 0x0, "CPU%c1" },
411 { 0x0, "CPU%c3" },
412 { 0x0, "CPU%c6" },
413 { 0x0, "CPU%c7" },
414 { 0x0, "ThreadC" },
415 { 0x0, "CoreTmp" },
416 { 0x0, "CoreCnt" },
417 { 0x0, "PkgTmp" },
418 { 0x0, "GFX%rc6" },
419 { 0x0, "GFXMHz" },
420 { 0x0, "Pkg%pc2" },
421 { 0x0, "Pkg%pc3" },
422 { 0x0, "Pkg%pc6" },
423 { 0x0, "Pkg%pc7" },
424 { 0x0, "Pkg%pc8" },
425 { 0x0, "Pkg%pc9" },
426 { 0x0, "Pk%pc10" },
427 { 0x0, "CPU%LPI" },
428 { 0x0, "SYS%LPI" },
429 { 0x0, "PkgWatt" },
430 { 0x0, "CorWatt" },
431 { 0x0, "GFXWatt" },
432 { 0x0, "PkgCnt" },
433 { 0x0, "RAMWatt" },
434 { 0x0, "PKG_%" },
435 { 0x0, "RAM_%" },
436 { 0x0, "Pkg_J" },
437 { 0x0, "Cor_J" },
438 { 0x0, "GFX_J" },
439 { 0x0, "RAM_J" },
440 { 0x0, "Mod%c6" },
441 { 0x0, "Totl%C0" },
442 { 0x0, "Any%C0" },
443 { 0x0, "GFX%C0" },
444 { 0x0, "CPUGFX%" },
445 { 0x0, "Core" },
446 { 0x0, "CPU" },
447 { 0x0, "APIC" },
448 { 0x0, "X2APIC" },
449 { 0x0, "Die" },
450 };
452 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
453 #define BIC_USEC (1ULL << 0)
454 #define BIC_TOD (1ULL << 1)
455 #define BIC_Package (1ULL << 2)
456 #define BIC_Node (1ULL << 3)
457 #define BIC_Avg_MHz (1ULL << 4)
458 #define BIC_Busy (1ULL << 5)
459 #define BIC_Bzy_MHz (1ULL << 6)
460 #define BIC_TSC_MHz (1ULL << 7)
461 #define BIC_IRQ (1ULL << 8)
462 #define BIC_SMI (1ULL << 9)
463 #define BIC_sysfs (1ULL << 10)
464 #define BIC_CPU_c1 (1ULL << 11)
465 #define BIC_CPU_c3 (1ULL << 12)
466 #define BIC_CPU_c6 (1ULL << 13)
467 #define BIC_CPU_c7 (1ULL << 14)
468 #define BIC_ThreadC (1ULL << 15)
469 #define BIC_CoreTmp (1ULL << 16)
470 #define BIC_CoreCnt (1ULL << 17)
471 #define BIC_PkgTmp (1ULL << 18)
472 #define BIC_GFX_rc6 (1ULL << 19)
473 #define BIC_GFXMHz (1ULL << 20)
474 #define BIC_Pkgpc2 (1ULL << 21)
475 #define BIC_Pkgpc3 (1ULL << 22)
476 #define BIC_Pkgpc6 (1ULL << 23)
477 #define BIC_Pkgpc7 (1ULL << 24)
478 #define BIC_Pkgpc8 (1ULL << 25)
479 #define BIC_Pkgpc9 (1ULL << 26)
480 #define BIC_Pkgpc10 (1ULL << 27)
481 #define BIC_CPU_LPI (1ULL << 28)
482 #define BIC_SYS_LPI (1ULL << 29)
483 #define BIC_PkgWatt (1ULL << 30)
484 #define BIC_CorWatt (1ULL << 31)
485 #define BIC_GFXWatt (1ULL << 32)
486 #define BIC_PkgCnt (1ULL << 33)
487 #define BIC_RAMWatt (1ULL << 34)
488 #define BIC_PKG__ (1ULL << 35)
489 #define BIC_RAM__ (1ULL << 36)
490 #define BIC_Pkg_J (1ULL << 37)
491 #define BIC_Cor_J (1ULL << 38)
492 #define BIC_GFX_J (1ULL << 39)
493 #define BIC_RAM_J (1ULL << 40)
494 #define BIC_Mod_c6 (1ULL << 41)
495 #define BIC_Totl_c0 (1ULL << 42)
496 #define BIC_Any_c0 (1ULL << 43)
497 #define BIC_GFX_c0 (1ULL << 44)
498 #define BIC_CPUGFX (1ULL << 45)
499 #define BIC_Core (1ULL << 46)
500 #define BIC_CPU (1ULL << 47)
501 #define BIC_APIC (1ULL << 48)
502 #define BIC_X2APIC (1ULL << 49)
503 #define BIC_Die (1ULL << 50)
505 #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
507 unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
508 unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
510 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
511 #define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
512 #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
513 #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
514 #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
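/*
 * Illustrative note (not in the original source): a column is printed
 * only when it is both enabled (not hidden by the user) and present
 * (detected on this system), since DO_BIC() ANDs the two masks.
 * A minimal sketch:
 *
 *	BIC_PRESENT(BIC_IRQ);		-> bic_present now includes BIC_IRQ
 *	bic_enabled &= ~BIC_IRQ;	-> user hid the IRQ column
 *	DO_BIC(BIC_IRQ)			-> evaluates to 0, column is skipped
 */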
517 #define MAX_DEFERRED 16
518 char *deferred_skip_names[MAX_DEFERRED];
519 int deferred_skip_index;
521 /*
522 * HIDE_LIST - hide this list of counters, show the rest [default]
523 * SHOW_LIST - show this list of counters, hide the rest
524 */
525 enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST;
527 void help(void)
529 fprintf(outf,
530 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
531 "\n"
532 "Turbostat forks the specified COMMAND and prints statistics\n"
533 "when COMMAND completes.\n"
534 "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
535 "to print statistics, until interrupted.\n"
536 " -a, --add add a counter\n"
537 " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
538 " -c, --cpu cpu-set limit output to summary plus cpu-set:\n"
539 " {core | package | j,k,l..m,n-p }\n"
540 " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n"
541 " -D, --Dump displays the raw counter values\n"
542 " -e, --enable [all | column]\n"
543 " shows all or the specified disabled column\n"
544 " -H, --hide [column|column,column,...]\n"
545 " hide the specified column(s)\n"
546 " -i, --interval sec.subsec\n"
547 " Override default 5-second measurement interval\n"
548 " -J, --Joules displays energy in Joules instead of Watts\n"
549 " -l, --list list column headers only\n"
550 " -n, --num_iterations num\n"
551 " number of the measurement iterations\n"
552 " -o, --out file\n"
553 " create or truncate \"file\" for all output\n"
554 " -q, --quiet skip decoding system configuration header\n"
555 " -s, --show [column|column,column,...]\n"
556 " show only the specified column(s)\n"
557 " -S, --Summary\n"
558 " limits output to 1-line system summary per interval\n"
559 " -T, --TCC temperature\n"
560 " sets the Thermal Control Circuit temperature in\n"
561 " degrees Celsius\n"
562 " -h, --help print this help message\n"
563 " -v, --version print version information\n"
564 "\n"
565 "For more help, run \"man turbostat\"\n");
568 /*
569 * bic_lookup
570 * for all the strings in the comma-separated name_list,
571 * set the appropriate bit in the return value.
572 */
573 unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
575 int i;
576 unsigned long long retval = 0;
578 while (name_list) {
579 char *comma;
581 comma = strchr(name_list, ',');
583 if (comma)
584 *comma = '\0';
586 if (!strcmp(name_list, "all"))
587 return ~0;
589 for (i = 0; i < MAX_BIC; ++i) {
590 if (!strcmp(name_list, bic[i].name)) {
591 retval |= (1ULL << i);
592 break;
595 if (i == MAX_BIC) {
596 if (mode == SHOW_LIST) {
597 fprintf(stderr, "Invalid counter name: %s\n", name_list);
598 exit(-1);
600 deferred_skip_names[deferred_skip_index++] = name_list;
601 if (debug)
602 fprintf(stderr, "deferred \"%s\"\n", name_list);
603 if (deferred_skip_index >= MAX_DEFERRED) {
604 fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n",
605 MAX_DEFERRED, name_list);
606 help();
607 exit(1);
611 name_list = comma;
612 if (name_list)
613 name_list++;
616 return retval;
617 }
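/*
 * Illustrative usage (not from the original source; the option parsing
 * that calls bic_lookup() lives later in the file and may differ).  One
 * plausible way the returned mask is applied:
 *
 *	bic_enabled &= ~bic_lookup("Avg_MHz,Busy%", HIDE_LIST);	for --hide
 *	bic_enabled |=  bic_lookup("Pkg_J", SHOW_LIST);		for --enable
 *
 * Unknown names are fatal in SHOW_LIST mode and deferred (skipped later)
 * in HIDE_LIST mode, as implemented above.
 */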
620 void print_header(char *delim)
622 struct msr_counter *mp;
623 int printed = 0;
625 if (DO_BIC(BIC_USEC))
626 outp += sprintf(outp, "%susec", (printed++ ? delim : ""));
627 if (DO_BIC(BIC_TOD))
628 outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
629 if (DO_BIC(BIC_Package))
630 outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
631 if (DO_BIC(BIC_Die))
632 outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
633 if (DO_BIC(BIC_Node))
634 outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
635 if (DO_BIC(BIC_Core))
636 outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
637 if (DO_BIC(BIC_CPU))
638 outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
639 if (DO_BIC(BIC_APIC))
640 outp += sprintf(outp, "%sAPIC", (printed++ ? delim : ""));
641 if (DO_BIC(BIC_X2APIC))
642 outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : ""));
643 if (DO_BIC(BIC_Avg_MHz))
644 outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
645 if (DO_BIC(BIC_Busy))
646 outp += sprintf(outp, "%sBusy%%", (printed++ ? delim : ""));
647 if (DO_BIC(BIC_Bzy_MHz))
648 outp += sprintf(outp, "%sBzy_MHz", (printed++ ? delim : ""));
649 if (DO_BIC(BIC_TSC_MHz))
650 outp += sprintf(outp, "%sTSC_MHz", (printed++ ? delim : ""));
652 if (DO_BIC(BIC_IRQ)) {
653 if (sums_need_wide_columns)
654 outp += sprintf(outp, "%s IRQ", (printed++ ? delim : ""));
655 else
656 outp += sprintf(outp, "%sIRQ", (printed++ ? delim : ""));
659 if (DO_BIC(BIC_SMI))
660 outp += sprintf(outp, "%sSMI", (printed++ ? delim : ""));
662 for (mp = sys.tp; mp; mp = mp->next) {
664 if (mp->format == FORMAT_RAW) {
665 if (mp->width == 64)
666 outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name);
667 else
668 outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), mp->name);
669 } else {
670 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
671 outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), mp->name);
672 else
673 outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), mp->name);
677 if (DO_BIC(BIC_CPU_c1))
678 outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
679 if (DO_BIC(BIC_CPU_c3))
680 outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
681 if (DO_BIC(BIC_CPU_c6))
682 outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
683 if (DO_BIC(BIC_CPU_c7))
684 outp += sprintf(outp, "%sCPU%%c7", (printed++ ? delim : ""));
686 if (DO_BIC(BIC_Mod_c6))
687 outp += sprintf(outp, "%sMod%%c6", (printed++ ? delim : ""));
689 if (DO_BIC(BIC_CoreTmp))
690 outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
692 if (do_rapl && !rapl_joules) {
693 if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
694 outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
695 } else if (do_rapl && rapl_joules) {
696 if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
697 outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
700 for (mp = sys.cp; mp; mp = mp->next) {
701 if (mp->format == FORMAT_RAW) {
702 if (mp->width == 64)
703 outp += sprintf(outp, "%s%18.18s", delim, mp->name);
704 else
705 outp += sprintf(outp, "%s%10.10s", delim, mp->name);
706 } else {
707 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
708 outp += sprintf(outp, "%s%8s", delim, mp->name);
709 else
710 outp += sprintf(outp, "%s%s", delim, mp->name);
714 if (DO_BIC(BIC_PkgTmp))
715 outp += sprintf(outp, "%sPkgTmp", (printed++ ? delim : ""));
717 if (DO_BIC(BIC_GFX_rc6))
718 outp += sprintf(outp, "%sGFX%%rc6", (printed++ ? delim : ""));
720 if (DO_BIC(BIC_GFXMHz))
721 outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : ""));
723 if (DO_BIC(BIC_Totl_c0))
724 outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : ""));
725 if (DO_BIC(BIC_Any_c0))
726 outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : ""));
727 if (DO_BIC(BIC_GFX_c0))
728 outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : ""));
729 if (DO_BIC(BIC_CPUGFX))
730 outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : ""));
732 if (DO_BIC(BIC_Pkgpc2))
733 outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : ""));
734 if (DO_BIC(BIC_Pkgpc3))
735 outp += sprintf(outp, "%sPkg%%pc3", (printed++ ? delim : ""));
736 if (DO_BIC(BIC_Pkgpc6))
737 outp += sprintf(outp, "%sPkg%%pc6", (printed++ ? delim : ""));
738 if (DO_BIC(BIC_Pkgpc7))
739 outp += sprintf(outp, "%sPkg%%pc7", (printed++ ? delim : ""));
740 if (DO_BIC(BIC_Pkgpc8))
741 outp += sprintf(outp, "%sPkg%%pc8", (printed++ ? delim : ""));
742 if (DO_BIC(BIC_Pkgpc9))
743 outp += sprintf(outp, "%sPkg%%pc9", (printed++ ? delim : ""));
744 if (DO_BIC(BIC_Pkgpc10))
745 outp += sprintf(outp, "%sPk%%pc10", (printed++ ? delim : ""));
746 if (DO_BIC(BIC_CPU_LPI))
747 outp += sprintf(outp, "%sCPU%%LPI", (printed++ ? delim : ""));
748 if (DO_BIC(BIC_SYS_LPI))
749 outp += sprintf(outp, "%sSYS%%LPI", (printed++ ? delim : ""));
751 if (do_rapl && !rapl_joules) {
752 if (DO_BIC(BIC_PkgWatt))
753 outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
754 if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
755 outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
756 if (DO_BIC(BIC_GFXWatt))
757 outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
758 if (DO_BIC(BIC_RAMWatt))
759 outp += sprintf(outp, "%sRAMWatt", (printed++ ? delim : ""));
760 if (DO_BIC(BIC_PKG__))
761 outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : ""));
762 if (DO_BIC(BIC_RAM__))
763 outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : ""));
764 } else if (do_rapl && rapl_joules) {
765 if (DO_BIC(BIC_Pkg_J))
766 outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
767 if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
768 outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
769 if (DO_BIC(BIC_GFX_J))
770 outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
771 if (DO_BIC(BIC_RAM_J))
772 outp += sprintf(outp, "%sRAM_J", (printed++ ? delim : ""));
773 if (DO_BIC(BIC_PKG__))
774 outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : ""));
775 if (DO_BIC(BIC_RAM__))
776 outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : ""));
778 for (mp = sys.pp; mp; mp = mp->next) {
779 if (mp->format == FORMAT_RAW) {
780 if (mp->width == 64)
781 outp += sprintf(outp, "%s%18.18s", delim, mp->name);
782 else
783 outp += sprintf(outp, "%s%10.10s", delim, mp->name);
784 } else {
785 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
786 outp += sprintf(outp, "%s%8s", delim, mp->name);
787 else
788 outp += sprintf(outp, "%s%s", delim, mp->name);
792 outp += sprintf(outp, "\n");
795 int dump_counters(struct thread_data *t, struct core_data *c,
796 struct pkg_data *p)
798 int i;
799 struct msr_counter *mp;
801 outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
803 if (t) {
804 outp += sprintf(outp, "CPU: %d flags 0x%x\n",
805 t->cpu_id, t->flags);
806 outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
807 outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
808 outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
809 outp += sprintf(outp, "c1: %016llX\n", t->c1);
811 if (DO_BIC(BIC_IRQ))
812 outp += sprintf(outp, "IRQ: %lld\n", t->irq_count);
813 if (DO_BIC(BIC_SMI))
814 outp += sprintf(outp, "SMI: %d\n", t->smi_count);
816 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
817 outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n",
818 i, mp->msr_num, t->counter[i]);
822 if (c) {
823 outp += sprintf(outp, "core: %d\n", c->core_id);
824 outp += sprintf(outp, "c3: %016llX\n", c->c3);
825 outp += sprintf(outp, "c6: %016llX\n", c->c6);
826 outp += sprintf(outp, "c7: %016llX\n", c->c7);
827 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
828 outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
830 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
831 outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
832 i, mp->msr_num, c->counter[i]);
834 outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us);
837 if (p) {
838 outp += sprintf(outp, "package: %d\n", p->package_id);
840 outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
841 outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
842 outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
843 outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
845 outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
846 if (DO_BIC(BIC_Pkgpc3))
847 outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
848 if (DO_BIC(BIC_Pkgpc6))
849 outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
850 if (DO_BIC(BIC_Pkgpc7))
851 outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
852 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
853 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
854 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
855 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
856 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
857 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
858 outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
859 outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
860 outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
861 outp += sprintf(outp, "Throttle PKG: %0X\n",
862 p->rapl_pkg_perf_status);
863 outp += sprintf(outp, "Throttle RAM: %0X\n",
864 p->rapl_dram_perf_status);
865 outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
867 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
868 outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n",
869 i, mp->msr_num, p->counter[i]);
873 outp += sprintf(outp, "\n");
875 return 0;
878 /*
879 * column formatting convention & formats
880 */
881 int format_counters(struct thread_data *t, struct core_data *c,
882 struct pkg_data *p)
884 double interval_float, tsc;
885 char *fmt8;
886 int i;
887 struct msr_counter *mp;
888 char *delim = "\t";
889 int printed = 0;
891 /* if showing only 1st thread in core and this isn't one, bail out */
892 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
893 return 0;
895 /* if showing only 1st thread in pkg and this isn't one, bail out */
896 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
897 return 0;
899 /*if not summary line and --cpu is used */
900 if ((t != &average.threads) &&
901 (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset)))
902 return 0;
904 if (DO_BIC(BIC_USEC)) {
905 /* on each row, print how many usec each timestamp took to gather */
906 struct timeval tv;
908 timersub(&t->tv_end, &t->tv_begin, &tv);
909 outp += sprintf(outp, "%5ld\t", tv.tv_sec * 1000000 + tv.tv_usec);
912 /* Time_Of_Day_Seconds: on each row, print sec.usec last timestamp taken */
913 if (DO_BIC(BIC_TOD))
914 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
916 interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
918 tsc = t->tsc * tsc_tweak;
920 /* topo columns, print blanks on 1st (average) line */
921 if (t == &average.threads) {
922 if (DO_BIC(BIC_Package))
923 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
924 if (DO_BIC(BIC_Die))
925 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
926 if (DO_BIC(BIC_Node))
927 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
928 if (DO_BIC(BIC_Core))
929 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
930 if (DO_BIC(BIC_CPU))
931 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
932 if (DO_BIC(BIC_APIC))
933 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
934 if (DO_BIC(BIC_X2APIC))
935 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
936 } else {
937 if (DO_BIC(BIC_Package)) {
938 if (p)
939 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->package_id);
940 else
941 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
943 if (DO_BIC(BIC_Die)) {
944 if (c)
945 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
946 else
947 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
949 if (DO_BIC(BIC_Node)) {
950 if (t)
951 outp += sprintf(outp, "%s%d",
952 (printed++ ? delim : ""),
953 cpus[t->cpu_id].physical_node_id);
954 else
955 outp += sprintf(outp, "%s-",
956 (printed++ ? delim : ""));
958 if (DO_BIC(BIC_Core)) {
959 if (c)
960 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id);
961 else
962 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
964 if (DO_BIC(BIC_CPU))
965 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
966 if (DO_BIC(BIC_APIC))
967 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
968 if (DO_BIC(BIC_X2APIC))
969 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
972 if (DO_BIC(BIC_Avg_MHz))
973 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""),
974 1.0 / units * t->aperf / interval_float);
976 if (DO_BIC(BIC_Busy))
977 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf/tsc);
979 if (DO_BIC(BIC_Bzy_MHz)) {
980 if (has_base_hz)
981 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf);
982 else
983 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""),
984 tsc / units * t->aperf / t->mperf / interval_float);
987 if (DO_BIC(BIC_TSC_MHz))
988 outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc/units/interval_float);
990 /* IRQ */
991 if (DO_BIC(BIC_IRQ)) {
992 if (sums_need_wide_columns)
993 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->irq_count);
994 else
995 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count);
998 /* SMI */
999 if (DO_BIC(BIC_SMI))
1000 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count);
1002 /* Added counters */
1003 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1004 if (mp->format == FORMAT_RAW) {
1005 if (mp->width == 32)
1006 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) t->counter[i]);
1007 else
1008 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]);
1009 } else if (mp->format == FORMAT_DELTA) {
1010 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
1011 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]);
1012 else
1013 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]);
1014 } else if (mp->format == FORMAT_PERCENT) {
1015 if (mp->type == COUNTER_USEC)
1016 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), t->counter[i]/interval_float/10000);
1017 else
1018 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i]/tsc);
1022 /* C1 */
1023 if (DO_BIC(BIC_CPU_c1))
1024 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1/tsc);
1027 /* print per-core data only for 1st thread in core */
1028 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1029 goto done;
1031 if (DO_BIC(BIC_CPU_c3))
1032 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
1033 if (DO_BIC(BIC_CPU_c6))
1034 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
1035 if (DO_BIC(BIC_CPU_c7))
1036 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7/tsc);
1038 /* Mod%c6 */
1039 if (DO_BIC(BIC_Mod_c6))
1040 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc);
1042 if (DO_BIC(BIC_CoreTmp))
1043 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c);
1045 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1046 if (mp->format == FORMAT_RAW) {
1047 if (mp->width == 32)
1048 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) c->counter[i]);
1049 else
1050 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]);
1051 } else if (mp->format == FORMAT_DELTA) {
1052 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
1053 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]);
1054 else
1055 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]);
1056 } else if (mp->format == FORMAT_PERCENT) {
1057 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i]/tsc);
1061 /*
1062 * If measurement interval exceeds minimum RAPL Joule Counter range,
1063 * indicate that results are suspect by printing "**" in fraction place.
1064 */
1065 if (interval_float < rapl_joule_counter_range)
1066 fmt8 = "%s%.2f";
1067 else
1068 fmt8 = "%6.0f**";
1070 if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
1071 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
1072 if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
1073 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
1075 /* print per-package data only for 1st core in package */
1076 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1077 goto done;
1079 /* PkgTmp */
1080 if (DO_BIC(BIC_PkgTmp))
1081 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->pkg_temp_c);
1083 /* GFXrc6 */
1084 if (DO_BIC(BIC_GFX_rc6)) {
1085 if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */
1086 outp += sprintf(outp, "%s**.**", (printed++ ? delim : ""));
1087 } else {
1088 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
1089 p->gfx_rc6_ms / 10.0 / interval_float);
1093 /* GFXMHz */
1094 if (DO_BIC(BIC_GFXMHz))
1095 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz);
1097 /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
1098 if (DO_BIC(BIC_Totl_c0))
1099 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc);
1100 if (DO_BIC(BIC_Any_c0))
1101 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc);
1102 if (DO_BIC(BIC_GFX_c0))
1103 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc);
1104 if (DO_BIC(BIC_CPUGFX))
1105 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc);
1107 if (DO_BIC(BIC_Pkgpc2))
1108 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc);
1109 if (DO_BIC(BIC_Pkgpc3))
1110 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3/tsc);
1111 if (DO_BIC(BIC_Pkgpc6))
1112 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6/tsc);
1113 if (DO_BIC(BIC_Pkgpc7))
1114 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7/tsc);
1115 if (DO_BIC(BIC_Pkgpc8))
1116 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8/tsc);
1117 if (DO_BIC(BIC_Pkgpc9))
1118 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9/tsc);
1119 if (DO_BIC(BIC_Pkgpc10))
1120 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc);
1122 if (DO_BIC(BIC_CPU_LPI))
1123 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float);
1124 if (DO_BIC(BIC_SYS_LPI))
1125 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
1127 if (DO_BIC(BIC_PkgWatt))
1128 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
1129 if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
1130 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
1131 if (DO_BIC(BIC_GFXWatt))
1132 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
1133 if (DO_BIC(BIC_RAMWatt))
1134 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
1135 if (DO_BIC(BIC_Pkg_J))
1136 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
1137 if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
1138 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
1139 if (DO_BIC(BIC_GFX_J))
1140 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
1141 if (DO_BIC(BIC_RAM_J))
1142 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units);
1143 if (DO_BIC(BIC_PKG__))
1144 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
1145 if (DO_BIC(BIC_RAM__))
1146 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
1148 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1149 if (mp->format == FORMAT_RAW) {
1150 if (mp->width == 32)
1151 outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) p->counter[i]);
1152 else
1153 outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]);
1154 } else if (mp->format == FORMAT_DELTA) {
1155 if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
1156 outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), p->counter[i]);
1157 else
1158 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]);
1159 } else if (mp->format == FORMAT_PERCENT) {
1160 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i]/tsc);
1164 done:
1165 if (*(outp - 1) != '\n')
1166 outp += sprintf(outp, "\n");
1168 return 0;
1169 }
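/*
 * Worked example (illustrative, not from the original source): PkgWatt is
 * the energy-counter delta scaled by rapl_energy_units and divided by the
 * interval length.  Assuming, for illustration, rapl_energy_units =
 * 1.0/65536 J (a typical value decoded from the RAPL power-unit MSR) and
 * a 5 second interval:
 *
 *	delta(MSR_PKG_ENERGY_STATUS) = 3276800 counts
 *	3276800 * (1.0/65536) J = 50 J
 *	50 J / 5 s = 10 W	-> printed as "10.00" under PkgWatt
 */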
1171 void flush_output_stdout(void)
1173 FILE *filep;
1175 if (outf == stderr)
1176 filep = stdout;
1177 else
1178 filep = outf;
1180 fputs(output_buffer, filep);
1181 fflush(filep);
1183 outp = output_buffer;
1185 void flush_output_stderr(void)
1187 fputs(output_buffer, outf);
1188 fflush(outf);
1189 outp = output_buffer;
1190 }
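/*
 * Illustrative note (not in the original source): all output is first
 * formatted into output_buffer with the "outp += sprintf(outp, ...)"
 * pattern used throughout, then written out once per interval by
 * flush_output_stdout() or flush_output_stderr(), which also rewind outp:
 *
 *	outp += sprintf(outp, "%s%d", delim, value);	append one field
 *	...
 *	flush_output_stdout();				write buffer, reset outp
 */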
1191 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1193 static int printed;
1195 if (!printed || !summary_only)
1196 print_header("\t");
1198 format_counters(&average.threads, &average.cores, &average.packages);
1200 printed = 1;
1202 if (summary_only)
1203 return;
1205 for_all_cpus(format_counters, t, c, p);
1208 #define DELTA_WRAP32(new, old) \
1209 if (new > old) { \
1210 old = new - old; \
1211 } else { \
1212 old = 0x100000000 + new - old;	\
1213 }
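/*
 * Worked example (illustrative, not from the original source): the RAPL
 * energy-status MSRs are 32-bit counters, so DELTA_WRAP32() accounts for
 * one wrap per interval.  With old = 0xFFFFFFF0 and new = 0x10:
 *
 *	new < old, so old = 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20
 *
 * i.e. a delta of 32 counts across the wrap.
 */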
1215 int
1216 delta_package(struct pkg_data *new, struct pkg_data *old)
1218 int i;
1219 struct msr_counter *mp;
1222 if (DO_BIC(BIC_Totl_c0))
1223 old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
1224 if (DO_BIC(BIC_Any_c0))
1225 old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
1226 if (DO_BIC(BIC_GFX_c0))
1227 old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
1228 if (DO_BIC(BIC_CPUGFX))
1229 old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
1231 old->pc2 = new->pc2 - old->pc2;
1232 if (DO_BIC(BIC_Pkgpc3))
1233 old->pc3 = new->pc3 - old->pc3;
1234 if (DO_BIC(BIC_Pkgpc6))
1235 old->pc6 = new->pc6 - old->pc6;
1236 if (DO_BIC(BIC_Pkgpc7))
1237 old->pc7 = new->pc7 - old->pc7;
1238 old->pc8 = new->pc8 - old->pc8;
1239 old->pc9 = new->pc9 - old->pc9;
1240 old->pc10 = new->pc10 - old->pc10;
1241 old->cpu_lpi = new->cpu_lpi - old->cpu_lpi;
1242 old->sys_lpi = new->sys_lpi - old->sys_lpi;
1243 old->pkg_temp_c = new->pkg_temp_c;
1245 /* flag an error when rc6 counter resets/wraps */
1246 if (old->gfx_rc6_ms > new->gfx_rc6_ms)
1247 old->gfx_rc6_ms = -1;
1248 else
1249 old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
1251 old->gfx_mhz = new->gfx_mhz;
1253 DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
1254 DELTA_WRAP32(new->energy_cores, old->energy_cores);
1255 DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
1256 DELTA_WRAP32(new->energy_dram, old->energy_dram);
1257 DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
1258 DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
1260 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1261 if (mp->format == FORMAT_RAW)
1262 old->counter[i] = new->counter[i];
1263 else
1264 old->counter[i] = new->counter[i] - old->counter[i];
1267 return 0;
1270 void
1271 delta_core(struct core_data *new, struct core_data *old)
1273 int i;
1274 struct msr_counter *mp;
1276 old->c3 = new->c3 - old->c3;
1277 old->c6 = new->c6 - old->c6;
1278 old->c7 = new->c7 - old->c7;
1279 old->core_temp_c = new->core_temp_c;
1280 old->mc6_us = new->mc6_us - old->mc6_us;
1282 DELTA_WRAP32(new->core_energy, old->core_energy);
1284 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1285 if (mp->format == FORMAT_RAW)
1286 old->counter[i] = new->counter[i];
1287 else
1288 old->counter[i] = new->counter[i] - old->counter[i];
1292 int soft_c1_residency_display(int bic)
1294 if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
1295 return 0;
1297 return DO_BIC_READ(bic);
1300 /*
1301 * old = new - old
1302 */
1303 int
1304 delta_thread(struct thread_data *new, struct thread_data *old,
1305 struct core_data *core_delta)
1307 int i;
1308 struct msr_counter *mp;
1310 /* we run cpuid just the 1st time, copy the results */
1311 if (DO_BIC(BIC_APIC))
1312 new->apic_id = old->apic_id;
1313 if (DO_BIC(BIC_X2APIC))
1314 new->x2apic_id = old->x2apic_id;
1316 /*
1317 * the timestamps from the start of the measurement interval are in "old",
1318 * the timestamps from the end of the measurement interval are in "new";
1319 * over-write old w/ new so we can print end-of-interval values
1320 */
1322 timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
1323 old->tv_begin = new->tv_begin;
1324 old->tv_end = new->tv_end;
1326 old->tsc = new->tsc - old->tsc;
1328 /* check for TSC < 1 Mcycles over interval */
1329 if (old->tsc < (1000 * 1000))
1330 errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
1331 "You can disable all c-states by booting with \"idle=poll\"\n"
1332 "or just the deep ones with \"processor.max_cstate=1\"");
1334 old->c1 = new->c1 - old->c1;
1336 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1337 soft_c1_residency_display(BIC_Avg_MHz)) {
1338 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
1339 old->aperf = new->aperf - old->aperf;
1340 old->mperf = new->mperf - old->mperf;
1341 } else {
1342 return -1;
1347 if (use_c1_residency_msr) {
1348 /*
1349 * Some models have a dedicated C1 residency MSR,
1350 * which should be more accurate than the derivation below.
1351 */
1352 } else {
1353 /*
1354 * As counter collection is not atomic,
1355 * it is possible for mperf's non-halted cycles plus the idle-state
1356 * residencies to exceed the TSC's count of all cycles: show c1 = 0% in that case.
1357 */
1358 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
1359 old->c1 = 0;
1360 else {
1361 /* normal case, derive c1 */
1362 old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3
1363 - core_delta->c6 - core_delta->c7;
1367 if (old->mperf == 0) {
1368 if (debug > 1)
1369 fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id);
1370 old->mperf = 1; /* divide by 0 protection */
1373 if (DO_BIC(BIC_IRQ))
1374 old->irq_count = new->irq_count - old->irq_count;
1376 if (DO_BIC(BIC_SMI))
1377 old->smi_count = new->smi_count - old->smi_count;
1379 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1380 if (mp->format == FORMAT_RAW)
1381 old->counter[i] = new->counter[i];
1382 else
1383 old->counter[i] = new->counter[i] - old->counter[i];
1385 return 0;
1386 }
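/*
 * Worked example (illustrative, not from the original source) of the
 * derived-C1 path above, used when no dedicated C1 residency MSR exists:
 *
 *	c1 = tsc * tsc_tweak - mperf - c3 - c6 - c7
 *
 * e.g. with tsc = 10,000,000 (tsc_tweak = 1.0), mperf = 2,000,000,
 * c3 = 0, c6 = 7,000,000 and c7 = 0, the interval is accounted as
 * c1 = 1,000,000 cycles, which format_counters() prints as 10.00 under
 * CPU%c1 (100.0 * c1 / tsc).
 */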
1388 int delta_cpu(struct thread_data *t, struct core_data *c,
1389 struct pkg_data *p, struct thread_data *t2,
1390 struct core_data *c2, struct pkg_data *p2)
1392 int retval = 0;
1394 /* calculate core delta only for 1st thread in core */
1395 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
1396 delta_core(c, c2);
1398 /* always calculate thread delta */
1399 retval = delta_thread(t, t2, c2); /* c2 is core delta */
1400 if (retval)
1401 return retval;
1403 /* calculate package delta only for 1st core in package */
1404 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
1405 retval = delta_package(p, p2);
1407 return retval;
1410 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1412 int i;
1413 struct msr_counter *mp;
1415 t->tv_begin.tv_sec = 0;
1416 t->tv_begin.tv_usec = 0;
1417 t->tv_end.tv_sec = 0;
1418 t->tv_end.tv_usec = 0;
1419 t->tv_delta.tv_sec = 0;
1420 t->tv_delta.tv_usec = 0;
1422 t->tsc = 0;
1423 t->aperf = 0;
1424 t->mperf = 0;
1425 t->c1 = 0;
1427 t->irq_count = 0;
1428 t->smi_count = 0;
1430 /* tells format_counters to dump all fields from this set */
1431 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
1433 c->c3 = 0;
1434 c->c6 = 0;
1435 c->c7 = 0;
1436 c->mc6_us = 0;
1437 c->core_temp_c = 0;
1438 c->core_energy = 0;
1440 p->pkg_wtd_core_c0 = 0;
1441 p->pkg_any_core_c0 = 0;
1442 p->pkg_any_gfxe_c0 = 0;
1443 p->pkg_both_core_gfxe_c0 = 0;
1445 p->pc2 = 0;
1446 if (DO_BIC(BIC_Pkgpc3))
1447 p->pc3 = 0;
1448 if (DO_BIC(BIC_Pkgpc6))
1449 p->pc6 = 0;
1450 if (DO_BIC(BIC_Pkgpc7))
1451 p->pc7 = 0;
1452 p->pc8 = 0;
1453 p->pc9 = 0;
1454 p->pc10 = 0;
1455 p->cpu_lpi = 0;
1456 p->sys_lpi = 0;
1458 p->energy_pkg = 0;
1459 p->energy_dram = 0;
1460 p->energy_cores = 0;
1461 p->energy_gfx = 0;
1462 p->rapl_pkg_perf_status = 0;
1463 p->rapl_dram_perf_status = 0;
1464 p->pkg_temp_c = 0;
1466 p->gfx_rc6_ms = 0;
1467 p->gfx_mhz = 0;
1468 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next)
1469 t->counter[i] = 0;
1471 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next)
1472 c->counter[i] = 0;
1474 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next)
1475 p->counter[i] = 0;
1477 int sum_counters(struct thread_data *t, struct core_data *c,
1478 struct pkg_data *p)
1480 int i;
1481 struct msr_counter *mp;
1483 /* copy un-changing apic_id's */
1484 if (DO_BIC(BIC_APIC))
1485 average.threads.apic_id = t->apic_id;
1486 if (DO_BIC(BIC_X2APIC))
1487 average.threads.x2apic_id = t->x2apic_id;
1489 /* remember first tv_begin */
1490 if (average.threads.tv_begin.tv_sec == 0)
1491 average.threads.tv_begin = t->tv_begin;
1493 /* remember last tv_end */
1494 average.threads.tv_end = t->tv_end;
1496 average.threads.tsc += t->tsc;
1497 average.threads.aperf += t->aperf;
1498 average.threads.mperf += t->mperf;
1499 average.threads.c1 += t->c1;
1501 average.threads.irq_count += t->irq_count;
1502 average.threads.smi_count += t->smi_count;
1504 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1505 if (mp->format == FORMAT_RAW)
1506 continue;
1507 average.threads.counter[i] += t->counter[i];
1510 /* sum per-core values only for 1st thread in core */
1511 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1512 return 0;
1514 average.cores.c3 += c->c3;
1515 average.cores.c6 += c->c6;
1516 average.cores.c7 += c->c7;
1517 average.cores.mc6_us += c->mc6_us;
1519 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
1521 average.cores.core_energy += c->core_energy;
1523 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1524 if (mp->format == FORMAT_RAW)
1525 continue;
1526 average.cores.counter[i] += c->counter[i];
1529 /* sum per-pkg values only for 1st core in pkg */
1530 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1531 return 0;
1533 if (DO_BIC(BIC_Totl_c0))
1534 average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
1535 if (DO_BIC(BIC_Any_c0))
1536 average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
1537 if (DO_BIC(BIC_GFX_c0))
1538 average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
1539 if (DO_BIC(BIC_CPUGFX))
1540 average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
1542 average.packages.pc2 += p->pc2;
1543 if (DO_BIC(BIC_Pkgpc3))
1544 average.packages.pc3 += p->pc3;
1545 if (DO_BIC(BIC_Pkgpc6))
1546 average.packages.pc6 += p->pc6;
1547 if (DO_BIC(BIC_Pkgpc7))
1548 average.packages.pc7 += p->pc7;
1549 average.packages.pc8 += p->pc8;
1550 average.packages.pc9 += p->pc9;
1551 average.packages.pc10 += p->pc10;
1553 average.packages.cpu_lpi = p->cpu_lpi;
1554 average.packages.sys_lpi = p->sys_lpi;
1556 average.packages.energy_pkg += p->energy_pkg;
1557 average.packages.energy_dram += p->energy_dram;
1558 average.packages.energy_cores += p->energy_cores;
1559 average.packages.energy_gfx += p->energy_gfx;
1561 average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
1562 average.packages.gfx_mhz = p->gfx_mhz;
1564 average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
1566 average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
1567 average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
1569 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1570 if (mp->format == FORMAT_RAW)
1571 continue;
1572 average.packages.counter[i] += p->counter[i];
1574 return 0;
1576 /*
1577 * sum the counters for all cpus in the system
1578 * compute the weighted average
1579 */
1580 void compute_average(struct thread_data *t, struct core_data *c,
1581 struct pkg_data *p)
1583 int i;
1584 struct msr_counter *mp;
1586 clear_counters(&average.threads, &average.cores, &average.packages);
1588 for_all_cpus(sum_counters, t, c, p);
1590 /* Use the global time delta for the average. */
1591 average.threads.tv_delta = tv_delta;
1593 average.threads.tsc /= topo.num_cpus;
1594 average.threads.aperf /= topo.num_cpus;
1595 average.threads.mperf /= topo.num_cpus;
1596 average.threads.c1 /= topo.num_cpus;
1598 if (average.threads.irq_count > 9999999)
1599 sums_need_wide_columns = 1;
1601 average.cores.c3 /= topo.num_cores;
1602 average.cores.c6 /= topo.num_cores;
1603 average.cores.c7 /= topo.num_cores;
1604 average.cores.mc6_us /= topo.num_cores;
1606 if (DO_BIC(BIC_Totl_c0))
1607 average.packages.pkg_wtd_core_c0 /= topo.num_packages;
1608 if (DO_BIC(BIC_Any_c0))
1609 average.packages.pkg_any_core_c0 /= topo.num_packages;
1610 if (DO_BIC(BIC_GFX_c0))
1611 average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
1612 if (DO_BIC(BIC_CPUGFX))
1613 average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
1615 average.packages.pc2 /= topo.num_packages;
1616 if (DO_BIC(BIC_Pkgpc3))
1617 average.packages.pc3 /= topo.num_packages;
1618 if (DO_BIC(BIC_Pkgpc6))
1619 average.packages.pc6 /= topo.num_packages;
1620 if (DO_BIC(BIC_Pkgpc7))
1621 average.packages.pc7 /= topo.num_packages;
1623 average.packages.pc8 /= topo.num_packages;
1624 average.packages.pc9 /= topo.num_packages;
1625 average.packages.pc10 /= topo.num_packages;
1627 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1628 if (mp->format == FORMAT_RAW)
1629 continue;
1630 if (mp->type == COUNTER_ITEMS) {
1631 if (average.threads.counter[i] > 9999999)
1632 sums_need_wide_columns = 1;
1633 continue;
1635 average.threads.counter[i] /= topo.num_cpus;
1637 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1638 if (mp->format == FORMAT_RAW)
1639 continue;
1640 if (mp->type == COUNTER_ITEMS) {
1641 if (average.cores.counter[i] > 9999999)
1642 sums_need_wide_columns = 1;
1644 average.cores.counter[i] /= topo.num_cores;
1646 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
1647 if (mp->format == FORMAT_RAW)
1648 continue;
1649 if (mp->type == COUNTER_ITEMS) {
1650 if (average.packages.counter[i] > 9999999)
1651 sums_need_wide_columns = 1;
1653 average.packages.counter[i] /= topo.num_packages;
1657 static unsigned long long rdtsc(void)
1659 unsigned int low, high;
1661 asm volatile("rdtsc" : "=a" (low), "=d" (high));
1663 return low | ((unsigned long long)high) << 32;
1666 /*
1667 * Open a file, and exit on failure
1668 */
1669 FILE *fopen_or_die(const char *path, const char *mode)
1671 FILE *filep = fopen(path, mode);
1673 if (!filep)
1674 err(1, "%s: open failed", path);
1675 return filep;
1677 /*
1678 * snapshot_sysfs_counter()
1679 *
1680 * return snapshot of given counter
1681 */
1682 unsigned long long snapshot_sysfs_counter(char *path)
1684 FILE *fp;
1685 int retval;
1686 unsigned long long counter;
1688 fp = fopen_or_die(path, "r");
1690 retval = fscanf(fp, "%lld", &counter);
1691 if (retval != 1)
1692 err(1, "snapshot_sysfs_counter(%s)", path);
1694 fclose(fp);
1696 return counter;
1699 int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
1701 if (mp->msr_num != 0) {
1702 if (get_msr(cpu, mp->msr_num, counterp))
1703 return -1;
1704 } else {
1705 char path[128 + PATH_BYTES];
1707 if (mp->flags & SYSFS_PERCPU) {
1708 sprintf(path, "/sys/devices/system/cpu/cpu%d/%s",
1709 cpu, mp->path);
1711 *counterp = snapshot_sysfs_counter(path);
1712 } else {
1713 *counterp = snapshot_sysfs_counter(mp->path);
1717 return 0;
1718 }
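/*
 * Illustrative note (not from the original source): get_mp() resolves the
 * two kinds of added counters.  For an MSR counter such as the help()
 * example "--add msr0x10,u64,cpu,delta,MY_TSC", mp->msr_num is 0x10 and
 * the value comes from get_msr().  For a sysfs counter, mp->msr_num is 0
 * and mp->path names the file to snapshot; with SYSFS_PERCPU set, the path
 * is taken relative to /sys/devices/system/cpu/cpu<N>/ (hypothetical
 * example path: "cpuidle/state1/usage").
 */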
1720 void get_apic_id(struct thread_data *t)
1722 unsigned int eax, ebx, ecx, edx;
1724 if (DO_BIC(BIC_APIC)) {
1725 eax = ebx = ecx = edx = 0;
1726 __cpuid(1, eax, ebx, ecx, edx);
1728 t->apic_id = (ebx >> 24) & 0xff;
1731 if (!DO_BIC(BIC_X2APIC))
1732 return;
1734 if (authentic_amd || hygon_genuine) {
1735 unsigned int topology_extensions;
1737 if (max_extended_level < 0x8000001e)
1738 return;
1740 eax = ebx = ecx = edx = 0;
1741 __cpuid(0x80000001, eax, ebx, ecx, edx);
1742 topology_extensions = ecx & (1 << 22);
1744 if (topology_extensions == 0)
1745 return;
1747 eax = ebx = ecx = edx = 0;
1748 __cpuid(0x8000001e, eax, ebx, ecx, edx);
1750 t->x2apic_id = eax;
1751 return;
1754 if (!genuine_intel)
1755 return;
1757 if (max_level < 0xb)
1758 return;
1760 ecx = 0;
1761 __cpuid(0xb, eax, ebx, ecx, edx);
1762 t->x2apic_id = edx;
1764 if (debug && (t->apic_id != (t->x2apic_id & 0xff)))
1765 fprintf(outf, "cpu%d: BIOS BUG: apic 0x%x x2apic 0x%x\n",
1766 t->cpu_id, t->apic_id, t->x2apic_id);
1769 /*
1770 * get_counters(...)
1771 * migrate to cpu
1772 * acquire and record local counters for that cpu
1773 */
1774 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1776 int cpu = t->cpu_id;
1777 unsigned long long msr;
1778 int aperf_mperf_retry_count = 0;
1779 struct msr_counter *mp;
1780 int i;
1782 if (cpu_migrate(cpu)) {
1783 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
1784 return -1;
1787 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1789 if (first_counter_read)
1790 get_apic_id(t);
1791 retry:
1792 t->tsc = rdtsc(); /* we are running on local CPU of interest */
1794 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1795 soft_c1_residency_display(BIC_Avg_MHz)) {
1796 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
1798 /*
1799 * The TSC, APERF and MPERF must be read together for
1800 * APERF/MPERF and MPERF/TSC to give accurate results.
1801 *
1802 * Unfortunately, APERF and MPERF are read by
1803 * individual system calls, so delays may occur
1804 * between them. If the time to read them
1805 * varies by a large amount, we re-read them.
1806 */
1808 /*
1809 * This initial dummy APERF read has been seen to
1810 * reduce jitter in the subsequent reads.
1811 */
1813 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
1814 return -3;
1816 t->tsc = rdtsc(); /* re-read close to APERF */
1818 tsc_before = t->tsc;
1820 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
1821 return -3;
1823 tsc_between = rdtsc();
1825 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
1826 return -4;
1828 tsc_after = rdtsc();
1830 aperf_time = tsc_between - tsc_before;
1831 mperf_time = tsc_after - tsc_between;
1834  * If the system call latencies to read APERF and MPERF
1835 * differ by more than 2x, then try again.
1837 if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) {
1838 aperf_mperf_retry_count++;
1839 if (aperf_mperf_retry_count < 5)
1840 goto retry;
1841 else
1842 warnx("cpu%d jitter %lld %lld",
1843 cpu, aperf_time, mperf_time);
1845 aperf_mperf_retry_count = 0;
1847 t->aperf = t->aperf * aperf_mperf_multiplier;
1848 t->mperf = t->mperf * aperf_mperf_multiplier;
1851 if (DO_BIC(BIC_IRQ))
1852 t->irq_count = irqs_per_cpu[cpu];
1853 if (DO_BIC(BIC_SMI)) {
1854 if (get_msr(cpu, MSR_SMI_COUNT, &msr))
1855 return -5;
1856 t->smi_count = msr & 0xFFFFFFFF;
1858 if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) {
1859 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1860 return -6;
1863 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
1864 if (get_mp(cpu, mp, &t->counter[i]))
1865 return -10;
1868 /* collect core counters only for 1st thread in core */
1869 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1870 goto done;
1872 if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
1873 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1874 return -6;
1877 if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
1878 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1879 return -7;
1880 } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
1881 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1882 return -7;
1885 if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
1886 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1887 return -8;
1889 if (DO_BIC(BIC_Mod_c6))
1890 if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us))
1891 return -8;
1893 if (DO_BIC(BIC_CoreTmp)) {
1894 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1895 return -9;
1896 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1899 if (do_rapl & RAPL_AMD_F17H) {
1900 if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
1901 return -14;
1902 c->core_energy = msr & 0xFFFFFFFF;
1905 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1906 if (get_mp(cpu, mp, &c->counter[i]))
1907 return -10;
1910 /* collect package counters only for 1st core in package */
1911 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1912 goto done;
1914 if (DO_BIC(BIC_Totl_c0)) {
1915 if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1916 return -10;
1918 if (DO_BIC(BIC_Any_c0)) {
1919 if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1920 return -11;
1922 if (DO_BIC(BIC_GFX_c0)) {
1923 if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1924 return -12;
1926 if (DO_BIC(BIC_CPUGFX)) {
1927 if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1928 return -13;
1930 if (DO_BIC(BIC_Pkgpc3))
1931 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1932 return -9;
1933 if (DO_BIC(BIC_Pkgpc6)) {
1934 if (do_slm_cstates) {
1935 if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6))
1936 return -10;
1937 } else {
1938 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1939 return -10;
1943 if (DO_BIC(BIC_Pkgpc2))
1944 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1945 return -11;
1946 if (DO_BIC(BIC_Pkgpc7))
1947 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1948 return -12;
1949 if (DO_BIC(BIC_Pkgpc8))
1950 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1951 return -13;
1952 if (DO_BIC(BIC_Pkgpc9))
1953 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1954 return -13;
1955 if (DO_BIC(BIC_Pkgpc10))
1956 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1957 return -13;
1959 if (DO_BIC(BIC_CPU_LPI))
1960 p->cpu_lpi = cpuidle_cur_cpu_lpi_us;
1961 if (DO_BIC(BIC_SYS_LPI))
1962 p->sys_lpi = cpuidle_cur_sys_lpi_us;
1964 if (do_rapl & RAPL_PKG) {
1965 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1966 return -13;
1967 p->energy_pkg = msr & 0xFFFFFFFF;
1969 if (do_rapl & RAPL_CORES_ENERGY_STATUS) {
1970 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1971 return -14;
1972 p->energy_cores = msr & 0xFFFFFFFF;
1974 if (do_rapl & RAPL_DRAM) {
1975 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1976 return -15;
1977 p->energy_dram = msr & 0xFFFFFFFF;
1979 if (do_rapl & RAPL_GFX) {
1980 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1981 return -16;
1982 p->energy_gfx = msr & 0xFFFFFFFF;
1984 if (do_rapl & RAPL_PKG_PERF_STATUS) {
1985 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1986 return -16;
1987 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1989 if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1990 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1991 return -16;
1992 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1994 if (do_rapl & RAPL_AMD_F17H) {
1995 if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
1996 return -13;
1997 p->energy_pkg = msr & 0xFFFFFFFF;
1999 if (DO_BIC(BIC_PkgTmp)) {
2000 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
2001 return -17;
2002 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
2005 if (DO_BIC(BIC_GFX_rc6))
2006 p->gfx_rc6_ms = gfx_cur_rc6_ms;
2008 if (DO_BIC(BIC_GFXMHz))
2009 p->gfx_mhz = gfx_cur_mhz;
2011 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
2012 if (get_mp(cpu, mp, &p->counter[i]))
2013 return -10;
2015 done:
2016 gettimeofday(&t->tv_end, (struct timezone *)NULL);
2018 return 0;
2022 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
2023 * If you change the values, note they are used both in comparisons
2024 * (>= PCL__7) and to index pkg_cstate_limit_strings[].
2027 #define PCLUKN 0 /* Unknown */
2028 #define PCLRSV 1 /* Reserved */
2029 #define PCL__0 2 /* PC0 */
2030 #define PCL__1 3 /* PC1 */
2031 #define PCL__2 4 /* PC2 */
2032 #define PCL__3 5 /* PC3 */
2033 #define PCL__4 6 /* PC4 */
2034 #define PCL__6 7 /* PC6 */
2035 #define PCL_6N 8 /* PC6 No Retention */
2036 #define PCL_6R 9 /* PC6 Retention */
2037 #define PCL__7 10 /* PC7 */
2038 #define PCL_7S 11 /* PC7 Shrink */
2039 #define PCL__8 12 /* PC8 */
2040 #define PCL__9 13 /* PC9 */
2041 #define PCL_10 14 /* PC10 */
2042 #define PCLUNL 15 /* Unlimited */
2044 int pkg_cstate_limit = PCLUKN;
2045 char *pkg_cstate_limit_strings[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
2046 "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "pc10", "unlimited"};
2048 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
2049 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
2050 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
2051 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7};
2052 int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
2053 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
2054 int glm_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
2055 int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
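/*
 * Example: on Nehalem, an MSR_PKG_CST_CONFIG_CONTROL low nibble of 0x3
 * indexes nhm_pkg_cstate_limits[3] == PCL__6, i.e. the package C-state
 * limit is reported as "pc6".
 */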
2058 static void
2059 calculate_tsc_tweak()
2061 tsc_tweak = base_hz / tsc_hz;
2064 static void
2065 dump_nhm_platform_info(void)
2067 unsigned long long msr;
2068 unsigned int ratio;
2070 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
2072 fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
2074 ratio = (msr >> 40) & 0xFF;
2075 fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n",
2076 ratio, bclk, ratio * bclk);
2078 ratio = (msr >> 8) & 0xFF;
2079 fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n",
2080 ratio, bclk, ratio * bclk);
2082 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
2083 fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
2084 base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
2086 return;
2089 static void
2090 dump_hsw_turbo_ratio_limits(void)
2092 unsigned long long msr;
2093 unsigned int ratio;
2095 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
2097 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
2099 ratio = (msr >> 8) & 0xFF;
2100 if (ratio)
2101 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n",
2102 ratio, bclk, ratio * bclk);
2104 ratio = (msr >> 0) & 0xFF;
2105 if (ratio)
2106 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n",
2107 ratio, bclk, ratio * bclk);
2108 return;
2111 static void
2112 dump_ivt_turbo_ratio_limits(void)
2114 unsigned long long msr;
2115 unsigned int ratio;
2117 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
2119 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
2121 ratio = (msr >> 56) & 0xFF;
2122 if (ratio)
2123 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n",
2124 ratio, bclk, ratio * bclk);
2126 ratio = (msr >> 48) & 0xFF;
2127 if (ratio)
2128 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n",
2129 ratio, bclk, ratio * bclk);
2131 ratio = (msr >> 40) & 0xFF;
2132 if (ratio)
2133 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n",
2134 ratio, bclk, ratio * bclk);
2136 ratio = (msr >> 32) & 0xFF;
2137 if (ratio)
2138 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n",
2139 ratio, bclk, ratio * bclk);
2141 ratio = (msr >> 24) & 0xFF;
2142 if (ratio)
2143 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n",
2144 ratio, bclk, ratio * bclk);
2146 ratio = (msr >> 16) & 0xFF;
2147 if (ratio)
2148 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n",
2149 ratio, bclk, ratio * bclk);
2151 ratio = (msr >> 8) & 0xFF;
2152 if (ratio)
2153 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n",
2154 ratio, bclk, ratio * bclk);
2156 ratio = (msr >> 0) & 0xFF;
2157 if (ratio)
2158 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n",
2159 ratio, bclk, ratio * bclk);
2160 return;
2162 int has_turbo_ratio_group_limits(int family, int model)
2165 if (!genuine_intel)
2166 return 0;
2168 switch (model) {
2169 case INTEL_FAM6_ATOM_GOLDMONT:
2170 case INTEL_FAM6_SKYLAKE_X:
2171 case INTEL_FAM6_ATOM_GOLDMONT_D:
2172 return 1;
2174 return 0;
2177 static void
2178 dump_turbo_ratio_limits(int family, int model)
2180 unsigned long long msr, core_counts;
2181 unsigned int ratio, group_size;
2183 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
2184 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
2186 if (has_turbo_ratio_group_limits(family, model)) {
2187 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts);
2188 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts);
2189 } else {
2190 core_counts = 0x0807060504030201;
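/* i.e. the default group sizes are 1, 2, ... 8 active cores, one per byte */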
2193 ratio = (msr >> 56) & 0xFF;
2194 group_size = (core_counts >> 56) & 0xFF;
2195 if (ratio)
2196 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2197 ratio, bclk, ratio * bclk, group_size);
2199 ratio = (msr >> 48) & 0xFF;
2200 group_size = (core_counts >> 48) & 0xFF;
2201 if (ratio)
2202 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2203 ratio, bclk, ratio * bclk, group_size);
2205 ratio = (msr >> 40) & 0xFF;
2206 group_size = (core_counts >> 40) & 0xFF;
2207 if (ratio)
2208 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2209 ratio, bclk, ratio * bclk, group_size);
2211 ratio = (msr >> 32) & 0xFF;
2212 group_size = (core_counts >> 32) & 0xFF;
2213 if (ratio)
2214 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2215 ratio, bclk, ratio * bclk, group_size);
2217 ratio = (msr >> 24) & 0xFF;
2218 group_size = (core_counts >> 24) & 0xFF;
2219 if (ratio)
2220 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2221 ratio, bclk, ratio * bclk, group_size);
2223 ratio = (msr >> 16) & 0xFF;
2224 group_size = (core_counts >> 16) & 0xFF;
2225 if (ratio)
2226 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2227 ratio, bclk, ratio * bclk, group_size);
2229 ratio = (msr >> 8) & 0xFF;
2230 group_size = (core_counts >> 8) & 0xFF;
2231 if (ratio)
2232 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2233 ratio, bclk, ratio * bclk, group_size);
2235 ratio = (msr >> 0) & 0xFF;
2236 group_size = (core_counts >> 0) & 0xFF;
2237 if (ratio)
2238 fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2239 ratio, bclk, ratio * bclk, group_size);
2240 return;
2243 static void
2244 dump_atom_turbo_ratio_limits(void)
2246 unsigned long long msr;
2247 unsigned int ratio;
2249 get_msr(base_cpu, MSR_ATOM_CORE_RATIOS, &msr);
2250 fprintf(outf, "cpu%d: MSR_ATOM_CORE_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);
2252 ratio = (msr >> 0) & 0x3F;
2253 if (ratio)
2254 fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n",
2255 ratio, bclk, ratio * bclk);
2257 ratio = (msr >> 8) & 0x3F;
2258 if (ratio)
2259 fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n",
2260 ratio, bclk, ratio * bclk);
2262 ratio = (msr >> 16) & 0x3F;
2263 if (ratio)
2264 fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n",
2265 ratio, bclk, ratio * bclk);
2267 get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr);
2268 fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);
2270 ratio = (msr >> 24) & 0x3F;
2271 if (ratio)
2272 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n",
2273 ratio, bclk, ratio * bclk);
2275 ratio = (msr >> 16) & 0x3F;
2276 if (ratio)
2277 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n",
2278 ratio, bclk, ratio * bclk);
2280 ratio = (msr >> 8) & 0x3F;
2281 if (ratio)
2282 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n",
2283 ratio, bclk, ratio * bclk);
2285 ratio = (msr >> 0) & 0x3F;
2286 if (ratio)
2287 fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n",
2288 ratio, bclk, ratio * bclk);
2291 static void
2292 dump_knl_turbo_ratio_limits(void)
2294 const unsigned int buckets_no = 7;
2296 unsigned long long msr;
2297 int delta_cores, delta_ratio;
2298 int i, b_nr;
2299 unsigned int cores[buckets_no];
2300 unsigned int ratio[buckets_no];
2302 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
2304 fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n",
2305 base_cpu, msr);
2308 * Turbo encoding in KNL is as follows:
2309 * [0] -- Reserved
2310 * [7:1] -- Base value of number of active cores of bucket 1.
2311 * [15:8] -- Base value of freq ratio of bucket 1.
2312 * [20:16] -- +ve delta of number of active cores of bucket 2.
2313 * i.e. active cores of bucket 2 =
2314 * active cores of bucket 1 + delta
2315 * [23:21] -- Negative delta of freq ratio of bucket 2.
2316 * i.e. freq ratio of bucket 2 =
2317 * freq ratio of bucket 1 - delta
2318 * [28:24]-- +ve delta of number of active cores of bucket 3.
2319 * [31:29]-- -ve delta of freq ratio of bucket 3.
2320 * [36:32]-- +ve delta of number of active cores of bucket 4.
2321 * [39:37]-- -ve delta of freq ratio of bucket 4.
2322 * [44:40]-- +ve delta of number of active cores of bucket 5.
2323 * [47:45]-- -ve delta of freq ratio of bucket 5.
2324 * [52:48]-- +ve delta of number of active cores of bucket 6.
2325 * [55:53]-- -ve delta of freq ratio of bucket 6.
2326 * [60:56]-- +ve delta of number of active cores of bucket 7.
2327 * [63:61]-- -ve delta of freq ratio of bucket 7.
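 *
 * Worked example (hypothetical MSR value): if [7:1] = 1 and [15:8] = 30,
 * bucket 1 is "1 active core at ratio 30"; if [20:16] = 3 and [23:21] = 2,
 * bucket 2 is "up to 4 active cores at ratio 28".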
2330 b_nr = 0;
2331 cores[b_nr] = (msr & 0xFF) >> 1;
2332 ratio[b_nr] = (msr >> 8) & 0xFF;
2334 for (i = 16; i < 64; i += 8) {
2335 delta_cores = (msr >> i) & 0x1F;
2336 delta_ratio = (msr >> (i + 5)) & 0x7;
2338 cores[b_nr + 1] = cores[b_nr] + delta_cores;
2339 ratio[b_nr + 1] = ratio[b_nr] - delta_ratio;
2340 b_nr++;
2343 for (i = buckets_no - 1; i >= 0; i--)
2344 if (i > 0 ? ratio[i] != ratio[i - 1] : 1)
2345 fprintf(outf,
2346 "%d * %.1f = %.1f MHz max turbo %d active cores\n",
2347 ratio[i], bclk, ratio[i] * bclk, cores[i]);
2350 static void
2351 dump_nhm_cst_cfg(void)
2353 unsigned long long msr;
2355 get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
2357 fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr);
2359 fprintf(outf, " (%s%s%s%s%slocked, pkg-cstate-limit=%d (%s)",
2360 (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
2361 (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
2362 (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
2363 (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
2364 (msr & (1 << 15)) ? "" : "UN",
2365 (unsigned int)msr & 0xF,
2366 pkg_cstate_limit_strings[pkg_cstate_limit]);
2368 #define AUTOMATIC_CSTATE_CONVERSION (1UL << 16)
2369 if (has_automatic_cstate_conversion) {
2370 fprintf(outf, ", automatic c-state conversion=%s",
2371 (msr & AUTOMATIC_CSTATE_CONVERSION) ? "on" : "off");
2374 fprintf(outf, ")\n");
2376 return;
2379 static void
2380 dump_config_tdp(void)
2382 unsigned long long msr;
2384 get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
2385 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
2386 fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF);
2388 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
2389 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
2390 if (msr) {
2391 fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
2392 fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
2393 fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
2394 fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF);
2396 fprintf(outf, ")\n");
2398 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
2399 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
2400 if (msr) {
2401 fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
2402 fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
2403 fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
2404 fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF);
2406 fprintf(outf, ")\n");
2408 get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
2409 fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
2410 if ((msr) & 0x3)
2411 fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
2412 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
2413 fprintf(outf, ")\n");
2415 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
2416 fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
2417 fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF);
2418 fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
2419 fprintf(outf, ")\n");
2422 unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
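/*
 * IRTL decode, as used below: latency = (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3] ns;
 * e.g. a value field of 59 with time-unit index 2 gives 59 * 1024 = 60416 ns.
 * Bit 15 is the "valid" bit.
 */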
2424 void print_irtl(void)
2426 unsigned long long msr;
2428 get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
2429 fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
2430 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2431 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2433 get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
2434 fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
2435 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2436 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2438 get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
2439 fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
2440 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2441 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2443 if (!do_irtl_hsw)
2444 return;
2446 get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
2447 fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
2448 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2449 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2451 get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
2452 fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
2453 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2454 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2456 get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
2457 fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
2458 fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
2459 (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
2462 void free_fd_percpu(void)
2464 int i;
2466 for (i = 0; i < topo.max_cpu_num + 1; ++i) {
2467 if (fd_percpu[i] != 0)
2468 close(fd_percpu[i]);
2471 free(fd_percpu);
2474 void free_all_buffers(void)
2476 int i;
2478 CPU_FREE(cpu_present_set);
2479 cpu_present_set = NULL;
2480 cpu_present_setsize = 0;
2482 CPU_FREE(cpu_affinity_set);
2483 cpu_affinity_set = NULL;
2484 cpu_affinity_setsize = 0;
2486 free(thread_even);
2487 free(core_even);
2488 free(package_even);
2490 thread_even = NULL;
2491 core_even = NULL;
2492 package_even = NULL;
2494 free(thread_odd);
2495 free(core_odd);
2496 free(package_odd);
2498 thread_odd = NULL;
2499 core_odd = NULL;
2500 package_odd = NULL;
2502 free(output_buffer);
2503 output_buffer = NULL;
2504 outp = NULL;
2506 free_fd_percpu();
2508 free(irq_column_2_cpu);
2509 free(irqs_per_cpu);
2511 for (i = 0; i <= topo.max_cpu_num; ++i) {
2512 if (cpus[i].put_ids)
2513 CPU_FREE(cpus[i].put_ids);
2515 free(cpus);
2520 * Parse a file containing a single int.
2521 * Return 0 if file can not be opened
2522 * Exit if file can be opened, but can not be parsed
2524 int parse_int_file(const char *fmt, ...)
2526 va_list args;
2527 char path[PATH_MAX];
2528 FILE *filep;
2529 int value;
2531 va_start(args, fmt);
2532 vsnprintf(path, sizeof(path), fmt, args);
2533 va_end(args);
2534 filep = fopen(path, "r");
2535 if (!filep)
2536 return 0;
2537 if (fscanf(filep, "%d", &value) != 1)
2538 err(1, "%s: failed to parse number from file", path);
2539 fclose(filep);
2540 return value;
2544 * cpu_is_first_core_in_package(cpu)
2545 * return 1 if given CPU is 1st core in package
2547 int cpu_is_first_core_in_package(int cpu)
2549 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
2552 int get_physical_package_id(int cpu)
2554 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
2557 int get_die_id(int cpu)
2559 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
2562 int get_core_id(int cpu)
2564 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
2567 void set_node_data(void)
2569 int pkg, node, lnode, cpu, cpux;
2570 int cpu_count;
2572 /* initialize logical_node_id */
2573 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
2574 cpus[cpu].logical_node_id = -1;
2576 cpu_count = 0;
2577 for (pkg = 0; pkg < topo.num_packages; pkg++) {
2578 lnode = 0;
2579 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
2580 if (cpus[cpu].physical_package_id != pkg)
2581 continue;
2582 /* find a cpu with an unset logical_node_id */
2583 if (cpus[cpu].logical_node_id != -1)
2584 continue;
2585 cpus[cpu].logical_node_id = lnode;
2586 node = cpus[cpu].physical_node_id;
2587 cpu_count++;
2589 * find all matching cpus on this pkg and set
2590 * the logical_node_id
2592 for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
2593 if ((cpus[cpux].physical_package_id == pkg) &&
2594 (cpus[cpux].physical_node_id == node)) {
2595 cpus[cpux].logical_node_id = lnode;
2596 cpu_count++;
2599 lnode++;
2600 if (lnode > topo.nodes_per_pkg)
2601 topo.nodes_per_pkg = lnode;
2603 if (cpu_count >= topo.max_cpu_num)
2604 break;
2608 int get_physical_node_id(struct cpu_topology *thiscpu)
2610 char path[80];
2611 FILE *filep;
2612 int i;
2613 int cpu = thiscpu->logical_cpu_id;
2615 for (i = 0; i <= topo.max_cpu_num; i++) {
2616 sprintf(path, "/sys/devices/system/cpu/cpu%d/node%i/cpulist",
2617 cpu, i);
2618 filep = fopen(path, "r");
2619 if (!filep)
2620 continue;
2621 fclose(filep);
2622 return i;
2624 return -1;
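/*
 * get_thread_siblings()
 * topology/thread_siblings is a comma-separated hex bitmask (LSB = cpu0);
 * e.g. "00000000,00000003" means cpu0 and cpu1 are SMT siblings.
 * Returns the number of threads sharing this CPU's core.
 */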
2627 int get_thread_siblings(struct cpu_topology *thiscpu)
2629 char path[80], character;
2630 FILE *filep;
2631 unsigned long map;
2632 int so, shift, sib_core;
2633 int cpu = thiscpu->logical_cpu_id;
2634 int offset = topo.max_cpu_num + 1;
2635 size_t size;
2636 int thread_id = 0;
2638 thiscpu->put_ids = CPU_ALLOC((topo.max_cpu_num + 1));
2639 if (thiscpu->thread_id < 0)
2640 thiscpu->thread_id = thread_id++;
2641 if (!thiscpu->put_ids)
2642 return -1;
2644 size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
2645 CPU_ZERO_S(size, thiscpu->put_ids);
2647 sprintf(path,
2648 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu);
2649 filep = fopen_or_die(path, "r");
2650 do {
2651 offset -= BITMASK_SIZE;
2652 if (fscanf(filep, "%lx%c", &map, &character) != 2)
2653 err(1, "%s: failed to parse file", path);
2654 for (shift = 0; shift < BITMASK_SIZE; shift++) {
2655 if ((map >> shift) & 0x1) {
2656 so = shift + offset;
2657 sib_core = get_core_id(so);
2658 if (sib_core == thiscpu->physical_core_id) {
2659 CPU_SET_S(so, size, thiscpu->put_ids);
2660 if ((so != cpu) &&
2661 (cpus[so].thread_id < 0))
2662 cpus[so].thread_id =
2663 thread_id++;
2667 } while (!strncmp(&character, ",", 1));
2668 fclose(filep);
2670 return CPU_COUNT_S(size, thiscpu->put_ids);
2674 * run func(thread, core, package) in topology order
2675 * skip non-present cpus
2678 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
2679 struct pkg_data *, struct thread_data *, struct core_data *,
2680 struct pkg_data *), struct thread_data *thread_base,
2681 struct core_data *core_base, struct pkg_data *pkg_base,
2682 struct thread_data *thread_base2, struct core_data *core_base2,
2683 struct pkg_data *pkg_base2)
2685 int retval, pkg_no, node_no, core_no, thread_no;
2687 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
2688 for (node_no = 0; node_no < topo.nodes_per_pkg; ++node_no) {
2689 for (core_no = 0; core_no < topo.cores_per_node;
2690 ++core_no) {
2691 for (thread_no = 0; thread_no <
2692 topo.threads_per_core; ++thread_no) {
2693 struct thread_data *t, *t2;
2694 struct core_data *c, *c2;
2695 struct pkg_data *p, *p2;
2697 t = GET_THREAD(thread_base, thread_no,
2698 core_no, node_no,
2699 pkg_no);
2701 if (cpu_is_not_present(t->cpu_id))
2702 continue;
2704 t2 = GET_THREAD(thread_base2, thread_no,
2705 core_no, node_no,
2706 pkg_no);
2708 c = GET_CORE(core_base, core_no,
2709 node_no, pkg_no);
2710 c2 = GET_CORE(core_base2, core_no,
2711 node_no,
2712 pkg_no);
2714 p = GET_PKG(pkg_base, pkg_no);
2715 p2 = GET_PKG(pkg_base2, pkg_no);
2717 retval = func(t, c, p, t2, c2, p2);
2718 if (retval)
2719 return retval;
2724 return 0;
2728 * run func(cpu) on every cpu in /proc/stat
2729 * return max_cpu number
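 *
 * /proc/stat rows look like "cpu0 1393280 32966 572056 ...";
 * only the cpu number is parsed here, the tick counts are skipped.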
2731 int for_all_proc_cpus(int (func)(int))
2733 FILE *fp;
2734 int cpu_num;
2735 int retval;
2737 fp = fopen_or_die(proc_stat, "r");
2739 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
2740 if (retval != 0)
2741 err(1, "%s: failed to parse format", proc_stat);
2743 while (1) {
2744 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
2745 if (retval != 1)
2746 break;
2748 retval = func(cpu_num);
2749 if (retval) {
2750 fclose(fp);
2751 return(retval);
2754 fclose(fp);
2755 return 0;
2758 void re_initialize(void)
2760 free_all_buffers();
2761 setup_all_buffers();
2762 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
2765 void set_max_cpu_num(void)
2767 FILE *filep;
2768 unsigned long dummy;
2770 topo.max_cpu_num = 0;
2771 filep = fopen_or_die(
2772 "/sys/devices/system/cpu/cpu0/topology/thread_siblings",
2773 "r");
2774 while (fscanf(filep, "%lx,", &dummy) == 1)
2775 topo.max_cpu_num += BITMASK_SIZE;
2776 fclose(filep);
2777 topo.max_cpu_num--; /* 0 based */
2781 * count_cpus()
2782  * increment topo.num_cpus once for each cpu listed in /proc/stat
2784 int count_cpus(int cpu)
2786 topo.num_cpus++;
2787 return 0;
2789 int mark_cpu_present(int cpu)
2791 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
2792 return 0;
2795 int init_thread_id(int cpu)
2797 cpus[cpu].thread_id = -1;
2798 return 0;
2802 * snapshot_proc_interrupts()
2804 * read and record summary of /proc/interrupts
2806 * return 1 if config change requires a restart, else return 0
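 *
 * /proc/interrupts layout (abridged):
 *            CPU0       CPU1
 *   0:        153          0   IO-APIC    2-edge      timer
 * NMI:          7          3   Non-maskable interrupts
 * The header row maps columns to cpu numbers; each count row is
 * summed into irqs_per_cpu[].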
2808 int snapshot_proc_interrupts(void)
2810 static FILE *fp;
2811 int column, retval;
2813 if (fp == NULL)
2814 fp = fopen_or_die("/proc/interrupts", "r");
2815 else
2816 rewind(fp);
2818 /* read 1st line of /proc/interrupts to get cpu* name for each column */
2819 for (column = 0; column < topo.num_cpus; ++column) {
2820 int cpu_number;
2822 retval = fscanf(fp, " CPU%d", &cpu_number);
2823 if (retval != 1)
2824 break;
2826 if (cpu_number > topo.max_cpu_num) {
2827 warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num);
2828 return 1;
2831 irq_column_2_cpu[column] = cpu_number;
2832 irqs_per_cpu[cpu_number] = 0;
2835 /* read /proc/interrupt count lines and sum up irqs per cpu */
2836 while (1) {
2837 int column;
2838 char buf[64];
2840 retval = fscanf(fp, " %s:", buf); /* flush irq# "N:" */
2841 if (retval != 1)
2842 break;
2844 /* read the count per cpu */
2845 for (column = 0; column < topo.num_cpus; ++column) {
2847 int cpu_number, irq_count;
2849 retval = fscanf(fp, " %d", &irq_count);
2850 if (retval != 1)
2851 break;
2853 cpu_number = irq_column_2_cpu[column];
2854 irqs_per_cpu[cpu_number] += irq_count;
2858 while (getc(fp) != '\n')
2859 ; /* flush interrupt description */
2862 return 0;
2865 * snapshot_gfx_rc6_ms()
2867 * record snapshot of
2868 * /sys/class/drm/card0/power/rc6_residency_ms
2870 * return 1 if config change requires a restart, else return 0
2872 int snapshot_gfx_rc6_ms(void)
2874 FILE *fp;
2875 int retval;
2877 fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r");
2879 retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms);
2880 if (retval != 1)
2881 err(1, "GFX rc6");
2883 fclose(fp);
2885 return 0;
2888 * snapshot_gfx_mhz()
2890 * record snapshot of
2891 * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz
2893 * return 1 if config change requires a restart, else return 0
2895 int snapshot_gfx_mhz(void)
2897 static FILE *fp;
2898 int retval;
2900 if (fp == NULL)
2901 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
2902 else {
2903 rewind(fp);
2904 fflush(fp);
2907 retval = fscanf(fp, "%d", &gfx_cur_mhz);
2908 if (retval != 1)
2909 err(1, "GFX MHz");
2911 return 0;
2915  * snapshot_cpu_lpi_us()
2917 * record snapshot of
2918 * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
2920 * return 1 if config change requires a restart, else return 0
2922 int snapshot_cpu_lpi_us(void)
2924 FILE *fp;
2925 int retval;
2927 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
2929 retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
2930 if (retval != 1) {
2931 fprintf(stderr, "Disabling Low Power Idle CPU output\n");
2932 BIC_NOT_PRESENT(BIC_CPU_LPI);
2933 fclose(fp);
2934 return -1;
2937 fclose(fp);
2939 return 0;
2942  * snapshot_sys_lpi_us()
2944 * record snapshot of
2945 * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
2947 * return 1 if config change requires a restart, else return 0
2949 int snapshot_sys_lpi_us(void)
2951 FILE *fp;
2952 int retval;
2954 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
2956 retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
2957 if (retval != 1) {
2958 fprintf(stderr, "Disabling Low Power Idle System output\n");
2959 BIC_NOT_PRESENT(BIC_SYS_LPI);
2960 fclose(fp);
2961 return -1;
2963 fclose(fp);
2965 return 0;
2968 * snapshot /proc and /sys files
2970 * return 1 if configuration restart needed, else return 0
2972 int snapshot_proc_sysfs_files(void)
2974 if (DO_BIC(BIC_IRQ))
2975 if (snapshot_proc_interrupts())
2976 return 1;
2978 if (DO_BIC(BIC_GFX_rc6))
2979 snapshot_gfx_rc6_ms();
2981 if (DO_BIC(BIC_GFXMHz))
2982 snapshot_gfx_mhz();
2984 if (DO_BIC(BIC_CPU_LPI))
2985 snapshot_cpu_lpi_us();
2987 if (DO_BIC(BIC_SYS_LPI))
2988 snapshot_sys_lpi_us();
2990 return 0;
2993 int exit_requested;
2995 static void signal_handler (int signal)
2997 switch (signal) {
2998 case SIGINT:
2999 exit_requested = 1;
3000 if (debug)
3001 fprintf(stderr, " SIGINT\n");
3002 break;
3003 case SIGUSR1:
3004 if (debug > 1)
3005 fprintf(stderr, "SIGUSR1\n");
3006 break;
3010 void setup_signal_handler(void)
3012 struct sigaction sa;
3014 memset(&sa, 0, sizeof(sa));
3016 sa.sa_handler = &signal_handler;
3018 if (sigaction(SIGINT, &sa, NULL) < 0)
3019 err(1, "sigaction SIGINT");
3020 if (sigaction(SIGUSR1, &sa, NULL) < 0)
3021 err(1, "sigaction SIGUSR1");
3024 void do_sleep(void)
3026 struct timeval tout;
3027 struct timespec rest;
3028 fd_set readfds;
3029 int retval;
3031 FD_ZERO(&readfds);
3032 FD_SET(0, &readfds);
3034 if (ignore_stdin) {
3035 nanosleep(&interval_ts, NULL);
3036 return;
3039 tout = interval_tv;
3040 retval = select(1, &readfds, NULL, NULL, &tout);
3042 if (retval == 1) {
3043 switch (getc(stdin)) {
3044 case 'q':
3045 exit_requested = 1;
3046 break;
3047 case EOF:
3049 * 'stdin' is a pipe closed on the other end. There
3050 * won't be any further input.
3052 ignore_stdin = 1;
3053 /* Sleep the rest of the time */
3054 rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
3055 rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
3056 nanosleep(&rest, NULL);
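/*
 * turbostat_loop()
 * Ping-pong between the EVEN and ODD counter sets: each interval is the
 * delta between the two most recent snapshots, so counters are read only
 * once per interval.
 */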
3062 void turbostat_loop()
3064 int retval;
3065 int restarted = 0;
3066 int done_iters = 0;
3068 setup_signal_handler();
3070 restart:
3071 restarted++;
3073 snapshot_proc_sysfs_files();
3074 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
3075 first_counter_read = 0;
3076 if (retval < -1) {
3077 exit(retval);
3078 } else if (retval == -1) {
3079 if (restarted > 1) {
3080 exit(retval);
3082 re_initialize();
3083 goto restart;
3085 restarted = 0;
3086 done_iters = 0;
3087 gettimeofday(&tv_even, (struct timezone *)NULL);
3089 while (1) {
3090 if (for_all_proc_cpus(cpu_is_not_present)) {
3091 re_initialize();
3092 goto restart;
3094 do_sleep();
3095 if (snapshot_proc_sysfs_files())
3096 goto restart;
3097 retval = for_all_cpus(get_counters, ODD_COUNTERS);
3098 if (retval < -1) {
3099 exit(retval);
3100 } else if (retval == -1) {
3101 re_initialize();
3102 goto restart;
3104 gettimeofday(&tv_odd, (struct timezone *)NULL);
3105 timersub(&tv_odd, &tv_even, &tv_delta);
3106 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) {
3107 re_initialize();
3108 goto restart;
3110 compute_average(EVEN_COUNTERS);
3111 format_all_counters(EVEN_COUNTERS);
3112 flush_output_stdout();
3113 if (exit_requested)
3114 break;
3115 if (num_iterations && ++done_iters >= num_iterations)
3116 break;
3117 do_sleep();
3118 if (snapshot_proc_sysfs_files())
3119 goto restart;
3120 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
3121 if (retval < -1) {
3122 exit(retval);
3123 } else if (retval == -1) {
3124 re_initialize();
3125 goto restart;
3127 gettimeofday(&tv_even, (struct timezone *)NULL);
3128 timersub(&tv_even, &tv_odd, &tv_delta);
3129 if (for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) {
3130 re_initialize();
3131 goto restart;
3133 compute_average(ODD_COUNTERS);
3134 format_all_counters(ODD_COUNTERS);
3135 flush_output_stdout();
3136 if (exit_requested)
3137 break;
3138 if (num_iterations && ++done_iters >= num_iterations)
3139 break;
3143 void check_dev_msr()
3145 struct stat sb;
3146 char pathname[32];
3148 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
3149 if (stat(pathname, &sb))
3150 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
3151 err(-5, "no /dev/cpu/0/msr, try \"# modprobe msr\"");
3154 void check_permissions()
3156 struct __user_cap_header_struct cap_header_data;
3157 cap_user_header_t cap_header = &cap_header_data;
3158 struct __user_cap_data_struct cap_data_data;
3159 cap_user_data_t cap_data = &cap_data_data;
3160 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
3161 int do_exit = 0;
3162 char pathname[32];
3164 /* check for CAP_SYS_RAWIO */
3165 cap_header->pid = getpid();
3166 cap_header->version = _LINUX_CAPABILITY_VERSION;
3167 if (capget(cap_header, cap_data) < 0)
3168 err(-6, "capget(2) failed");
3170 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
3171 do_exit++;
3172 warnx("capget(CAP_SYS_RAWIO) failed,"
3173 " try \"# setcap cap_sys_rawio=ep %s\"", progname);
3176 /* test file permissions */
3177 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
3178 if (euidaccess(pathname, R_OK)) {
3179 do_exit++;
3180 warn("%s open failed, try chown or chmod +r /dev/cpu/*/msr", pathname);
3183 /* if all else fails, tell them to be root */
3184 if (do_exit)
3185 if (getuid() != 0)
3186 warnx("... or simply run as root");
3188 if (do_exit)
3189 exit(-6);
3193 * NHM adds support for additional MSRs:
3195 * MSR_SMI_COUNT 0x00000034
3197 * MSR_PLATFORM_INFO 0x000000ce
3198 * MSR_PKG_CST_CONFIG_CONTROL 0x000000e2
3200 * MSR_MISC_PWR_MGMT 0x000001aa
3202 * MSR_PKG_C3_RESIDENCY 0x000003f8
3203 * MSR_PKG_C6_RESIDENCY 0x000003f9
3204 * MSR_CORE_C3_RESIDENCY 0x000003fc
3205 * MSR_CORE_C6_RESIDENCY 0x000003fd
3207 * Side effect:
3208 * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL
3209 * sets has_misc_feature_control
3211 int probe_nhm_msrs(unsigned int family, unsigned int model)
3213 unsigned long long msr;
3214 unsigned int base_ratio;
3215 int *pkg_cstate_limits;
3217 if (!genuine_intel)
3218 return 0;
3220 if (family != 6)
3221 return 0;
3223 bclk = discover_bclk(family, model);
3225 switch (model) {
3226 case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
3227 case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
3228 pkg_cstate_limits = nhm_pkg_cstate_limits;
3229 break;
3230 case INTEL_FAM6_SANDYBRIDGE: /* SNB */
3231 case INTEL_FAM6_SANDYBRIDGE_X: /* SNB Xeon */
3232 case INTEL_FAM6_IVYBRIDGE: /* IVB */
3233 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
3234 pkg_cstate_limits = snb_pkg_cstate_limits;
3235 has_misc_feature_control = 1;
3236 break;
3237 case INTEL_FAM6_HASWELL: /* HSW */
3238 case INTEL_FAM6_HASWELL_G: /* HSW */
3239 case INTEL_FAM6_HASWELL_X: /* HSX */
3240 case INTEL_FAM6_HASWELL_L: /* HSW */
3241 case INTEL_FAM6_BROADWELL: /* BDW */
3242 case INTEL_FAM6_BROADWELL_G: /* BDW */
3243 case INTEL_FAM6_BROADWELL_X: /* BDX */
3244 case INTEL_FAM6_SKYLAKE_L: /* SKL */
3245 case INTEL_FAM6_CANNONLAKE_L: /* CNL */
3246 pkg_cstate_limits = hsw_pkg_cstate_limits;
3247 has_misc_feature_control = 1;
3248 break;
3249 case INTEL_FAM6_SKYLAKE_X: /* SKX */
3250 pkg_cstate_limits = skx_pkg_cstate_limits;
3251 has_misc_feature_control = 1;
3252 break;
3253 case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
3254 no_MSR_MISC_PWR_MGMT = 1;
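/* FALLTHRU */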
3255 case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */
3256 pkg_cstate_limits = slv_pkg_cstate_limits;
3257 break;
3258 case INTEL_FAM6_ATOM_AIRMONT: /* AMT */
3259 pkg_cstate_limits = amt_pkg_cstate_limits;
3260 no_MSR_MISC_PWR_MGMT = 1;
3261 break;
3262 case INTEL_FAM6_XEON_PHI_KNL: /* PHI */
3263 pkg_cstate_limits = phi_pkg_cstate_limits;
3264 break;
3265 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
3266 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
3267 case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */
3268 pkg_cstate_limits = glm_pkg_cstate_limits;
3269 break;
3270 default:
3271 return 0;
3273 get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
3274 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
3276 get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
3277 base_ratio = (msr >> 8) & 0xFF;
3279 base_hz = base_ratio * bclk * 1000000;
3280 has_base_hz = 1;
3281 return 1;
3284 * SLV client has support for unique MSRs:
3286 * MSR_CC6_DEMOTION_POLICY_CONFIG
3287 * MSR_MC6_DEMOTION_POLICY_CONFIG
3290 int has_slv_msrs(unsigned int family, unsigned int model)
3292 if (!genuine_intel)
3293 return 0;
3295 switch (model) {
3296 case INTEL_FAM6_ATOM_SILVERMONT:
3297 case INTEL_FAM6_ATOM_SILVERMONT_MID:
3298 case INTEL_FAM6_ATOM_AIRMONT_MID:
3299 return 1;
3301 return 0;
3303 int is_dnv(unsigned int family, unsigned int model)
3306 if (!genuine_intel)
3307 return 0;
3309 switch (model) {
3310 case INTEL_FAM6_ATOM_GOLDMONT_D:
3311 return 1;
3313 return 0;
3315 int is_bdx(unsigned int family, unsigned int model)
3318 if (!genuine_intel)
3319 return 0;
3321 switch (model) {
3322 case INTEL_FAM6_BROADWELL_X:
3323 return 1;
3325 return 0;
3327 int is_skx(unsigned int family, unsigned int model)
3330 if (!genuine_intel)
3331 return 0;
3333 switch (model) {
3334 case INTEL_FAM6_SKYLAKE_X:
3335 return 1;
3337 return 0;
3340 int has_turbo_ratio_limit(unsigned int family, unsigned int model)
3342 if (has_slv_msrs(family, model))
3343 return 0;
3345 switch (model) {
3346 /* Nehalem compatible, but do not include turbo-ratio limit support */
3347 case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
3348 case INTEL_FAM6_XEON_PHI_KNL: /* PHI - Knights Landing (different MSR definition) */
3349 return 0;
3350 default:
3351 return 1;
3354 int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model)
3356 if (has_slv_msrs(family, model))
3357 return 1;
3359 return 0;
3361 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
3363 if (!genuine_intel)
3364 return 0;
3366 if (family != 6)
3367 return 0;
3369 switch (model) {
3370 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
3371 case INTEL_FAM6_HASWELL_X: /* HSW Xeon */
3372 return 1;
3373 default:
3374 return 0;
3377 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
3379 if (!genuine_intel)
3380 return 0;
3382 if (family != 6)
3383 return 0;
3385 switch (model) {
3386 case INTEL_FAM6_HASWELL_X: /* HSW Xeon */
3387 return 1;
3388 default:
3389 return 0;
3393 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
3395 if (!genuine_intel)
3396 return 0;
3398 if (family != 6)
3399 return 0;
3401 switch (model) {
3402 case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
3403 return 1;
3404 default:
3405 return 0;
3408 int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model)
3410 if (!genuine_intel)
3411 return 0;
3413 if (family != 6)
3414 return 0;
3416 switch (model) {
3417 case INTEL_FAM6_ATOM_GOLDMONT:
3418 case INTEL_FAM6_SKYLAKE_X:
3419 return 1;
3420 default:
3421 return 0;
3424 int has_config_tdp(unsigned int family, unsigned int model)
3426 if (!genuine_intel)
3427 return 0;
3429 if (family != 6)
3430 return 0;
3432 switch (model) {
3433 case INTEL_FAM6_IVYBRIDGE: /* IVB */
3434 case INTEL_FAM6_HASWELL: /* HSW */
3435 case INTEL_FAM6_HASWELL_X: /* HSX */
3436 case INTEL_FAM6_HASWELL_L: /* HSW */
3437 case INTEL_FAM6_HASWELL_G: /* HSW */
3438 case INTEL_FAM6_BROADWELL: /* BDW */
3439 case INTEL_FAM6_BROADWELL_G: /* BDW */
3440 case INTEL_FAM6_BROADWELL_X: /* BDX */
3441 case INTEL_FAM6_SKYLAKE_L: /* SKL */
3442 case INTEL_FAM6_CANNONLAKE_L: /* CNL */
3443 case INTEL_FAM6_SKYLAKE_X: /* SKX */
3445 case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
3446 return 1;
3447 default:
3448 return 0;
3452 static void
3453 dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
3455 if (!do_nhm_platform_info)
3456 return;
3458 dump_nhm_platform_info();
3460 if (has_hsw_turbo_ratio_limit(family, model))
3461 dump_hsw_turbo_ratio_limits();
3463 if (has_ivt_turbo_ratio_limit(family, model))
3464 dump_ivt_turbo_ratio_limits();
3466 if (has_turbo_ratio_limit(family, model))
3467 dump_turbo_ratio_limits(family, model);
3469 if (has_atom_turbo_ratio_limit(family, model))
3470 dump_atom_turbo_ratio_limits();
3472 if (has_knl_turbo_ratio_limit(family, model))
3473 dump_knl_turbo_ratio_limits();
3475 if (has_config_tdp(family, model))
3476 dump_config_tdp();
3478 dump_nhm_cst_cfg();
3481 static void
3482 dump_sysfs_cstate_config(void)
3484 char path[64];
3485 char name_buf[16];
3486 char desc[64];
3487 FILE *input;
3488 int state;
3489 char *sp;
3491 if (!DO_BIC(BIC_sysfs))
3492 return;
3494 for (state = 0; state < 10; ++state) {
3496 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
3497 base_cpu, state);
3498 input = fopen(path, "r");
3499 if (input == NULL)
3500 continue;
3501 if (!fgets(name_buf, sizeof(name_buf), input))
3502 err(1, "%s: failed to read file", path);
3504 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
3505 sp = strchr(name_buf, '-');
3506 if (!sp)
3507 sp = strchrnul(name_buf, '\n');
3508 *sp = '\0';
3509 fclose(input);
3511 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
3512 base_cpu, state);
3513 input = fopen(path, "r");
3514 if (input == NULL)
3515 continue;
3516 if (!fgets(desc, sizeof(desc), input))
3517 err(1, "%s: failed to read file", path);
3519 fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
3520 fclose(input);
3523 static void
3524 dump_sysfs_pstate_config(void)
3526 char path[64];
3527 char driver_buf[64];
3528 char governor_buf[64];
3529 FILE *input;
3530 int turbo;
3532 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver",
3533 base_cpu);
3534 input = fopen(path, "r");
3535 if (input == NULL) {
3536 fprintf(outf, "NSFOD %s\n", path);
3537 return;
3539 if (!fgets(driver_buf, sizeof(driver_buf), input))
3540 err(1, "%s: failed to read file", path);
3541 fclose(input);
3543 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
3544 base_cpu);
3545 input = fopen(path, "r");
3546 if (input == NULL) {
3547 fprintf(outf, "NSFOD %s\n", path);
3548 return;
3550 if (!fgets(governor_buf, sizeof(governor_buf), input))
3551 err(1, "%s: failed to read file", path);
3552 fclose(input);
3554 fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
3555 fprintf(outf, "cpu%d: cpufreq governor: %s", base_cpu, governor_buf);
3557 sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
3558 input = fopen(path, "r");
3559 if (input != NULL) {
3560 if (fscanf(input, "%d", &turbo) != 1)
3561 err(1, "%s: failed to parse number from file", path);
3562 fprintf(outf, "cpufreq boost: %d\n", turbo);
3563 fclose(input);
3566 sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
3567 input = fopen(path, "r");
3568 if (input != NULL) {
3569 if (fscanf(input, "%d", &turbo) != 1)
3570 err(1, "%s: failed to parse number from file", path);
3571 fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
3572 fclose(input);
3578 * print_epb()
3579 * Decode the ENERGY_PERF_BIAS MSR
3581 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3583 unsigned long long msr;
3584 char *epb_string;
3585 int cpu;
3587 if (!has_epb)
3588 return 0;
3590 cpu = t->cpu_id;
3592 /* EPB is per-package */
3593 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3594 return 0;
3596 if (cpu_migrate(cpu)) {
3597 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3598 return -1;
3601 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
3602 return 0;
3604 switch (msr & 0xF) {
3605 case ENERGY_PERF_BIAS_PERFORMANCE:
3606 epb_string = "performance";
3607 break;
3608 case ENERGY_PERF_BIAS_NORMAL:
3609 epb_string = "balanced";
3610 break;
3611 case ENERGY_PERF_BIAS_POWERSAVE:
3612 epb_string = "powersave";
3613 break;
3614 default:
3615 epb_string = "custom";
3616 break;
3618 fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
3620 return 0;
3623 * print_hwp()
3624 * Decode the MSR_HWP_CAPABILITIES
3626 int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3628 unsigned long long msr;
3629 int cpu;
3631 if (!has_hwp)
3632 return 0;
3634 cpu = t->cpu_id;
3636 /* MSR_HWP_CAPABILITIES is per-package */
3637 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3638 return 0;
3640 if (cpu_migrate(cpu)) {
3641 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3642 return -1;
3645 if (get_msr(cpu, MSR_PM_ENABLE, &msr))
3646 return 0;
3648 fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n",
3649 cpu, msr, (msr & (1 << 0)) ? "" : "No-");
3651 /* MSR_PM_ENABLE[0] == 1 if HWP is enabled and the HWP MSRs are visible */
3652 if ((msr & (1 << 0)) == 0)
3653 return 0;
3655 if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr))
3656 return 0;
3658 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
3659 "(high %d guar %d eff %d low %d)\n",
3660 cpu, msr,
3661 (unsigned int)HWP_HIGHEST_PERF(msr),
3662 (unsigned int)HWP_GUARANTEED_PERF(msr),
3663 (unsigned int)HWP_MOSTEFFICIENT_PERF(msr),
3664 (unsigned int)HWP_LOWEST_PERF(msr));
3666 if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
3667 return 0;
3669 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
3670 "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
3671 cpu, msr,
3672 (unsigned int)(((msr) >> 0) & 0xff),
3673 (unsigned int)(((msr) >> 8) & 0xff),
3674 (unsigned int)(((msr) >> 16) & 0xff),
3675 (unsigned int)(((msr) >> 24) & 0xff),
3676 (unsigned int)(((msr) >> 32) & 0x3ff),
3677 (unsigned int)(((msr) >> 42) & 0x1));
3679 if (has_hwp_pkg) {
3680 if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
3681 return 0;
3683 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
3684 "(min %d max %d des %d epp 0x%x window 0x%x)\n",
3685 cpu, msr,
3686 (unsigned int)(((msr) >> 0) & 0xff),
3687 (unsigned int)(((msr) >> 8) & 0xff),
3688 (unsigned int)(((msr) >> 16) & 0xff),
3689 (unsigned int)(((msr) >> 24) & 0xff),
3690 (unsigned int)(((msr) >> 32) & 0x3ff));
3692 if (has_hwp_notify) {
3693 if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
3694 return 0;
3696 fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx "
3697 "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n",
3698 cpu, msr,
3699 ((msr) & 0x1) ? "EN" : "Dis",
3700 ((msr) & 0x2) ? "EN" : "Dis");
3702 if (get_msr(cpu, MSR_HWP_STATUS, &msr))
3703 return 0;
3705 fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
3706 "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
3707 cpu, msr,
3708 ((msr) & 0x1) ? "" : "No-",
3709 ((msr) & 0x2) ? "" : "No-");
3711 return 0;
3715 * print_perf_limit()
3717 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3719 unsigned long long msr;
3720 int cpu;
3722 cpu = t->cpu_id;
3724 /* per-package */
3725 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
3726 return 0;
3728 if (cpu_migrate(cpu)) {
3729 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
3730 return -1;
3733 if (do_core_perf_limit_reasons) {
3734 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
3735 fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3736 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
3737 (msr & 1 << 15) ? "bit15, " : "",
3738 (msr & 1 << 14) ? "bit14, " : "",
3739 (msr & 1 << 13) ? "Transitions, " : "",
3740 (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
3741 (msr & 1 << 11) ? "PkgPwrL2, " : "",
3742 (msr & 1 << 10) ? "PkgPwrL1, " : "",
3743 (msr & 1 << 9) ? "CorePwr, " : "",
3744 (msr & 1 << 8) ? "Amps, " : "",
3745 (msr & 1 << 6) ? "VR-Therm, " : "",
3746 (msr & 1 << 5) ? "Auto-HWP, " : "",
3747 (msr & 1 << 4) ? "Graphics, " : "",
3748 (msr & 1 << 2) ? "bit2, " : "",
3749 (msr & 1 << 1) ? "ThermStatus, " : "",
3750 (msr & 1 << 0) ? "PROCHOT, " : "");
3751 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
3752 (msr & 1 << 31) ? "bit31, " : "",
3753 (msr & 1 << 30) ? "bit30, " : "",
3754 (msr & 1 << 29) ? "Transitions, " : "",
3755 (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
3756 (msr & 1 << 27) ? "PkgPwrL2, " : "",
3757 (msr & 1 << 26) ? "PkgPwrL1, " : "",
3758 (msr & 1 << 25) ? "CorePwr, " : "",
3759 (msr & 1 << 24) ? "Amps, " : "",
3760 (msr & 1 << 22) ? "VR-Therm, " : "",
3761 (msr & 1 << 21) ? "Auto-HWP, " : "",
3762 (msr & 1 << 20) ? "Graphics, " : "",
3763 (msr & 1 << 18) ? "bit18, " : "",
3764 (msr & 1 << 17) ? "ThermStatus, " : "",
3765 (msr & 1 << 16) ? "PROCHOT, " : "");
3768 if (do_gfx_perf_limit_reasons) {
3769 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
3770 fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3771 fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)",
3772 (msr & 1 << 0) ? "PROCHOT, " : "",
3773 (msr & 1 << 1) ? "ThermStatus, " : "",
3774 (msr & 1 << 4) ? "Graphics, " : "",
3775 (msr & 1 << 6) ? "VR-Therm, " : "",
3776 (msr & 1 << 8) ? "Amps, " : "",
3777 (msr & 1 << 9) ? "GFXPwr, " : "",
3778 (msr & 1 << 10) ? "PkgPwrL1, " : "",
3779 (msr & 1 << 11) ? "PkgPwrL2, " : "");
3780 fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n",
3781 (msr & 1 << 16) ? "PROCHOT, " : "",
3782 (msr & 1 << 17) ? "ThermStatus, " : "",
3783 (msr & 1 << 20) ? "Graphics, " : "",
3784 (msr & 1 << 22) ? "VR-Therm, " : "",
3785 (msr & 1 << 24) ? "Amps, " : "",
3786 (msr & 1 << 25) ? "GFXPwr, " : "",
3787 (msr & 1 << 26) ? "PkgPwrL1, " : "",
3788 (msr & 1 << 27) ? "PkgPwrL2, " : "");
3790 if (do_ring_perf_limit_reasons) {
3791 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
3792 fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
3793 fprintf(outf, " (Active: %s%s%s%s%s%s)",
3794 (msr & 1 << 0) ? "PROCHOT, " : "",
3795 (msr & 1 << 1) ? "ThermStatus, " : "",
3796 (msr & 1 << 6) ? "VR-Therm, " : "",
3797 (msr & 1 << 8) ? "Amps, " : "",
3798 (msr & 1 << 10) ? "PkgPwrL1, " : "",
3799 (msr & 1 << 11) ? "PkgPwrL2, " : "");
3800 fprintf(outf, " (Logged: %s%s%s%s%s%s)\n",
3801 (msr & 1 << 16) ? "PROCHOT, " : "",
3802 (msr & 1 << 17) ? "ThermStatus, " : "",
3803 (msr & 1 << 22) ? "VR-Therm, " : "",
3804 (msr & 1 << 24) ? "Amps, " : "",
3805 (msr & 1 << 26) ? "PkgPwrL1, " : "",
3806 (msr & 1 << 27) ? "PkgPwrL2, " : "");
3808 return 0;
3811 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
3812 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
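/*
 * Example: with the common 1/8 W power unit, a 15-bit "Thermal Spec Power"
 * field of 280 in MSR_PKG_POWER_INFO decodes to 280 * 0.125 = 35 Watts.
 */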
3814 double get_tdp_intel(unsigned int model)
3816 unsigned long long msr;
3818 if (do_rapl & RAPL_PKG_POWER_INFO)
3819 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
3820 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
3822 switch (model) {
3823 case INTEL_FAM6_ATOM_SILVERMONT:
3824 case INTEL_FAM6_ATOM_SILVERMONT_D:
3825 return 30.0;
3826 default:
3827 return 135.0;
3831 double get_tdp_amd(unsigned int family)
3833 switch (family) {
3834 case 0x17:
3835 case 0x18:
3836 default:
3837 /* This is the max stock TDP of HEDT/Server Fam17h chips */
3838 return 250.0;
3843 * rapl_dram_energy_units_probe()
3844 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
3846 static double
3847 rapl_dram_energy_units_probe(int model, double rapl_energy_units)
3849 /* only called for genuine_intel, family 6 */
3851 switch (model) {
3852 case INTEL_FAM6_HASWELL_X: /* HSX */
3853 case INTEL_FAM6_BROADWELL_X: /* BDX */
3854 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
3855 return (rapl_dram_energy_units = 15.3 / 1000000);
3856 default:
3857 return (rapl_energy_units);
3861 void rapl_probe_intel(unsigned int family, unsigned int model)
3863 unsigned long long msr;
3864 unsigned int time_unit;
3865 double tdp;
3867 if (family != 6)
3868 return;
3870 switch (model) {
3871 case INTEL_FAM6_SANDYBRIDGE:
3872 case INTEL_FAM6_IVYBRIDGE:
3873 case INTEL_FAM6_HASWELL: /* HSW */
3874 case INTEL_FAM6_HASWELL_L: /* HSW */
3875 case INTEL_FAM6_HASWELL_G: /* HSW */
3876 case INTEL_FAM6_BROADWELL: /* BDW */
3877 case INTEL_FAM6_BROADWELL_G: /* BDW */
3878 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
3879 if (rapl_joules) {
3880 BIC_PRESENT(BIC_Pkg_J);
3881 BIC_PRESENT(BIC_Cor_J);
3882 BIC_PRESENT(BIC_GFX_J);
3883 } else {
3884 BIC_PRESENT(BIC_PkgWatt);
3885 BIC_PRESENT(BIC_CorWatt);
3886 BIC_PRESENT(BIC_GFXWatt);
3888 break;
3889 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
3890 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
3891 do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
3892 if (rapl_joules)
3893 BIC_PRESENT(BIC_Pkg_J);
3894 else
3895 BIC_PRESENT(BIC_PkgWatt);
3896 break;
3897 case INTEL_FAM6_SKYLAKE_L: /* SKL */
3898 case INTEL_FAM6_CANNONLAKE_L: /* CNL */
3899 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
3900 BIC_PRESENT(BIC_PKG__);
3901 BIC_PRESENT(BIC_RAM__);
3902 if (rapl_joules) {
3903 BIC_PRESENT(BIC_Pkg_J);
3904 BIC_PRESENT(BIC_Cor_J);
3905 BIC_PRESENT(BIC_RAM_J);
3906 BIC_PRESENT(BIC_GFX_J);
3907 } else {
3908 BIC_PRESENT(BIC_PkgWatt);
3909 BIC_PRESENT(BIC_CorWatt);
3910 BIC_PRESENT(BIC_RAMWatt);
3911 BIC_PRESENT(BIC_GFXWatt);
3913 break;
3914 case INTEL_FAM6_HASWELL_X: /* HSX */
3915 case INTEL_FAM6_BROADWELL_X: /* BDX */
3916 case INTEL_FAM6_SKYLAKE_X: /* SKX */
3917 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
3918 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
3919 BIC_PRESENT(BIC_PKG__);
3920 BIC_PRESENT(BIC_RAM__);
3921 if (rapl_joules) {
3922 BIC_PRESENT(BIC_Pkg_J);
3923 BIC_PRESENT(BIC_RAM_J);
3924 } else {
3925 BIC_PRESENT(BIC_PkgWatt);
3926 BIC_PRESENT(BIC_RAMWatt);
3928 break;
3929 case INTEL_FAM6_SANDYBRIDGE_X:
3930 case INTEL_FAM6_IVYBRIDGE_X:
3931 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
3932 BIC_PRESENT(BIC_PKG__);
3933 BIC_PRESENT(BIC_RAM__);
3934 if (rapl_joules) {
3935 BIC_PRESENT(BIC_Pkg_J);
3936 BIC_PRESENT(BIC_Cor_J);
3937 BIC_PRESENT(BIC_RAM_J);
3938 } else {
3939 BIC_PRESENT(BIC_PkgWatt);
3940 BIC_PRESENT(BIC_CorWatt);
3941 BIC_PRESENT(BIC_RAMWatt);
3943 break;
3944 case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
3945 case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */
3946 do_rapl = RAPL_PKG | RAPL_CORES;
3947 if (rapl_joules) {
3948 BIC_PRESENT(BIC_Pkg_J);
3949 BIC_PRESENT(BIC_Cor_J);
3950 } else {
3951 BIC_PRESENT(BIC_PkgWatt);
3952 BIC_PRESENT(BIC_CorWatt);
3954 break;
3955 case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */
3956 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
3957 BIC_PRESENT(BIC_PKG__);
3958 BIC_PRESENT(BIC_RAM__);
3959 if (rapl_joules) {
3960 BIC_PRESENT(BIC_Pkg_J);
3961 BIC_PRESENT(BIC_Cor_J);
3962 BIC_PRESENT(BIC_RAM_J);
3963 } else {
3964 BIC_PRESENT(BIC_PkgWatt);
3965 BIC_PRESENT(BIC_CorWatt);
3966 BIC_PRESENT(BIC_RAMWatt);
3968 break;
3969 default:
3970 return;
3973 /* read units from package 0; verify later that other packages match */
3974 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
3975 return;
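/*
 * MSR_RAPL_POWER_UNIT: bits 3:0 power unit (1/2^N Watts),
 * bits 12:8 energy unit (1/2^N Joules; Silvermont instead reports 2^N micro-Joules),
 * bits 19:16 time unit (1/2^N seconds).
 */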
3977 rapl_power_units = 1.0 / (1 << (msr & 0xF));
3978 if (model == INTEL_FAM6_ATOM_SILVERMONT)
3979 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
3980 else
3981 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
3983 rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
3985 time_unit = msr >> 16 & 0xF;
3986 if (time_unit == 0)
3987 time_unit = 0xA;
3989 rapl_time_units = 1.0 / (1 << (time_unit));
3991 tdp = get_tdp_intel(model);
3993 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
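/*
 * The 32-bit RAPL energy-status counters wrap after 0xFFFFFFFF * energy-unit
 * Joules; draining that at TDP Watts takes this many seconds.
 */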
3994 if (!quiet)
3995 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
3998 void rapl_probe_amd(unsigned int family, unsigned int model)
4000 unsigned long long msr;
4001 unsigned int eax, ebx, ecx, edx;
4002 unsigned int has_rapl = 0;
4003 double tdp;
4005 if (max_extended_level >= 0x80000007) {
4006 __cpuid(0x80000007, eax, ebx, ecx, edx);
4007 /* RAPL (Fam 17h) */
4008 has_rapl = edx & (1 << 14);
4011 if (!has_rapl)
4012 return;
4014 switch (family) {
4015 case 0x17: /* Zen, Zen+ */
4016 case 0x18: /* Hygon Dhyana */
4017 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
4018 if (rapl_joules) {
4019 BIC_PRESENT(BIC_Pkg_J);
4020 BIC_PRESENT(BIC_Cor_J);
4021 } else {
4022 BIC_PRESENT(BIC_PkgWatt);
4023 BIC_PRESENT(BIC_CorWatt);
4025 break;
4026 default:
4027 return;
4030 if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
4031 return;
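/*
 * ldexp(1.0, -N) is 1.0 / 2^N; MSR_RAPL_PWR_UNIT uses the same unit fields
 * as Intel's MSR_RAPL_POWER_UNIT: bits 19:16 time, 12:8 energy, 3:0 power.
 */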
4033 rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
4034 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
4035 rapl_power_units = ldexp(1.0, -(msr & 0xf));
4037 tdp = get_tdp_amd(family);
4039 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
4040 if (!quiet)
4041 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
4045 * rapl_probe()
4047 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
4049 void rapl_probe(unsigned int family, unsigned int model)
4051 if (genuine_intel)
4052 rapl_probe_intel(family, model);
4053 if (authentic_amd || hygon_genuine)
4054 rapl_probe_amd(family, model);
4057 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
4059 if (!genuine_intel)
4060 return;
4062 if (family != 6)
4063 return;
4065 switch (model) {
4066 case INTEL_FAM6_HASWELL: /* HSW */
4067 case INTEL_FAM6_HASWELL_L: /* HSW */
4068 case INTEL_FAM6_HASWELL_G: /* HSW */
4069 do_gfx_perf_limit_reasons = 1;
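/* fall through: client Haswell also has the core and ring limit-reason MSRs */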
4070 case INTEL_FAM6_HASWELL_X: /* HSX */
4071 do_core_perf_limit_reasons = 1;
4072 do_ring_perf_limit_reasons = 1;
4073 default:
4074 return;
4078 void automatic_cstate_conversion_probe(unsigned int family, unsigned int model)
4080 if (is_skx(family, model) || is_bdx(family, model))
4081 has_automatic_cstate_conversion = 1;
4084 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
4086 unsigned long long msr;
4087 unsigned int dts, dts2;
4088 int cpu;
4090 if (!(do_dts || do_ptm))
4091 return 0;
4093 cpu = t->cpu_id;
4095 /* DTS is per-core, no need to print for each thread */
4096 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
4097 return 0;
4099 if (cpu_migrate(cpu)) {
4100 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
4101 return -1;
4104 if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
4105 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
4106 return 0;
4108 dts = (msr >> 16) & 0x7F;
4109 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
4110 cpu, msr, tcc_activation_temp - dts);
4112 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
4113 return 0;
4115 dts = (msr >> 16) & 0x7F;
4116 dts2 = (msr >> 8) & 0x7F;
4117 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
4118 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
4122 if (do_dts && debug) {
4123 unsigned int resolution;
4125 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
4126 return 0;
4128 dts = (msr >> 16) & 0x7F;
4129 resolution = (msr >> 27) & 0xF;
4130 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
4131 cpu, msr, tcc_activation_temp - dts, resolution);
4133 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
4134 return 0;
4136 dts = (msr >> 16) & 0x7F;
4137 dts2 = (msr >> 8) & 0x7F;
4138 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
4139 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
4142 return 0;
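/*
 * Decode one RAPL power-limit field: bit 15 enable, bits 14:0 power limit
 * in rapl_power_units, bit 16 clamping, and a time window of
 * (1 + bits[23:22]/4) * 2^bits[21:17] * rapl_time_units seconds.
 */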
4145 void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
4147 fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
4148 cpu, label,
4149 ((msr >> 15) & 1) ? "EN" : "DIS",
4150 ((msr >> 0) & 0x7FFF) * rapl_power_units,
4151 (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
4152 (((msr >> 16) & 1) ? "EN" : "DIS"));
4154 return;
4157 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
4159 unsigned long long msr;
4160 const char *msr_name;
4161 int cpu;
4163 if (!do_rapl)
4164 return 0;
4166 /* RAPL counters are per package, so print only for 1st thread/package */
4167 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
4168 return 0;
4170 cpu = t->cpu_id;
4171 if (cpu_migrate(cpu)) {
4172 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
4173 return -1;
4176 if (do_rapl & RAPL_AMD_F17H) {
4177 msr_name = "MSR_RAPL_PWR_UNIT";
4178 if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
4179 return -1;
4180 } else {
4181 msr_name = "MSR_RAPL_POWER_UNIT";
4182 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
4183 return -1;
4186 fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
4187 rapl_power_units, rapl_energy_units, rapl_time_units);
4189 if (do_rapl & RAPL_PKG_POWER_INFO) {
4191 if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
4192 return -5;
4195 fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
4196 cpu, msr,
4197 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
4198 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
4199 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
4200 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
4203 if (do_rapl & RAPL_PKG) {
4205 if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
4206 return -9;
4208 fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
4209 cpu, msr, (msr >> 63) & 1 ? "" : "UN");
4211 print_power_limit_msr(cpu, msr, "PKG Limit #1");
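/*
 * Power Limit #2 lives in the upper 32 bits of MSR_PKG_POWER_LIMIT,
 * so decode it here rather than reusing print_power_limit_msr().
 */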
4212 fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
4213 cpu,
4214 ((msr >> 47) & 1) ? "EN" : "DIS",
4215 ((msr >> 32) & 0x7FFF) * rapl_power_units,
4216 (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
4217 ((msr >> 48) & 1) ? "EN" : "DIS");
4220 if (do_rapl & RAPL_DRAM_POWER_INFO) {
4221 if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
4222 return -6;
4224 fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
4225 cpu, msr,
4226 ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
4227 ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
4228 ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
4229 ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
4231 if (do_rapl & RAPL_DRAM) {
4232 if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
4233 return -9;
4234 fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
4235 cpu, msr, (msr >> 31) & 1 ? "" : "UN");
4237 print_power_limit_msr(cpu, msr, "DRAM Limit");
4239 if (do_rapl & RAPL_CORE_POLICY) {
4240 if (get_msr(cpu, MSR_PP0_POLICY, &msr))
4241 return -7;
4243 fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
4245 if (do_rapl & RAPL_CORES_POWER_LIMIT) {
4246 if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
4247 return -9;
4248 fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
4249 cpu, msr, (msr >> 31) & 1 ? "" : "UN");
4250 print_power_limit_msr(cpu, msr, "Cores Limit");
4252 if (do_rapl & RAPL_GFX) {
4253 if (get_msr(cpu, MSR_PP1_POLICY, &msr))
4254 return -8;
4256 fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
4258 if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
4259 return -9;
4260 fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
4261 cpu, msr, (msr >> 31) & 1 ? "" : "UN");
4262 print_power_limit_msr(cpu, msr, "GFX Limit");
4264 return 0;
4268 * SNB adds support for additional MSRs:
4270 * MSR_PKG_C7_RESIDENCY 0x000003fa
4271 * MSR_CORE_C7_RESIDENCY 0x000003fe
4272 * MSR_PKG_C2_RESIDENCY 0x0000060d
4275 int has_snb_msrs(unsigned int family, unsigned int model)
4277 if (!genuine_intel)
4278 return 0;
4280 switch (model) {
4281 case INTEL_FAM6_SANDYBRIDGE:
4282 case INTEL_FAM6_SANDYBRIDGE_X:
4283 case INTEL_FAM6_IVYBRIDGE: /* IVB */
4284 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
4285 case INTEL_FAM6_HASWELL: /* HSW */
4286 case INTEL_FAM6_HASWELL_X: /* HSW */
4287 case INTEL_FAM6_HASWELL_L: /* HSW */
4288 case INTEL_FAM6_HASWELL_G: /* HSW */
4289 case INTEL_FAM6_BROADWELL: /* BDW */
4290 case INTEL_FAM6_BROADWELL_G: /* BDW */
4291 case INTEL_FAM6_BROADWELL_X: /* BDX */
4292 case INTEL_FAM6_SKYLAKE_L: /* SKL */
4293 case INTEL_FAM6_CANNONLAKE_L: /* CNL */
4294 case INTEL_FAM6_SKYLAKE_X: /* SKX */
4295 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
4296 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4297 case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */
4298 return 1;
4300 return 0;
4304 * HSW ULT added support for C8/C9/C10 MSRs:
4306 * MSR_PKG_C8_RESIDENCY 0x00000630
4307 * MSR_PKG_C9_RESIDENCY 0x00000631
4308 * MSR_PKG_C10_RESIDENCY 0x00000632
4310 * MSR_PKGC8_IRTL 0x00000633
4311 * MSR_PKGC9_IRTL 0x00000634
4312 * MSR_PKGC10_IRTL 0x00000635
4315 int has_c8910_msrs(unsigned int family, unsigned int model)
4317 if (!genuine_intel)
4318 return 0;
4320 switch (model) {
4321 case INTEL_FAM6_HASWELL_L: /* HSW */
4322 case INTEL_FAM6_BROADWELL: /* BDW */
4323 case INTEL_FAM6_SKYLAKE_L: /* SKL */
4324 case INTEL_FAM6_CANNONLAKE_L: /* CNL */
4325 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
4326 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4327 return 1;
4329 return 0;
4333 * SKL adds support for additional MSRs:
4335 * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
4336 * MSR_PKG_ANY_CORE_C0_RES 0x00000659
4337 * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
4338 * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
4340 int has_skl_msrs(unsigned int family, unsigned int model)
4342 if (!genuine_intel)
4343 return 0;
4345 switch (model) {
4346 case INTEL_FAM6_SKYLAKE_L: /* SKL */
4347 case INTEL_FAM6_CANNONLAKE_L: /* CNL */
4348 return 1;
4350 return 0;
4353 int is_slm(unsigned int family, unsigned int model)
4355 if (!genuine_intel)
4356 return 0;
4357 switch (model) {
4358 case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
4359 case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */
4360 return 1;
4362 return 0;
4365 int is_knl(unsigned int family, unsigned int model)
4367 if (!genuine_intel)
4368 return 0;
4369 switch (model) {
4370 case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
4371 return 1;
4373 return 0;
4376 int is_cnl(unsigned int family, unsigned int model)
4378 if (!genuine_intel)
4379 return 0;
4381 switch (model) {
4382 case INTEL_FAM6_CANNONLAKE_L: /* CNL */
4383 return 1;
4386 return 0;
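/* KNL increments APERF/MPERF only once per 1024 clocks, so scale readings back up by 1024 */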
4389 unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
4391 if (is_knl(family, model))
4392 return 1024;
4393 return 1;
4396 #define SLM_BCLK_FREQS 5
4397 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
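/* MSR_FSB_FREQ bits 3:0 index this table of Silvermont bus-clock frequencies, in MHz */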
4399 double slm_bclk(void)
4401 unsigned long long msr = 3;
4402 unsigned int i;
4403 double freq;
4405 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
4406 fprintf(outf, "SLM BCLK: unknown\n");
4408 i = msr & 0xf;
4409 if (i >= SLM_BCLK_FREQS) {
4410 fprintf(outf, "SLM BCLK[%d] invalid\n", i);
4411 i = 3;
4413 freq = slm_freq_table[i];
4415 if (!quiet)
4416 fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq);
4418 return freq;
4421 double discover_bclk(unsigned int family, unsigned int model)
4423 if (has_snb_msrs(family, model) || is_knl(family, model))
4424 return 100.00;
4425 else if (is_slm(family, model))
4426 return slm_bclk();
4427 else
4428 return 133.33;
4432 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
4433 * the Thermal Control Circuit (TCC) activates.
4434 * This is usually equal to tjMax.
4436 * Older processors do not have this MSR, so for them we guess,
4437 * but also allow a cmdline override with -T.
4439 * Several MSR temperature values are in units of degrees-C
4440 * below this value, including the Digital Thermal Sensor (DTS),
4441 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
4443 int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
4445 unsigned long long msr;
4446 unsigned int target_c_local;
4447 int cpu;
4449 /* tcc_activation_temp is used only for dts or ptm */
4450 if (!(do_dts || do_ptm))
4451 return 0;
4453 /* this is a per-package concept */
4454 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
4455 return 0;
4457 cpu = t->cpu_id;
4458 if (cpu_migrate(cpu)) {
4459 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
4460 return -1;
4463 if (tcc_activation_temp_override != 0) {
4464 tcc_activation_temp = tcc_activation_temp_override;
4465 fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
4466 cpu, tcc_activation_temp);
4467 return 0;
4470 /* Temperature Target MSR is Nehalem and newer only */
4471 if (!do_nhm_platform_info)
4472 goto guess;
4474 if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
4475 goto guess;
4477 target_c_local = (msr >> 16) & 0xFF;
4479 if (!quiet)
4480 fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
4481 cpu, msr, target_c_local);
4483 if (!target_c_local)
4484 goto guess;
4486 tcc_activation_temp = target_c_local;
4488 return 0;
4490 guess:
4491 tcc_activation_temp = TJMAX_DEFAULT;
4492 fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
4493 cpu, tcc_activation_temp);
4495 return 0;
4498 void decode_feature_control_msr(void)
4500 unsigned long long msr;
4502 if (!get_msr(base_cpu, MSR_IA32_FEAT_CTL, &msr))
4503 fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n",
4504 base_cpu, msr,
4505 msr & FEAT_CTL_LOCKED ? "" : "UN-",
4506 msr & (1 << 18) ? "SGX" : "");
4509 void decode_misc_enable_msr(void)
4511 unsigned long long msr;
4513 if (!genuine_intel)
4514 return;
4516 if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr))
4517 fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n",
4518 base_cpu, msr,
4519 msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-",
4520 msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-",
4521 msr & MSR_IA32_MISC_ENABLE_MWAIT ? "" : "No-",
4522 msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "",
4523 msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : "");
4526 void decode_misc_feature_control(void)
4528 unsigned long long msr;
4530 if (!has_misc_feature_control)
4531 return;
4533 if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr))
4534 fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n",
4535 base_cpu, msr,
4536 msr & (1 << 0) ? "No-" : "",
4537 msr & (1 << 1) ? "No-" : "",
4538 msr & (1 << 2) ? "No-" : "",
4539 msr & (1 << 3) ? "No-" : "");
4542 * Decode MSR_MISC_PWR_MGMT
4544 * Decode the bits according to the Nehalem documentation
4545 * bit[0] seems to continue to have the same meaning going forward;
4546 * bit[1] less so...
4548 void decode_misc_pwr_mgmt_msr(void)
4550 unsigned long long msr;
4552 if (!do_nhm_platform_info)
4553 return;
4555 if (no_MSR_MISC_PWR_MGMT)
4556 return;
4558 if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr))
4559 fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n",
4560 base_cpu, msr,
4561 msr & (1 << 0) ? "DIS" : "EN",
4562 msr & (1 << 1) ? "EN" : "DIS",
4563 msr & (1 << 8) ? "EN" : "DIS");
4566 * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG
4568 * These MSRs are present on Silvermont processors,
4569 * Intel Atom processor E3000 series (Baytrail), and friends.
4571 void decode_c6_demotion_policy_msr(void)
4573 unsigned long long msr;
4575 if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr))
4576 fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n",
4577 base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS");
4579 if (!get_msr(base_cpu, MSR_MC6_DEMOTION_POLICY_CONFIG, &msr))
4580 fprintf(outf, "cpu%d: MSR_MC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-MC6-Demotion)\n",
4581 base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS");
4585 * When models are the same for the purposes of turbostat, reuse one canonical model number so they share the same feature probing.
4587 unsigned int intel_model_duplicates(unsigned int model)
4590 switch(model) {
4591 case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
4592 case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
4593 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
4594 case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */
4595 case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */
4596 return INTEL_FAM6_NEHALEM;
4598 case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
4599 case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
4600 return INTEL_FAM6_NEHALEM_EX;
4602 case INTEL_FAM6_XEON_PHI_KNM:
4603 return INTEL_FAM6_XEON_PHI_KNL;
4605 case INTEL_FAM6_BROADWELL_X:
4606 case INTEL_FAM6_BROADWELL_D: /* BDX-DE */
4607 return INTEL_FAM6_BROADWELL_X;
4609 case INTEL_FAM6_SKYLAKE_L:
4610 case INTEL_FAM6_SKYLAKE:
4611 case INTEL_FAM6_KABYLAKE_L:
4612 case INTEL_FAM6_KABYLAKE:
4613 return INTEL_FAM6_SKYLAKE_L;
4615 case INTEL_FAM6_ICELAKE_L:
4616 case INTEL_FAM6_ICELAKE_NNPI:
4617 return INTEL_FAM6_CANNONLAKE_L;
4619 case INTEL_FAM6_ATOM_TREMONT_D:
4620 return INTEL_FAM6_ATOM_GOLDMONT_D;
4622 return model;
4624 void process_cpuid()
4626 unsigned int eax, ebx, ecx, edx;
4627 unsigned int fms, family, model, stepping, ecx_flags, edx_flags;
4628 unsigned int has_turbo;
4630 eax = ebx = ecx = edx = 0;
4632 __cpuid(0, max_level, ebx, ecx, edx);
4634 if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
4635 genuine_intel = 1;
4636 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
4637 authentic_amd = 1;
4638 else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e)
4639 hygon_genuine = 1;
4641 if (!quiet)
4642 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
4643 (char *)&ebx, (char *)&edx, (char *)&ecx);
4645 __cpuid(1, fms, ebx, ecx, edx);
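/*
 * CPUID.1 EAX: family is bits 11:8 (plus extended family bits 27:20 when
 * family == 0xf); model is bits 7:4, with extended model bits 19:16 as the
 * high nibble for family 6 and above.
 */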
4646 family = (fms >> 8) & 0xf;
4647 model = (fms >> 4) & 0xf;
4648 stepping = fms & 0xf;
4649 if (family == 0xf)
4650 family += (fms >> 20) & 0xff;
4651 if (family >= 6)
4652 model += ((fms >> 16) & 0xf) << 4;
4653 ecx_flags = ecx;
4654 edx_flags = edx;
4657 * check max extended function levels of CPUID.
4658 * This is needed to check for invariant TSC.
4659 * This check is valid for both Intel and AMD.
4661 ebx = ecx = edx = 0;
4662 __cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
4664 if (!quiet) {
4665 fprintf(outf, "0x%x CPUID levels; 0x%x xlevels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
4666 max_level, max_extended_level, family, model, stepping, family, model, stepping);
4667 fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
4668 ecx_flags & (1 << 0) ? "SSE3" : "-",
4669 ecx_flags & (1 << 3) ? "MONITOR" : "-",
4670 ecx_flags & (1 << 6) ? "SMX" : "-",
4671 ecx_flags & (1 << 7) ? "EIST" : "-",
4672 ecx_flags & (1 << 8) ? "TM2" : "-",
4673 edx_flags & (1 << 4) ? "TSC" : "-",
4674 edx_flags & (1 << 5) ? "MSR" : "-",
4675 edx_flags & (1 << 22) ? "ACPI-TM" : "-",
4676 edx_flags & (1 << 28) ? "HT" : "-",
4677 edx_flags & (1 << 29) ? "TM" : "-");
4679 if (genuine_intel)
4680 model = intel_model_duplicates(model);
4682 if (!(edx_flags & (1 << 5)))
4683 errx(1, "CPUID: no MSR");
4685 if (max_extended_level >= 0x80000007) {
4688 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
4689 * this check is valid for both Intel and AMD
4691 __cpuid(0x80000007, eax, ebx, ecx, edx);
4692 has_invariant_tsc = edx & (1 << 8);
4696 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
4697 * this check is valid for both Intel and AMD
4700 __cpuid(0x6, eax, ebx, ecx, edx);
4701 has_aperf = ecx & (1 << 0);
4702 if (has_aperf) {
4703 BIC_PRESENT(BIC_Avg_MHz);
4704 BIC_PRESENT(BIC_Busy);
4705 BIC_PRESENT(BIC_Bzy_MHz);
4707 do_dts = eax & (1 << 0);
4708 if (do_dts)
4709 BIC_PRESENT(BIC_CoreTmp);
4710 has_turbo = eax & (1 << 1);
4711 do_ptm = eax & (1 << 6);
4712 if (do_ptm)
4713 BIC_PRESENT(BIC_PkgTmp);
4714 has_hwp = eax & (1 << 7);
4715 has_hwp_notify = eax & (1 << 8);
4716 has_hwp_activity_window = eax & (1 << 9);
4717 has_hwp_epp = eax & (1 << 10);
4718 has_hwp_pkg = eax & (1 << 11);
4719 has_epb = ecx & (1 << 3);
4721 if (!quiet)
4722 fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, "
4723 "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
4724 has_aperf ? "" : "No-",
4725 has_turbo ? "" : "No-",
4726 do_dts ? "" : "No-",
4727 do_ptm ? "" : "No-",
4728 has_hwp ? "" : "No-",
4729 has_hwp_notify ? "" : "No-",
4730 has_hwp_activity_window ? "" : "No-",
4731 has_hwp_epp ? "" : "No-",
4732 has_hwp_pkg ? "" : "No-",
4733 has_epb ? "" : "No-");
4735 if (!quiet)
4736 decode_misc_enable_msr();
4739 if (max_level >= 0x7 && !quiet) {
4740 int has_sgx;
4742 ecx = 0;
4744 __cpuid_count(0x7, 0, eax, ebx, ecx, edx);
4746 has_sgx = ebx & (1 << 2);
4747 fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? "" : "No-");
4749 if (has_sgx)
4750 decode_feature_control_msr();
4753 if (max_level >= 0x15) {
4754 unsigned int eax_crystal;
4755 unsigned int ebx_tsc;
4758 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
4760 eax_crystal = ebx_tsc = crystal_hz = edx = 0;
4761 __cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx);
4763 if (ebx_tsc != 0) {
4765 if (!quiet && (ebx_tsc != 0))
4766 fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
4767 eax_crystal, ebx_tsc, crystal_hz);
4769 if (crystal_hz == 0)
4770 switch(model) {
4771 case INTEL_FAM6_SKYLAKE_L: /* SKL */
4772 crystal_hz = 24000000; /* 24.0 MHz */
4773 break;
4774 case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */
4775 crystal_hz = 25000000; /* 25.0 MHz */
4776 break;
4777 case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
4778 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4779 crystal_hz = 19200000; /* 19.2 MHz */
4780 break;
4781 default:
4782 crystal_hz = 0;
4785 if (crystal_hz) {
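/* TSC Hz = crystal Hz * CPUID.15H:EBX / CPUID.15H:EAX */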
4786 tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
4787 if (!quiet)
4788 fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
4789 tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal);
4793 if (max_level >= 0x16) {
4794 unsigned int base_mhz, max_mhz, bus_mhz, edx;
4797 * CPUID 16H Base MHz, Max MHz, Bus MHz
4799 base_mhz = max_mhz = bus_mhz = edx = 0;
4801 __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx);
4802 if (!quiet)
4803 fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n",
4804 base_mhz, max_mhz, bus_mhz);
4807 if (has_aperf)
4808 aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
4810 BIC_PRESENT(BIC_IRQ);
4811 BIC_PRESENT(BIC_TSC_MHz);
4813 if (probe_nhm_msrs(family, model)) {
4814 do_nhm_platform_info = 1;
4815 BIC_PRESENT(BIC_CPU_c1);
4816 BIC_PRESENT(BIC_CPU_c3);
4817 BIC_PRESENT(BIC_CPU_c6);
4818 BIC_PRESENT(BIC_SMI);
4820 do_snb_cstates = has_snb_msrs(family, model);
4822 if (do_snb_cstates)
4823 BIC_PRESENT(BIC_CPU_c7);
4825 do_irtl_snb = has_snb_msrs(family, model);
4826 if (do_snb_cstates && (pkg_cstate_limit >= PCL__2))
4827 BIC_PRESENT(BIC_Pkgpc2);
4828 if (pkg_cstate_limit >= PCL__3)
4829 BIC_PRESENT(BIC_Pkgpc3);
4830 if (pkg_cstate_limit >= PCL__6)
4831 BIC_PRESENT(BIC_Pkgpc6);
4832 if (do_snb_cstates && (pkg_cstate_limit >= PCL__7))
4833 BIC_PRESENT(BIC_Pkgpc7);
4834 if (has_slv_msrs(family, model)) {
4835 BIC_NOT_PRESENT(BIC_Pkgpc2);
4836 BIC_NOT_PRESENT(BIC_Pkgpc3);
4837 BIC_PRESENT(BIC_Pkgpc6);
4838 BIC_NOT_PRESENT(BIC_Pkgpc7);
4839 BIC_PRESENT(BIC_Mod_c6);
4840 use_c1_residency_msr = 1;
4842 if (is_dnv(family, model)) {
4843 BIC_PRESENT(BIC_CPU_c1);
4844 BIC_NOT_PRESENT(BIC_CPU_c3);
4845 BIC_NOT_PRESENT(BIC_Pkgpc3);
4846 BIC_NOT_PRESENT(BIC_CPU_c7);
4847 BIC_NOT_PRESENT(BIC_Pkgpc7);
4848 use_c1_residency_msr = 1;
4850 if (is_skx(family, model)) {
4851 BIC_NOT_PRESENT(BIC_CPU_c3);
4852 BIC_NOT_PRESENT(BIC_Pkgpc3);
4853 BIC_NOT_PRESENT(BIC_CPU_c7);
4854 BIC_NOT_PRESENT(BIC_Pkgpc7);
4856 if (is_bdx(family, model)) {
4857 BIC_NOT_PRESENT(BIC_CPU_c7);
4858 BIC_NOT_PRESENT(BIC_Pkgpc7);
4860 if (has_c8910_msrs(family, model)) {
4861 BIC_PRESENT(BIC_Pkgpc8);
4862 BIC_PRESENT(BIC_Pkgpc9);
4863 BIC_PRESENT(BIC_Pkgpc10);
4865 do_irtl_hsw = has_c8910_msrs(family, model);
4866 if (has_skl_msrs(family, model)) {
4867 BIC_PRESENT(BIC_Totl_c0);
4868 BIC_PRESENT(BIC_Any_c0);
4869 BIC_PRESENT(BIC_GFX_c0);
4870 BIC_PRESENT(BIC_CPUGFX);
4872 do_slm_cstates = is_slm(family, model);
4873 do_knl_cstates = is_knl(family, model);
4875 if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
4876 BIC_NOT_PRESENT(BIC_CPU_c3);
4878 if (!quiet)
4879 decode_misc_pwr_mgmt_msr();
4881 if (!quiet && has_slv_msrs(family, model))
4882 decode_c6_demotion_policy_msr();
4884 rapl_probe(family, model);
4885 perf_limit_reasons_probe(family, model);
4886 automatic_cstate_conversion_probe(family, model);
4888 if (!quiet)
4889 dump_cstate_pstate_config_info(family, model);
4891 if (!quiet)
4892 dump_sysfs_cstate_config();
4893 if (!quiet)
4894 dump_sysfs_pstate_config();
4896 if (has_skl_msrs(family, model))
4897 calculate_tsc_tweak();
4899 if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK))
4900 BIC_PRESENT(BIC_GFX_rc6);
4902 if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK))
4903 BIC_PRESENT(BIC_GFXMHz);
4905 if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK))
4906 BIC_PRESENT(BIC_CPU_LPI);
4907 else
4908 BIC_NOT_PRESENT(BIC_CPU_LPI);
4910 if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK))
4911 BIC_PRESENT(BIC_SYS_LPI);
4912 else
4913 BIC_NOT_PRESENT(BIC_SYS_LPI);
4915 if (!quiet)
4916 decode_misc_feature_control();
4918 return;
4922 * In /dev/cpu/, return success for names that are numbers,
4923 * i.e. filter out ".", "..", "microcode".
4925 int dir_filter(const struct dirent *dirp)
4927 if (isdigit(dirp->d_name[0]))
4928 return 1;
4929 else
4930 return 0;
4933 int open_dev_cpu_msr(int dummy1)
4935 return 0;
4938 void topology_probe()
4940 int i;
4941 int max_core_id = 0;
4942 int max_package_id = 0;
4943 int max_die_id = 0;
4944 int max_siblings = 0;
4946 /* Initialize num_cpus, max_cpu_num */
4947 set_max_cpu_num();
4948 topo.num_cpus = 0;
4949 for_all_proc_cpus(count_cpus);
4950 if (!summary_only && topo.num_cpus > 1)
4951 BIC_PRESENT(BIC_CPU);
4953 if (debug > 1)
4954 fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
4956 cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
4957 if (cpus == NULL)
4958 err(1, "calloc cpus");
4961 * Allocate and initialize cpu_present_set
4963 cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
4964 if (cpu_present_set == NULL)
4965 err(3, "CPU_ALLOC");
4966 cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
4967 CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
4968 for_all_proc_cpus(mark_cpu_present);
4971 * Validate that all cpus in cpu_subset are also in cpu_present_set
4973 for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) {
4974 if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset))
4975 if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set))
4976 err(1, "cpu%d not present", i);
4980 * Allocate and initialize cpu_affinity_set
4982 cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
4983 if (cpu_affinity_set == NULL)
4984 err(3, "CPU_ALLOC");
4985 cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
4986 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
4988 for_all_proc_cpus(init_thread_id);
4991 * For online cpus
4992 * find max_core_id, max_package_id
4994 for (i = 0; i <= topo.max_cpu_num; ++i) {
4995 int siblings;
4997 if (cpu_is_not_present(i)) {
4998 if (debug > 1)
4999 fprintf(outf, "cpu%d NOT PRESENT\n", i);
5000 continue;
5003 cpus[i].logical_cpu_id = i;
5005 /* get package information */
5006 cpus[i].physical_package_id = get_physical_package_id(i);
5007 if (cpus[i].physical_package_id > max_package_id)
5008 max_package_id = cpus[i].physical_package_id;
5010 /* get die information */
5011 cpus[i].die_id = get_die_id(i);
5012 if (cpus[i].die_id > max_die_id)
5013 max_die_id = cpus[i].die_id;
5015 /* get numa node information */
5016 cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
5017 if (cpus[i].physical_node_id > topo.max_node_num)
5018 topo.max_node_num = cpus[i].physical_node_id;
5020 /* get core information */
5021 cpus[i].physical_core_id = get_core_id(i);
5022 if (cpus[i].physical_core_id > max_core_id)
5023 max_core_id = cpus[i].physical_core_id;
5025 /* get thread information */
5026 siblings = get_thread_siblings(&cpus[i]);
5027 if (siblings > max_siblings)
5028 max_siblings = siblings;
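/* thread_id 0 marks the first thread of a core: count each core exactly once */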
5029 if (cpus[i].thread_id == 0)
5030 topo.num_cores++;
5033 topo.cores_per_node = max_core_id + 1;
5034 if (debug > 1)
5035 fprintf(outf, "max_core_id %d, sizing for %d cores per package\n",
5036 max_core_id, topo.cores_per_node);
5037 if (!summary_only && topo.cores_per_node > 1)
5038 BIC_PRESENT(BIC_Core);
5040 topo.num_die = max_die_id + 1;
5041 if (debug > 1)
5042 fprintf(outf, "max_die_id %d, sizing for %d die\n",
5043 max_die_id, topo.num_die);
5044 if (!summary_only && topo.num_die > 1)
5045 BIC_PRESENT(BIC_Die);
5047 topo.num_packages = max_package_id + 1;
5048 if (debug > 1)
5049 fprintf(outf, "max_package_id %d, sizing for %d packages\n",
5050 max_package_id, topo.num_packages);
5051 if (!summary_only && topo.num_packages > 1)
5052 BIC_PRESENT(BIC_Package);
5054 set_node_data();
5055 if (debug > 1)
5056 fprintf(outf, "nodes_per_pkg %d\n", topo.nodes_per_pkg);
5057 if (!summary_only && topo.nodes_per_pkg > 1)
5058 BIC_PRESENT(BIC_Node);
5060 topo.threads_per_core = max_siblings;
5061 if (debug > 1)
5062 fprintf(outf, "max_siblings %d\n", max_siblings);
5064 if (debug < 1)
5065 return;
5067 for (i = 0; i <= topo.max_cpu_num; ++i) {
5068 if (cpu_is_not_present(i))
5069 continue;
5070 fprintf(outf,
5071 "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
5072 i, cpus[i].physical_package_id, cpus[i].die_id,
5073 cpus[i].physical_node_id,
5074 cpus[i].logical_node_id,
5075 cpus[i].physical_core_id,
5076 cpus[i].thread_id);
5081 void
5082 allocate_counters(struct thread_data **t, struct core_data **c,
5083 struct pkg_data **p)
5085 int i;
5086 int num_cores = topo.cores_per_node * topo.nodes_per_pkg *
5087 topo.num_packages;
5088 int num_threads = topo.threads_per_core * num_cores;
5090 *t = calloc(num_threads, sizeof(struct thread_data));
5091 if (*t == NULL)
5092 goto error;
5094 for (i = 0; i < num_threads; i++)
5095 (*t)[i].cpu_id = -1;
5097 *c = calloc(num_cores, sizeof(struct core_data));
5098 if (*c == NULL)
5099 goto error;
5101 for (i = 0; i < num_cores; i++)
5102 (*c)[i].core_id = -1;
5104 *p = calloc(topo.num_packages, sizeof(struct pkg_data));
5105 if (*p == NULL)
5106 goto error;
5108 for (i = 0; i < topo.num_packages; i++)
5109 (*p)[i].package_id = i;
5111 return;
5112 error:
5113 err(1, "calloc counters");
5116 * init_counter()
5118 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
5120 void init_counter(struct thread_data *thread_base, struct core_data *core_base,
5121 struct pkg_data *pkg_base, int cpu_id)
5123 int pkg_id = cpus[cpu_id].physical_package_id;
5124 int node_id = cpus[cpu_id].logical_node_id;
5125 int core_id = cpus[cpu_id].physical_core_id;
5126 int thread_id = cpus[cpu_id].thread_id;
5127 struct thread_data *t;
5128 struct core_data *c;
5129 struct pkg_data *p;
5132 /* Workaround for systems where physical_node_id==-1
5133 * and logical_node_id==(-1 - topo.num_cpus)
5135 if (node_id < 0)
5136 node_id = 0;
5138 t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
5139 c = GET_CORE(core_base, core_id, node_id, pkg_id);
5140 p = GET_PKG(pkg_base, pkg_id);
5142 t->cpu_id = cpu_id;
5143 if (thread_id == 0) {
5144 t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
5145 if (cpu_is_first_core_in_package(cpu_id))
5146 t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
5149 c->core_id = core_id;
5150 p->package_id = pkg_id;
5154 int initialize_counters(int cpu_id)
5156 init_counter(EVEN_COUNTERS, cpu_id);
5157 init_counter(ODD_COUNTERS, cpu_id);
5158 return 0;
5161 void allocate_output_buffer()
5163 output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
5164 outp = output_buffer;
5165 if (outp == NULL)
5166 err(-1, "calloc output buffer");
5168 void allocate_fd_percpu(void)
5170 fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
5171 if (fd_percpu == NULL)
5172 err(-1, "calloc fd_percpu");
5174 void allocate_irq_buffers(void)
5176 irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int));
5177 if (irq_column_2_cpu == NULL)
5178 err(-1, "calloc %d", topo.num_cpus);
5180 irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int));
5181 if (irqs_per_cpu == NULL)
5182 err(-1, "calloc %d", topo.max_cpu_num + 1);
5184 void setup_all_buffers(void)
5186 topology_probe();
5187 allocate_irq_buffers();
5188 allocate_fd_percpu();
5189 allocate_counters(&thread_even, &core_even, &package_even);
5190 allocate_counters(&thread_odd, &core_odd, &package_odd);
5191 allocate_output_buffer();
5192 for_all_proc_cpus(initialize_counters);
5195 void set_base_cpu(void)
5197 base_cpu = sched_getcpu();
5198 if (base_cpu < 0)
5199 err(-ENODEV, "No valid cpus found");
5201 if (debug > 1)
5202 fprintf(outf, "base_cpu = %d\n", base_cpu);
5205 void turbostat_init()
5207 setup_all_buffers();
5208 set_base_cpu();
5209 check_dev_msr();
5210 check_permissions();
5211 process_cpuid();
5214 if (!quiet)
5215 for_all_cpus(print_hwp, ODD_COUNTERS);
5217 if (!quiet)
5218 for_all_cpus(print_epb, ODD_COUNTERS);
5220 if (!quiet)
5221 for_all_cpus(print_perf_limit, ODD_COUNTERS);
5223 if (!quiet)
5224 for_all_cpus(print_rapl, ODD_COUNTERS);
5226 for_all_cpus(set_temperature_target, ODD_COUNTERS);
5228 if (!quiet)
5229 for_all_cpus(print_thermal, ODD_COUNTERS);
5231 if (!quiet && do_irtl_snb)
5232 print_irtl();
5235 int fork_it(char **argv)
5237 pid_t child_pid;
5238 int status;
5240 snapshot_proc_sysfs_files();
5241 status = for_all_cpus(get_counters, EVEN_COUNTERS);
5242 first_counter_read = 0;
5243 if (status)
5244 exit(status);
5245 /* clear affinity side-effect of get_counters() */
5246 sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
5247 gettimeofday(&tv_even, (struct timezone *)NULL);
5249 child_pid = fork();
5250 if (!child_pid) {
5251 /* child */
5252 execvp(argv[0], argv);
5253 err(errno, "exec %s", argv[0]);
5254 } else {
5256 /* parent */
5257 if (child_pid == -1)
5258 err(1, "fork");
5260 signal(SIGINT, SIG_IGN);
5261 signal(SIGQUIT, SIG_IGN);
5262 if (waitpid(child_pid, &status, 0) == -1)
5263 err(status, "waitpid");
5265 if (WIFEXITED(status))
5266 status = WEXITSTATUS(status);
5269 * n.b. fork_it() does not check for errors from for_all_cpus()
5270 * because re-starting is problematic when forking
5272 snapshot_proc_sysfs_files();
5273 for_all_cpus(get_counters, ODD_COUNTERS);
5274 gettimeofday(&tv_odd, (struct timezone *)NULL);
5275 timersub(&tv_odd, &tv_even, &tv_delta);
5276 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS))
5277 fprintf(outf, "%s: Counter reset detected\n", progname);
5278 else {
5279 compute_average(EVEN_COUNTERS);
5280 format_all_counters(EVEN_COUNTERS);
5283 fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
5285 flush_output_stderr();
5287 return status;
5290 int get_and_dump_counters(void)
5292 int status;
5294 snapshot_proc_sysfs_files();
5295 status = for_all_cpus(get_counters, ODD_COUNTERS);
5296 if (status)
5297 return status;
5299 status = for_all_cpus(dump_counters, ODD_COUNTERS);
5300 if (status)
5301 return status;
5303 flush_output_stdout();
5305 return status;
5308 void print_version() {
5309 fprintf(outf, "turbostat version 19.08.31"
5310 " - Len Brown <lenb@kernel.org>\n");
5313 int add_counter(unsigned int msr_num, char *path, char *name,
5314 unsigned int width, enum counter_scope scope,
5315 enum counter_type type, enum counter_format format, int flags)
5317 struct msr_counter *msrp;
5319 msrp = calloc(1, sizeof(struct msr_counter));
5320 if (msrp == NULL) {
5321 perror("calloc");
5322 exit(1);
5325 msrp->msr_num = msr_num;
5326 strncpy(msrp->name, name, NAME_BYTES);
5327 if (path)
5328 strncpy(msrp->path, path, PATH_BYTES);
5329 msrp->width = width;
5330 msrp->type = type;
5331 msrp->format = format;
5332 msrp->flags = flags;
5334 switch (scope) {
5336 case SCOPE_CPU:
5337 msrp->next = sys.tp;
5338 sys.tp = msrp;
5339 sys.added_thread_counters++;
5340 if (sys.added_thread_counters > MAX_ADDED_THREAD_COUNTERS) {
5341 fprintf(stderr, "exceeded max %d added thread counters\n",
5342 MAX_ADDED_THREAD_COUNTERS);
5343 exit(-1);
5345 break;
5347 case SCOPE_CORE:
5348 msrp->next = sys.cp;
5349 sys.cp = msrp;
5350 sys.added_core_counters++;
5351 if (sys.added_core_counters > MAX_ADDED_COUNTERS) {
5352 fprintf(stderr, "exceeded max %d added core counters\n",
5353 MAX_ADDED_COUNTERS);
5354 exit(-1);
5356 break;
5358 case SCOPE_PACKAGE:
5359 msrp->next = sys.pp;
5360 sys.pp = msrp;
5361 sys.added_package_counters++;
5362 if (sys.added_package_counters > MAX_ADDED_COUNTERS) {
5363 fprintf(stderr, "exceeded max %d added package counters\n",
5364 MAX_ADDED_COUNTERS);
5365 exit(-1);
5367 break;
5370 return 0;
5373 void parse_add_command(char *add_command)
5375 int msr_num = 0;
5376 char *path = NULL;
5377 char name_buffer[NAME_BYTES] = "";
5378 int width = 64;
5379 int fail = 0;
5380 enum counter_scope scope = SCOPE_CPU;
5381 enum counter_type type = COUNTER_CYCLES;
5382 enum counter_format format = FORMAT_DELTA;
5384 while (add_command) {
5386 if (sscanf(add_command, "msr0x%x", &msr_num) == 1)
5387 goto next;
5389 if (sscanf(add_command, "msr%d", &msr_num) == 1)
5390 goto next;
5392 if (*add_command == '/') {
5393 path = add_command;
5394 goto next;
5397 if (sscanf(add_command, "u%d", &width) == 1) {
5398 if ((width == 32) || (width == 64))
5399 goto next;
5400 width = 64;
5402 if (!strncmp(add_command, "cpu", strlen("cpu"))) {
5403 scope = SCOPE_CPU;
5404 goto next;
5406 if (!strncmp(add_command, "core", strlen("core"))) {
5407 scope = SCOPE_CORE;
5408 goto next;
5410 if (!strncmp(add_command, "package", strlen("package"))) {
5411 scope = SCOPE_PACKAGE;
5412 goto next;
5414 if (!strncmp(add_command, "cycles", strlen("cycles"))) {
5415 type = COUNTER_CYCLES;
5416 goto next;
5418 if (!strncmp(add_command, "seconds", strlen("seconds"))) {
5419 type = COUNTER_SECONDS;
5420 goto next;
5422 if (!strncmp(add_command, "usec", strlen("usec"))) {
5423 type = COUNTER_USEC;
5424 goto next;
5426 if (!strncmp(add_command, "raw", strlen("raw"))) {
5427 format = FORMAT_RAW;
5428 goto next;
5430 if (!strncmp(add_command, "delta", strlen("delta"))) {
5431 format = FORMAT_DELTA;
5432 goto next;
5434 if (!strncmp(add_command, "percent", strlen("percent"))) {
5435 format = FORMAT_PERCENT;
5436 goto next;
5439 if (sscanf(add_command, "%18s,%*s", name_buffer) == 1) { /* 18 < NAME_BYTES */
5440 char *eos;
5442 eos = strchr(name_buffer, ',');
5443 if (eos)
5444 *eos = '\0';
5445 goto next;
5448 next:
5449 add_command = strchr(add_command, ',');
5450 if (add_command) {
5451 *add_command = '\0';
5452 add_command++;
5456 if ((msr_num == 0) && (path == NULL)) {
5457 fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter ) required\n");
5458 fail++;
5461 /* generate default column header */
5462 if (*name_buffer == '\0') {
5463 if (width == 32)
5464 sprintf(name_buffer, "M0x%x%s", msr_num, format == FORMAT_PERCENT ? "%" : "");
5465 else
5466 sprintf(name_buffer, "M0X%x%s", msr_num, format == FORMAT_PERCENT ? "%" : "");
5469 if (add_counter(msr_num, path, name_buffer, width, scope, type, format, 0))
5470 fail++;
5472 if (fail) {
5473 help();
5474 exit(1);
5478 int is_deferred_skip(char *name)
5480 int i;
5482 for (i = 0; i < deferred_skip_index; ++i)
5483 if (!strcmp(name, deferred_skip_names[i]))
5484 return 1;
5485 return 0;
5488 void probe_sysfs(void)
5490 char path[64];
5491 char name_buf[16];
5492 FILE *input;
5493 int state;
5494 char *sp;
5496 if (!DO_BIC(BIC_sysfs))
5497 return;
5499 for (state = 10; state >= 0; --state) {
5501 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
5502 base_cpu, state);
5503 input = fopen(path, "r");
5504 if (input == NULL)
5505 continue;
5506 if (!fgets(name_buf, sizeof(name_buf), input))
5507 err(1, "%s: failed to read file", path);
5509 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
5510 sp = strchr(name_buf, '-');
5511 if (!sp)
5512 sp = strchrnul(name_buf, '\n');
5513 *sp = '%';
5514 *(sp + 1) = '\0';
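/* e.g. "C1" becomes "C1%", the header of this residency-percentage column */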
5516 fclose(input);
5518 sprintf(path, "cpuidle/state%d/time", state);
5520 if (is_deferred_skip(name_buf))
5521 continue;
5523 add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC,
5524 FORMAT_PERCENT, SYSFS_PERCPU);
5527 for (state = 10; state >= 0; --state) {
5529 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
5530 base_cpu, state);
5531 input = fopen(path, "r");
5532 if (input == NULL)
5533 continue;
5534 if (!fgets(name_buf, sizeof(name_buf), input))
5535 err(1, "%s: failed to read file", path);
5536 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
5537 sp = strchr(name_buf, '-');
5538 if (!sp)
5539 sp = strchrnul(name_buf, '\n');
5540 *sp = '\0';
5541 fclose(input);
5543 sprintf(path, "cpuidle/state%d/usage", state);
5545 if (is_deferred_skip(name_buf))
5546 continue;
5548 add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS,
5549 FORMAT_DELTA, SYSFS_PERCPU);
5556 * Parse a cpu set with the following syntax:
5557 * 1,2,4..6,8-10 and set the corresponding bits in cpu_subset
5559 void parse_cpu_command(char *optarg)
5561 unsigned int start, end;
5562 char *next;
5564 if (!strcmp(optarg, "core")) {
5565 if (cpu_subset)
5566 goto error;
5567 show_core_only++;
5568 return;
5570 if (!strcmp(optarg, "package")) {
5571 if (cpu_subset)
5572 goto error;
5573 show_pkg_only++;
5574 return;
5576 if (show_core_only || show_pkg_only)
5577 goto error;
5579 cpu_subset = CPU_ALLOC(CPU_SUBSET_MAXCPUS);
5580 if (cpu_subset == NULL)
5581 err(3, "CPU_ALLOC");
5582 cpu_subset_size = CPU_ALLOC_SIZE(CPU_SUBSET_MAXCPUS);
5584 CPU_ZERO_S(cpu_subset_size, cpu_subset);
5586 next = optarg;
5588 while (next && *next) {
5590 if (*next == '-') /* no negative cpu numbers */
5591 goto error;
5593 start = strtoul(next, &next, 10);
5595 if (start >= CPU_SUBSET_MAXCPUS)
5596 goto error;
5597 CPU_SET_S(start, cpu_subset_size, cpu_subset);
5599 if (*next == '\0')
5600 break;
5602 if (*next == ',') {
5603 next += 1;
5604 continue;
5607 if (*next == '-') {
5608 next += 1; /* start range */
5609 } else if (*next == '.') {
5610 next += 1;
5611 if (*next == '.')
5612 next += 1; /* start range */
5613 else
5614 goto error;
5617 end = strtoul(next, &next, 10);
5618 if (end <= start)
5619 goto error;
5621 while (++start <= end) {
5622 if (start >= CPU_SUBSET_MAXCPUS)
5623 goto error;
5624 CPU_SET_S(start, cpu_subset_size, cpu_subset);
5627 if (*next == ',')
5628 next += 1;
5629 else if (*next != '\0')
5630 goto error;
5633 return;
5635 error:
5636 fprintf(stderr, "\"--cpu %s\" malformed\n", optarg);
5637 help();
5638 exit(-1);
5642 void cmdline(int argc, char **argv)
5644 int opt;
5645 int option_index = 0;
5646 static struct option long_options[] = {
5647 {"add", required_argument, 0, 'a'},
5648 {"cpu", required_argument, 0, 'c'},
5649 {"Dump", no_argument, 0, 'D'},
5650 {"debug", no_argument, 0, 'd'}, /* internal, not documented */
5651 {"enable", required_argument, 0, 'e'},
5652 {"interval", required_argument, 0, 'i'},
5653 {"num_iterations", required_argument, 0, 'n'},
5654 {"help", no_argument, 0, 'h'},
5655 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help
5656 {"Joules", no_argument, 0, 'J'},
5657 {"list", no_argument, 0, 'l'},
5658 {"out", required_argument, 0, 'o'},
5659 {"quiet", no_argument, 0, 'q'},
5660 {"show", required_argument, 0, 's'},
5661 {"Summary", no_argument, 0, 'S'},
5662 {"TCC", required_argument, 0, 'T'},
5663 {"version", no_argument, 0, 'v' },
5664 {0, 0, 0, 0 }
5667 progname = argv[0];
5669 while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qST:v",
5670 long_options, &option_index)) != -1) {
5671 switch (opt) {
5672 case 'a':
5673 parse_add_command(optarg);
5674 break;
5675 case 'c':
5676 parse_cpu_command(optarg);
5677 break;
5678 case 'D':
5679 dump_only++;
5680 break;
5681 case 'e':
5682 /* --enable specified counter */
5683 bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
5684 break;
5685 case 'd':
5686 debug++;
5687 ENABLE_BIC(BIC_DISABLED_BY_DEFAULT);
5688 break;
5689 case 'H':
5691 * --hide: do not show those specified
5692 * multiple invocations simply clear more bits in enabled mask
5694 bic_enabled &= ~bic_lookup(optarg, HIDE_LIST);
5695 break;
5696 case 'h':
5697 default:
5698 help();
5699 exit(1);
5700 case 'i':
5702 double interval = strtod(optarg, NULL);
5704 if (interval < 0.001) {
5705 fprintf(outf, "interval %f seconds is too small\n",
5706 interval);
5707 exit(2);
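/* keep whole seconds, then express the fraction as usec (timeval) and nsec (timespec) */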
5710 interval_tv.tv_sec = interval_ts.tv_sec = interval;
5711 interval_tv.tv_usec = (interval - interval_tv.tv_sec) * 1000000;
5712 interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000;
5714 break;
5715 case 'J':
5716 rapl_joules++;
5717 break;
5718 case 'l':
5719 ENABLE_BIC(BIC_DISABLED_BY_DEFAULT);
5720 list_header_only++;
5721 quiet++;
5722 break;
5723 case 'o':
5724 outf = fopen_or_die(optarg, "w");
5725 break;
5726 case 'q':
5727 quiet = 1;
5728 break;
5729 case 'n':
5730 num_iterations = strtod(optarg, NULL);
5732 if (num_iterations <= 0) {
5733 fprintf(outf, "iterations %d should be positive number\n",
5734 num_iterations);
5735 exit(2);
5737 break;
5738 case 's':
5740 * --show: show only those specified
5741 * The 1st invocation will clear and replace the enabled mask
5742 * subsequent invocations can add to it.
5744 if (shown == 0)
5745 bic_enabled = bic_lookup(optarg, SHOW_LIST);
5746 else
5747 bic_enabled |= bic_lookup(optarg, SHOW_LIST);
5748 shown = 1;
5749 break;
5750 case 'S':
5751 summary_only++;
5752 break;
5753 case 'T':
5754 tcc_activation_temp_override = atoi(optarg);
5755 break;
5756 case 'v':
5757 print_version();
5758 exit(0);
5759 break;
5764 int main(int argc, char **argv)
5766 outf = stderr;
5767 cmdline(argc, argv);
5769 if (!quiet)
5770 print_version();
5772 probe_sysfs();
5774 turbostat_init();
5776 /* dump counters and exit */
5777 if (dump_only)
5778 return get_and_dump_counters();
5780 /* list header and exit */
5781 if (list_header_only) {
5782 print_header(",");
5783 flush_output_stdout();
5784 return 0;
5788 * if any params left, it must be a command to fork
5790 if (argc - optind)
5791 return fork_it(argv + optind);
5792 else
5793 turbostat_loop();
5795 return 0;