/*
 * turbostat -- show CPU frequency and C-state residency
 * on modern Intel turbo-capable processors.
 *
 * Copyright (c) 2010, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/time.h>
#include <stdlib.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
#define MSR_TSC				0x10
#define MSR_NEHALEM_PLATFORM_INFO	0xCE
#define MSR_NEHALEM_TURBO_RATIO_LIMIT	0x1AD
#define MSR_APERF			0xE8
#define MSR_MPERF			0xE7
#define MSR_PKG_C2_RESIDENCY		0x60D	/* SNB only */
#define MSR_PKG_C3_RESIDENCY		0x3F8
#define MSR_PKG_C6_RESIDENCY		0x3F9
#define MSR_PKG_C7_RESIDENCY		0x3FA	/* SNB only */
#define MSR_CORE_C3_RESIDENCY		0x3FC
#define MSR_CORE_C6_RESIDENCY		0x3FD
#define MSR_CORE_C7_RESIDENCY		0x3FE	/* SNB only */
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5;	/* set with -i interval_sec */
unsigned int verbose;		/* set with -v */
unsigned int skip_c0;
unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
unsigned int has_aperf;
unsigned int units = 1000000000;	/* GHz etc */
unsigned int genuine_intel;
unsigned int has_invariant_tsc;
unsigned int do_nehalem_platform_info;
unsigned int do_nehalem_turbo_ratio_limit;
unsigned int extra_msr_offset;
double bclk;
unsigned int show_pkg;
unsigned int show_core;
unsigned int show_cpu;

int aperf_mperf_unstable;
int backwards_count;
char *progname;
int need_reinitialize;

int num_cpus;
struct counters {
	unsigned long long tsc;		/* per thread */
	unsigned long long aperf;	/* per thread */
	unsigned long long mperf;	/* per thread */
	unsigned long long c1;		/* per thread (calculated) */
	unsigned long long c3;		/* per core */
	unsigned long long c6;		/* per core */
	unsigned long long c7;		/* per core */
	unsigned long long pc2;		/* per package */
	unsigned long long pc3;		/* per package */
	unsigned long long pc6;		/* per package */
	unsigned long long pc7;		/* per package */
	unsigned long long extra_msr;	/* per thread */
	int pkg;
	int core;
	int cpu;
	struct counters *next;
};

struct counters *cnt_even;
struct counters *cnt_odd;
struct counters *cnt_delta;
struct counters *cnt_average;
struct timeval tv_even;
struct timeval tv_odd;
struct timeval tv_delta;
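
/*
 * Read one 64-bit MSR from /dev/cpu/<cpu>/msr.
 * If the device node cannot be opened, flag the topology for
 * re-initialization and return 0; a failed read is fatal.
 */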
unsigned long long get_msr(int cpu, off_t offset)
{
	ssize_t retval;
	unsigned long long msr;
	char pathname[32];
	int fd;

	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
	fd = open(pathname, O_RDONLY);
	if (fd < 0) {
		perror(pathname);
		need_reinitialize = 1;
		return 0;
	}

	retval = pread(fd, &msr, sizeof msr, offset);
	if (retval != sizeof msr) {
		fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
			cpu, offset, retval);
		exit(-2);
	}

	close(fd);
	return msr;
}
void print_header(void)
{
	if (show_pkg)
		fprintf(stderr, "pk");
	if (show_core)
		fprintf(stderr, " cr");
	if (show_cpu)
		fprintf(stderr, " CPU");
	if (do_nhm_cstates)
		fprintf(stderr, " %%c0 ");
	if (has_aperf)
		fprintf(stderr, " GHz");
	fprintf(stderr, " TSC");
	if (do_nhm_cstates)
		fprintf(stderr, " %%c1");
	if (do_nhm_cstates)
		fprintf(stderr, " %%c3");
	if (do_nhm_cstates)
		fprintf(stderr, " %%c6");
	if (do_snb_cstates)
		fprintf(stderr, " %%c7");
	if (do_snb_cstates)
		fprintf(stderr, " %%pc2");
	if (do_nhm_cstates)
		fprintf(stderr, " %%pc3");
	if (do_nhm_cstates)
		fprintf(stderr, " %%pc6");
	if (do_snb_cstates)
		fprintf(stderr, " %%pc7");
	if (extra_msr_offset)
		fprintf(stderr, " MSR 0x%x ", extra_msr_offset);

	putc('\n', stderr);
}
void dump_cnt(struct counters *cnt)
{
	if (!cnt)
		return;
	if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
	if (cnt->core) fprintf(stderr, "core: %d ", cnt->core);
	if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
	if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
	if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
	if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
	if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
	if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
	if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
	if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
	if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
	if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
	if (cnt->extra_msr)
		fprintf(stderr, "msr0x%x: %016llX\n",
			extra_msr_offset, cnt->extra_msr);
}
void dump_list(struct counters *cnt)
{
	printf("dump_list 0x%p\n", cnt);

	for (; cnt; cnt = cnt->next)
		dump_cnt(cnt);
}
void print_cnt(struct counters *p)
{
	double interval_float;

	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;

	/* topology columns, print blanks on 1st (average) line */
	if (p == cnt_average) {
		if (show_pkg)
			fprintf(stderr, " ");
		if (show_core)
			fprintf(stderr, "    ");
		if (show_cpu)
			fprintf(stderr, "    ");
	} else {
		if (show_pkg)
			fprintf(stderr, "%d", p->pkg);
		if (show_core)
			fprintf(stderr, "%4d", p->core);
		if (show_cpu)
			fprintf(stderr, "%4d", p->cpu);
	}

	/* %c0 */
	if (do_nhm_cstates) {
		if (!skip_c0)
			fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc);
		else
			fprintf(stderr, "   ****");
	}

	/* GHz */
	if (has_aperf) {
		if (!aperf_mperf_unstable) {
			fprintf(stderr, "%5.2f",
				1.0 * p->tsc / units * p->aperf /
				p->mperf / interval_float);
		} else {
			if (p->aperf > p->tsc || p->mperf > p->tsc) {
				fprintf(stderr, " ****");
			} else {
				fprintf(stderr, "%4.1f*",
					1.0 * p->tsc /
					units * p->aperf /
					p->mperf / interval_float);
			}
		}
	}

	/* TSC */
	fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);

	if (do_nhm_cstates) {
		if (!skip_c1)
			fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
		else
			fprintf(stderr, "   ****");
	}
	if (do_nhm_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->c3/p->tsc);
	if (do_nhm_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->c6/p->tsc);
	if (do_snb_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
	if (do_snb_cstates)
		fprintf(stderr, " %5.2f", 100.0 * p->pc2/p->tsc);
	if (do_nhm_cstates)
		fprintf(stderr, " %5.2f", 100.0 * p->pc3/p->tsc);
	if (do_nhm_cstates)
		fprintf(stderr, " %5.2f", 100.0 * p->pc6/p->tsc);
	if (do_snb_cstates)
		fprintf(stderr, " %5.2f", 100.0 * p->pc7/p->tsc);
	if (extra_msr_offset)
		fprintf(stderr, " 0x%016llx", p->extra_msr);
	putc('\n', stderr);
}
void print_counters(struct counters *counters)
{
	struct counters *cnt;

	print_header();

	if (num_cpus > 1)
		print_cnt(cnt_average);

	for (cnt = counters; cnt != NULL; cnt = cnt->next)
		print_cnt(cnt);
}
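
/*
 * Store (after - before) into delta; the expression evaluates non-zero
 * when the counter appears to have gone backwards.
 */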
#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
int compute_delta(struct counters *after,
	struct counters *before, struct counters *delta)
{
	int errors = 0;
	int perf_err = 0;

	skip_c0 = skip_c1 = 0;

	for ( ; after && before && delta;
		after = after->next, before = before->next, delta = delta->next) {
		if (before->cpu != after->cpu) {
			printf("cpu configuration changed: %d != %d\n",
				before->cpu, after->cpu);
			return -1;
		}

		if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
			fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
				before->cpu, before->tsc, after->tsc);
			errors++;
		}
		/* check for TSC < 1 Mcycles over interval */
		if (delta->tsc < (1000 * 1000)) {
			fprintf(stderr, "Insanely slow TSC rate,"
				" TSC stops in idle?\n");
			fprintf(stderr, "You can disable all c-states"
				" by booting with \"idle=poll\"\n");
			fprintf(stderr, "or just the deep ones with"
				" \"processor.max_cstate=1\"\n");
			exit(-3);
		}
		if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
			fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
				before->cpu, before->c3, after->c3);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
			fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
				before->cpu, before->c6, after->c6);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
			fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
				before->cpu, before->c7, after->c7);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
			fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
				before->cpu, before->pc2, after->pc2);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
			fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
				before->cpu, before->pc3, after->pc3);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
			fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
				before->cpu, before->pc6, after->pc6);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
			fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
				before->cpu, before->pc7, after->pc7);
			errors++;
		}

		perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
		if (perf_err) {
			fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
				before->cpu, before->aperf, after->aperf);
		}
		perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
		if (perf_err) {
			fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
				before->cpu, before->mperf, after->mperf);
		}
		if (perf_err) {
			if (!aperf_mperf_unstable) {
				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");

				aperf_mperf_unstable = 1;
			}
			/*
			 * mperf delta is likely a huge "positive" number
			 * can not use it for calculating c0 time
			 */
			skip_c0 = 1;
			skip_c1 = 1;
		}

		/*
		 * As mperf and tsc collection are not atomic,
		 * it is possible for mperf's non-halted cycles
		 * to exceed TSC's all cycles: show c1 = 0% in that case.
		 */
		if (delta->mperf > delta->tsc)
			delta->c1 = 0;
		else /* normal case, derive c1 */
			delta->c1 = delta->tsc - delta->mperf
				- delta->c3 - delta->c6 - delta->c7;

		if (delta->mperf == 0)
			delta->mperf = 1;	/* divide by 0 protection */

		/*
		 * for "extra msr", just copy the latest w/o subtracting
		 */
		delta->extra_msr = after->extra_msr;
		if (errors) {
			fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
			dump_cnt(before);
			fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
			dump_cnt(after);
			errors = 0;
		}
	}
	return 0;
}
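
/*
 * Arithmetic mean of the per-cpu deltas, reported on the summary line
 * when more than one CPU is present.
 */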
void compute_average(struct counters *delta, struct counters *avg)
{
	struct counters *sum;

	sum = calloc(1, sizeof(struct counters));
	if (sum == NULL) {
		perror("calloc sum");
		exit(1);
	}

	for (; delta; delta = delta->next) {
		sum->tsc += delta->tsc;
		sum->c1 += delta->c1;
		sum->c3 += delta->c3;
		sum->c6 += delta->c6;
		sum->c7 += delta->c7;
		sum->aperf += delta->aperf;
		sum->mperf += delta->mperf;
		sum->pc2 += delta->pc2;
		sum->pc3 += delta->pc3;
		sum->pc6 += delta->pc6;
		sum->pc7 += delta->pc7;
	}
	avg->tsc = sum->tsc/num_cpus;
	avg->c1 = sum->c1/num_cpus;
	avg->c3 = sum->c3/num_cpus;
	avg->c6 = sum->c6/num_cpus;
	avg->c7 = sum->c7/num_cpus;
	avg->aperf = sum->aperf/num_cpus;
	avg->mperf = sum->mperf/num_cpus;
	avg->pc2 = sum->pc2/num_cpus;
	avg->pc3 = sum->pc3/num_cpus;
	avg->pc6 = sum->pc6/num_cpus;
	avg->pc7 = sum->pc7/num_cpus;

	free(sum);
}
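
/*
 * Snapshot every counter that this CPU model supports,
 * for each cpu on the list.
 */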
void get_counters(struct counters *cnt)
{
	for ( ; cnt; cnt = cnt->next) {
		cnt->tsc = get_msr(cnt->cpu, MSR_TSC);
		if (do_nhm_cstates)
			cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY);
		if (do_nhm_cstates)
			cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY);
		if (do_snb_cstates)
			cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY);
		if (has_aperf)
			cnt->aperf = get_msr(cnt->cpu, MSR_APERF);
		if (has_aperf)
			cnt->mperf = get_msr(cnt->cpu, MSR_MPERF);
		if (do_snb_cstates)
			cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY);
		if (do_nhm_cstates)
			cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY);
		if (do_nhm_cstates)
			cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY);
		if (do_snb_cstates)
			cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY);
		if (extra_msr_offset)
			cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset);
	}
}
void print_nehalem_info(void)
{
	unsigned long long msr;
	unsigned int ratio;

	if (!do_nehalem_platform_info)
		return;

	msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO);

	ratio = (msr >> 40) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
		ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
		ratio, bclk, ratio * bclk);

	if (verbose > 1)
		fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);

	if (!do_nehalem_turbo_ratio_limit)
		return;

	msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT);

	ratio = (msr >> 24) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
			ratio, bclk, ratio * bclk);
}
void free_counter_list(struct counters *list)
{
	struct counters *p;

	for (p = list; p; ) {
		struct counters *free_me;

		free_me = p;
		p = p->next;
		free(free_me);
	}
}

void free_all_counters(void)
{
	free_counter_list(cnt_even);
	cnt_even = NULL;

	free_counter_list(cnt_odd);
	cnt_odd = NULL;

	free_counter_list(cnt_delta);
	cnt_delta = NULL;

	free_counter_list(cnt_average);
	cnt_average = NULL;
}
void insert_counters(struct counters **list,
	struct counters *new)
{
	struct counters *prev;

	/*
	 * list was empty
	 */
	if (*list == NULL) {
		new->next = *list;
		*list = new;
		return;
	}

	show_cpu = 1;	/* there is more than one CPU */

	/*
	 * insert on front of list.
	 * It is sorted by ascending package#, core#, cpu#
	 */
	if (((*list)->pkg > new->pkg) ||
		(((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
		(((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
		new->next = *list;
		*list = new;
		return;
	}

	prev = *list;

	while (prev->next && (prev->next->pkg < new->pkg)) {
		prev = prev->next;
		show_pkg = 1;	/* there is more than 1 package */
	}

	while (prev->next && (prev->next->pkg == new->pkg)
		&& (prev->next->core < new->core)) {
		prev = prev->next;
		show_core = 1;	/* there is more than 1 core */
	}

	while (prev->next && (prev->next->pkg == new->pkg)
		&& (prev->next->core == new->core)
		&& (prev->next->cpu < new->cpu)) {
		prev = prev->next;
	}

	/*
	 * insert after "prev"
	 */
	new->next = prev->next;
	prev->next = new;
}
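
/*
 * Allocate counter structs for this cpu on the odd, even and delta lists;
 * cnt_average is a single struct, re-pointed at the last allocation.
 */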
void alloc_new_counters(int pkg, int core, int cpu)
{
	struct counters *new;

	if (verbose > 1)
		printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);

	new = (struct counters *)calloc(1, sizeof(struct counters));
	if (new == NULL) {
		perror("calloc");
		exit(1);
	}
	new->pkg = pkg;
	new->core = core;
	new->cpu = cpu;
	insert_counters(&cnt_odd, new);

	new = (struct counters *)calloc(1, sizeof(struct counters));
	if (new == NULL) {
		perror("calloc");
		exit(1);
	}
	new->pkg = pkg;
	new->core = core;
	new->cpu = cpu;
	insert_counters(&cnt_even, new);

	new = (struct counters *)calloc(1, sizeof(struct counters));
	if (new == NULL) {
		perror("calloc");
		exit(1);
	}
	new->pkg = pkg;
	new->core = core;
	new->cpu = cpu;
	insert_counters(&cnt_delta, new);

	new = (struct counters *)calloc(1, sizeof(struct counters));
	if (new == NULL) {
		perror("calloc");
		exit(1);
	}
	new->pkg = pkg;
	new->core = core;
	new->cpu = cpu;
	cnt_average = new;
}
int get_physical_package_id(int cpu)
{
	char path[64];
	FILE *filep;
	int pkg;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	fscanf(filep, "%d", &pkg);
	fclose(filep);
	return pkg;
}

int get_core_id(int cpu)
{
	char path[64];
	FILE *filep;
	int core;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	fscanf(filep, "%d", &core);
	fclose(filep);
	return core;
}
/*
 * run func(pkg, core, cpu) on every cpu in /proc/stat
 */
int for_all_cpus(void (func)(int, int, int))
{
	FILE *fp;
	int cpu_count;
	int retval;

	fp = fopen(proc_stat, "r");
	if (fp == NULL) {
		perror(proc_stat);
		exit(1);
	}

	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
	if (retval != 0) {
		perror("/proc/stat format");
		exit(1);
	}

	for (cpu_count = 0; ; cpu_count++) {
		int cpu;

		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
		if (retval != 1)
			break;

		func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
	}
	fclose(fp);
	return cpu_count;
}
void re_initialize(void)
{
	printf("turbostat: topology changed, re-initializing.\n");
	free_all_counters();
	num_cpus = for_all_cpus(alloc_new_counters);
	need_reinitialize = 0;
	printf("num_cpus is now %d\n", num_cpus);
}
void dummy(int pkg, int core, int cpu) { return; }
/*
 * check to see if a cpu came on-line
 */
void verify_num_cpus(void)
{
	int new_num_cpus;

	new_num_cpus = for_all_cpus(dummy);

	if (new_num_cpus != num_cpus) {
		if (verbose)
			printf("num_cpus was %d, is now %d\n",
				num_cpus, new_num_cpus);
		need_reinitialize = 1;
	}
}
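
/*
 * Alternate between the "even" and "odd" counter lists so each interval's
 * delta can be computed without copying snapshots around.
 */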
void turbostat_loop()
{
restart:
	get_counters(cnt_even);
	gettimeofday(&tv_even, (struct timezone *)NULL);

	while (1) {
		verify_num_cpus();
		if (need_reinitialize) {
			re_initialize();
			goto restart;
		}
		sleep(interval_sec);
		get_counters(cnt_odd);
		gettimeofday(&tv_odd, (struct timezone *)NULL);

		compute_delta(cnt_odd, cnt_even, cnt_delta);
		timersub(&tv_odd, &tv_even, &tv_delta);
		compute_average(cnt_delta, cnt_average);
		print_counters(cnt_delta);
		if (need_reinitialize) {
			re_initialize();
			goto restart;
		}
		sleep(interval_sec);
		get_counters(cnt_even);
		gettimeofday(&tv_even, (struct timezone *)NULL);
		compute_delta(cnt_even, cnt_odd, cnt_delta);
		timersub(&tv_even, &tv_odd, &tv_delta);
		compute_average(cnt_delta, cnt_average);
		print_counters(cnt_delta);
	}
}
void check_dev_msr()
{
	struct stat sb;

	if (stat("/dev/cpu/0/msr", &sb)) {
		fprintf(stderr, "no /dev/cpu/0/msr\n");
		fprintf(stderr, "Try \"# modprobe msr\"\n");
		exit(-5);
	}
}

void check_super_user()
{
	if (getuid() != 0) {
		fprintf(stderr, "must be root\n");
		exit(-6);
	}
}
int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
	case 0x2C:	/* Westmere EP - Gulftown */
	case 0x2A:	/* SNB */
	case 0x2D:	/* SNB Xeon */
	case 0x3A:	/* IVB */
	case 0x3D:	/* IVB Xeon */
		return 1;
	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
	default:
		return 0;
	}
}
int is_snb(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x2A:
	case 0x2D:
		return 1;
	}
	return 0;
}

double discover_bclk(unsigned int family, unsigned int model)
{
	if (is_snb(family, model))
		return 100.00;
	else
		return 133.33;
}
void check_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));

	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (verbose)
		fprintf(stderr, "%.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (verbose)
		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);

	if (!(edx & (1 << 5))) {
		fprintf(stderr, "CPUID: no MSR\n");
		exit(1);
	}

	/*
	 * check max extended function levels of CPUID.
	 * This is needed to check for invariant TSC.
	 * This check is valid for both Intel and AMD.
	 */
	ebx = ecx = edx = 0;
	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));

	if (max_level < 0x80000007) {
		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
		exit(1);
	}

	/*
	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
	 * this check is valid for both Intel and AMD
	 */
	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
	has_invariant_tsc = edx & (1 << 8);

	if (!has_invariant_tsc) {
		fprintf(stderr, "No invariant TSC\n");
		exit(1);
	}

	/*
	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
	 * this check is valid for both Intel and AMD
	 */
	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
	has_aperf = ecx & (1 << 0);
	if (!has_aperf) {
		fprintf(stderr, "No APERF MSR\n");
		exit(1);
	}

	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
	do_snb_cstates = is_snb(family, model);
	bclk = discover_bclk(family, model);

	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
}
void usage()
{
	fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n",
		progname);
	exit(1);
}

/*
 * in /dev/cpu/ return success for names that are numbers
 * ie. filter out ".", "..", "microcode".
 */
int dir_filter(const struct dirent *dirp)
{
	if (isdigit(dirp->d_name[0]))
		return 1;
	else
		return 0;
}

int open_dev_cpu_msr(int dummy1)
{
	return 0;
}
void turbostat_init()
{
	check_cpuid();

	check_dev_msr();
	check_super_user();

	num_cpus = for_all_cpus(alloc_new_counters);

	if (verbose)
		print_nehalem_info();
}
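
/*
 * One-shot mode: snapshot counters, run the given command,
 * snapshot again, then print the delta covering the command's runtime.
 */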
int fork_it(char **argv)
{
	int retval;
	pid_t child_pid;
	get_counters(cnt_even);
	gettimeofday(&tv_even, (struct timezone *)NULL);

	child_pid = fork();
	if (!child_pid) {
		/* child */
		execvp(argv[0], argv);
	} else {
		int status;

		/* parent */
		if (child_pid == -1) {
			perror("fork");
			exit(1);
		}

		signal(SIGINT, SIG_IGN);
		signal(SIGQUIT, SIG_IGN);
		if (waitpid(child_pid, &status, 0) == -1) {
			perror("wait");
			exit(1);
		}
	}
	get_counters(cnt_odd);
	gettimeofday(&tv_odd, (struct timezone *)NULL);
	retval = compute_delta(cnt_odd, cnt_even, cnt_delta);

	timersub(&tv_odd, &tv_even, &tv_delta);
	compute_average(cnt_delta, cnt_average);
	if (!retval)
		print_counters(cnt_delta);

	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);

	return 0;
}
void cmdline(int argc, char **argv)
{
	int opt;

	progname = argv[0];

	while ((opt = getopt(argc, argv, "+vi:M:")) != -1) {
		switch (opt) {
		case 'v':
			verbose++;
			break;
		case 'i':
			interval_sec = atoi(optarg);
			break;
		case 'M':
			sscanf(optarg, "%x", &extra_msr_offset);
			if (verbose > 1)
				fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
			break;
		default:
			usage();
		}
	}
}
int main(int argc, char **argv)
{
	cmdline(argc, argv);

	if (verbose > 1)
		fprintf(stderr, "turbostat Dec 6, 2010"
			" - Len Brown <lenb@kernel.org>\n");
	if (verbose > 1)
		fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");

	turbostat_init();

	/*
	 * if any params left, it must be a command to fork
	 */
	if (argc - optind)
		return fork_it(argv + optind);
	else
		turbostat_loop();

	return 0;
}