dmake: do not set MAKEFLAGS=k
[unleashed/tickless.git] / usr / src / cmd / sa / sadc.c
blob2334a7204fc1b1e2a24830ed150a0bc8a1b8e0dc
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 /* All Rights Reserved */
30 * sadc.c writes system activity binary data to a file or stdout.
32 * Usage: sadc [t n] [file]
34 * if t and n are not specified, it writes a dummy record to data file. This
35 * usage is particularly used at system booting. If t and n are specified, it
36 * writes system data n times to file every t seconds. In both cases, if file
37 * is not specified, it writes data to stdout.
40 #include <sys/fcntl.h>
41 #include <sys/flock.h>
42 #include <sys/proc.h>
43 #include <sys/stat.h>
44 #include <sys/sysinfo.h>
45 #include <sys/time.h>
46 #include <sys/types.h>
47 #include <sys/var.h>
49 #include <ctype.h>
50 #include <errno.h>
51 #include <fcntl.h>
52 #include <kstat.h>
53 #include <memory.h>
54 #include <nlist.h>
55 #include <signal.h>
56 #include <stdarg.h>
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <string.h>
60 #include <time.h>
61 #include <unistd.h>
62 #include <strings.h>
64 #include "sa.h"
66 #define MAX(x1, x2) ((x1) >= (x2) ? (x1) : (x2))
68 static kstat_ctl_t *kc; /* libkstat cookie */
69 static int ncpus;
70 static int oncpus;
71 static kstat_t **cpu_stat_list = NULL;
72 static kstat_t **ocpu_stat_list = NULL;
73 static int ncaches;
74 static kstat_t **kmem_cache_list = NULL;
76 static kstat_t *sysinfo_ksp, *vminfo_ksp, *var_ksp;
77 static kstat_t *system_misc_ksp, *ufs_inode_ksp, *kmem_oversize_ksp;
78 static kstat_t *file_cache_ksp;
79 static kstat_named_t *ufs_inode_size_knp, *nproc_knp;
80 static kstat_named_t *file_total_knp, *file_avail_knp;
81 static kstat_named_t *oversize_alloc_knp, *oversize_fail_knp;
82 static int slab_create_index, slab_destroy_index, slab_size_index;
83 static int buf_size_index, buf_avail_index, alloc_fail_index;
85 static struct iodevinfo zeroiodev = { NULL, NULL };
86 static struct iodevinfo *firstiodev = NULL;
87 static struct iodevinfo *lastiodev = NULL;
88 static struct iodevinfo *snip = NULL;
89 static ulong_t niodevs;
91 static void all_stat_init(void);
92 static int all_stat_load(void);
93 static void fail(int, char *, ...);
94 static void safe_zalloc(void **, int, int);
95 static kid_t safe_kstat_read(kstat_ctl_t *, kstat_t *, void *);
96 static kstat_t *safe_kstat_lookup(kstat_ctl_t *, char *, int, char *);
97 static void *safe_kstat_data_lookup(kstat_t *, char *);
98 static int safe_kstat_data_index(kstat_t *, char *);
99 static void init_iodevs(void);
100 static int iodevinfo_load(void);
101 static int kstat_copy(const kstat_t *, kstat_t *);
102 static void diff_two_arrays(kstat_t ** const [], size_t, size_t,
103 kstat_t ** const []);
104 static void compute_cpu_stat_adj(void);
106 static char *cmdname = "sadc";
108 static struct var var;
110 static struct sa d;
111 static int64_t cpu_stat_adj[CPU_STATES] = {0};
113 static long ninode;
115 int caught_cont = 0;
118 * Sleep until *wakeup + interval, keeping cadence where desired
120 * *wakeup - The time we last wanted to wake up. Updated.
121 * interval - We want to sleep until *wakeup + interval
122 * *caught_cont - Global set by signal handler if we got a SIGCONT
124 void
125 sleep_until(hrtime_t *wakeup, hrtime_t interval, int *caught_cont)
127 hrtime_t now, pause, pause_left;
128 struct timespec pause_tv;
129 int status;
130 now = gethrtime();
131 pause = *wakeup + interval - now;
133 if (pause <= 0 || pause < (interval / 4))
134 if (*caught_cont) {
135 /* Reset our cadence (see comment below) */
136 *wakeup = now + interval;
137 pause = interval;
138 } else {
140 * If we got here, then the time between the
141 * output we just did, and the scheduled time
142 * for the next output is < 1/4 of our requested
143 * interval AND the number of intervals has been
144 * requested AND we have never caught a SIGCONT
145 * (so we have never been suspended). In this
146 * case, we'll try to stay to the desired
147 * cadence, and we will pause for 1/2 the normal
148 * interval this time.
150 pause = interval / 2;
151 *wakeup += interval;
153 else
154 *wakeup += interval;
155 if (pause < 1000)
156 /* Near enough */
157 return;
159 /* Now do the actual sleep */
160 pause_left = pause;
161 do {
162 pause_tv.tv_sec = pause_left / NANOSEC;
163 pause_tv.tv_nsec = pause_left % NANOSEC;
164 status = nanosleep(&pause_tv, NULL);
165 if (status < 0)
166 if (errno == EINTR) {
167 now = gethrtime();
168 pause_left = *wakeup - now;
169 if (pause_left < 1000)
170 /* Near enough */
171 return;
172 } else {
173 fail(1, "nanosleep failed");
175 } while (status != 0);
179 * Signal handler - so we can be aware of SIGCONT
181 void
182 cont_handler(int sig_number)
184 /* Re-set the signal handler */
185 (void) signal(sig_number, cont_handler);
186 caught_cont = 1;
190 main(int argc, char *argv[])
192 int ct;
193 unsigned ti;
194 int fp;
195 time_t min;
196 struct stat buf;
197 char *fname;
198 struct iodevinfo *iodev;
199 off_t flength;
200 hrtime_t start_n;
201 hrtime_t period_n;
204 ct = argc >= 3? atoi(argv[2]): 0;
205 min = time((time_t *)0);
206 ti = argc >= 3? atoi(argv[1]): 0;
208 period_n = (hrtime_t)ti * NANOSEC;
210 if ((kc = kstat_open()) == NULL)
211 fail(1, "kstat_open(): can't open /dev/kstat");
213 /* Set up handler for SIGCONT */
214 if (signal(SIGCONT, cont_handler) == SIG_ERR)
215 fail(1, "signal failed");
217 all_stat_init();
218 init_iodevs();
220 if (argc == 3 || argc == 1) {
222 * no data file is specified, direct data to stdout.
224 fp = 1;
225 } else {
226 struct flock lock;
228 fname = (argc == 2) ? argv[1] : argv[3];
230 * Open or Create a data file. If the file doesn't exist, then
231 * it will be created.
233 if ((fp = open(fname, O_WRONLY | O_APPEND | O_CREAT, 0644))
234 == -1)
235 fail(1, "can't open data file");
237 * Lock the entire data file to prevent data corruption
239 lock.l_type = F_WRLCK;
240 lock.l_whence = SEEK_SET;
241 lock.l_start = 0;
242 lock.l_len = 0;
243 if (fcntl(fp, F_SETLK, &lock) == -1)
244 fail(1, "can't lock data file");
246 * Get data file statistics for use in determining whether
247 * truncation required and where rollback recovery should
248 * be applied.
250 if (fstat(fp, &buf) == -1)
251 fail(1, "can't get data file information");
253 * If the data file was opened and is too old, truncate it
255 if (min - buf.st_mtime > 86400)
256 if (ftruncate(fp, 0) == -1)
257 fail(1, "can't truncate data file");
259 * Remember filesize for rollback on error (bug #1223549)
261 flength = buf.st_size;
264 memset(&d, 0, sizeof (d));
267 * If n == 0, write the additional dummy record.
269 if (ct == 0) {
270 d.valid = 0;
271 d.ts = min;
272 d.niodevs = niodevs;
274 if (write(fp, &d, sizeof (struct sa)) != sizeof (struct sa))
275 ftruncate(fp, flength), fail(1, "write failed");
277 for (iodev = firstiodev; iodev; iodev = iodev->next) {
278 if (write(fp, iodev, sizeof (struct iodevinfo)) !=
279 sizeof (struct iodevinfo))
280 ftruncate(fp, flength), fail(1, "write failed");
284 start_n = gethrtime();
286 for (;;) {
287 do {
288 (void) kstat_chain_update(kc);
289 all_stat_init();
290 init_iodevs();
291 } while (all_stat_load() || iodevinfo_load());
293 d.ts = time((time_t *)0);
294 d.valid = 1;
295 d.niodevs = niodevs;
297 if (write(fp, &d, sizeof (struct sa)) != sizeof (struct sa))
298 ftruncate(fp, flength), fail(1, "write failed");
300 for (iodev = firstiodev; iodev; iodev = iodev->next) {
301 if (write(fp, iodev, sizeof (struct iodevinfo)) !=
302 sizeof (struct iodevinfo))
303 ftruncate(fp, flength), fail(1, "write failed");
305 if (--ct > 0) {
306 sleep_until(&start_n, period_n, &caught_cont);
307 } else {
308 close(fp);
309 return (0);
313 /*NOTREACHED*/
317 * Get various KIDs for subsequent all_stat_load operations.
320 static void
321 all_stat_init(void)
323 kstat_t *ksp;
326 * Initialize global statistics
329 sysinfo_ksp = safe_kstat_lookup(kc, "unix", 0, "sysinfo");
330 vminfo_ksp = safe_kstat_lookup(kc, "unix", 0, "vminfo");
331 kmem_oversize_ksp = safe_kstat_lookup(kc, "vmem", -1, "kmem_oversize");
332 var_ksp = safe_kstat_lookup(kc, "unix", 0, "var");
333 system_misc_ksp = safe_kstat_lookup(kc, "unix", 0, "system_misc");
334 file_cache_ksp = safe_kstat_lookup(kc, "unix", 0, "file_cache");
335 ufs_inode_ksp = kstat_lookup(kc, "ufs", 0, "inode_cache");
337 safe_kstat_read(kc, system_misc_ksp, NULL);
338 nproc_knp = safe_kstat_data_lookup(system_misc_ksp, "nproc");
340 safe_kstat_read(kc, file_cache_ksp, NULL);
341 file_avail_knp = safe_kstat_data_lookup(file_cache_ksp, "buf_avail");
342 file_total_knp = safe_kstat_data_lookup(file_cache_ksp, "buf_total");
344 safe_kstat_read(kc, kmem_oversize_ksp, NULL);
345 oversize_alloc_knp = safe_kstat_data_lookup(kmem_oversize_ksp,
346 "mem_total");
347 oversize_fail_knp = safe_kstat_data_lookup(kmem_oversize_ksp, "fail");
349 if (ufs_inode_ksp != NULL) {
350 safe_kstat_read(kc, ufs_inode_ksp, NULL);
351 ufs_inode_size_knp = safe_kstat_data_lookup(ufs_inode_ksp,
352 "size");
353 ninode = ((kstat_named_t *)
354 safe_kstat_data_lookup(ufs_inode_ksp,
355 "maxsize"))->value.l;
359 * Load constant values now -- no need to reread each time
362 safe_kstat_read(kc, var_ksp, (void *) &var);
365 * Initialize per-CPU and per-kmem-cache statistics
368 ncpus = ncaches = 0;
369 for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
370 if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0)
371 ncpus++;
372 if (strcmp(ksp->ks_class, "kmem_cache") == 0)
373 ncaches++;
376 safe_zalloc((void **)&cpu_stat_list, ncpus * sizeof (kstat_t *), 1);
377 safe_zalloc((void **)&kmem_cache_list, ncaches * sizeof (kstat_t *), 1);
379 ncpus = ncaches = 0;
380 for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
381 if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0 &&
382 kstat_read(kc, ksp, NULL) != -1)
383 cpu_stat_list[ncpus++] = ksp;
384 if (strcmp(ksp->ks_class, "kmem_cache") == 0 &&
385 kstat_read(kc, ksp, NULL) != -1)
386 kmem_cache_list[ncaches++] = ksp;
389 if (ncpus == 0)
390 fail(1, "can't find any cpu statistics");
392 if (ncaches == 0)
393 fail(1, "can't find any kmem_cache statistics");
395 ksp = kmem_cache_list[0];
396 safe_kstat_read(kc, ksp, NULL);
397 buf_size_index = safe_kstat_data_index(ksp, "buf_size");
398 slab_create_index = safe_kstat_data_index(ksp, "slab_create");
399 slab_destroy_index = safe_kstat_data_index(ksp, "slab_destroy");
400 slab_size_index = safe_kstat_data_index(ksp, "slab_size");
401 buf_avail_index = safe_kstat_data_index(ksp, "buf_avail");
402 alloc_fail_index = safe_kstat_data_index(ksp, "alloc_fail");
406 * load statistics, summing across CPUs where needed
409 static int
410 all_stat_load(void)
412 int i, j;
413 cpu_stat_t cs;
414 ulong_t *np, *tp;
415 uint64_t cpu_tick[4] = {0, 0, 0, 0};
417 memset(&d, 0, sizeof (d));
420 * Global statistics
423 safe_kstat_read(kc, sysinfo_ksp, (void *) &d.si);
424 safe_kstat_read(kc, vminfo_ksp, (void *) &d.vmi);
425 safe_kstat_read(kc, system_misc_ksp, NULL);
426 safe_kstat_read(kc, file_cache_ksp, NULL);
428 if (ufs_inode_ksp != NULL) {
429 safe_kstat_read(kc, ufs_inode_ksp, NULL);
430 d.szinode = ufs_inode_size_knp->value.ul;
433 d.szfile = file_total_knp->value.ui64 - file_avail_knp->value.ui64;
434 d.szproc = nproc_knp->value.ul;
436 d.mszinode = (ninode > d.szinode) ? ninode : d.szinode;
437 d.mszfile = d.szfile;
438 d.mszproc = var.v_proc;
441 * Per-CPU statistics.
444 for (i = 0; i < ncpus; i++) {
445 if (kstat_read(kc, cpu_stat_list[i], (void *) &cs) == -1)
446 return (1);
448 np = (ulong_t *)&d.csi;
449 tp = (ulong_t *)&cs.cpu_sysinfo;
452 * Accumulate cpu ticks for CPU_IDLE, CPU_USER, CPU_KERNEL and
453 * CPU_WAIT with respect to each of the cpus.
455 for (j = 0; j < CPU_STATES; j++)
456 cpu_tick[j] += tp[j];
458 for (j = 0; j < sizeof (cpu_sysinfo_t); j += sizeof (ulong_t))
459 *np++ += *tp++;
460 np = (ulong_t *)&d.cvmi;
461 tp = (ulong_t *)&cs.cpu_vminfo;
462 for (j = 0; j < sizeof (cpu_vminfo_t); j += sizeof (ulong_t))
463 *np++ += *tp++;
467 * Per-cache kmem statistics.
470 for (i = 0; i < ncaches; i++) {
471 kstat_named_t *knp;
472 u_longlong_t slab_create, slab_destroy, slab_size, mem_total;
473 u_longlong_t buf_size, buf_avail, alloc_fail;
474 int kmi_index;
476 if (kstat_read(kc, kmem_cache_list[i], NULL) == -1)
477 return (1);
478 knp = kmem_cache_list[i]->ks_data;
479 slab_create = knp[slab_create_index].value.ui64;
480 slab_destroy = knp[slab_destroy_index].value.ui64;
481 slab_size = knp[slab_size_index].value.ui64;
482 buf_size = knp[buf_size_index].value.ui64;
483 buf_avail = knp[buf_avail_index].value.ui64;
484 alloc_fail = knp[alloc_fail_index].value.ui64;
485 if (buf_size <= 256)
486 kmi_index = KMEM_SMALL;
487 else
488 kmi_index = KMEM_LARGE;
489 mem_total = (slab_create - slab_destroy) * slab_size;
491 d.kmi.km_mem[kmi_index] += (ulong_t)mem_total;
492 d.kmi.km_alloc[kmi_index] +=
493 (ulong_t)mem_total - buf_size * buf_avail;
494 d.kmi.km_fail[kmi_index] += (ulong_t)alloc_fail;
497 safe_kstat_read(kc, kmem_oversize_ksp, NULL);
499 d.kmi.km_alloc[KMEM_OSIZE] = d.kmi.km_mem[KMEM_OSIZE] =
500 oversize_alloc_knp->value.ui64;
501 d.kmi.km_fail[KMEM_OSIZE] = oversize_fail_knp->value.ui64;
504 * Adjust CPU statistics so the delta calculations in sar will
505 * be correct when facing changes to the set of online CPUs.
507 compute_cpu_stat_adj();
508 for (i = 0; i < CPU_STATES; i++)
509 d.csi.cpu[i] = (cpu_tick[i] + cpu_stat_adj[i]) / ncpus;
511 return (0);
514 static void
515 fail(int do_perror, char *message, ...)
517 va_list args;
519 va_start(args, message);
520 fprintf(stderr, "%s: ", cmdname);
521 vfprintf(stderr, message, args);
522 va_end(args);
523 if (do_perror)
524 fprintf(stderr, ": %s", strerror(errno));
525 fprintf(stderr, "\n");
526 exit(2);
/*
 * (Re)allocate *ptr as a zero-filled buffer of `size` bytes, freeing the
 * old buffer first when free_first is set.  Exits on allocation failure.
 * Uses calloc() so allocation and zeroing happen in one call.
 */
static void
safe_zalloc(void **ptr, int size, int free_first)
{
	if (free_first)
		free(*ptr);
	if ((*ptr = calloc(1, size)) == NULL)
		fail(1, "malloc failed");
}
539 static kid_t
540 safe_kstat_read(kstat_ctl_t *kc, kstat_t *ksp, void *data)
542 kid_t kstat_chain_id = kstat_read(kc, ksp, data);
544 if (kstat_chain_id == -1)
545 fail(1, "kstat_read(%x, '%s') failed", kc, ksp->ks_name);
546 return (kstat_chain_id);
549 static kstat_t *
550 safe_kstat_lookup(kstat_ctl_t *kc, char *ks_module, int ks_instance,
551 char *ks_name)
553 kstat_t *ksp = kstat_lookup(kc, ks_module, ks_instance, ks_name);
555 if (ksp == NULL)
556 fail(0, "kstat_lookup('%s', %d, '%s') failed",
557 ks_module == NULL ? "" : ks_module,
558 ks_instance,
559 ks_name == NULL ? "" : ks_name);
560 return (ksp);
563 static void *
564 safe_kstat_data_lookup(kstat_t *ksp, char *name)
566 void *fp = kstat_data_lookup(ksp, name);
568 if (fp == NULL)
569 fail(0, "kstat_data_lookup('%s', '%s') failed",
570 ksp->ks_name, name);
571 return (fp);
574 static int
575 safe_kstat_data_index(kstat_t *ksp, char *name)
577 return ((int)((char *)safe_kstat_data_lookup(ksp, name) -
578 (char *)ksp->ks_data) / (ksp->ks_data_size / ksp->ks_ndata));
581 static int
582 kscmp(kstat_t *ks1, kstat_t *ks2)
584 int cmp;
586 cmp = strcmp(ks1->ks_module, ks2->ks_module);
587 if (cmp != 0)
588 return (cmp);
589 cmp = ks1->ks_instance - ks2->ks_instance;
590 if (cmp != 0)
591 return (cmp);
592 return (strcmp(ks1->ks_name, ks2->ks_name));
595 static void
596 init_iodevs(void)
598 struct iodevinfo *iodev, *previodev, *comp;
599 kstat_t *ksp;
601 iodev = &zeroiodev;
602 niodevs = 0;
605 * Patch the snip in the iodevinfo list (see below)
607 if (snip)
608 lastiodev->next = snip;
610 for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
612 if (ksp->ks_type != KSTAT_TYPE_IO)
613 continue;
614 previodev = iodev;
615 if (iodev->next)
616 iodev = iodev->next;
617 else {
618 safe_zalloc((void **) &iodev->next,
619 sizeof (struct iodevinfo), 0);
620 iodev = iodev->next;
621 iodev->next = NULL;
623 iodev->ksp = ksp;
624 iodev->ks = *ksp;
625 memset(&iodev->kios, 0, sizeof (kstat_io_t));
626 iodev->kios.wlastupdate = iodev->ks.ks_crtime;
627 iodev->kios.rlastupdate = iodev->ks.ks_crtime;
630 * Insertion sort on (ks_module, ks_instance, ks_name)
632 comp = &zeroiodev;
633 while (kscmp(&iodev->ks, &comp->next->ks) > 0)
634 comp = comp->next;
635 if (previodev != comp) {
636 previodev->next = iodev->next;
637 iodev->next = comp->next;
638 comp->next = iodev;
639 iodev = previodev;
641 niodevs++;
644 * Put a snip in the linked list of iodevinfos. The idea:
645 * If there was a state change such that now there are fewer
646 * iodevs, we snip the list and retain the tail, rather than
647 * freeing it. At the next state change, we clip the tail back on.
648 * This prevents a lot of malloc/free activity, and it's simpler.
650 lastiodev = iodev;
651 snip = iodev->next;
652 iodev->next = NULL;
654 firstiodev = zeroiodev.next;
657 static int
658 iodevinfo_load(void)
660 struct iodevinfo *iodev;
662 for (iodev = firstiodev; iodev; iodev = iodev->next) {
663 if (kstat_read(kc, iodev->ksp, (void *) &iodev->kios) == -1)
664 return (1);
666 return (0);
669 static int
670 kstat_copy(const kstat_t *src, kstat_t *dst)
672 *dst = *src;
674 if (src->ks_data != NULL) {
675 if ((dst->ks_data = malloc(src->ks_data_size)) == NULL)
676 return (-1);
677 bcopy(src->ks_data, dst->ks_data, src->ks_data_size);
678 } else {
679 dst->ks_data = NULL;
680 dst->ks_data_size = 0;
682 return (0);
686 * Determine what is different between two sets of kstats; s[0] and s[1]
687 * are arrays of kstats of size ns0 and ns1, respectively, and sorted by
688 * instance number. u[0] and u[1] are two arrays which must be
689 * caller-zallocated; each must be of size MAX(ns0, ns1). When the
690 * function terminates, u[0] contains all s[0]-unique items and u[1]
691 * contains all s[1]-unique items. Any unused entries in u[0] and u[1]
692 * are left NULL.
694 static void
695 diff_two_arrays(kstat_t ** const s[], size_t ns0, size_t ns1,
696 kstat_t ** const u[])
698 kstat_t **s0p = s[0], **s1p = s[1];
699 kstat_t **u0p = u[0], **u1p = u[1];
700 int i = 0, j = 0;
702 while (i < ns0 && j < ns1) {
703 if ((*s0p)->ks_instance == (*s1p)->ks_instance) {
704 if ((*s0p)->ks_kid != (*s1p)->ks_kid) {
706 * The instance is the same, but this
707 * CPU has been offline during the
708 * interval, so we consider *u0p to
709 * be s0p-unique, and similarly for
710 * *u1p.
712 *(u0p++) = *s0p;
713 *(u1p++) = *s1p;
715 s0p++;
716 i++;
717 s1p++;
718 j++;
719 } else if ((*s0p)->ks_instance < (*s1p)->ks_instance) {
720 *(u0p++) = *(s0p++);
721 i++;
722 } else {
723 *(u1p++) = *(s1p++);
724 j++;
728 while (i < ns0) {
729 *(u0p++) = *(s0p++);
730 i++;
732 while (j < ns1) {
733 *(u1p++) = *(s1p++);
734 j++;
738 static int
739 cpuid_compare(const void *p1, const void *p2)
741 return ((*(kstat_t **)p1)->ks_instance -
742 (*(kstat_t **)p2)->ks_instance);
746 * Identify those CPUs which were not present for the whole interval so
747 * their statistics can be removed from the aggregate.
749 static void
750 compute_cpu_stat_adj(void)
752 int i, j;
754 if (ocpu_stat_list) {
755 kstat_t **s[2];
756 kstat_t **inarray[2];
757 int max_cpus = MAX(ncpus, oncpus);
759 qsort(cpu_stat_list, ncpus, sizeof (*cpu_stat_list),
760 cpuid_compare);
761 qsort(ocpu_stat_list, oncpus, sizeof (*ocpu_stat_list),
762 cpuid_compare);
764 s[0] = ocpu_stat_list;
765 s[1] = cpu_stat_list;
767 safe_zalloc((void *)&inarray[0], sizeof (**inarray) * max_cpus,
769 safe_zalloc((void *)&inarray[1], sizeof (**inarray) * max_cpus,
771 diff_two_arrays(s, oncpus, ncpus, inarray);
773 for (i = 0; i < max_cpus; i++) {
774 if (inarray[0][i])
775 for (j = 0; j < CPU_STATES; j++)
776 cpu_stat_adj[j] +=
777 ((cpu_stat_t *)inarray[0][i]
778 ->ks_data)->cpu_sysinfo.cpu[j];
779 if (inarray[1][i])
780 for (j = 0; j < CPU_STATES; j++)
781 cpu_stat_adj[j] -=
782 ((cpu_stat_t *)inarray[1][i]
783 ->ks_data)->cpu_sysinfo.cpu[j];
786 free(inarray[0]);
787 free(inarray[1]);
791 * Preserve the last interval's CPU stats.
793 if (cpu_stat_list) {
794 for (i = 0; i < oncpus; i++)
795 free(ocpu_stat_list[i]->ks_data);
797 oncpus = ncpus;
798 safe_zalloc((void **)&ocpu_stat_list, oncpus *
799 sizeof (*ocpu_stat_list), 1);
800 for (i = 0; i < ncpus; i++) {
801 safe_zalloc((void *)&ocpu_stat_list[i],
802 sizeof (*ocpu_stat_list[0]), 0);
803 if (kstat_copy(cpu_stat_list[i], ocpu_stat_list[i]))
804 fail(1, "kstat_copy() failed");