4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 /* All Rights Reserved */
30 * sadc.c writes system activity binary data to a file or stdout.
32 * Usage: sadc [t n] [file]
34 * If t and n are not specified, sadc writes a dummy record to the data file.
35 * This form is typically used at system boot. If t and n are specified, it
36 * writes system data to the file n times, once every t seconds. In both
37 * cases, if file is not specified, the data is written to stdout.
40 #include <sys/fcntl.h>
41 #include <sys/flock.h>
44 #include <sys/sysinfo.h>
46 #include <sys/types.h>
/* Larger of two values.  NOTE: the winning argument is evaluated twice. */
#define	MAX(x1, x2)	((x1) >= (x2) ? (x1) : (x2))
68 static kstat_ctl_t
*kc
; /* libkstat cookie */
71 static kstat_t
**cpu_stat_list
= NULL
;
72 static kstat_t
**ocpu_stat_list
= NULL
;
74 static kstat_t
**kmem_cache_list
= NULL
;
76 static kstat_t
*sysinfo_ksp
, *vminfo_ksp
, *var_ksp
;
77 static kstat_t
*system_misc_ksp
, *ufs_inode_ksp
, *kmem_oversize_ksp
;
78 static kstat_t
*file_cache_ksp
;
79 static kstat_named_t
*ufs_inode_size_knp
, *nproc_knp
;
80 static kstat_named_t
*file_total_knp
, *file_avail_knp
;
81 static kstat_named_t
*oversize_alloc_knp
, *oversize_fail_knp
;
82 static int slab_create_index
, slab_destroy_index
, slab_size_index
;
83 static int buf_size_index
, buf_avail_index
, alloc_fail_index
;
85 static struct iodevinfo zeroiodev
= { NULL
, NULL
};
86 static struct iodevinfo
*firstiodev
= NULL
;
87 static struct iodevinfo
*lastiodev
= NULL
;
88 static struct iodevinfo
*snip
= NULL
;
89 static ulong_t niodevs
;
91 static void all_stat_init(void);
92 static int all_stat_load(void);
93 static void fail(int, char *, ...);
94 static void safe_zalloc(void **, int, int);
95 static kid_t
safe_kstat_read(kstat_ctl_t
*, kstat_t
*, void *);
96 static kstat_t
*safe_kstat_lookup(kstat_ctl_t
*, char *, int, char *);
97 static void *safe_kstat_data_lookup(kstat_t
*, char *);
98 static int safe_kstat_data_index(kstat_t
*, char *);
99 static void init_iodevs(void);
100 static int iodevinfo_load(void);
101 static int kstat_copy(const kstat_t
*, kstat_t
*);
102 static void diff_two_arrays(kstat_t
** const [], size_t, size_t,
103 kstat_t
** const []);
104 static void compute_cpu_stat_adj(void);
106 static char *cmdname
= "sadc";
108 static struct var var
;
111 static int64_t cpu_stat_adj
[CPU_STATES
] = {0};
118 * Sleep until *wakeup + interval, keeping cadence where desired
120 * *wakeup - The time we last wanted to wake up. Updated.
121 * interval - We want to sleep until *wakeup + interval
122 * *caught_cont - Global set by signal handler if we got a SIGCONT
125 sleep_until(hrtime_t
*wakeup
, hrtime_t interval
, int *caught_cont
)
127 hrtime_t now
, pause
, pause_left
;
128 struct timespec pause_tv
;
131 pause
= *wakeup
+ interval
- now
;
133 if (pause
<= 0 || pause
< (interval
/ 4))
135 /* Reset our cadence (see comment below) */
136 *wakeup
= now
+ interval
;
140 * If we got here, then the time between the
141 * output we just did, and the scheduled time
142 * for the next output is < 1/4 of our requested
143 * interval AND the number of intervals has been
144 * requested AND we have never caught a SIGCONT
145 * (so we have never been suspended). In this
146 * case, we'll try to stay to the desired
147 * cadence, and we will pause for 1/2 the normal
148 * interval this time.
150 pause
= interval
/ 2;
159 /* Now do the actual sleep */
162 pause_tv
.tv_sec
= pause_left
/ NANOSEC
;
163 pause_tv
.tv_nsec
= pause_left
% NANOSEC
;
164 status
= nanosleep(&pause_tv
, (struct timespec
*)NULL
);
166 if (errno
== EINTR
) {
168 pause_left
= *wakeup
- now
;
169 if (pause_left
< 1000)
173 fail(1, "nanosleep failed");
175 } while (status
!= 0);
179 * Signal handler - so we can be aware of SIGCONT
182 cont_handler(int sig_number
)
184 /* Re-set the signal handler */
185 (void) signal(sig_number
, cont_handler
);
190 main(int argc
, char *argv
[])
198 struct iodevinfo
*iodev
;
204 ct
= argc
>= 3? atoi(argv
[2]): 0;
205 min
= time((time_t *)0);
206 ti
= argc
>= 3? atoi(argv
[1]): 0;
208 period_n
= (hrtime_t
)ti
* NANOSEC
;
210 if ((kc
= kstat_open()) == NULL
)
211 fail(1, "kstat_open(): can't open /dev/kstat");
213 /* Set up handler for SIGCONT */
214 if (signal(SIGCONT
, cont_handler
) == SIG_ERR
)
215 fail(1, "signal failed");
220 if (argc
== 3 || argc
== 1) {
222 * no data file is specified, direct data to stdout.
228 fname
= (argc
== 2) ? argv
[1] : argv
[3];
230 * Open or Create a data file. If the file doesn't exist, then
231 * it will be created.
233 if ((fp
= open(fname
, O_WRONLY
| O_APPEND
| O_CREAT
, 0644))
235 fail(1, "can't open data file");
237 * Lock the entire data file to prevent data corruption
239 lock
.l_type
= F_WRLCK
;
240 lock
.l_whence
= SEEK_SET
;
243 if (fcntl(fp
, F_SETLK
, &lock
) == -1)
244 fail(1, "can't lock data file");
246 * Get data file statistics for use in determining whether
247 * truncation required and where rollback recovery should
250 if (fstat(fp
, &buf
) == -1)
251 fail(1, "can't get data file information");
253 * If the data file was opened and is too old, truncate it
255 if (min
- buf
.st_mtime
> 86400)
256 if (ftruncate(fp
, 0) == -1)
257 fail(1, "can't truncate data file");
259 * Remember filesize for rollback on error (bug #1223549)
261 flength
= buf
.st_size
;
264 memset(&d
, 0, sizeof (d
));
267 * If n == 0, write the additional dummy record.
274 if (write(fp
, &d
, sizeof (struct sa
)) != sizeof (struct sa
))
275 ftruncate(fp
, flength
), fail(1, "write failed");
277 for (iodev
= firstiodev
; iodev
; iodev
= iodev
->next
) {
278 if (write(fp
, iodev
, sizeof (struct iodevinfo
)) !=
279 sizeof (struct iodevinfo
))
280 ftruncate(fp
, flength
), fail(1, "write failed");
284 start_n
= gethrtime();
288 (void) kstat_chain_update(kc
);
291 } while (all_stat_load() || iodevinfo_load());
293 d
.ts
= time((time_t *)0);
297 if (write(fp
, &d
, sizeof (struct sa
)) != sizeof (struct sa
))
298 ftruncate(fp
, flength
), fail(1, "write failed");
300 for (iodev
= firstiodev
; iodev
; iodev
= iodev
->next
) {
301 if (write(fp
, iodev
, sizeof (struct iodevinfo
)) !=
302 sizeof (struct iodevinfo
))
303 ftruncate(fp
, flength
), fail(1, "write failed");
306 sleep_until(&start_n
, period_n
, &caught_cont
);
317 * Get various KIDs for subsequent all_stat_load operations.
326 * Initialize global statistics
329 sysinfo_ksp
= safe_kstat_lookup(kc
, "unix", 0, "sysinfo");
330 vminfo_ksp
= safe_kstat_lookup(kc
, "unix", 0, "vminfo");
331 kmem_oversize_ksp
= safe_kstat_lookup(kc
, "vmem", -1, "kmem_oversize");
332 var_ksp
= safe_kstat_lookup(kc
, "unix", 0, "var");
333 system_misc_ksp
= safe_kstat_lookup(kc
, "unix", 0, "system_misc");
334 file_cache_ksp
= safe_kstat_lookup(kc
, "unix", 0, "file_cache");
335 ufs_inode_ksp
= kstat_lookup(kc
, "ufs", 0, "inode_cache");
337 safe_kstat_read(kc
, system_misc_ksp
, NULL
);
338 nproc_knp
= safe_kstat_data_lookup(system_misc_ksp
, "nproc");
340 safe_kstat_read(kc
, file_cache_ksp
, NULL
);
341 file_avail_knp
= safe_kstat_data_lookup(file_cache_ksp
, "buf_avail");
342 file_total_knp
= safe_kstat_data_lookup(file_cache_ksp
, "buf_total");
344 safe_kstat_read(kc
, kmem_oversize_ksp
, NULL
);
345 oversize_alloc_knp
= safe_kstat_data_lookup(kmem_oversize_ksp
,
347 oversize_fail_knp
= safe_kstat_data_lookup(kmem_oversize_ksp
, "fail");
349 if (ufs_inode_ksp
!= NULL
) {
350 safe_kstat_read(kc
, ufs_inode_ksp
, NULL
);
351 ufs_inode_size_knp
= safe_kstat_data_lookup(ufs_inode_ksp
,
353 ninode
= ((kstat_named_t
*)
354 safe_kstat_data_lookup(ufs_inode_ksp
,
355 "maxsize"))->value
.l
;
359 * Load constant values now -- no need to reread each time
362 safe_kstat_read(kc
, var_ksp
, (void *) &var
);
365 * Initialize per-CPU and per-kmem-cache statistics
369 for (ksp
= kc
->kc_chain
; ksp
; ksp
= ksp
->ks_next
) {
370 if (strncmp(ksp
->ks_name
, "cpu_stat", 8) == 0)
372 if (strcmp(ksp
->ks_class
, "kmem_cache") == 0)
376 safe_zalloc((void **)&cpu_stat_list
, ncpus
* sizeof (kstat_t
*), 1);
377 safe_zalloc((void **)&kmem_cache_list
, ncaches
* sizeof (kstat_t
*), 1);
380 for (ksp
= kc
->kc_chain
; ksp
; ksp
= ksp
->ks_next
) {
381 if (strncmp(ksp
->ks_name
, "cpu_stat", 8) == 0 &&
382 kstat_read(kc
, ksp
, NULL
) != -1)
383 cpu_stat_list
[ncpus
++] = ksp
;
384 if (strcmp(ksp
->ks_class
, "kmem_cache") == 0 &&
385 kstat_read(kc
, ksp
, NULL
) != -1)
386 kmem_cache_list
[ncaches
++] = ksp
;
390 fail(1, "can't find any cpu statistics");
393 fail(1, "can't find any kmem_cache statistics");
395 ksp
= kmem_cache_list
[0];
396 safe_kstat_read(kc
, ksp
, NULL
);
397 buf_size_index
= safe_kstat_data_index(ksp
, "buf_size");
398 slab_create_index
= safe_kstat_data_index(ksp
, "slab_create");
399 slab_destroy_index
= safe_kstat_data_index(ksp
, "slab_destroy");
400 slab_size_index
= safe_kstat_data_index(ksp
, "slab_size");
401 buf_avail_index
= safe_kstat_data_index(ksp
, "buf_avail");
402 alloc_fail_index
= safe_kstat_data_index(ksp
, "alloc_fail");
406 * load statistics, summing across CPUs where needed
415 uint64_t cpu_tick
[4] = {0, 0, 0, 0};
417 memset(&d
, 0, sizeof (d
));
423 safe_kstat_read(kc
, sysinfo_ksp
, (void *) &d
.si
);
424 safe_kstat_read(kc
, vminfo_ksp
, (void *) &d
.vmi
);
425 safe_kstat_read(kc
, system_misc_ksp
, NULL
);
426 safe_kstat_read(kc
, file_cache_ksp
, NULL
);
428 if (ufs_inode_ksp
!= NULL
) {
429 safe_kstat_read(kc
, ufs_inode_ksp
, NULL
);
430 d
.szinode
= ufs_inode_size_knp
->value
.ul
;
433 d
.szfile
= file_total_knp
->value
.ui64
- file_avail_knp
->value
.ui64
;
434 d
.szproc
= nproc_knp
->value
.ul
;
436 d
.mszinode
= (ninode
> d
.szinode
) ? ninode
: d
.szinode
;
437 d
.mszfile
= d
.szfile
;
438 d
.mszproc
= var
.v_proc
;
441 * Per-CPU statistics.
444 for (i
= 0; i
< ncpus
; i
++) {
445 if (kstat_read(kc
, cpu_stat_list
[i
], (void *) &cs
) == -1)
448 np
= (ulong_t
*)&d
.csi
;
449 tp
= (ulong_t
*)&cs
.cpu_sysinfo
;
452 * Accumulate cpu ticks for CPU_IDLE, CPU_USER, CPU_KERNEL and
453 * CPU_WAIT with respect to each of the cpus.
455 for (j
= 0; j
< CPU_STATES
; j
++)
456 cpu_tick
[j
] += tp
[j
];
458 for (j
= 0; j
< sizeof (cpu_sysinfo_t
); j
+= sizeof (ulong_t
))
460 np
= (ulong_t
*)&d
.cvmi
;
461 tp
= (ulong_t
*)&cs
.cpu_vminfo
;
462 for (j
= 0; j
< sizeof (cpu_vminfo_t
); j
+= sizeof (ulong_t
))
467 * Per-cache kmem statistics.
470 for (i
= 0; i
< ncaches
; i
++) {
472 u_longlong_t slab_create
, slab_destroy
, slab_size
, mem_total
;
473 u_longlong_t buf_size
, buf_avail
, alloc_fail
;
476 if (kstat_read(kc
, kmem_cache_list
[i
], NULL
) == -1)
478 knp
= kmem_cache_list
[i
]->ks_data
;
479 slab_create
= knp
[slab_create_index
].value
.ui64
;
480 slab_destroy
= knp
[slab_destroy_index
].value
.ui64
;
481 slab_size
= knp
[slab_size_index
].value
.ui64
;
482 buf_size
= knp
[buf_size_index
].value
.ui64
;
483 buf_avail
= knp
[buf_avail_index
].value
.ui64
;
484 alloc_fail
= knp
[alloc_fail_index
].value
.ui64
;
486 kmi_index
= KMEM_SMALL
;
488 kmi_index
= KMEM_LARGE
;
489 mem_total
= (slab_create
- slab_destroy
) * slab_size
;
491 d
.kmi
.km_mem
[kmi_index
] += (ulong_t
)mem_total
;
492 d
.kmi
.km_alloc
[kmi_index
] +=
493 (ulong_t
)mem_total
- buf_size
* buf_avail
;
494 d
.kmi
.km_fail
[kmi_index
] += (ulong_t
)alloc_fail
;
497 safe_kstat_read(kc
, kmem_oversize_ksp
, NULL
);
499 d
.kmi
.km_alloc
[KMEM_OSIZE
] = d
.kmi
.km_mem
[KMEM_OSIZE
] =
500 oversize_alloc_knp
->value
.ui64
;
501 d
.kmi
.km_fail
[KMEM_OSIZE
] = oversize_fail_knp
->value
.ui64
;
504 * Adjust CPU statistics so the delta calculations in sar will
505 * be correct when facing changes to the set of online CPUs.
507 compute_cpu_stat_adj();
508 for (i
= 0; i
< CPU_STATES
; i
++)
509 d
.csi
.cpu
[i
] = (cpu_tick
[i
] + cpu_stat_adj
[i
]) / ncpus
;
515 fail(int do_perror
, char *message
, ...)
519 va_start(args
, message
);
520 fprintf(stderr
, "%s: ", cmdname
);
521 vfprintf(stderr
, message
, args
);
524 fprintf(stderr
, ": %s", strerror(errno
));
525 fprintf(stderr
, "\n");
/*
 * Allocate and zero-fill size bytes at *ptr; a failure is fatal.
 *
 * ptr        - in/out: receives the new allocation
 * size       - number of bytes to allocate
 * free_first - nonzero if the previous *ptr should be released first
 */
static void
safe_zalloc(void **ptr, int size, int free_first)
{
	if (free_first)
		free(*ptr);	/* free(NULL) is a defined no-op, no guard needed */
	if ((*ptr = malloc(size)) == NULL)
		fail(1, "malloc failed");
	memset(*ptr, 0, size);
}
540 safe_kstat_read(kstat_ctl_t
*kc
, kstat_t
*ksp
, void *data
)
542 kid_t kstat_chain_id
= kstat_read(kc
, ksp
, data
);
544 if (kstat_chain_id
== -1)
545 fail(1, "kstat_read(%x, '%s') failed", kc
, ksp
->ks_name
);
546 return (kstat_chain_id
);
550 safe_kstat_lookup(kstat_ctl_t
*kc
, char *ks_module
, int ks_instance
,
553 kstat_t
*ksp
= kstat_lookup(kc
, ks_module
, ks_instance
, ks_name
);
556 fail(0, "kstat_lookup('%s', %d, '%s') failed",
557 ks_module
== NULL
? "" : ks_module
,
559 ks_name
== NULL
? "" : ks_name
);
564 safe_kstat_data_lookup(kstat_t
*ksp
, char *name
)
566 void *fp
= kstat_data_lookup(ksp
, name
);
569 fail(0, "kstat_data_lookup('%s', '%s') failed",
575 safe_kstat_data_index(kstat_t
*ksp
, char *name
)
577 return ((int)((char *)safe_kstat_data_lookup(ksp
, name
) -
578 (char *)ksp
->ks_data
) / (ksp
->ks_data_size
/ ksp
->ks_ndata
));
582 kscmp(kstat_t
*ks1
, kstat_t
*ks2
)
586 cmp
= strcmp(ks1
->ks_module
, ks2
->ks_module
);
589 cmp
= ks1
->ks_instance
- ks2
->ks_instance
;
592 return (strcmp(ks1
->ks_name
, ks2
->ks_name
));
598 struct iodevinfo
*iodev
, *previodev
, *comp
;
605 * Patch the snip in the iodevinfo list (see below)
608 lastiodev
->next
= snip
;
610 for (ksp
= kc
->kc_chain
; ksp
; ksp
= ksp
->ks_next
) {
612 if (ksp
->ks_type
!= KSTAT_TYPE_IO
)
618 safe_zalloc((void **) &iodev
->next
,
619 sizeof (struct iodevinfo
), 0);
625 memset((void *)&iodev
->kios
, 0, sizeof (kstat_io_t
));
626 iodev
->kios
.wlastupdate
= iodev
->ks
.ks_crtime
;
627 iodev
->kios
.rlastupdate
= iodev
->ks
.ks_crtime
;
630 * Insertion sort on (ks_module, ks_instance, ks_name)
633 while (kscmp(&iodev
->ks
, &comp
->next
->ks
) > 0)
635 if (previodev
!= comp
) {
636 previodev
->next
= iodev
->next
;
637 iodev
->next
= comp
->next
;
644 * Put a snip in the linked list of iodevinfos. The idea:
645 * If there was a state change such that now there are fewer
646 * iodevs, we snip the list and retain the tail, rather than
647 * freeing it. At the next state change, we clip the tail back on.
648 * This prevents a lot of malloc/free activity, and it's simpler.
654 firstiodev
= zeroiodev
.next
;
660 struct iodevinfo
*iodev
;
662 for (iodev
= firstiodev
; iodev
; iodev
= iodev
->next
) {
663 if (kstat_read(kc
, iodev
->ksp
, (void *) &iodev
->kios
) == -1)
670 kstat_copy(const kstat_t
*src
, kstat_t
*dst
)
674 if (src
->ks_data
!= NULL
) {
675 if ((dst
->ks_data
= malloc(src
->ks_data_size
)) == NULL
)
677 bcopy(src
->ks_data
, dst
->ks_data
, src
->ks_data_size
);
680 dst
->ks_data_size
= 0;
686 * Determine what is different between two sets of kstats; s[0] and s[1]
687 * are arrays of kstats of size ns0 and ns1, respectively, and sorted by
688 * instance number. u[0] and u[1] are two arrays which must be
689 * caller-zallocated; each must be of size MAX(ns0, ns1). When the
690 * function terminates, u[0] contains all s[0]-unique items and u[1]
691 * contains all s[1]-unique items. Any unused entries in u[0] and u[1]
695 diff_two_arrays(kstat_t
** const s
[], size_t ns0
, size_t ns1
,
696 kstat_t
** const u
[])
698 kstat_t
**s0p
= s
[0], **s1p
= s
[1];
699 kstat_t
**u0p
= u
[0], **u1p
= u
[1];
702 while (i
< ns0
&& j
< ns1
) {
703 if ((*s0p
)->ks_instance
== (*s1p
)->ks_instance
) {
704 if ((*s0p
)->ks_kid
!= (*s1p
)->ks_kid
) {
706 * The instance is the same, but this
707 * CPU has been offline during the
708 * interval, so we consider *u0p to
709 * be s0p-unique, and similarly for
719 } else if ((*s0p
)->ks_instance
< (*s1p
)->ks_instance
) {
739 cpuid_compare(const void *p1
, const void *p2
)
741 return ((*(kstat_t
**)p1
)->ks_instance
-
742 (*(kstat_t
**)p2
)->ks_instance
);
746 * Identify those CPUs which were not present for the whole interval so
747 * their statistics can be removed from the aggregate.
750 compute_cpu_stat_adj(void)
754 if (ocpu_stat_list
) {
756 kstat_t
**inarray
[2];
757 int max_cpus
= MAX(ncpus
, oncpus
);
759 qsort(cpu_stat_list
, ncpus
, sizeof (*cpu_stat_list
),
761 qsort(ocpu_stat_list
, oncpus
, sizeof (*ocpu_stat_list
),
764 s
[0] = ocpu_stat_list
;
765 s
[1] = cpu_stat_list
;
767 safe_zalloc((void *)&inarray
[0], sizeof (**inarray
) * max_cpus
,
769 safe_zalloc((void *)&inarray
[1], sizeof (**inarray
) * max_cpus
,
771 diff_two_arrays(s
, oncpus
, ncpus
, inarray
);
773 for (i
= 0; i
< max_cpus
; i
++) {
775 for (j
= 0; j
< CPU_STATES
; j
++)
777 ((cpu_stat_t
*)inarray
[0][i
]
778 ->ks_data
)->cpu_sysinfo
.cpu
[j
];
780 for (j
= 0; j
< CPU_STATES
; j
++)
782 ((cpu_stat_t
*)inarray
[1][i
]
783 ->ks_data
)->cpu_sysinfo
.cpu
[j
];
791 * Preserve the last interval's CPU stats.
794 for (i
= 0; i
< oncpus
; i
++)
795 free(ocpu_stat_list
[i
]->ks_data
);
798 safe_zalloc((void **)&ocpu_stat_list
, oncpus
*
799 sizeof (*ocpu_stat_list
), 1);
800 for (i
= 0; i
< ncpus
; i
++) {
801 safe_zalloc((void *)&ocpu_stat_list
[i
],
802 sizeof (*ocpu_stat_list
[0]), 0);
803 if (kstat_copy(cpu_stat_list
[i
], ocpu_stat_list
[i
]))
804 fail(1, "kstat_copy() failed");