/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include "statcommon.h"

#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <poll.h>

#define	ARRAY_SIZE(a)	(sizeof (a) / sizeof (*a))

/*
 * The time we delay before retrying after an allocation
 * failure, in milliseconds.
 */
#define	RETRY_DELAY	200

static char *cpu_states[] = {
	"cpu_ticks_idle",
	"cpu_ticks_user",
	"cpu_ticks_kernel",
	"cpu_ticks_wait"
};

/* Look up a kstat and read its data; returns NULL on any failure. */
static kstat_t *
kstat_lookup_read(kstat_ctl_t *kc, char *module,
    int instance, char *name)
{
	kstat_t *ksp = kstat_lookup(kc, module, instance, name);
	if (ksp == NULL)
		return (NULL);
	if (kstat_read(kc, ksp, NULL) == -1)
		return (NULL);
	return (ksp);
}

/*
 * Note: the following helpers do not clean up on the failure case,
 * because it is left to the free_snapshot() in the acquire_snapshot()
 * failure path.
 */

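/*
 * A minimal usage sketch (illustrative, not from the original file):
 * consumers such as the *stat commands follow roughly this sequence,
 * using only functions defined below.
 *
 *	kstat_ctl_t *kc = open_kstat();
 *	struct snapshot *ss =
 *	    acquire_snapshot(kc, SNAP_CPUS | SNAP_SYSTEM, NULL);
 *	...report on ss...
 *	free_snapshot(ss);
 */
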
static int
acquire_cpus(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;

	ss->s_nr_cpus = sysconf(_SC_CPUID_MAX) + 1;
	ss->s_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot));
	if (ss->s_cpus == NULL)
		goto out;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		kstat_t *ksp;

		ss->s_cpus[i].cs_id = ID_NO_CPU;
		ss->s_cpus[i].cs_state = p_online(i, P_STATUS);
		/* If no valid CPU is present, move on to the next one */
		if (ss->s_cpus[i].cs_state == -1)
			continue;
		ss->s_cpus[i].cs_id = i;

		if ((ksp = kstat_lookup_read(kc, "cpu_info", i, NULL)) == NULL)
			goto out;

		(void) pset_assign(PS_QUERY, i, &ss->s_cpus[i].cs_pset_id);
		if (ss->s_cpus[i].cs_pset_id == PS_NONE)
			ss->s_cpus[i].cs_pset_id = ID_NO_PSET;

		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "vm")) == NULL)
			goto out;
		if (kstat_copy(ksp, &ss->s_cpus[i].cs_vm))
			goto out;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "sys")) == NULL)
			goto out;
		if (kstat_copy(ksp, &ss->s_cpus[i].cs_sys))
			goto out;
	}

	errno = 0;
out:
	return (errno);
}

static int
acquire_psets(struct snapshot *ss)
{
	psetid_t *pids = NULL;
	struct pset_snapshot *ps;
	size_t pids_nr;
	size_t i, j;

	/*
	 * Careful in this code. We have to use pset_list
	 * twice, but in between pids_nr can change at will.
	 * We delay the setting of s_nr_psets until we have
	 * the "final" value of pids_nr.
	 */

	if (pset_list(NULL, &pids_nr) < 0)
		return (errno);

	if ((pids = calloc(pids_nr, sizeof (psetid_t))) == NULL)
		goto out;

	if (pset_list(pids, &pids_nr) < 0)
		goto out;

	ss->s_psets = calloc(pids_nr + 1, sizeof (struct pset_snapshot));
	if (ss->s_psets == NULL)
		goto out;
	ss->s_nr_psets = pids_nr + 1;

	/* CPUs not in any actual pset */
	ps = &ss->s_psets[0];
	ps->ps_id = ID_NO_PSET;
	ps->ps_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
	if (ps->ps_cpus == NULL)
		goto out;

	/* CPUs in a pset */
	for (i = 1; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		ps->ps_id = pids[i - 1];
		ps->ps_cpus =
		    calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
		if (ps->ps_cpus == NULL)
			goto out;
	}

	for (i = 0; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		for (j = 0; j < ss->s_nr_cpus; j++) {
			if (!CPU_ACTIVE(&ss->s_cpus[j]))
				continue;
			if (ss->s_cpus[j].cs_pset_id != ps->ps_id)
				continue;

			ps->ps_cpus[ps->ps_nr_cpus++] = &ss->s_cpus[j];
		}
	}

	errno = 0;
out:
	free(pids);
	return (errno);
}

static int
acquire_intrs(struct snapshot *ss, kstat_ctl_t *kc)
{
	kstat_t *ksp;
	size_t i = 0;
	kstat_t *sys_misc;
	kstat_named_t *clock;

	/* clock interrupt */
	ss->s_nr_intrs = 1;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		if (ksp->ks_type == KSTAT_TYPE_INTR)
			ss->s_nr_intrs++;
	}

	ss->s_intrs = calloc(ss->s_nr_intrs, sizeof (struct intr_snapshot));
	if (ss->s_intrs == NULL)
		return (errno);

	sys_misc = kstat_lookup_read(kc, "unix", 0, "system_misc");
	if (sys_misc == NULL)
		goto out;

	clock = (kstat_named_t *)kstat_data_lookup(sys_misc, "clk_intr");
	if (clock == NULL)
		goto out;

	(void) strlcpy(ss->s_intrs[0].is_name, "clock", KSTAT_STRLEN);
	ss->s_intrs[0].is_total = clock->value.ui32;

	i = 1;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		kstat_intr_t *ki;
		int j;

		if (ksp->ks_type != KSTAT_TYPE_INTR)
			continue;
		if (kstat_read(kc, ksp, NULL) == -1)
			goto out;

		ki = KSTAT_INTR_PTR(ksp);

		(void) strlcpy(ss->s_intrs[i].is_name, ksp->ks_name,
		    KSTAT_STRLEN);
		ss->s_intrs[i].is_total = 0;

		for (j = 0; j < KSTAT_NUM_INTRS; j++)
			ss->s_intrs[i].is_total += ki->intrs[j];

		i++;
	}

	errno = 0;
out:
	return (errno);
}

static int
acquire_sys(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;
	kstat_named_t *knp;
	kstat_t *ksp;

	if ((ksp = kstat_lookup(kc, "unix", 0, "sysinfo")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, &ss->s_sys.ss_sysinfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "vminfo")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, &ss->s_sys.ss_vminfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "dnlcstats")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, &ss->s_sys.ss_nc) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL)
		return (errno);
	if (kstat_read(kc, ksp, NULL) == -1)
		return (errno);

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "clk_intr");
	if (knp == NULL)
		return (errno);
	ss->s_sys.ss_ticks = knp->value.l;

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "deficit");
	if (knp == NULL)
		return (errno);
	ss->s_sys.ss_deficit = knp->value.l;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if (kstat_add(&ss->s_cpus[i].cs_sys, &ss->s_sys.ss_agg_sys))
			return (errno);
		if (kstat_add(&ss->s_cpus[i].cs_vm, &ss->s_sys.ss_agg_vm))
			return (errno);
		ss->s_nr_active_cpus++;
	}

	return (0);
}

struct snapshot *
acquire_snapshot(kstat_ctl_t *kc, int types, struct iodev_filter *iodev_filter)
{
	struct snapshot *ss = NULL;
	int err;

retry:
	err = 0;
	/* ensure any partial resources are freed on a retry */
	free_snapshot(ss);

	ss = safe_alloc(sizeof (struct snapshot));
	(void) memset(ss, 0, sizeof (struct snapshot));

	ss->s_types = types;

	/* wait for a possibly up-to-date chain */
	while (kstat_chain_update(kc) == -1) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "kstat_chain_update failed");
	}

	if (!err && (types & SNAP_INTERRUPTS))
		err = acquire_intrs(ss, kc);

	if (!err && (types & (SNAP_CPUS | SNAP_SYSTEM | SNAP_PSETS)))
		err = acquire_cpus(ss, kc);

	if (!err && (types & SNAP_PSETS))
		err = acquire_psets(ss);

	if (!err && (types & (SNAP_IODEVS | SNAP_CONTROLLERS |
	    SNAP_IOPATHS_LI | SNAP_IOPATHS_LTI)))
		err = acquire_iodevs(ss, kc, iodev_filter);

	if (!err && (types & SNAP_SYSTEM))
		err = acquire_sys(ss, kc);

	switch (err) {
	case 0:
		break;
	case EAGAIN:
		(void) poll(NULL, 0, RETRY_DELAY);
		/*FALLTHRU*/
	case ENXIO:
	case ENOENT:
		/* a kstat disappeared from under us */
		goto retry;
	default:
		fail(1, "acquiring snapshot failed");
	}

	return (ss);
}

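/*
 * Design note (an observation on the code above, not original
 * commentary): the acquire helpers report failure through errno.
 * EAGAIN, ENXIO and ENOENT are treated as transient (a kstat vanished
 * or changed while we walked the chain) and the whole snapshot is
 * simply retried; any other error is fatal.
 */
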
void
free_snapshot(struct snapshot *ss)
{
	size_t i;

	if (ss == NULL)
		return;

	while (ss->s_iodevs) {
		struct iodev_snapshot *tmp = ss->s_iodevs;
		ss->s_iodevs = ss->s_iodevs->is_next;
		free_iodev(tmp);
	}

	if (ss->s_cpus) {
		for (i = 0; i < ss->s_nr_cpus; i++) {
			free(ss->s_cpus[i].cs_vm.ks_data);
			free(ss->s_cpus[i].cs_sys.ks_data);
		}
		free(ss->s_cpus);
	}

	if (ss->s_psets) {
		for (i = 0; i < ss->s_nr_psets; i++)
			free(ss->s_psets[i].ps_cpus);
		free(ss->s_psets);
	}

	free(ss->s_sys.ss_agg_sys.ks_data);
	free(ss->s_sys.ss_agg_vm.ks_data);
	free(ss);
}

/* Open the kstat chain, retrying on transient failure. */
kstat_ctl_t *
open_kstat(void)
{
	kstat_ctl_t *kc;

	while ((kc = kstat_open()) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "kstat_open failed");
	}

	return (kc);
}

/* malloc() that retries on transient failure and never returns NULL. */
void *
safe_alloc(size_t size)
{
	void *ptr;

	while ((ptr = malloc(size)) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "malloc failed");
	}
	return (ptr);
}

char *
safe_strdup(char *str)
{
	char *ret;

	if (str == NULL)
		return (NULL);

	while ((ret = strdup(str)) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "malloc failed");
	}
	return (ret);
}

/*
 * Return the delta of the named statistic between two kstats, or its
 * raw value when there is no old data to subtract.
 */
uint64_t
kstat_delta(kstat_t *old, kstat_t *new, char *name)
{
	kstat_named_t *knew = kstat_data_lookup(new, name);
	if (old && old->ks_data) {
		kstat_named_t *kold = kstat_data_lookup(old, name);
		return (knew->value.ui64 - kold->value.ui64);
	}
	return (knew->value.ui64);
}

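/*
 * Illustrative use of kstat_delta() (a sketch; old_cs and new_cs are
 * hypothetical cpu_snapshot pointers from two successive snapshots):
 *
 *	uint64_t scalls = kstat_delta(&old_cs->cs_sys,
 *	    &new_cs->cs_sys, "syscall");
 *
 * On the first interval old_cs may carry no data yet, in which case
 * kstat_delta() returns the raw counter rather than a difference.
 */
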
int
kstat_copy(const kstat_t *src, kstat_t *dst)
{
	*dst = *src;

	if (src->ks_data != NULL) {
		if ((dst->ks_data = malloc(src->ks_data_size)) == NULL)
			return (-1);
		bcopy(src->ks_data, dst->ks_data, src->ks_data_size);
	} else {
		dst->ks_data = NULL;
		dst->ks_data_size = 0;
	}
	return (0);
}

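/*
 * Ownership note (a sketch of the contract as used in this file): a
 * successful kstat_copy() leaves dst->ks_data on the heap, and the
 * caller must free it, as free_snapshot() does for cs_vm and cs_sys:
 *
 *	kstat_t snap;
 *	if (kstat_copy(ksp, &snap) == 0) {
 *		...use snap.ks_data...
 *		free(snap.ks_data);
 *	}
 */
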
int
kstat_add(const kstat_t *src, kstat_t *dst)
{
	size_t i;
	kstat_named_t *from;
	kstat_named_t *to;

	if (dst->ks_data == NULL)
		return (kstat_copy(src, dst));

	from = src->ks_data;
	to = dst->ks_data;

	for (i = 0; i < src->ks_ndata; i++) {
		/* "addition" makes little sense for strings */
		if (from->data_type != KSTAT_DATA_CHAR &&
		    from->data_type != KSTAT_DATA_STRING)
			to->value.ui64 += from->value.ui64;
		from++;
		to++;
	}

	return (0);
}

/* Sum the tick deltas across all of the cpu_states statistics. */
uint64_t
cpu_ticks_delta(kstat_t *old, kstat_t *new)
{
	uint64_t ticks = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(cpu_states); i++)
		ticks += kstat_delta(old, new, cpu_states[i]);
	return (ticks);
}

int
nr_active_cpus(struct snapshot *ss)
{
	size_t i;
	int count = 0;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (CPU_ACTIVE(&ss->s_cpus[i]))
			count++;
	}

	return (count);
}

/*
 * Return the number of ticks delta between two hrtime_t
 * values. Attempt to cater for various kinds of overflow
 * in hrtime_t - no matter how improbable.
 */
uint64_t
hrtime_delta(hrtime_t old, hrtime_t new)
{
	uint64_t del;

	if ((new >= old) && (old >= 0L))
		return (new - old);

	if (new < 0L) {
		/*
		 * We've overflowed the positive portion of an
		 * hrtime_t. The new value is negative. Handle the
		 * case where the old value is positive or negative.
		 */
		uint64_t n1 = -new;

		if (old > 0L)
			return (n1 - old);
		return (n1 - (uint64_t)-old);
	}

	/*
	 * Either we've just gone from being negative to positive
	 * *or* the last entry was positive and the new entry is
	 * also positive but *less* than the old entry. This implies
	 * we waited quite a few days on a very fast system between
	 * iostat displays.
	 */
	if (old < 0L)
		del = UINT64_MAX - (uint64_t)-old;
	else
		del = UINT64_MAX - old;

	del += new;
	return (del);
}
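
/*
 * Worked example (illustrative): for old = 100 and new = 900 the
 * first branch applies and the delta is simply 800. The overflow
 * branches only matter once the signed 64-bit nanosecond counter
 * wraps, which takes roughly 292 years of uptime.
 */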
;