 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#define	DT_MASK_LO	0x00000000FFFFFFFFULL

/*
 * We declare this here because (1) we need it and (2) we want to avoid a
 * dependency on libm in libdtrace.
 */
static long double
dt_fabsl(long double x)
{
	if (x < 0)
		return (-x);

	return (x);
}

static int
dt_ndigits(long long val)
{
	int rval = 1;
	long long cmp = 10;

	if (val < 0) {
		val = val == INT64_MIN ? INT64_MAX : -val;
		rval++;
	}

	while (val > cmp && cmp > 0) {
		rval++;
		cmp *= 10;
	}

	return (rval < 4 ? 4 : rval);
}
/*
 * 128-bit arithmetic functions needed to support the stddev() aggregating
 * action.
 */
static int
dt_gt_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0]));
}
static int
dt_ge_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0]));
}
static int
dt_le_128(uint64_t *a, uint64_t *b)
{
	return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]));
}
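
/*
 * A minimal, illustrative sketch (hypothetical helper, not part of the
 * library): the 128-bit quantities above are plain two-element arrays of
 * uint64_t, with the low 64 bits in element 0 and the high 64 bits in
 * element 1, so a value such as 2^64 + 5 is represented as { 5, 1 }.
 */
static void
dt_cmp_128_example(void)
{
	uint64_t small[2] = { 0xffffffffffffffffULL, 0 };	/* 2^64 - 1 */
	uint64_t large[2] = { 0, 1 };				/* 2^64 */

	assert(dt_le_128(small, large));	/* the high word decides */
	assert(dt_ge_128(large, small));
	assert(!dt_gt_128(small, large));
}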
/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dt_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}
static int
dt_nbits_128(uint64_t *a)
{
	int nbits = 0;
	uint64_t tmp[2];
	uint64_t zero[2] = { 0, 0 };

	tmp[0] = a[0];
	tmp[1] = a[1];

	dt_shift_128(tmp, -1);
	while (dt_gt_128(tmp, zero)) {
		dt_shift_128(tmp, -1);
		nbits++;
	}

	return (nbits);
}
static void
dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference)
{
	uint64_t result[2];

	result[0] = minuend[0] - subtrahend[0];
	result[1] = minuend[1] - subtrahend[1] -
	    (minuend[0] < subtrahend[0] ? 1 : 0);

	difference[0] = result[0];
	difference[1] = result[1];
}
static void
dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}
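
/*
 * A minimal sketch (hypothetical, for illustration only) of why the
 * comparisons above detect carry and borrow: unsigned arithmetic wraps, so
 * the low word of a sum is smaller than either addend exactly when the add
 * overflowed 64 bits, and the subtraction borrows exactly when the
 * minuend's low word was smaller than the subtrahend's.
 */
static void
dt_carry_example(void)
{
	uint64_t a[2] = { 0xffffffffffffffffULL, 0 };
	uint64_t b[2] = { 1, 0 };
	uint64_t r[2];

	dt_add_128(a, b, r);		/* (2^64 - 1) + 1 == 2^64 */
	assert(r[0] == 0 && r[1] == 1);

	dt_subtract_128(r, b, r);	/* back to 2^64 - 1; borrows from r[1] */
	assert(r[0] == 0xffffffffffffffffULL && r[1] == 0);
}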
/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dt_shift_128(tmp, 32);
	dt_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dt_shift_128(tmp, 32);
	dt_add_128(product, tmp, product);
}
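
/*
 * A small usage sketch (illustrative only): multiplying two 64-bit factors
 * whose product does not fit in 64 bits. 2^32 * 2^33 == 2^65, which is
 * { 0, 2 } in the low/high representation used here.
 */
static void
dt_multiply_128_example(void)
{
	uint64_t product[2];

	dt_multiply_128(1ULL << 32, 1ULL << 33, product);
	assert(product[0] == 0 && product[1] == 2);
}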
/*
 * This is long-hand division.
 *
 * We initialize subtrahend by shifting divisor left as far as possible. We
 * loop, comparing subtrahend to dividend: if subtrahend is smaller, we
 * subtract and set the appropriate bit in the result. We then shift
 * subtrahend right by one bit for the next comparison.
 */
static void
dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient)
{
	uint64_t result[2] = { 0, 0 };
	uint64_t remainder[2];
	uint64_t subtrahend[2];
	uint64_t divisor_128[2];
	uint64_t mask[2] = { 1, 0 };
	int log = 0;

	assert(divisor != 0);

	divisor_128[0] = divisor;
	divisor_128[1] = 0;

	remainder[0] = dividend[0];
	remainder[1] = dividend[1];

	subtrahend[0] = divisor;
	subtrahend[1] = 0;

	while (divisor > 0) {
		log++;
		divisor >>= 1;
	}

	dt_shift_128(subtrahend, 128 - log);
	dt_shift_128(mask, 128 - log);

	while (dt_ge_128(remainder, divisor_128)) {
		if (dt_ge_128(remainder, subtrahend)) {
			dt_subtract_128(remainder, subtrahend, remainder);
			result[0] |= mask[0];
			result[1] |= mask[1];
		}

		dt_shift_128(subtrahend, -1);
		dt_shift_128(mask, -1);
	}

	quotient[0] = result[0];
	quotient[1] = result[1];
}
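
/*
 * A small usage sketch (illustrative only): dividing a 128-bit dividend by
 * a 64-bit divisor. (2^64 + 4) / 2 == 2^63 + 2, i.e. { 2^63 + 2, 0 }.
 */
static void
dt_divide_128_example(void)
{
	uint64_t dividend[2] = { 4, 1 };	/* 2^64 + 4 */
	uint64_t quotient[2];

	dt_divide_128(dividend, 2, quotient);
	assert(quotient[0] == (1ULL << 63) + 2 && quotient[1] == 0);
}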
/*
 * This is the long-hand method of calculating a square root.
 * The algorithm is as follows:
 *
 * 1. Group the digits by 2 from the right.
 * 2. Over the leftmost group, find the largest single-digit number
 *    whose square is less than that group.
 * 3. Subtract the result of the previous step (2 or 4, depending) and
 *    bring down the next two-digit group.
 * 4. For the result R we have so far, find the largest single-digit number
 *    x such that 2 * R * 10 * x + x^2 is less than the result from step 3.
 *    (Note that this is doubling R and performing a decimal left-shift by 1
 *    and searching for the appropriate decimal to fill the one's place.)
 *    The value x is the next digit in the square root.
 * Repeat steps 3 and 4 until the desired precision is reached. (We're
 * dealing with integers, so the above is sufficient.)
 *
 * In decimal, the square root of 582,734 would be calculated as so:
 *
 *       -49        (7^2 == 49 => 7 is the first digit in the square root)
 *        9 27      (Subtract and bring down the next group.)
 * 146    8 76      (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in
 *       -----       the square root)
 *        51 34     (Subtract and bring down the next group.)
 * 1523   45 69     (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in
 *       -----       the square root)
 *
 * The above algorithm applies similarly in binary, but note that the
 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a
 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the
 * preceding difference?
 *
 * In binary, the square root of 11011011 would be calculated as so:
 *
 *         01          (0 << 2 + 1 == 1 < 11 => this bit is 1)
 * 101      1 01       (1 << 2 + 1 == 101 < 1001 => next bit is 1)
 * 1101      11 01     (11 << 2 + 1 == 1101 < 10010 => next bit is 1)
 * 11101      1 11 01  (111 << 2 + 1 == 11101 > 10111 => last bit is 0)
 */
static uint64_t
dt_sqrt_128(uint64_t *square)
{
	uint64_t result[2] = { 0, 0 };
	uint64_t diff[2] = { 0, 0 };
	uint64_t one[2] = { 1, 0 };
	uint64_t next_pair[2];
	uint64_t next_try[2];
	uint64_t bit_pairs, pair_shift;
	int i;

	bit_pairs = dt_nbits_128(square) / 2;
	pair_shift = bit_pairs * 2;

	for (i = 0; i <= bit_pairs; i++) {
		/*
		 * Bring down the next pair of bits.
		 */
		next_pair[0] = square[0];
		next_pair[1] = square[1];
		dt_shift_128(next_pair, -pair_shift);
		next_pair[0] &= 0x3;

		dt_shift_128(diff, 2);
		dt_add_128(diff, next_pair, diff);

		/*
		 * next_try = R << 2 + 1
		 */
		next_try[0] = result[0];
		next_try[1] = result[1];
		dt_shift_128(next_try, 2);
		dt_add_128(next_try, one, next_try);

		if (dt_le_128(next_try, diff)) {
			dt_subtract_128(diff, next_try, diff);
			dt_shift_128(result, 1);
			dt_add_128(result, one, result);
		} else {
			dt_shift_128(result, 1);
		}

		pair_shift -= 2;
	}

	assert(result[1] == 0);

	return (result[0]);
}
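
/*
 * A small usage sketch (illustrative only): the function returns the
 * integer square root, so a perfect square round-trips exactly.
 */
static void
dt_sqrt_128_example(void)
{
	uint64_t square[2];

	dt_multiply_128(3000000000ULL, 3000000000ULL, square);
	assert(dt_sqrt_128(square) == 3000000000ULL);
}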
static uint64_t
dt_stddev(uint64_t *data, uint64_t normal)
{
	uint64_t avg_of_squares[2];
	uint64_t square_of_avg[2];
	int64_t norm_avg;
	uint64_t diff[2];

	/*
	 * The standard approximation for standard deviation is
	 * sqrt(average(x**2) - average(x)**2), i.e. the square root
	 * of the average of the squares minus the square of the average.
	 * When normalizing, we should divide the sum of x**2 by normal**2.
	 */
	dt_divide_128(data + 2, normal, avg_of_squares);
	dt_divide_128(avg_of_squares, normal, avg_of_squares);
	dt_divide_128(avg_of_squares, data[0], avg_of_squares);

	norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0];

	if (norm_avg < 0)
		norm_avg = -norm_avg;

	dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg);

	dt_subtract_128(avg_of_squares, square_of_avg, diff);

	return (dt_sqrt_128(diff));
}
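
/*
 * A sketch of the data layout dt_stddev() consumes (as implied by the code
 * above, not a separate interface): data[0] is the count of values, data[1]
 * the signed sum of the values, and data[2]/data[3] the low and high words
 * of the 128-bit sum of the squares. With that layout, the approximation
 * is simply sqrt(sum(x^2)/n - (sum(x)/n)^2), each term normalized.
 */
static void
dt_stddev_example(void)
{
	/* Pretend we aggregated the values 2, 4, 4, 4, 5, 5, 7, 9. */
	static const int64_t vals[] = { 2, 4, 4, 4, 5, 5, 7, 9 };
	uint64_t data[4] = { 8, 40, 0, 0 };
	uint64_t sq[2] = { 0, 0 }, tmp[2];
	int i;

	for (i = 0; i < 8; i++) {
		dt_multiply_128(vals[i], vals[i], tmp);
		dt_add_128(sq, tmp, sq);
	}
	data[2] = sq[0];
	data[3] = sq[1];

	assert(dt_stddev(data, 1) == 2);	/* the true stddev is exactly 2 */
}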
403 dt_flowindent(dtrace_hdl_t
*dtp
, dtrace_probedata_t
*data
, dtrace_epid_t last
,
404 dtrace_bufdesc_t
*buf
, size_t offs
)
406 dtrace_probedesc_t
*pd
= data
->dtpda_pdesc
, *npd
;
407 dtrace_eprobedesc_t
*epd
= data
->dtpda_edesc
, *nepd
;
408 char *p
= pd
->dtpd_provider
, *n
= pd
->dtpd_name
, *sub
;
409 dtrace_flowkind_t flow
= DTRACEFLOW_NONE
;
410 const char *str
= NULL
;
411 static const char *e_str
[2] = { " -> ", " => " };
412 static const char *r_str
[2] = { " <- ", " <= " };
413 static const char *ent
= "entry", *ret
= "return";
414 static int entlen
= 0, retlen
= 0;
415 dtrace_epid_t next
, id
= epd
->dtepd_epid
;
420 entlen
= strlen(ent
);
421 retlen
= strlen(ret
);
425 * If the name of the probe is "entry" or ends with "-entry", we
426 * treat it as an entry; if it is "return" or ends with "-return",
427 * we treat it as a return. (This allows application-provided probes
428 * like "method-entry" or "function-entry" to participate in flow
429 * indentation -- without accidentally misinterpreting popular probe
430 * names like "carpentry", "gentry" or "Coventry".)
432 if ((sub
= strstr(n
, ent
)) != NULL
&& sub
[entlen
] == '\0' &&
433 (sub
== n
|| sub
[-1] == '-')) {
434 flow
= DTRACEFLOW_ENTRY
;
435 str
= e_str
[strcmp(p
, "syscall") == 0];
436 } else if ((sub
= strstr(n
, ret
)) != NULL
&& sub
[retlen
] == '\0' &&
437 (sub
== n
|| sub
[-1] == '-')) {
438 flow
= DTRACEFLOW_RETURN
;
439 str
= r_str
[strcmp(p
, "syscall") == 0];
443 * If we're going to indent this, we need to check the ID of our last
444 * call. If we're looking at the same probe ID but a different EPID,
445 * we _don't_ want to indent. (Yes, there are some minor holes in
446 * this scheme -- it's a heuristic.)
448 if (flow
== DTRACEFLOW_ENTRY
) {
449 if ((last
!= DTRACE_EPIDNONE
&& id
!= last
&&
450 pd
->dtpd_id
== dtp
->dt_pdesc
[last
]->dtpd_id
))
451 flow
= DTRACEFLOW_NONE
;
 * If we're going to unindent this, it's more difficult to see if
 * we don't actually want to unindent it -- we need to look at the
 * _next_ EPID.
459 if (flow
== DTRACEFLOW_RETURN
) {
460 offs
+= epd
->dtepd_size
;
463 if (offs
>= buf
->dtbd_size
)
466 next
= *(uint32_t *)((uintptr_t)buf
->dtbd_data
+ offs
);
468 if (next
== DTRACE_EPIDNONE
)
470 } while (next
== DTRACE_EPIDNONE
);
472 if ((rval
= dt_epid_lookup(dtp
, next
, &nepd
, &npd
)) != 0)
475 if (next
!= id
&& npd
->dtpd_id
== pd
->dtpd_id
)
476 flow
= DTRACEFLOW_NONE
;
480 if (flow
== DTRACEFLOW_ENTRY
|| flow
== DTRACEFLOW_RETURN
) {
481 data
->dtpda_prefix
= str
;
483 data
->dtpda_prefix
= "| ";
486 if (flow
== DTRACEFLOW_RETURN
&& data
->dtpda_indent
> 0)
487 data
->dtpda_indent
-= 2;
489 data
->dtpda_flow
= flow
;
497 return (DTRACE_CONSUME_THIS
);
503 return (DTRACE_CONSUME_NEXT
);
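
/*
 * A minimal sketch (hypothetical helper, not called by dt_flowindent()) of
 * the suffix test described above: a probe name participates in flow
 * indentation only if it is exactly "entry"/"return" or ends in "-entry"/
 * "-return", so names like "carpentry" or "gentry" that merely contain the
 * substring do not match.
 */
static int
dt_flow_suffix_example(const char *name, const char *suffix)
{
	const char *sub = strstr(name, suffix);

	return (sub != NULL && sub[strlen(suffix)] == '\0' &&
	    (sub == name || sub[-1] == '-'));
}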
507 dt_quantize_total(dtrace_hdl_t
*dtp
, int64_t datum
, long double *total
)
509 long double val
= dt_fabsl((long double)datum
);
511 if (dtp
->dt_options
[DTRACEOPT_AGGZOOM
] == DTRACEOPT_UNSET
) {
517 * If we're zooming in on an aggregation, we want the height of the
518 * highest value to be approximately 95% of total bar height -- so we
519 * adjust up by the reciprocal of DTRACE_AGGZOOM_MAX when comparing to
522 val
*= 1 / DTRACE_AGGZOOM_MAX
;
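
/*
 * A short sketch (illustrative only) of the zoom adjustment above: scaling
 * the tallest value by the reciprocal of DTRACE_AGGZOOM_MAX before taking
 * the maximum means that value later occupies roughly that fraction of the
 * bar -- with the documented target of approximately 95%, a 40-character
 * bar draws about 38 characters for the tallest bucket.
 */
static void
dt_aggzoom_example(void)
{
	long double tallest = 950.0;
	long double total = tallest * (1 / DTRACE_AGGZOOM_MAX);

	assert(dt_fabsl(tallest / total - DTRACE_AGGZOOM_MAX) < 0.001);
}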
529 dt_print_quanthdr(dtrace_hdl_t
*dtp
, FILE *fp
, int width
)
531 return (dt_printf(dtp
, fp
, "\n%*s %41s %-9s\n",
532 width
? width
: 16, width
? "key" : "value",
533 "------------- Distribution -------------", "count"));
537 dt_print_quanthdr_packed(dtrace_hdl_t
*dtp
, FILE *fp
, int width
,
538 const dtrace_aggdata_t
*aggdata
, dtrace_actkind_t action
)
540 int min
= aggdata
->dtada_minbin
, max
= aggdata
->dtada_maxbin
;
541 int minwidth
, maxwidth
, i
;
543 assert(action
== DTRACEAGG_QUANTIZE
|| action
== DTRACEAGG_LQUANTIZE
);
545 if (action
== DTRACEAGG_QUANTIZE
) {
546 if (min
!= 0 && min
!= DTRACE_QUANTIZE_ZEROBUCKET
)
549 if (max
< DTRACE_QUANTIZE_NBUCKETS
- 1)
552 minwidth
= dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(min
));
553 maxwidth
= dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(max
));
556 minwidth
= maxwidth
- 1;
560 if (dt_printf(dtp
, fp
, "\n%*s %*s .",
561 width
, width
> 0 ? "key" : "", minwidth
, "min") < 0)
564 for (i
= min
; i
<= max
; i
++) {
565 if (dt_printf(dtp
, fp
, "-") < 0)
569 return (dt_printf(dtp
, fp
, ". %*s | count\n", -maxwidth
, "max"));
573 * We use a subset of the Unicode Block Elements (U+2588 through U+258F,
574 * inclusive) to represent aggregations via UTF-8 -- which are expressed via
575 * 3-byte UTF-8 sequences.
577 #define DTRACE_AGGUTF8_FULL 0x2588
578 #define DTRACE_AGGUTF8_BASE 0x258f
579 #define DTRACE_AGGUTF8_LEVELS 8
581 #define DTRACE_AGGUTF8_BYTE0(val) (0xe0 | ((val) >> 12))
582 #define DTRACE_AGGUTF8_BYTE1(val) (0x80 | (((val) >> 6) & 0x3f))
583 #define DTRACE_AGGUTF8_BYTE2(val) (0x80 | ((val) & 0x3f))
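
/*
 * A brief sketch (illustrative only) of how the three macros above encode
 * a code point from the Block Elements range as UTF-8: U+2588 (FULL BLOCK)
 * becomes the byte sequence 0xe2 0x96 0x88.
 */
static void
dt_aggutf8_example(void)
{
	int cp = DTRACE_AGGUTF8_FULL;	/* 0x2588 */

	assert(DTRACE_AGGUTF8_BYTE0(cp) == 0xe2);
	assert(DTRACE_AGGUTF8_BYTE1(cp) == 0x96);
	assert(DTRACE_AGGUTF8_BYTE2(cp) == 0x88);
}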
586 dt_print_quantline_utf8(dtrace_hdl_t
*dtp
, FILE *fp
, int64_t val
,
587 uint64_t normal
, long double total
)
589 uint_t len
= 40, i
, whole
, partial
;
590 long double f
= (dt_fabsl((long double)val
) * len
) / total
;
591 const char *spaces
= " ";
594 partial
= (uint_t
)((f
- (long double)(uint_t
)f
) *
595 (long double)DTRACE_AGGUTF8_LEVELS
);
597 if (dt_printf(dtp
, fp
, "|") < 0)
600 for (i
= 0; i
< whole
; i
++) {
601 if (dt_printf(dtp
, fp
, "%c%c%c",
602 DTRACE_AGGUTF8_BYTE0(DTRACE_AGGUTF8_FULL
),
603 DTRACE_AGGUTF8_BYTE1(DTRACE_AGGUTF8_FULL
),
604 DTRACE_AGGUTF8_BYTE2(DTRACE_AGGUTF8_FULL
)) < 0)
609 partial
= DTRACE_AGGUTF8_BASE
- (partial
- 1);
611 if (dt_printf(dtp
, fp
, "%c%c%c",
612 DTRACE_AGGUTF8_BYTE0(partial
),
613 DTRACE_AGGUTF8_BYTE1(partial
),
614 DTRACE_AGGUTF8_BYTE2(partial
)) < 0)
620 return (dt_printf(dtp
, fp
, "%s %-9lld\n", spaces
+ i
,
621 (long long)val
/ normal
));
625 dt_print_quantline(dtrace_hdl_t
*dtp
, FILE *fp
, int64_t val
,
626 uint64_t normal
, long double total
, char positives
, char negatives
)
629 uint_t depth
, len
= 40;
631 const char *ats
= "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@";
632 const char *spaces
= " ";
634 assert(strlen(ats
) == len
&& strlen(spaces
) == len
);
635 assert(!(total
== 0 && (positives
|| negatives
)));
636 assert(!(val
< 0 && !negatives
));
637 assert(!(val
> 0 && !positives
));
638 assert(!(val
!= 0 && total
== 0));
642 if (dtp
->dt_encoding
== DT_ENCODING_UTF8
) {
643 return (dt_print_quantline_utf8(dtp
, fp
, val
,
647 f
= (dt_fabsl((long double)val
) * len
) / total
;
648 depth
= (uint_t
)(f
+ 0.5);
653 return (dt_printf(dtp
, fp
, "|%s%s %-9lld\n", ats
+ len
- depth
,
654 spaces
+ depth
, (long long)val
/ normal
));
658 f
= (dt_fabsl((long double)val
) * len
) / total
;
659 depth
= (uint_t
)(f
+ 0.5);
661 return (dt_printf(dtp
, fp
, "%s%s| %-9lld\n", spaces
+ depth
,
662 ats
+ len
- depth
, (long long)val
/ normal
));
666 * If we're here, we have both positive and negative bucket values.
667 * To express this graphically, we're going to generate both positive
668 * and negative bars separated by a centerline. These bars are half
669 * the size of normal quantize()/lquantize() bars, so we divide the
670 * length in half before calculating the bar length.
674 spaces
= &spaces
[len
];
676 f
= (dt_fabsl((long double)val
) * len
) / total
;
677 depth
= (uint_t
)(f
+ 0.5);
680 return (dt_printf(dtp
, fp
, "%s%s|%*s %-9lld\n", spaces
+ depth
,
681 ats
+ len
- depth
, len
, "", (long long)val
/ normal
));
683 return (dt_printf(dtp
, fp
, "%20s|%s%s %-9lld\n", "",
684 ats
+ len
- depth
, spaces
+ depth
,
685 (long long)val
/ normal
));
690 * As with UTF-8 printing of aggregations, we use a subset of the Unicode
691 * Block Elements (U+2581 through U+2588, inclusive) to represent our packed
694 #define DTRACE_AGGPACK_BASE 0x2581
695 #define DTRACE_AGGPACK_LEVELS 8
698 dt_print_packed(dtrace_hdl_t
*dtp
, FILE *fp
,
699 long double datum
, long double total
)
701 static boolean_t utf8_checked
= B_FALSE
;
702 static boolean_t utf8
;
703 char *ascii
= "__xxxxXX";
704 char *neg
= "vvvvVV";
 * We want to determine if we can reasonably emit UTF-8 for our
 * packed aggregation. To do this, we will check for terminals that
 * are known to be too primitive to render UTF-8, and avoid emitting
 * it on those.
716 utf8_checked
= B_TRUE
;
718 if (dtp
->dt_encoding
== DT_ENCODING_ASCII
) {
720 } else if (dtp
->dt_encoding
== DT_ENCODING_UTF8
) {
722 } else if ((term
= getenv("TERM")) != NULL
&&
723 (strcmp(term
, "sun") == 0 ||
724 strcmp(term
, "sun-color") == 0) ||
725 strcmp(term
, "dumb") == 0) {
733 return (dt_printf(dtp
, fp
, " "));
737 val
= dt_fabsl(datum
* (len
- 1)) / total
;
738 return (dt_printf(dtp
, fp
, "%c", neg
[(uint_t
)(val
+ 0.5)]));
742 int block
= DTRACE_AGGPACK_BASE
+ (unsigned int)(((datum
*
743 (DTRACE_AGGPACK_LEVELS
- 1)) / total
) + 0.5);
745 return (dt_printf(dtp
, fp
, "%c%c%c",
746 DTRACE_AGGUTF8_BYTE0(block
),
747 DTRACE_AGGUTF8_BYTE1(block
),
748 DTRACE_AGGUTF8_BYTE2(block
)));
752 val
= (datum
* (len
- 1)) / total
;
753 return (dt_printf(dtp
, fp
, "%c", ascii
[(uint_t
)(val
+ 0.5)]));
757 dt_print_quantize(dtrace_hdl_t
*dtp
, FILE *fp
, const void *addr
,
758 size_t size
, uint64_t normal
)
760 const int64_t *data
= addr
;
761 int i
, first_bin
= 0, last_bin
= DTRACE_QUANTIZE_NBUCKETS
- 1;
762 long double total
= 0;
763 char positives
= 0, negatives
= 0;
765 if (size
!= DTRACE_QUANTIZE_NBUCKETS
* sizeof (uint64_t))
766 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
768 while (first_bin
< DTRACE_QUANTIZE_NBUCKETS
- 1 && data
[first_bin
] == 0)
771 if (first_bin
== DTRACE_QUANTIZE_NBUCKETS
- 1) {
773 * There isn't any data. This is possible if the aggregation
774 * has been clear()'d or if negative increment values have been
775 * used. Regardless, we'll print the buckets around 0.
777 first_bin
= DTRACE_QUANTIZE_ZEROBUCKET
- 1;
778 last_bin
= DTRACE_QUANTIZE_ZEROBUCKET
+ 1;
783 while (last_bin
> 0 && data
[last_bin
] == 0)
786 if (last_bin
< DTRACE_QUANTIZE_NBUCKETS
- 1)
790 for (i
= first_bin
; i
<= last_bin
; i
++) {
791 positives
|= (data
[i
] > 0);
792 negatives
|= (data
[i
] < 0);
793 dt_quantize_total(dtp
, data
[i
], &total
);
796 if (dt_print_quanthdr(dtp
, fp
, 0) < 0)
799 for (i
= first_bin
; i
<= last_bin
; i
++) {
800 if (dt_printf(dtp
, fp
, "%16lld ",
801 (long long)DTRACE_QUANTIZE_BUCKETVAL(i
)) < 0)
804 if (dt_print_quantline(dtp
, fp
, data
[i
], normal
, total
,
805 positives
, negatives
) < 0)
813 dt_print_quantize_packed(dtrace_hdl_t
*dtp
, FILE *fp
, const void *addr
,
814 size_t size
, const dtrace_aggdata_t
*aggdata
)
816 const int64_t *data
= addr
;
817 long double total
= 0, count
= 0;
818 int min
= aggdata
->dtada_minbin
, max
= aggdata
->dtada_maxbin
, i
;
819 int64_t minval
, maxval
;
821 if (size
!= DTRACE_QUANTIZE_NBUCKETS
* sizeof (uint64_t))
822 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
824 if (min
!= 0 && min
!= DTRACE_QUANTIZE_ZEROBUCKET
)
827 if (max
< DTRACE_QUANTIZE_NBUCKETS
- 1)
830 minval
= DTRACE_QUANTIZE_BUCKETVAL(min
);
831 maxval
= DTRACE_QUANTIZE_BUCKETVAL(max
);
833 if (dt_printf(dtp
, fp
, " %*lld :", dt_ndigits(minval
),
834 (long long)minval
) < 0)
837 for (i
= min
; i
<= max
; i
++) {
838 dt_quantize_total(dtp
, data
[i
], &total
);
842 for (i
= min
; i
<= max
; i
++) {
843 if (dt_print_packed(dtp
, fp
, data
[i
], total
) < 0)
847 if (dt_printf(dtp
, fp
, ": %*lld | %lld\n",
848 -dt_ndigits(maxval
), (long long)maxval
, (long long)count
) < 0)
855 dt_print_lquantize(dtrace_hdl_t
*dtp
, FILE *fp
, const void *addr
,
856 size_t size
, uint64_t normal
)
858 const int64_t *data
= addr
;
859 int i
, first_bin
, last_bin
, base
;
861 long double total
= 0;
862 uint16_t step
, levels
;
863 char positives
= 0, negatives
= 0;
865 if (size
< sizeof (uint64_t))
866 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
869 size
-= sizeof (uint64_t);
871 base
= DTRACE_LQUANTIZE_BASE(arg
);
872 step
= DTRACE_LQUANTIZE_STEP(arg
);
873 levels
= DTRACE_LQUANTIZE_LEVELS(arg
);
876 last_bin
= levels
+ 1;
878 if (size
!= sizeof (uint64_t) * (levels
+ 2))
879 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
881 while (first_bin
<= levels
+ 1 && data
[first_bin
] == 0)
884 if (first_bin
> levels
+ 1) {
891 while (last_bin
> 0 && data
[last_bin
] == 0)
894 if (last_bin
< levels
+ 1)
898 for (i
= first_bin
; i
<= last_bin
; i
++) {
899 positives
|= (data
[i
] > 0);
900 negatives
|= (data
[i
] < 0);
901 dt_quantize_total(dtp
, data
[i
], &total
);
904 if (dt_printf(dtp
, fp
, "\n%16s %41s %-9s\n", "value",
905 "------------- Distribution -------------", "count") < 0)
908 for (i
= first_bin
; i
<= last_bin
; i
++) {
913 (void) snprintf(c
, sizeof (c
), "< %d", base
);
914 err
= dt_printf(dtp
, fp
, "%16s ", c
);
915 } else if (i
== levels
+ 1) {
916 (void) snprintf(c
, sizeof (c
), ">= %d",
917 base
+ (levels
* step
));
918 err
= dt_printf(dtp
, fp
, "%16s ", c
);
920 err
= dt_printf(dtp
, fp
, "%16d ",
921 base
+ (i
- 1) * step
);
924 if (err
< 0 || dt_print_quantline(dtp
, fp
, data
[i
], normal
,
925 total
, positives
, negatives
) < 0)
934 dt_print_lquantize_packed(dtrace_hdl_t
*dtp
, FILE *fp
, const void *addr
,
935 size_t size
, const dtrace_aggdata_t
*aggdata
)
937 const int64_t *data
= addr
;
938 long double total
= 0, count
= 0;
939 int min
, max
, base
, err
;
941 uint16_t step
, levels
;
945 if (size
< sizeof (uint64_t))
946 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
949 size
-= sizeof (uint64_t);
951 base
= DTRACE_LQUANTIZE_BASE(arg
);
952 step
= DTRACE_LQUANTIZE_STEP(arg
);
953 levels
= DTRACE_LQUANTIZE_LEVELS(arg
);
955 if (size
!= sizeof (uint64_t) * (levels
+ 2))
956 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
962 (void) snprintf(c
, sizeof (c
), "< %d", base
);
963 err
= dt_printf(dtp
, fp
, "%8s :", c
);
965 err
= dt_printf(dtp
, fp
, "%8d :", base
+ (min
- 1) * step
);
971 for (i
= min
; i
<= max
; i
++) {
972 dt_quantize_total(dtp
, data
[i
], &total
);
976 for (i
= min
; i
<= max
; i
++) {
977 if (dt_print_packed(dtp
, fp
, data
[i
], total
) < 0)
981 (void) snprintf(c
, sizeof (c
), ">= %d", base
+ (levels
* step
));
982 return (dt_printf(dtp
, fp
, ": %-8s | %lld\n", c
, (long long)count
));
986 dt_print_llquantize(dtrace_hdl_t
*dtp
, FILE *fp
, const void *addr
,
987 size_t size
, uint64_t normal
)
989 int i
, first_bin
, last_bin
, bin
= 1, order
, levels
;
990 uint16_t factor
, low
, high
, nsteps
;
991 const int64_t *data
= addr
;
992 int64_t value
= 1, next
, step
;
993 char positives
= 0, negatives
= 0;
994 long double total
= 0;
998 if (size
< sizeof (uint64_t))
999 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
1002 size
-= sizeof (uint64_t);
1004 factor
= DTRACE_LLQUANTIZE_FACTOR(arg
);
1005 low
= DTRACE_LLQUANTIZE_LOW(arg
);
1006 high
= DTRACE_LLQUANTIZE_HIGH(arg
);
1007 nsteps
= DTRACE_LLQUANTIZE_NSTEP(arg
);
1010 * We don't expect to be handed invalid llquantize() parameters here,
1011 * but sanity check them (to a degree) nonetheless.
1013 if (size
> INT32_MAX
|| factor
< 2 || low
>= high
||
1014 nsteps
== 0 || factor
> nsteps
)
1015 return (dt_set_errno(dtp
, EDT_DMISMATCH
));
1017 levels
= (int)size
/ sizeof (uint64_t);
1020 last_bin
= levels
- 1;
1022 while (first_bin
< levels
&& data
[first_bin
] == 0)
1025 if (first_bin
== levels
) {
1032 while (last_bin
> 0 && data
[last_bin
] == 0)
1035 if (last_bin
< levels
- 1)
1039 for (i
= first_bin
; i
<= last_bin
; i
++) {
1040 positives
|= (data
[i
] > 0);
1041 negatives
|= (data
[i
] < 0);
1042 dt_quantize_total(dtp
, data
[i
], &total
);
1045 if (dt_printf(dtp
, fp
, "\n%16s %41s %-9s\n", "value",
1046 "------------- Distribution -------------", "count") < 0)
1049 for (order
= 0; order
< low
; order
++)
1052 next
= value
* factor
;
1053 step
= next
> nsteps
? next
/ nsteps
: 1;
1055 if (first_bin
== 0) {
1056 (void) snprintf(c
, sizeof (c
), "< %lld", value
);
1058 if (dt_printf(dtp
, fp
, "%16s ", c
) < 0)
1061 if (dt_print_quantline(dtp
, fp
, data
[0], normal
,
1062 total
, positives
, negatives
) < 0)
1066 while (order
<= high
) {
1067 if (bin
>= first_bin
&& bin
<= last_bin
) {
1068 if (dt_printf(dtp
, fp
, "%16lld ", (long long)value
) < 0)
1071 if (dt_print_quantline(dtp
, fp
, data
[bin
],
1072 normal
, total
, positives
, negatives
) < 0)
1076 assert(value
< next
);
1079 if ((value
+= step
) != next
)
1082 next
= value
* factor
;
1083 step
= next
> nsteps
? next
/ nsteps
: 1;
1090 assert(last_bin
== bin
);
1091 (void) snprintf(c
, sizeof (c
), ">= %lld", value
);
1093 if (dt_printf(dtp
, fp
, "%16s ", c
) < 0)
1096 return (dt_print_quantline(dtp
, fp
, data
[bin
], normal
,
1097 total
, positives
, negatives
));
1102 dt_print_average(dtrace_hdl_t
*dtp
, FILE *fp
, caddr_t addr
,
1103 size_t size
, uint64_t normal
)
1105 /* LINTED - alignment */
1106 int64_t *data
= (int64_t *)addr
;
1108 return (dt_printf(dtp
, fp
, " %16lld", data
[0] ?
1109 (long long)(data
[1] / (int64_t)normal
/ data
[0]) : 0));
1114 dt_print_stddev(dtrace_hdl_t
*dtp
, FILE *fp
, caddr_t addr
,
1115 size_t size
, uint64_t normal
)
1117 /* LINTED - alignment */
1118 uint64_t *data
= (uint64_t *)addr
;
1120 return (dt_printf(dtp
, fp
, " %16llu", data
[0] ?
1121 (unsigned long long) dt_stddev(data
, normal
) : 0));
1126 dt_print_bytes(dtrace_hdl_t
*dtp
, FILE *fp
, caddr_t addr
,
1127 size_t nbytes
, int width
, int quiet
, int forceraw
)
1130 * If the byte stream is a series of printable characters, followed by
1131 * a terminating byte, we print it out as a string. Otherwise, we
1132 * assume that it's something else and just print the bytes.
1134 int i
, j
, margin
= 5;
1135 char *c
= (char *)addr
;
1143 if (dtp
->dt_options
[DTRACEOPT_RAWBYTES
] != DTRACEOPT_UNSET
)
1146 for (i
= 0; i
< nbytes
; i
++) {
1148 * We define a "printable character" to be one for which
1149 * isprint(3C) returns non-zero, isspace(3C) returns non-zero,
1150 * or a character which is either backspace or the bell.
1151 * Backspace and the bell are regrettably special because
1152 * they fail the first two tests -- and yet they are entirely
1153 * printable. These are the only two control characters that
1154 * have meaning for the terminal and for which isprint(3C) and
1155 * isspace(3C) return 0.
1157 if (isprint(c
[i
]) || isspace(c
[i
]) ||
1158 c
[i
] == '\b' || c
[i
] == '\a')
1161 if (c
[i
] == '\0' && i
> 0) {
1163 * This looks like it might be a string. Before we
1164 * assume that it is indeed a string, check the
1165 * remainder of the byte range; if it contains
1166 * additional non-nul characters, we'll assume that
1167 * it's a binary stream that just happens to look like
1168 * a string, and we'll print out the individual bytes.
1170 for (j
= i
+ 1; j
< nbytes
; j
++) {
1179 return (dt_printf(dtp
, fp
, "%s", c
));
1181 return (dt_printf(dtp
, fp
, " %s%*s",
1182 width
< 0 ? " " : "", width
, c
));
1191 * The byte range is all printable characters, but there is
1192 * no trailing nul byte. We'll assume that it's a string and
1195 char *s
= alloca(nbytes
+ 1);
1196 bcopy(c
, s
, nbytes
);
1198 return (dt_printf(dtp
, fp
, " %-*s", width
, s
));
1202 if (dt_printf(dtp
, fp
, "\n%*s ", margin
, "") < 0)
1205 for (i
= 0; i
< 16; i
++)
1206 if (dt_printf(dtp
, fp
, " %c", "0123456789abcdef"[i
]) < 0)
1209 if (dt_printf(dtp
, fp
, " 0123456789abcdef\n") < 0)
1213 for (i
= 0; i
< nbytes
; i
+= 16) {
1214 if (dt_printf(dtp
, fp
, "%*s%5x:", margin
, "", i
) < 0)
1217 for (j
= i
; j
< i
+ 16 && j
< nbytes
; j
++) {
1218 if (dt_printf(dtp
, fp
, " %02x", (uchar_t
)c
[j
]) < 0)
1223 if (dt_printf(dtp
, fp
, " ") < 0)
1227 if (dt_printf(dtp
, fp
, " ") < 0)
1230 for (j
= i
; j
< i
+ 16 && j
< nbytes
; j
++) {
1231 if (dt_printf(dtp
, fp
, "%c",
1232 c
[j
] < ' ' || c
[j
] > '~' ? '.' : c
[j
]) < 0)
1236 if (dt_printf(dtp
, fp
, "\n") < 0)
1244 dt_print_stack(dtrace_hdl_t
*dtp
, FILE *fp
, const char *format
,
1245 caddr_t addr
, int depth
, int size
)
1247 dtrace_syminfo_t dts
;
1250 char c
[PATH_MAX
* 2];
1253 if (dt_printf(dtp
, fp
, "\n") < 0)
1259 if (dtp
->dt_options
[DTRACEOPT_STACKINDENT
] != DTRACEOPT_UNSET
)
1260 indent
= (int)dtp
->dt_options
[DTRACEOPT_STACKINDENT
];
1262 indent
= _dtrace_stkindent
;
1264 for (i
= 0; i
< depth
; i
++) {
1266 case sizeof (uint32_t):
1267 /* LINTED - alignment */
1268 pc
= *((uint32_t *)addr
);
1271 case sizeof (uint64_t):
1272 /* LINTED - alignment */
1273 pc
= *((uint64_t *)addr
);
1277 return (dt_set_errno(dtp
, EDT_BADSTACKPC
));
1280 if (pc
== (uintptr_t)NULL
)
1285 if (dt_printf(dtp
, fp
, "%*s", indent
, "") < 0)
1288 if (dtrace_lookup_by_addr(dtp
, pc
, &sym
, &dts
) == 0) {
1289 if (pc
> sym
.st_value
) {
1290 (void) snprintf(c
, sizeof (c
), "%s`%s+0x%llx",
1291 dts
.dts_object
, dts
.dts_name
,
1294 (void) snprintf(c
, sizeof (c
), "%s`%s",
1295 dts
.dts_object
, dts
.dts_name
);
1299 * We'll repeat the lookup, but this time we'll specify
1300 * a NULL GElf_Sym -- indicating that we're only
1301 * interested in the containing module.
1303 if (dtrace_lookup_by_addr(dtp
, pc
, NULL
, &dts
) == 0) {
1304 (void) snprintf(c
, sizeof (c
), "%s`0x%llx",
1305 dts
.dts_object
, pc
);
1307 (void) snprintf(c
, sizeof (c
), "0x%llx", pc
);
1311 if (dt_printf(dtp
, fp
, format
, c
) < 0)
1314 if (dt_printf(dtp
, fp
, "\n") < 0)
1322 dt_print_ustack(dtrace_hdl_t
*dtp
, FILE *fp
, const char *format
,
1323 caddr_t addr
, uint64_t arg
)
1325 /* LINTED - alignment */
1326 uint64_t *pc
= (uint64_t *)addr
;
1327 uint32_t depth
= DTRACE_USTACK_NFRAMES(arg
);
1328 uint32_t strsize
= DTRACE_USTACK_STRSIZE(arg
);
1329 const char *strbase
= addr
+ (depth
+ 1) * sizeof (uint64_t);
1330 const char *str
= strsize
? strbase
: NULL
;
1333 char name
[PATH_MAX
], objname
[PATH_MAX
], c
[PATH_MAX
* 2];
1334 struct ps_prochandle
*P
;
1344 if (dt_printf(dtp
, fp
, "\n") < 0)
1350 if (dtp
->dt_options
[DTRACEOPT_STACKINDENT
] != DTRACEOPT_UNSET
)
1351 indent
= (int)dtp
->dt_options
[DTRACEOPT_STACKINDENT
];
1353 indent
= _dtrace_stkindent
;
1356 * Ultimately, we need to add an entry point in the library vector for
1357 * determining <symbol, offset> from <pid, address>. For now, if
1358 * this is a vector open, we just print the raw address or string.
1360 if (dtp
->dt_vector
== NULL
)
1361 P
= dt_proc_grab(dtp
, pid
, PGRAB_RDONLY
| PGRAB_FORCE
, 0);
1366 dt_proc_lock(dtp
, P
); /* lock handle while we perform lookups */
1368 for (i
= 0; i
< depth
&& pc
[i
] != (uintptr_t)NULL
; i
++) {
1371 if ((err
= dt_printf(dtp
, fp
, "%*s", indent
, "")) < 0)
1374 if (P
!= NULL
&& Plookup_by_addr(P
, pc
[i
],
1375 name
, sizeof (name
), &sym
) == 0) {
1376 (void) Pobjname(P
, pc
[i
], objname
, sizeof (objname
));
1378 if (pc
[i
] > sym
.st_value
) {
1379 (void) snprintf(c
, sizeof (c
),
1380 "%s`%s+0x%llx", dt_basename(objname
), name
,
1381 (u_longlong_t
)(pc
[i
] - sym
.st_value
));
1383 (void) snprintf(c
, sizeof (c
),
1384 "%s`%s", dt_basename(objname
), name
);
1386 } else if (str
!= NULL
&& str
[0] != '\0' && str
[0] != '@' &&
1387 (P
== NULL
|| (map
= Paddr_to_map(P
, pc
[i
])) == NULL
||
1388 map
->pr_mflags
& MA_WRITE
)) {
1390 * If the current string pointer in the string table
1391 * does not point to an empty string _and_ the program
1392 * counter falls in a writable region, we'll use the
1393 * string from the string table instead of the raw
1394 * address. This last condition is necessary because
1395 * some (broken) ustack helpers will return a string
1396 * even for a program counter that they can't
1397 * identify. If we have a string for a program
1398 * counter that falls in a segment that isn't
1399 * writable, we assume that we have fallen into this
1400 * case and we refuse to use the string. Finally,
1401 * note that if we could not grab the process (e.g.,
1402 * because it exited), the information from the helper
1403 * is better than nothing.
1405 (void) snprintf(c
, sizeof (c
), "%s", str
);
1407 if (P
!= NULL
&& Pobjname(P
, pc
[i
], objname
,
1408 sizeof (objname
)) != NULL
) {
1409 (void) snprintf(c
, sizeof (c
), "%s`0x%llx",
1410 dt_basename(objname
), (u_longlong_t
)pc
[i
]);
1412 (void) snprintf(c
, sizeof (c
), "0x%llx",
1413 (u_longlong_t
)pc
[i
]);
1417 if ((err
= dt_printf(dtp
, fp
, format
, c
)) < 0)
1420 if ((err
= dt_printf(dtp
, fp
, "\n")) < 0)
1423 if (str
!= NULL
&& str
[0] == '@') {
1425 * If the first character of the string is an "at" sign,
1426 * then the string is inferred to be an annotation --
1427 * and it is printed out beneath the frame and offset
1430 if ((err
= dt_printf(dtp
, fp
, "%*s", indent
, "")) < 0)
1433 (void) snprintf(c
, sizeof (c
), " [ %s ]", &str
[1]);
1435 if ((err
= dt_printf(dtp
, fp
, format
, c
)) < 0)
1438 if ((err
= dt_printf(dtp
, fp
, "\n")) < 0)
1443 str
+= strlen(str
) + 1;
1444 if (str
- strbase
>= strsize
)
1450 dt_proc_unlock(dtp
, P
);
1451 dt_proc_release(dtp
, P
);
1458 dt_print_usym(dtrace_hdl_t
*dtp
, FILE *fp
, caddr_t addr
, dtrace_actkind_t act
)
1460 /* LINTED - alignment */
1461 uint64_t pid
= ((uint64_t *)addr
)[0];
1462 /* LINTED - alignment */
1463 uint64_t pc
= ((uint64_t *)addr
)[1];
1464 const char *format
= " %-50s";
1468 if (act
== DTRACEACT_USYM
&& dtp
->dt_vector
== NULL
) {
1469 struct ps_prochandle
*P
;
1471 if ((P
= dt_proc_grab(dtp
, pid
,
1472 PGRAB_RDONLY
| PGRAB_FORCE
, 0)) != NULL
) {
1475 dt_proc_lock(dtp
, P
);
1477 if (Plookup_by_addr(P
, pc
, NULL
, 0, &sym
) == 0)
1480 dt_proc_unlock(dtp
, P
);
1481 dt_proc_release(dtp
, P
);
1488 } while ((len
= dtrace_uaddr2str(dtp
, pid
, pc
, s
, n
)) > n
);
1490 return (dt_printf(dtp
, fp
, format
, s
));
1494 dt_print_umod(dtrace_hdl_t
*dtp
, FILE *fp
, const char *format
, caddr_t addr
)
1496 /* LINTED - alignment */
1497 uint64_t pid
= ((uint64_t *)addr
)[0];
1498 /* LINTED - alignment */
1499 uint64_t pc
= ((uint64_t *)addr
)[1];
1502 char objname
[PATH_MAX
], c
[PATH_MAX
* 2];
1503 struct ps_prochandle
*P
;
1509 * See the comment in dt_print_ustack() for the rationale for
1510 * printing raw addresses in the vectored case.
1512 if (dtp
->dt_vector
== NULL
)
1513 P
= dt_proc_grab(dtp
, pid
, PGRAB_RDONLY
| PGRAB_FORCE
, 0);
1518 dt_proc_lock(dtp
, P
); /* lock handle while we perform lookups */
1520 if (P
!= NULL
&& Pobjname(P
, pc
, objname
, sizeof (objname
)) != NULL
) {
1521 (void) snprintf(c
, sizeof (c
), "%s", dt_basename(objname
));
1523 (void) snprintf(c
, sizeof (c
), "0x%llx", (u_longlong_t
)pc
);
1526 err
= dt_printf(dtp
, fp
, format
, c
);
1529 dt_proc_unlock(dtp
, P
);
1530 dt_proc_release(dtp
, P
);
1537 dt_print_sym(dtrace_hdl_t
*dtp
, FILE *fp
, const char *format
, caddr_t addr
)
1539 /* LINTED - alignment */
1540 uint64_t pc
= *((uint64_t *)addr
);
1541 dtrace_syminfo_t dts
;
1543 char c
[PATH_MAX
* 2];
1548 if (dtrace_lookup_by_addr(dtp
, pc
, &sym
, &dts
) == 0) {
1549 (void) snprintf(c
, sizeof (c
), "%s`%s",
1550 dts
.dts_object
, dts
.dts_name
);
1553 * We'll repeat the lookup, but this time we'll specify a
1554 * NULL GElf_Sym -- indicating that we're only interested in
1555 * the containing module.
1557 if (dtrace_lookup_by_addr(dtp
, pc
, NULL
, &dts
) == 0) {
1558 (void) snprintf(c
, sizeof (c
), "%s`0x%llx",
1559 dts
.dts_object
, (u_longlong_t
)pc
);
1561 (void) snprintf(c
, sizeof (c
), "0x%llx",
1566 if (dt_printf(dtp
, fp
, format
, c
) < 0)
1573 dt_print_mod(dtrace_hdl_t
*dtp
, FILE *fp
, const char *format
, caddr_t addr
)
1575 /* LINTED - alignment */
1576 uint64_t pc
= *((uint64_t *)addr
);
1577 dtrace_syminfo_t dts
;
1578 char c
[PATH_MAX
* 2];
1583 if (dtrace_lookup_by_addr(dtp
, pc
, NULL
, &dts
) == 0) {
1584 (void) snprintf(c
, sizeof (c
), "%s", dts
.dts_object
);
1586 (void) snprintf(c
, sizeof (c
), "0x%llx", (u_longlong_t
)pc
);
1589 if (dt_printf(dtp
, fp
, format
, c
) < 0)
1595 typedef struct dt_normal
{
1596 dtrace_aggvarid_t dtnd_id
;
1597 uint64_t dtnd_normal
;
1601 dt_normalize_agg(const dtrace_aggdata_t
*aggdata
, void *arg
)
1603 dt_normal_t
*normal
= arg
;
1604 dtrace_aggdesc_t
*agg
= aggdata
->dtada_desc
;
1605 dtrace_aggvarid_t id
= normal
->dtnd_id
;
1607 if (agg
->dtagd_nrecs
== 0)
1608 return (DTRACE_AGGWALK_NEXT
);
1610 if (agg
->dtagd_varid
!= id
)
1611 return (DTRACE_AGGWALK_NEXT
);
1613 ((dtrace_aggdata_t
*)aggdata
)->dtada_normal
= normal
->dtnd_normal
;
1614 return (DTRACE_AGGWALK_NORMALIZE
);
1618 dt_normalize(dtrace_hdl_t
*dtp
, caddr_t base
, dtrace_recdesc_t
*rec
)
1624 * We (should) have two records: the aggregation ID followed by the
1625 * normalization value.
1627 addr
= base
+ rec
->dtrd_offset
;
1629 if (rec
->dtrd_size
!= sizeof (dtrace_aggvarid_t
))
1630 return (dt_set_errno(dtp
, EDT_BADNORMAL
));
1632 /* LINTED - alignment */
1633 normal
.dtnd_id
= *((dtrace_aggvarid_t
*)addr
);
1636 if (rec
->dtrd_action
!= DTRACEACT_LIBACT
)
1637 return (dt_set_errno(dtp
, EDT_BADNORMAL
));
1639 if (rec
->dtrd_arg
!= DT_ACT_NORMALIZE
)
1640 return (dt_set_errno(dtp
, EDT_BADNORMAL
));
1642 addr
= base
+ rec
->dtrd_offset
;
1644 switch (rec
->dtrd_size
) {
1645 case sizeof (uint64_t):
1646 /* LINTED - alignment */
1647 normal
.dtnd_normal
= *((uint64_t *)addr
);
1649 case sizeof (uint32_t):
1650 /* LINTED - alignment */
1651 normal
.dtnd_normal
= *((uint32_t *)addr
);
1653 case sizeof (uint16_t):
1654 /* LINTED - alignment */
1655 normal
.dtnd_normal
= *((uint16_t *)addr
);
1657 case sizeof (uint8_t):
1658 normal
.dtnd_normal
= *((uint8_t *)addr
);
1661 return (dt_set_errno(dtp
, EDT_BADNORMAL
));
1664 (void) dtrace_aggregate_walk(dtp
, dt_normalize_agg
, &normal
);
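
/*
 * A sketch (hypothetical helper, not part of libdtrace) of the pattern the
 * switch above implements: a libact argument record may be 1, 2, 4 or 8
 * bytes wide, so the value is widened to a uint64_t based on dtrd_size
 * before it is used as the normalization factor.
 */
static int
dt_record_value_example(const void *addr, size_t size, uint64_t *valp)
{
	switch (size) {
	case sizeof (uint64_t):
		*valp = *((const uint64_t *)addr);
		return (0);
	case sizeof (uint32_t):
		*valp = *((const uint32_t *)addr);
		return (0);
	case sizeof (uint16_t):
		*valp = *((const uint16_t *)addr);
		return (0);
	case sizeof (uint8_t):
		*valp = *((const uint8_t *)addr);
		return (0);
	default:
		return (-1);	/* caller maps this to EDT_BADNORMAL */
	}
}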
1670 dt_denormalize_agg(const dtrace_aggdata_t
*aggdata
, void *arg
)
1672 dtrace_aggdesc_t
*agg
= aggdata
->dtada_desc
;
1673 dtrace_aggvarid_t id
= *((dtrace_aggvarid_t
*)arg
);
1675 if (agg
->dtagd_nrecs
== 0)
1676 return (DTRACE_AGGWALK_NEXT
);
1678 if (agg
->dtagd_varid
!= id
)
1679 return (DTRACE_AGGWALK_NEXT
);
1681 return (DTRACE_AGGWALK_DENORMALIZE
);
1685 dt_clear_agg(const dtrace_aggdata_t
*aggdata
, void *arg
)
1687 dtrace_aggdesc_t
*agg
= aggdata
->dtada_desc
;
1688 dtrace_aggvarid_t id
= *((dtrace_aggvarid_t
*)arg
);
1690 if (agg
->dtagd_nrecs
== 0)
1691 return (DTRACE_AGGWALK_NEXT
);
1693 if (agg
->dtagd_varid
!= id
)
1694 return (DTRACE_AGGWALK_NEXT
);
1696 return (DTRACE_AGGWALK_CLEAR
);
1699 typedef struct dt_trunc
{
1700 dtrace_aggvarid_t dttd_id
;
1701 uint64_t dttd_remaining
;
1705 dt_trunc_agg(const dtrace_aggdata_t
*aggdata
, void *arg
)
1707 dt_trunc_t
*trunc
= arg
;
1708 dtrace_aggdesc_t
*agg
= aggdata
->dtada_desc
;
1709 dtrace_aggvarid_t id
= trunc
->dttd_id
;
1711 if (agg
->dtagd_nrecs
== 0)
1712 return (DTRACE_AGGWALK_NEXT
);
1714 if (agg
->dtagd_varid
!= id
)
1715 return (DTRACE_AGGWALK_NEXT
);
1717 if (trunc
->dttd_remaining
== 0)
1718 return (DTRACE_AGGWALK_REMOVE
);
1720 trunc
->dttd_remaining
--;
1721 return (DTRACE_AGGWALK_NEXT
);
1725 dt_trunc(dtrace_hdl_t
*dtp
, caddr_t base
, dtrace_recdesc_t
*rec
)
1730 int (*func
)(dtrace_hdl_t
*, dtrace_aggregate_f
*, void *);
1733 * We (should) have two records: the aggregation ID followed by the
1734 * number of aggregation entries after which the aggregation is to be
1737 addr
= base
+ rec
->dtrd_offset
;
1739 if (rec
->dtrd_size
!= sizeof (dtrace_aggvarid_t
))
1740 return (dt_set_errno(dtp
, EDT_BADTRUNC
));
1742 /* LINTED - alignment */
1743 trunc
.dttd_id
= *((dtrace_aggvarid_t
*)addr
);
1746 if (rec
->dtrd_action
!= DTRACEACT_LIBACT
)
1747 return (dt_set_errno(dtp
, EDT_BADTRUNC
));
1749 if (rec
->dtrd_arg
!= DT_ACT_TRUNC
)
1750 return (dt_set_errno(dtp
, EDT_BADTRUNC
));
1752 addr
= base
+ rec
->dtrd_offset
;
1754 switch (rec
->dtrd_size
) {
1755 case sizeof (uint64_t):
1756 /* LINTED - alignment */
1757 remaining
= *((int64_t *)addr
);
1759 case sizeof (uint32_t):
1760 /* LINTED - alignment */
1761 remaining
= *((int32_t *)addr
);
1763 case sizeof (uint16_t):
1764 /* LINTED - alignment */
1765 remaining
= *((int16_t *)addr
);
1767 case sizeof (uint8_t):
1768 remaining
= *((int8_t *)addr
);
1771 return (dt_set_errno(dtp
, EDT_BADNORMAL
));
1774 if (remaining
< 0) {
1775 func
= dtrace_aggregate_walk_valsorted
;
1776 remaining
= -remaining
;
1778 func
= dtrace_aggregate_walk_valrevsorted
;
1781 assert(remaining
>= 0);
1782 trunc
.dttd_remaining
= remaining
;
1784 (void) func(dtp
, dt_trunc_agg
, &trunc
);
1790 dt_print_datum(dtrace_hdl_t
*dtp
, FILE *fp
, dtrace_recdesc_t
*rec
,
1791 caddr_t addr
, size_t size
, const dtrace_aggdata_t
*aggdata
,
1792 uint64_t normal
, dt_print_aggdata_t
*pd
)
1795 dtrace_actkind_t act
= rec
->dtrd_action
;
1796 boolean_t packed
= pd
->dtpa_agghist
|| pd
->dtpa_aggpack
;
1797 dtrace_aggdesc_t
*agg
= aggdata
->dtada_desc
;
1803 } *fmt
, fmttab
[] = {
1804 { sizeof (uint8_t), 3, 3 },
1805 { sizeof (uint16_t), 5, 5 },
1806 { sizeof (uint32_t), 8, 8 },
1807 { sizeof (uint64_t), 16, 16 },
1811 if (packed
&& pd
->dtpa_agghisthdr
!= agg
->dtagd_varid
) {
1812 dtrace_recdesc_t
*r
;
1817 * To print our quantization header for either an agghist or
 * aggpack aggregation, we need to iterate through all of our
 * records to determine their width.
1821 for (r
= rec
; !DTRACEACT_ISAGG(r
->dtrd_action
); r
++) {
1822 for (fmt
= fmttab
; fmt
->size
&&
1823 fmt
->size
!= r
->dtrd_size
; fmt
++)
1826 width
+= fmt
->packedwidth
+ 1;
1829 if (pd
->dtpa_agghist
) {
1830 if (dt_print_quanthdr(dtp
, fp
, width
) < 0)
1833 if (dt_print_quanthdr_packed(dtp
, fp
,
1834 width
, aggdata
, r
->dtrd_action
) < 0)
1838 pd
->dtpa_agghisthdr
= agg
->dtagd_varid
;
1841 if (pd
->dtpa_agghist
&& DTRACEACT_ISAGG(act
)) {
1842 char positives
= aggdata
->dtada_flags
& DTRACE_A_HASPOSITIVES
;
1843 char negatives
= aggdata
->dtada_flags
& DTRACE_A_HASNEGATIVES
;
1846 assert(act
== DTRACEAGG_SUM
|| act
== DTRACEAGG_COUNT
);
1847 val
= (long long)*((uint64_t *)addr
);
1849 if (dt_printf(dtp
, fp
, " ") < 0)
1852 return (dt_print_quantline(dtp
, fp
, val
, normal
,
1853 aggdata
->dtada_total
, positives
, negatives
));
1856 if (pd
->dtpa_aggpack
&& DTRACEACT_ISAGG(act
)) {
1858 case DTRACEAGG_QUANTIZE
:
1859 return (dt_print_quantize_packed(dtp
,
1860 fp
, addr
, size
, aggdata
));
1861 case DTRACEAGG_LQUANTIZE
:
1862 return (dt_print_lquantize_packed(dtp
,
1863 fp
, addr
, size
, aggdata
));
1870 case DTRACEACT_STACK
:
1871 return (dt_print_stack(dtp
, fp
, NULL
, addr
,
1872 rec
->dtrd_arg
, rec
->dtrd_size
/ rec
->dtrd_arg
));
1874 case DTRACEACT_USTACK
:
1875 case DTRACEACT_JSTACK
:
1876 return (dt_print_ustack(dtp
, fp
, NULL
, addr
, rec
->dtrd_arg
));
1878 case DTRACEACT_USYM
:
1879 case DTRACEACT_UADDR
:
1880 return (dt_print_usym(dtp
, fp
, addr
, act
));
1882 case DTRACEACT_UMOD
:
1883 return (dt_print_umod(dtp
, fp
, NULL
, addr
));
1886 return (dt_print_sym(dtp
, fp
, NULL
, addr
));
1889 return (dt_print_mod(dtp
, fp
, NULL
, addr
));
1891 case DTRACEAGG_QUANTIZE
:
1892 return (dt_print_quantize(dtp
, fp
, addr
, size
, normal
));
1894 case DTRACEAGG_LQUANTIZE
:
1895 return (dt_print_lquantize(dtp
, fp
, addr
, size
, normal
));
1897 case DTRACEAGG_LLQUANTIZE
:
1898 return (dt_print_llquantize(dtp
, fp
, addr
, size
, normal
));
1901 return (dt_print_average(dtp
, fp
, addr
, size
, normal
));
1903 case DTRACEAGG_STDDEV
:
1904 return (dt_print_stddev(dtp
, fp
, addr
, size
, normal
));
1910 for (fmt
= fmttab
; fmt
->size
&& fmt
->size
!= size
; fmt
++)
1913 width
= packed
? fmt
->packedwidth
: fmt
->width
;
1916 case sizeof (uint64_t):
1917 err
= dt_printf(dtp
, fp
, " %*lld", width
,
1918 /* LINTED - alignment */
1919 (long long)*((uint64_t *)addr
) / normal
);
1921 case sizeof (uint32_t):
1922 /* LINTED - alignment */
1923 err
= dt_printf(dtp
, fp
, " %*d", width
, *((uint32_t *)addr
) /
1926 case sizeof (uint16_t):
1927 /* LINTED - alignment */
1928 err
= dt_printf(dtp
, fp
, " %*d", width
, *((uint16_t *)addr
) /
1931 case sizeof (uint8_t):
1932 err
= dt_printf(dtp
, fp
, " %*d", width
, *((uint8_t *)addr
) /
1936 err
= dt_print_bytes(dtp
, fp
, addr
, size
, width
, 0, 0);
1944 dt_print_aggs(const dtrace_aggdata_t
**aggsdata
, int naggvars
, void *arg
)
1947 dt_print_aggdata_t
*pd
= arg
;
1948 const dtrace_aggdata_t
*aggdata
= aggsdata
[0];
1949 dtrace_aggdesc_t
*agg
= aggdata
->dtada_desc
;
1950 FILE *fp
= pd
->dtpa_fp
;
1951 dtrace_hdl_t
*dtp
= pd
->dtpa_dtp
;
1952 dtrace_recdesc_t
*rec
;
1953 dtrace_actkind_t act
;
1957 pd
->dtpa_agghist
= (aggdata
->dtada_flags
& DTRACE_A_TOTAL
);
1958 pd
->dtpa_aggpack
= (aggdata
->dtada_flags
& DTRACE_A_MINMAXBIN
);
1961 * Iterate over each record description in the key, printing the traced
1962 * data, skipping the first datum (the tuple member created by the
1965 for (i
= 1; i
< agg
->dtagd_nrecs
; i
++) {
1966 rec
= &agg
->dtagd_rec
[i
];
1967 act
= rec
->dtrd_action
;
1968 addr
= aggdata
->dtada_data
+ rec
->dtrd_offset
;
1969 size
= rec
->dtrd_size
;
1971 if (DTRACEACT_ISAGG(act
)) {
1976 if (dt_print_datum(dtp
, fp
, rec
, addr
,
1977 size
, aggdata
, 1, pd
) < 0)
1980 if (dt_buffered_flush(dtp
, NULL
, rec
, aggdata
,
1981 DTRACE_BUFDATA_AGGKEY
) < 0)
1985 assert(aggact
!= 0);
1987 for (i
= (naggvars
== 1 ? 0 : 1); i
< naggvars
; i
++) {
1990 aggdata
= aggsdata
[i
];
1991 agg
= aggdata
->dtada_desc
;
1992 rec
= &agg
->dtagd_rec
[aggact
];
1993 act
= rec
->dtrd_action
;
1994 addr
= aggdata
->dtada_data
+ rec
->dtrd_offset
;
1995 size
= rec
->dtrd_size
;
1997 assert(DTRACEACT_ISAGG(act
));
1998 normal
= aggdata
->dtada_normal
;
2000 if (dt_print_datum(dtp
, fp
, rec
, addr
,
2001 size
, aggdata
, normal
, pd
) < 0)
2004 if (dt_buffered_flush(dtp
, NULL
, rec
, aggdata
,
2005 DTRACE_BUFDATA_AGGVAL
) < 0)
2008 if (!pd
->dtpa_allunprint
)
2009 agg
->dtagd_flags
|= DTRACE_AGD_PRINTED
;
2012 if (!pd
->dtpa_agghist
&& !pd
->dtpa_aggpack
) {
2013 if (dt_printf(dtp
, fp
, "\n") < 0)
2017 if (dt_buffered_flush(dtp
, NULL
, NULL
, aggdata
,
2018 DTRACE_BUFDATA_AGGFORMAT
| DTRACE_BUFDATA_AGGLAST
) < 0)
2025 dt_print_agg(const dtrace_aggdata_t
*aggdata
, void *arg
)
2027 dt_print_aggdata_t
*pd
= arg
;
2028 dtrace_aggdesc_t
*agg
= aggdata
->dtada_desc
;
2029 dtrace_aggvarid_t aggvarid
= pd
->dtpa_id
;
2031 if (pd
->dtpa_allunprint
) {
2032 if (agg
->dtagd_flags
& DTRACE_AGD_PRINTED
)
2036 * If we're not printing all unprinted aggregations, then the
2037 * aggregation variable ID denotes a specific aggregation
2038 * variable that we should print -- skip any other aggregations
2039 * that we encounter.
2041 if (agg
->dtagd_nrecs
== 0)
2044 if (aggvarid
!= agg
->dtagd_varid
)
2048 return (dt_print_aggs(&aggdata
, 1, arg
));
2052 dt_setopt(dtrace_hdl_t
*dtp
, const dtrace_probedata_t
*data
,
2053 const char *option
, const char *value
)
2058 dtrace_setoptdata_t optdata
;
2060 bzero(&optdata
, sizeof (optdata
));
2061 (void) dtrace_getopt(dtp
, option
, &optdata
.dtsda_oldval
);
2063 if (dtrace_setopt(dtp
, option
, value
) == 0) {
2064 (void) dtrace_getopt(dtp
, option
, &optdata
.dtsda_newval
);
2065 optdata
.dtsda_probe
= data
;
2066 optdata
.dtsda_option
= option
;
2067 optdata
.dtsda_handle
= dtp
;
2069 if ((rval
= dt_handle_setopt(dtp
, &optdata
)) != 0)
2075 errstr
= dtrace_errmsg(dtp
, dtrace_errno(dtp
));
2076 len
= strlen(option
) + strlen(value
) + strlen(errstr
) + 80;
2079 (void) snprintf(msg
, len
, "couldn't set option \"%s\" to \"%s\": %s\n",
2080 option
, value
, errstr
);
2082 if ((rval
= dt_handle_liberr(dtp
, data
, msg
)) == 0)
2089 dt_consume_cpu(dtrace_hdl_t
*dtp
, FILE *fp
, int cpu
,
2090 dtrace_bufdesc_t
*buf
, boolean_t just_one
,
2091 dtrace_consume_probe_f
*efunc
, dtrace_consume_rec_f
*rfunc
, void *arg
)
2095 int flow
= (dtp
->dt_options
[DTRACEOPT_FLOWINDENT
] != DTRACEOPT_UNSET
);
2096 int quiet
= (dtp
->dt_options
[DTRACEOPT_QUIET
] != DTRACEOPT_UNSET
);
2098 uint64_t tracememsize
= 0;
2099 dtrace_probedata_t data
;
2102 bzero(&data
, sizeof (data
));
2103 data
.dtpda_handle
= dtp
;
2104 data
.dtpda_cpu
= cpu
;
2105 data
.dtpda_flow
= dtp
->dt_flow
;
2106 data
.dtpda_indent
= dtp
->dt_indent
;
2107 data
.dtpda_prefix
= dtp
->dt_prefix
;
2109 for (offs
= buf
->dtbd_oldest
; offs
< buf
->dtbd_size
; ) {
2110 dtrace_eprobedesc_t
*epd
;
2113 * We're guaranteed to have an ID.
2115 id
= *(uint32_t *)((uintptr_t)buf
->dtbd_data
+ offs
);
2117 if (id
== DTRACE_EPIDNONE
) {
2119 * This is filler to assure proper alignment of the
2120 * next record; we simply ignore it.
2122 offs
+= sizeof (id
);
2126 if ((rval
= dt_epid_lookup(dtp
, id
, &data
.dtpda_edesc
,
2127 &data
.dtpda_pdesc
)) != 0)
2130 epd
= data
.dtpda_edesc
;
2131 data
.dtpda_data
= buf
->dtbd_data
+ offs
;
2133 if (data
.dtpda_edesc
->dtepd_uarg
!= DT_ECB_DEFAULT
) {
2134 rval
= dt_handle(dtp
, &data
);
2136 if (rval
== DTRACE_CONSUME_NEXT
)
2139 if (rval
== DTRACE_CONSUME_ERROR
)
2144 (void) dt_flowindent(dtp
, &data
, dtp
->dt_last_epid
,
2147 rval
= (*efunc
)(&data
, arg
);
2150 if (data
.dtpda_flow
== DTRACEFLOW_ENTRY
)
2151 data
.dtpda_indent
+= 2;
2154 if (rval
== DTRACE_CONSUME_NEXT
)
2157 if (rval
== DTRACE_CONSUME_ABORT
)
2158 return (dt_set_errno(dtp
, EDT_DIRABORT
));
2160 if (rval
!= DTRACE_CONSUME_THIS
)
2161 return (dt_set_errno(dtp
, EDT_BADRVAL
));
2163 for (i
= 0; i
< epd
->dtepd_nrecs
; i
++) {
2165 dtrace_recdesc_t
*rec
= &epd
->dtepd_rec
[i
];
2166 dtrace_actkind_t act
= rec
->dtrd_action
;
2168 data
.dtpda_data
= buf
->dtbd_data
+ offs
+
2170 addr
= data
.dtpda_data
;
2172 if (act
== DTRACEACT_LIBACT
) {
2173 uint64_t arg
= rec
->dtrd_arg
;
2174 dtrace_aggvarid_t id
;
2178 /* LINTED - alignment */
2179 id
= *((dtrace_aggvarid_t
*)addr
);
2180 (void) dtrace_aggregate_walk(dtp
,
2184 case DT_ACT_DENORMALIZE
:
2185 /* LINTED - alignment */
2186 id
= *((dtrace_aggvarid_t
*)addr
);
2187 (void) dtrace_aggregate_walk(dtp
,
2188 dt_denormalize_agg
, &id
);
2191 case DT_ACT_FTRUNCATE
:
2196 (void) ftruncate(fileno(fp
), 0);
2197 (void) fseeko(fp
, 0, SEEK_SET
);
2200 case DT_ACT_NORMALIZE
:
2201 if (i
== epd
->dtepd_nrecs
- 1)
2202 return (dt_set_errno(dtp
,
2205 if (dt_normalize(dtp
,
2206 buf
->dtbd_data
+ offs
, rec
) != 0)
2212 case DT_ACT_SETOPT
: {
2213 uint64_t *opts
= dtp
->dt_options
;
2214 dtrace_recdesc_t
*valrec
;
2219 if (i
== epd
->dtepd_nrecs
- 1) {
2220 return (dt_set_errno(dtp
,
2224 valrec
= &epd
->dtepd_rec
[++i
];
2225 valsize
= valrec
->dtrd_size
;
2227 if (valrec
->dtrd_action
!= act
||
2228 valrec
->dtrd_arg
!= arg
) {
2229 return (dt_set_errno(dtp
,
2233 if (valsize
> sizeof (uint64_t)) {
2234 val
= buf
->dtbd_data
+ offs
+
2235 valrec
->dtrd_offset
;
2240 rv
= dt_setopt(dtp
, &data
, addr
, val
);
2245 flow
= (opts
[DTRACEOPT_FLOWINDENT
] !=
2247 quiet
= (opts
[DTRACEOPT_QUIET
] !=
2254 if (i
== epd
->dtepd_nrecs
- 1)
2255 return (dt_set_errno(dtp
,
2259 buf
->dtbd_data
+ offs
, rec
) != 0)
2270 if (act
== DTRACEACT_TRACEMEM_DYNSIZE
&&
2271 rec
->dtrd_size
== sizeof (uint64_t)) {
2272 /* LINTED - alignment */
2273 tracememsize
= *((unsigned long long *)addr
);
2277 rval
= (*rfunc
)(&data
, rec
, arg
);
2279 if (rval
== DTRACE_CONSUME_NEXT
)
2282 if (rval
== DTRACE_CONSUME_ABORT
)
2283 return (dt_set_errno(dtp
, EDT_DIRABORT
));
2285 if (rval
!= DTRACE_CONSUME_THIS
)
2286 return (dt_set_errno(dtp
, EDT_BADRVAL
));
2288 if (act
== DTRACEACT_STACK
) {
2289 int depth
= rec
->dtrd_arg
;
2291 if (dt_print_stack(dtp
, fp
, NULL
, addr
, depth
,
2292 rec
->dtrd_size
/ depth
) < 0)
2297 if (act
== DTRACEACT_USTACK
||
2298 act
== DTRACEACT_JSTACK
) {
2299 if (dt_print_ustack(dtp
, fp
, NULL
,
2300 addr
, rec
->dtrd_arg
) < 0)
2305 if (act
== DTRACEACT_SYM
) {
2306 if (dt_print_sym(dtp
, fp
, NULL
, addr
) < 0)
2311 if (act
== DTRACEACT_MOD
) {
2312 if (dt_print_mod(dtp
, fp
, NULL
, addr
) < 0)
2317 if (act
== DTRACEACT_USYM
|| act
== DTRACEACT_UADDR
) {
2318 if (dt_print_usym(dtp
, fp
, addr
, act
) < 0)
2323 if (act
== DTRACEACT_UMOD
) {
2324 if (dt_print_umod(dtp
, fp
, NULL
, addr
) < 0)
2329 if (DTRACEACT_ISPRINTFLIKE(act
)) {
2331 int (*func
)(dtrace_hdl_t
*, FILE *, void *,
2332 const dtrace_probedata_t
*,
2333 const dtrace_recdesc_t
*, uint_t
,
2334 const void *buf
, size_t);
2336 if ((fmtdata
= dt_format_lookup(dtp
,
2337 rec
->dtrd_format
)) == NULL
)
2341 case DTRACEACT_PRINTF
:
2342 func
= dtrace_fprintf
;
2344 case DTRACEACT_PRINTA
:
2345 func
= dtrace_fprinta
;
2347 case DTRACEACT_SYSTEM
:
2348 func
= dtrace_system
;
2350 case DTRACEACT_FREOPEN
:
2351 func
= dtrace_freopen
;
2355 n
= (*func
)(dtp
, fp
, fmtdata
, &data
,
2356 rec
, epd
->dtepd_nrecs
- i
,
2357 (uchar_t
*)buf
->dtbd_data
+ offs
,
2358 buf
->dtbd_size
- offs
);
2361 return (-1); /* errno is set for us */
2369 * If this is a DIF expression, and the record has a
2370 * format set, this indicates we have a CTF type name
2371 * associated with the data and we should try to print
2374 if (act
== DTRACEACT_DIFEXPR
) {
2375 const char *strdata
= dt_strdata_lookup(dtp
,
2377 if (strdata
!= NULL
) {
2378 n
= dtrace_print(dtp
, fp
, strdata
,
2379 addr
, rec
->dtrd_size
);
2382 * dtrace_print() will return -1 on
2383 * error, or return the number of bytes
2384 * consumed. It will return 0 if the
2385 * type couldn't be determined, and we
2386 * should fall through to the normal
2398 if (act
== DTRACEACT_PRINTA
) {
2399 dt_print_aggdata_t pd
;
2400 dtrace_aggvarid_t
*aggvars
;
2401 int j
, naggvars
= 0;
2402 size_t size
= ((epd
->dtepd_nrecs
- i
) *
2403 sizeof (dtrace_aggvarid_t
));
2405 if ((aggvars
= dt_alloc(dtp
, size
)) == NULL
)
2409 * This might be a printa() with multiple
2410 * aggregation variables. We need to scan
2411 * forward through the records until we find
2412 * a record from a different statement.
2414 for (j
= i
; j
< epd
->dtepd_nrecs
; j
++) {
2415 dtrace_recdesc_t
*nrec
;
2418 nrec
= &epd
->dtepd_rec
[j
];
2420 if (nrec
->dtrd_uarg
!= rec
->dtrd_uarg
)
2423 if (nrec
->dtrd_action
!= act
) {
2424 return (dt_set_errno(dtp
,
2428 naddr
= buf
->dtbd_data
+ offs
+
2431 aggvars
[naggvars
++] =
2432 /* LINTED - alignment */
2433 *((dtrace_aggvarid_t
*)naddr
);
2437 bzero(&pd
, sizeof (pd
));
2441 assert(naggvars
>= 1);
2443 if (naggvars
== 1) {
2444 pd
.dtpa_id
= aggvars
[0];
2445 dt_free(dtp
, aggvars
);
2447 if (dt_printf(dtp
, fp
, "\n") < 0 ||
2448 dtrace_aggregate_walk_sorted(dtp
,
2449 dt_print_agg
, &pd
) < 0)
2454 if (dt_printf(dtp
, fp
, "\n") < 0 ||
2455 dtrace_aggregate_walk_joined(dtp
, aggvars
,
2456 naggvars
, dt_print_aggs
, &pd
) < 0) {
2457 dt_free(dtp
, aggvars
);
2461 dt_free(dtp
, aggvars
);
2465 if (act
== DTRACEACT_TRACEMEM
) {
2466 if (tracememsize
== 0 ||
2467 tracememsize
> rec
->dtrd_size
) {
2468 tracememsize
= rec
->dtrd_size
;
2471 n
= dt_print_bytes(dtp
, fp
, addr
,
2472 tracememsize
, -33, quiet
, 1);
2482 switch (rec
->dtrd_size
) {
2483 case sizeof (uint64_t):
2484 n
= dt_printf(dtp
, fp
,
2485 quiet
? "%lld" : " %16lld",
2486 /* LINTED - alignment */
2487 *((unsigned long long *)addr
));
2489 case sizeof (uint32_t):
2490 n
= dt_printf(dtp
, fp
, quiet
? "%d" : " %8d",
2491 /* LINTED - alignment */
2492 *((uint32_t *)addr
));
2494 case sizeof (uint16_t):
2495 n
= dt_printf(dtp
, fp
, quiet
? "%d" : " %5d",
2496 /* LINTED - alignment */
2497 *((uint16_t *)addr
));
2499 case sizeof (uint8_t):
2500 n
= dt_printf(dtp
, fp
, quiet
? "%d" : " %3d",
2501 *((uint8_t *)addr
));
2504 n
= dt_print_bytes(dtp
, fp
, addr
,
2505 rec
->dtrd_size
, -33, quiet
, 0);
2510 return (-1); /* errno is set for us */
2513 if (dt_buffered_flush(dtp
, &data
, rec
, NULL
, 0) < 0)
2514 return (-1); /* errno is set for us */
2518 * Call the record callback with a NULL record to indicate
2519 * that we're done processing this EPID.
2521 rval
= (*rfunc
)(&data
, NULL
, arg
);
2523 offs
+= epd
->dtepd_size
;
2524 dtp
->dt_last_epid
= id
;
2526 buf
->dtbd_oldest
= offs
;
2531 dtp
->dt_flow
= data
.dtpda_flow
;
2532 dtp
->dt_indent
= data
.dtpda_indent
;
2533 dtp
->dt_prefix
= data
.dtpda_prefix
;
2535 if ((drops
= buf
->dtbd_drops
) == 0)
2539 * Explicitly zero the drops to prevent us from processing them again.
2541 buf
->dtbd_drops
= 0;
2543 return (dt_handle_cpudrop(dtp
, cpu
, DTRACEDROP_PRINCIPAL
, drops
));
2547 * Reduce memory usage by shrinking the buffer if it's no more than half full.
2548 * Note, we need to preserve the alignment of the data at dtbd_oldest, which is
2549 * only 4-byte aligned.
2552 dt_realloc_buf(dtrace_hdl_t
*dtp
, dtrace_bufdesc_t
*buf
, int cursize
)
2554 uint64_t used
= buf
->dtbd_size
- buf
->dtbd_oldest
;
2555 if (used
< cursize
/ 2) {
2556 int misalign
= buf
->dtbd_oldest
& (sizeof (uint64_t) - 1);
2557 char *newdata
= dt_alloc(dtp
, used
+ misalign
);
2558 if (newdata
== NULL
)
2560 bzero(newdata
, misalign
);
2561 bcopy(buf
->dtbd_data
+ buf
->dtbd_oldest
,
2562 newdata
+ misalign
, used
);
2563 dt_free(dtp
, buf
->dtbd_data
);
2564 buf
->dtbd_oldest
= misalign
;
2565 buf
->dtbd_size
= used
+ misalign
;
2566 buf
->dtbd_data
= newdata
;
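
/*
 * A short sketch (illustrative only) of the alignment bookkeeping above:
 * records at dtbd_oldest are only guaranteed 4-byte alignment, so the
 * shrunken copy keeps the data at the same offset modulo 8 and any 8-byte
 * quantities within the records keep whatever alignment they already had.
 */
static void
dt_realloc_offset_example(void)
{
	uint64_t oldest = 12;	/* 4-byte aligned, not 8-byte aligned */
	int misalign = oldest & (sizeof (uint64_t) - 1);

	/* The copy lands at newdata + 4, matching the original offset mod 8. */
	assert(misalign == 4);
}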
/*
 * If the ring buffer has wrapped, the data is not in order.  Rearrange it
 * so that it is.  Note, we need to preserve the alignment of the data at
 * dtbd_oldest, which is only 4-byte aligned.
 */
static int
dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
{
    int misalign;
    char *newdata, *ndp;

    if (buf->dtbd_oldest == 0)
        return (0);

    misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
    newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign);

    if (newdata == NULL)
        return (-1);

    assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1)));

    bzero(ndp, misalign);
    ndp += misalign;

    bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp,
        buf->dtbd_size - buf->dtbd_oldest);
    ndp += buf->dtbd_size - buf->dtbd_oldest;

    bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest);

    dt_free(dtp, buf->dtbd_data);
    buf->dtbd_oldest = 0;
    buf->dtbd_data = newdata;
    buf->dtbd_size += misalign;

    return (0);
}
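
/*
 * Illustrative sketch (not from the original libdtrace source): the
 * "un-ring" rotation performed by dt_unring_buf() above, reduced to a
 * standalone helper over plain memory.  unring_copy() and its parameters
 * are hypothetical.  The rotation is two copies -- tail first (from the
 * oldest offset to the end), then head (from the start up to the oldest
 * offset) -- with misalign bytes of zero padding up front so the copied
 * records keep their original 8-byte alignment.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static char *
unring_copy(const char *data, size_t size, size_t oldest, size_t *newsizep)
{
    size_t misalign = oldest & (sizeof (uint64_t) - 1);
    char *out = malloc(size + misalign);
    char *p = out;

    if (out == NULL)
        return (NULL);
    memset(p, 0, misalign);                     /* alignment padding */
    p += misalign;
    memcpy(p, data + oldest, size - oldest);    /* tail: oldest .. end */
    p += size - oldest;
    memcpy(p, data, oldest);                    /* head: start .. oldest */
    *newsizep = size + misalign;
    return (out);
}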
static void
dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
{
    dt_free(dtp, buf->dtbd_data);
    dt_free(dtp, buf);
}
/*
 * Returns 0 on success, in which case *bufp will be filled in if we retrieved
 * data, or NULL if there is no data for this CPU.
 * Returns -1 on failure and sets dt_errno.
 */
static int
dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp)
{
    dtrace_optval_t size;
    dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf));
    int error;

    if (buf == NULL)
        return (-1);

    (void) dtrace_getopt(dtp, "bufsize", &size);
    buf->dtbd_data = dt_alloc(dtp, size);
    if (buf->dtbd_data == NULL) {
        dt_free(dtp, buf);
        return (-1);
    }
    buf->dtbd_size = size;
    buf->dtbd_cpu = cpu;

    if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
        dt_put_buf(dtp, buf);
        /*
         * If we failed with ENOENT, it may be because the
         * CPU was unconfigured -- this is okay.  Any other
         * error, however, is unexpected.
         */
        if (errno == ENOENT) {
            *bufp = NULL;
            return (0);
        }

        return (dt_set_errno(dtp, errno));
    }

    error = dt_unring_buf(dtp, buf);
    if (error != 0) {
        dt_put_buf(dtp, buf);
        return (error);
    }
    dt_realloc_buf(dtp, buf, size);

    *bufp = buf;
    return (0);
}
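
/*
 * Illustrative sketch (not from the original libdtrace source): how a
 * caller is expected to honor the dt_get_buf() contract documented above.
 * consume_one_cpu() is a hypothetical helper; the buffer must be released
 * with dt_put_buf() whether or not consumption succeeds, and a NULL buffer
 * simply means there was no data for that CPU.
 */
static int
consume_one_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu,
    dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
{
    dtrace_bufdesc_t *buf;
    int rval;

    if (dt_get_buf(dtp, cpu, &buf) != 0)
        return (-1);        /* dt_errno has been set for us */
    if (buf == NULL)
        return (0);         /* no data for this CPU */

    rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, pf, rf, arg);
    dt_put_buf(dtp, buf);
    return (rval);
}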
typedef struct dt_begin {
    dtrace_consume_probe_f *dtbgn_probefunc;
    dtrace_consume_rec_f *dtbgn_recfunc;
    void *dtbgn_arg;
    dtrace_handle_err_f *dtbgn_errhdlr;
    void *dtbgn_errarg;
    int dtbgn_beginonly;
} dt_begin_t;
static int
dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg)
{
    dt_begin_t *begin = arg;
    dtrace_probedesc_t *pd = data->dtpda_pdesc;

    int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
    int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);

    if (begin->dtbgn_beginonly) {
        if (!(r1 && r2))
            return (DTRACE_CONSUME_NEXT);
    } else {
        if (r1 && r2)
            return (DTRACE_CONSUME_NEXT);
    }

    /*
     * We have a record that we're interested in.  Now call the underlying
     * callback.
     */
    return (begin->dtbgn_probefunc(data, begin->dtbgn_arg));
}
static int
dt_consume_begin_record(const dtrace_probedata_t *data,
    const dtrace_recdesc_t *rec, void *arg)
{
    dt_begin_t *begin = arg;

    return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg));
}
static int
dt_consume_begin_error(const dtrace_errdata_t *data, void *arg)
{
    dt_begin_t *begin = (dt_begin_t *)arg;
    dtrace_probedesc_t *pd = data->dteda_pdesc;

    int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
    int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);

    if (begin->dtbgn_beginonly) {
        if (!(r1 && r2))
            return (DTRACE_HANDLE_OK);
    } else {
        if (r1 && r2)
            return (DTRACE_HANDLE_OK);
    }

    return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg));
}
static int
dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
{
    /*
     * There's this idea that the BEGIN probe should be processed before
     * everything else, and that the END probe should be processed after
     * everything else.  In the common case, this is pretty easy to deal
     * with.  However, a situation may arise where the BEGIN enabling and
     * END enabling are on the same CPU, and some enabling in the middle
     * occurred on a different CPU.  To deal with this (blech!) we need to
     * consume the BEGIN buffer up until the end of the BEGIN probe, and
     * then set it aside.  We will then process every other CPU, and then
     * we'll return to the BEGIN CPU and process the rest of the data
     * (which will inevitably include the END probe, if any).  Making this
     * even more complicated (!) is the library's ERROR enabling.  Because
     * this enabling is processed before we even get into the consume
     * callback, any ERROR firing would result in the library's ERROR
     * enabling being processed twice -- once in our first pass (for BEGIN
     * probes), and again in our second pass (for everything but BEGIN
     * probes).  To deal with this, we interpose on the ERROR handler to
     * assure that we only process ERROR enablings induced by BEGIN
     * enablings in the first pass, and that we only process ERROR
     * enablings _not_ induced by BEGIN enablings in the second pass.
     * (An illustrative sketch of this save/interpose/restore pattern
     * follows dt_consume_begin() below.)
     */
    dt_begin_t begin;
    processorid_t cpu = dtp->dt_beganon;
    int rval, i;
    static int max_ncpus;
    dtrace_bufdesc_t *buf;

    dtp->dt_beganon = -1;

    if (dt_get_buf(dtp, cpu, &buf) != 0)
        return (-1);
    if (buf == NULL)
        return (0);

    if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) {
        /*
         * This is the simple case.  We're either not stopped, or if
         * we are, we actually processed any END probes on another
         * CPU.  We can simply consume this buffer and return.
         */
        rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
            pf, rf, arg);
        dt_put_buf(dtp, buf);
        return (rval);
    }
    begin.dtbgn_probefunc = pf;
    begin.dtbgn_recfunc = rf;
    begin.dtbgn_arg = arg;
    begin.dtbgn_beginonly = 1;

    /*
     * We need to interpose on the ERROR handler to be sure that we
     * only process ERRORs induced by BEGIN.
     */
    begin.dtbgn_errhdlr = dtp->dt_errhdlr;
    begin.dtbgn_errarg = dtp->dt_errarg;
    dtp->dt_errhdlr = dt_consume_begin_error;
    dtp->dt_errarg = &begin;

    rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
        dt_consume_begin_probe, dt_consume_begin_record, &begin);

    dtp->dt_errhdlr = begin.dtbgn_errhdlr;
    dtp->dt_errarg = begin.dtbgn_errarg;

    if (rval != 0) {
        dt_put_buf(dtp, buf);
        return (rval);
    }
    if (max_ncpus == 0)
        max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;

    for (i = 0; i < max_ncpus; i++) {
        dtrace_bufdesc_t *nbuf;

        if (i == cpu)
            continue;

        if (dt_get_buf(dtp, i, &nbuf) != 0) {
            dt_put_buf(dtp, buf);
            return (-1);
        }
        if (nbuf == NULL)
            continue;

        rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE,
            pf, rf, arg);
        dt_put_buf(dtp, nbuf);
        if (rval != 0) {
            dt_put_buf(dtp, buf);
            return (rval);
        }
    }
    /*
     * Okay -- we're done with the other buffers.  Now we want to
     * reconsume the first buffer -- but this time we're looking for
     * everything _but_ BEGIN.  And of course, in order to only consume
     * those ERRORs _not_ associated with BEGIN, we need to reinstall our
     * ERROR interposition function...
     */
    begin.dtbgn_beginonly = 0;

    assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr);
    assert(begin.dtbgn_errarg == dtp->dt_errarg);
    dtp->dt_errhdlr = dt_consume_begin_error;
    dtp->dt_errarg = &begin;

    rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
        dt_consume_begin_probe, dt_consume_begin_record, &begin);

    dtp->dt_errhdlr = begin.dtbgn_errhdlr;
    dtp->dt_errarg = begin.dtbgn_errarg;

    dt_put_buf(dtp, buf);

    return (rval);
}
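
/*
 * Illustrative sketch (not from the original libdtrace source): the
 * save/interpose/restore pattern that dt_consume_begin() applies to the
 * ERROR handler, reduced to a generic callback slot.  All of the names
 * here (err_cb_t, struct err_hook, with_interposed_handler, ...) are
 * hypothetical; the point is that the original handler and argument are
 * saved before the pass, a filtering wrapper is installed for the
 * duration of the pass, and the original pair is restored afterward.
 */
typedef int (*err_cb_t)(const void *, void *);

struct err_hook {
    err_cb_t eh_cb;     /* currently installed handler */
    void *eh_arg;       /* argument passed to the handler */
};

static void
with_interposed_handler(struct err_hook *slot, err_cb_t wrapper,
    void *wrapper_arg, void (*pass)(void *), void *pass_arg)
{
    struct err_hook saved = *slot;      /* save the original handler */

    slot->eh_cb = wrapper;              /* interpose for this pass */
    slot->eh_arg = wrapper_arg;

    pass(pass_arg);                     /* run the consume pass */

    *slot = saved;                      /* restore unconditionally */
}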
static uint64_t
dt_buf_oldest(void *elem, void *arg)
{
    dtrace_bufdesc_t *buf = elem;
    size_t offs = buf->dtbd_oldest;

    while (offs < buf->dtbd_size) {
        dtrace_rechdr_t *dtrh =
            /* LINTED - alignment */
            (dtrace_rechdr_t *)(buf->dtbd_data + offs);
        if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
            offs += sizeof (dtrace_epid_t);
        } else {
            return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh));
        }
    }

    /* There are no records left; use the time the buffer was retrieved. */
    return (buf->dtbd_timestamp);
}
int
dtrace_consume(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
{
    dtrace_optval_t size;
    static int max_ncpus;
    int i, rval;
    dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE];
    hrtime_t now = gethrtime();

    if (dtp->dt_lastswitch != 0) {
        if (now - dtp->dt_lastswitch < interval)
            return (0);

        dtp->dt_lastswitch += interval;
    } else {
        dtp->dt_lastswitch = now;
    }

    if (!dtp->dt_active)
        return (dt_set_errno(dtp, EINVAL));

    if (max_ncpus == 0)
        max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
    if (pf == NULL)
        pf = (dtrace_consume_probe_f *)dt_nullprobe;

    if (rf == NULL)
        rf = (dtrace_consume_rec_f *)dt_nullrec;
    if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) {
        /*
         * The output will not be in the order it was traced.  Rather,
         * we will consume all of the data from each CPU's buffer in
         * turn.  We apply special handling for the records from BEGIN
         * and END probes so that they are consumed first and last,
         * respectively.
         *
         * If we have just begun, we want to first process the CPU that
         * executed the BEGIN probe (if any).
         */
        if (dtp->dt_active && dtp->dt_beganon != -1 &&
            (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0)
            return (rval);

        for (i = 0; i < max_ncpus; i++) {
            dtrace_bufdesc_t *buf;

            /*
             * If we have stopped, we want to process the CPU on
             * which the END probe was processed only _after_ we
             * have processed everything else.
             */
            if (dtp->dt_stopped && (i == dtp->dt_endedon))
                continue;

            if (dt_get_buf(dtp, i, &buf) != 0)
                return (-1);
            if (buf == NULL)
                continue;

            dtp->dt_prefix = NULL;
            rval = dt_consume_cpu(dtp, fp, i,
                buf, B_FALSE, pf, rf, arg);
            dt_put_buf(dtp, buf);
            if (rval != 0)
                return (rval);
        }

        if (dtp->dt_stopped) {
            dtrace_bufdesc_t *buf;

            if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0)
                return (-1);
            if (buf == NULL)
                return (0);

            rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon,
                buf, B_FALSE, pf, rf, arg);
            dt_put_buf(dtp, buf);
            return (rval);
        }
    } else {
        /*
         * The output will be in the order it was traced (or for
         * speculations, when it was committed).  We retrieve a buffer
         * from each CPU and put it into a priority queue, which sorts
         * based on the first entry in the buffer.  This is sufficient
         * because entries within a buffer are already sorted.
         *
         * We then consume records one at a time, always consuming the
         * oldest record, as determined by the priority queue.  When
         * we reach the end of the time covered by these buffers,
         * we need to stop and retrieve more records on the next pass.
         * The kernel tells us the time covered by each buffer, in
         * dtbd_timestamp.  The first buffer's timestamp tells us the
         * time covered by all buffers, as subsequently retrieved
         * buffers will cover up to a more recent time.  (An
         * illustrative sketch of this oldest-first merge follows
         * dtrace_consume() below.)
         */
        uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t));
        uint64_t first_timestamp = 0;
        uint32_t cookie = 0;
        dtrace_bufdesc_t *buf;

        bzero(drops, max_ncpus * sizeof (uint64_t));

        if (dtp->dt_bufq == NULL) {
            dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2,
                dt_buf_oldest, NULL);
            if (dtp->dt_bufq == NULL) /* ENOMEM */
                return (-1);
        }
        /* Retrieve data from each CPU. */
        (void) dtrace_getopt(dtp, "bufsize", &size);
        for (i = 0; i < max_ncpus; i++) {
            dtrace_bufdesc_t *buf;

            if (dt_get_buf(dtp, i, &buf) != 0)
                return (-1);
            if (buf != NULL) {
                if (first_timestamp == 0)
                    first_timestamp = buf->dtbd_timestamp;
                assert(buf->dtbd_timestamp >= first_timestamp);

                dt_pq_insert(dtp->dt_bufq, buf);
                drops[i] = buf->dtbd_drops;
                buf->dtbd_drops = 0;
            }
        }
        /* Consume records. */
        for (;;) {
            dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq);
            uint64_t timestamp;

            if (buf == NULL)
                break;

            timestamp = dt_buf_oldest(buf, dtp);
            assert(timestamp >= dtp->dt_last_timestamp);
            dtp->dt_last_timestamp = timestamp;

            if (timestamp == buf->dtbd_timestamp) {
                /*
                 * We've reached the end of the time covered
                 * by this buffer.  If this is the oldest
                 * buffer, we must do another pass
                 * to retrieve more data.
                 */
                dt_put_buf(dtp, buf);
                if (timestamp == first_timestamp &&
                    !dtp->dt_stopped)
                    break;
                continue;
            }

            if ((rval = dt_consume_cpu(dtp, fp,
                buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0)
                return (rval);
            dt_pq_insert(dtp->dt_bufq, buf);
        }
        /* Consume drops. */
        for (i = 0; i < max_ncpus; i++) {
            if (drops[i] != 0) {
                int error = dt_handle_cpudrop(dtp, i,
                    DTRACEDROP_PRINCIPAL, drops[i]);
                if (error != 0)
                    return (error);
            }
        }
        /*
         * Reduce memory usage by re-allocating smaller buffers
         * for the "remnants".
         */
        while ((buf = dt_pq_walk(dtp->dt_bufq, &cookie)) != NULL)
            dt_realloc_buf(dtp, buf, buf->dtbd_size);
    }

    return (0);
}
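
/*
 * Illustrative sketch (not from the original libdtrace source): the
 * oldest-first merge described in the temporal-ordering comment above,
 * reduced to a linear scan over per-CPU "buffers" that each expose the
 * timestamp of their next unconsumed record.  All of the names here
 * (struct toy_buf, oldest_buf, merge_consume, ...) are hypothetical;
 * libdtrace itself uses its dt_pq_* priority queue with dt_buf_oldest()
 * as the ordering function rather than a linear scan.
 */
#include <stdint.h>
#include <stddef.h>

struct toy_buf {
    const uint64_t *tb_stamps;  /* sorted record timestamps */
    size_t tb_nrecs;            /* number of records in this buffer */
    size_t tb_next;             /* index of the next unconsumed record */
};

/* Return the buffer whose next record is oldest, or NULL if all are empty. */
static struct toy_buf *
oldest_buf(struct toy_buf *bufs, size_t nbufs)
{
    struct toy_buf *best = NULL;
    size_t i;

    for (i = 0; i < nbufs; i++) {
        struct toy_buf *b = &bufs[i];

        if (b->tb_next >= b->tb_nrecs)
            continue;
        if (best == NULL ||
            b->tb_stamps[b->tb_next] < best->tb_stamps[best->tb_next])
            best = b;
    }
    return (best);
}

/* Consume all records across buffers in global timestamp order. */
static void
merge_consume(struct toy_buf *bufs, size_t nbufs,
    void (*consume)(uint64_t, void *), void *arg)
{
    struct toy_buf *b;

    while ((b = oldest_buf(bufs, nbufs)) != NULL)
        consume(b->tb_stamps[b->tb_next++], arg);
}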