1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
28 * Copyright (c) 2012 by Delphix. All rights reserved.
29 */
30
31 #include <stdlib.h>
32 #include <strings.h>
33 #include <errno.h>
34 #include <unistd.h>
35 #include <limits.h>
36 #include <assert.h>
37 #include <ctype.h>
38 #include <alloca.h>
39 #include <dt_impl.h>
40 #include <dt_pq.h>
41
42 #define DT_MASK_LO 0x00000000FFFFFFFFULL
43
44 /*
45 * We declare this here because (1) we need it and (2) we want to avoid a
46 * dependency on libm in libdtrace.
47 */
48 static long double
49 dt_fabsl(long double x)
50 {
51 if (x < 0)
52 return (-x);
53
54 return (x);
55 }
56
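/*
 * Return the number of characters needed to print "val" in decimal -- never
 * fewer than four, with one more for the sign when the value is negative.
 */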
57 static int
58 dt_ndigits(long long val)
59 {
60 int rval = 1;
61 long long cmp = 10;
62
63 if (val < 0) {
64 val = val == INT64_MIN ? INT64_MAX : -val;
65 rval++;
66 }
67
68 while (val > cmp && cmp > 0) {
69 rval++;
70 cmp *= 10;
71 }
72
73 return (rval < 4 ? 4 : rval);
74 }
75
76 /*
77 * 128-bit arithmetic functions needed to support the stddev() aggregating
78 * action.
79 */
80 static int
81 dt_gt_128(uint64_t *a, uint64_t *b)
82 {
83 return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0]));
84 }
85
86 static int
87 dt_ge_128(uint64_t *a, uint64_t *b)
88 {
89 return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0]));
90 }
91
92 static int
93 dt_le_128(uint64_t *a, uint64_t *b)
94 {
95 return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]));
96 }
97
98 /*
99 * Shift the 128-bit value in a by b. If b is positive, shift left.
100 * If b is negative, shift right.
101 */
102 static void
103 dt_shift_128(uint64_t *a, int b)
104 {
105 uint64_t mask;
106
107 if (b == 0)
108 return;
109
110 if (b < 0) {
111 b = -b;
112 if (b >= 64) {
113 a[0] = a[1] >> (b - 64);
114 a[1] = 0;
115 } else {
116 a[0] >>= b;
117			mask = 1ULL << (64 - b);
118 mask -= 1;
119 a[0] |= ((a[1] & mask) << (64 - b));
120 a[1] >>= b;
121 }
122 } else {
123 if (b >= 64) {
124 a[1] = a[0] << (b - 64);
125 a[0] = 0;
126 } else {
127 a[1] <<= b;
128 mask = a[0] >> (64 - b);
129 a[1] |= mask;
130 a[0] <<= b;
131 }
132 }
133 }
134
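/*
 * Return the position of the highest bit set in the 128-bit value "a"
 * (that is, floor(log2(a))); a value of 0 or 1 yields 0.
 */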
135 static int
136 dt_nbits_128(uint64_t *a)
137 {
138 int nbits = 0;
139 uint64_t tmp[2];
140 uint64_t zero[2] = { 0, 0 };
141
142 tmp[0] = a[0];
143 tmp[1] = a[1];
144
145 dt_shift_128(tmp, -1);
146 while (dt_gt_128(tmp, zero)) {
147 dt_shift_128(tmp, -1);
148 nbits++;
149 }
150
151 return (nbits);
152 }
153
154 static void
155 dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference)
156 {
157 uint64_t result[2];
158
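	/*
	 * If the low-order subtraction underflows (wraps around), borrow
	 * one from the high-order word.
	 */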
159 result[0] = minuend[0] - subtrahend[0];
160 result[1] = minuend[1] - subtrahend[1] -
161 (minuend[0] < subtrahend[0] ? 1 : 0);
162
163 difference[0] = result[0];
164 difference[1] = result[1];
165 }
166
167 static void
168 dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
169 {
170 uint64_t result[2];
171
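	/*
	 * If the low-order addition wrapped around, its result is smaller
	 * than either addend; that wraparound is the carry into the
	 * high-order word.
	 */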
172 result[0] = addend1[0] + addend2[0];
173 result[1] = addend1[1] + addend2[1] +
174 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
175
176 sum[0] = result[0];
177 sum[1] = result[1];
178 }
179
180 /*
181 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
182 * use native multiplication on those, and then re-combine into the
183 * resulting 128-bit value.
184 *
185 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
186 * hi1 * hi2 << 64 +
187 * hi1 * lo2 << 32 +
188 * hi2 * lo1 << 32 +
189 * lo1 * lo2
190 */
191 static void
192 dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
193 {
194 uint64_t hi1, hi2, lo1, lo2;
195 uint64_t tmp[2];
196
197 hi1 = factor1 >> 32;
198 hi2 = factor2 >> 32;
199
200 lo1 = factor1 & DT_MASK_LO;
201 lo2 = factor2 & DT_MASK_LO;
202
203 product[0] = lo1 * lo2;
204 product[1] = hi1 * hi2;
205
206 tmp[0] = hi1 * lo2;
207 tmp[1] = 0;
208 dt_shift_128(tmp, 32);
209 dt_add_128(product, tmp, product);
210
211 tmp[0] = hi2 * lo1;
212 tmp[1] = 0;
213 dt_shift_128(tmp, 32);
214 dt_add_128(product, tmp, product);
215 }
216
217 /*
218 * This is long-hand division.
219 *
220 * We initialize subtrahend by shifting divisor left as far as possible.  We
221 * loop, comparing subtrahend to the remaining dividend: if subtrahend is no
222 * larger, we subtract it and set the appropriate bit in the result.  We then
223 * shift subtrahend right by one bit for the next comparison.
224 */
225 static void
226 dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient)
227 {
228 uint64_t result[2] = { 0, 0 };
229 uint64_t remainder[2];
230 uint64_t subtrahend[2];
231 uint64_t divisor_128[2];
232 uint64_t mask[2] = { 1, 0 };
233 int log = 0;
234
235 assert(divisor != 0);
236
237 divisor_128[0] = divisor;
238 divisor_128[1] = 0;
239
240 remainder[0] = dividend[0];
241 remainder[1] = dividend[1];
242
243 subtrahend[0] = divisor;
244 subtrahend[1] = 0;
245
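	/*
	 * Count the bits in the divisor so that we know how far left to
	 * shift the initial subtrahend (and the quotient bit mask along
	 * with it).
	 */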
246 while (divisor > 0) {
247 log++;
248 divisor >>= 1;
249 }
250
251 dt_shift_128(subtrahend, 128 - log);
252 dt_shift_128(mask, 128 - log);
253
254 while (dt_ge_128(remainder, divisor_128)) {
255 if (dt_ge_128(remainder, subtrahend)) {
256 dt_subtract_128(remainder, subtrahend, remainder);
257 result[0] |= mask[0];
258 result[1] |= mask[1];
259 }
260
261 dt_shift_128(subtrahend, -1);
262 dt_shift_128(mask, -1);
263 }
264
265 quotient[0] = result[0];
266 quotient[1] = result[1];
267 }
268
269 /*
270 * This is the long-hand method of calculating a square root.
271 * The algorithm is as follows:
272 *
273 * 1. Group the digits by 2 from the right.
274 * 2. Over the leftmost group, find the largest single-digit number
275 * whose square is less than that group.
276 * 3. Subtract the result of the previous step (2 or 4, depending) and
277 * bring down the next two-digit group.
278 * 4. For the result R we have so far, find the largest single-digit number
279 * x such that 2 * R * 10 * x + x^2 is less than the result from step 3.
280 * (Note that this is doubling R and performing a decimal left-shift by 1
281 * and searching for the appropriate decimal to fill the one's place.)
282 * The value x is the next digit in the square root.
283 * Repeat steps 3 and 4 until the desired precision is reached. (We're
284 * dealing with integers, so the above is sufficient.)
285 *
286 * In decimal, the square root of 582,734 would be calculated as so:
287 *
288 * __7__6__3
289 * | 58 27 34
290 * -49 (7^2 == 49 => 7 is the first digit in the square root)
291 * --
292 * 9 27 (Subtract and bring down the next group.)
293 * 146 8 76 (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in
294 * ----- the square root)
295 * 51 34 (Subtract and bring down the next group.)
296 * 1523 45 69 (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in
297 * ----- the square root)
298 * 5 65 (remainder)
299 *
300 * The above algorithm applies similarly in binary, but note that the
301 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a
302 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the
303 * preceding difference?
304 *
305 * In binary, the square root of 11011011 would be calculated as so:
306 *
307 * __1__1__1__0
308 * | 11 01 10 11
309 * 01 (0 << 2 + 1 == 1 < 11 => this bit is 1)
310 * --
311 * 10 01 10 11
312 * 101 1 01 (1 << 2 + 1 == 101 < 1001 => next bit is 1)
313 * -----
314 * 1 00 10 11
315 * 1101 11 01 (11 << 2 + 1 == 1101 < 10010 => next bit is 1)
316 * -------
317 * 1 01 11
318 * 11101 1 11 01 (111 << 2 + 1 == 11101 > 10111 => last bit is 0)
319 *
320 */
321 static uint64_t
322 dt_sqrt_128(uint64_t *square)
323 {
324 uint64_t result[2] = { 0, 0 };
325 uint64_t diff[2] = { 0, 0 };
326 uint64_t one[2] = { 1, 0 };
327 uint64_t next_pair[2];
328 uint64_t next_try[2];
329 uint64_t bit_pairs, pair_shift;
330 int i;
331
332 bit_pairs = dt_nbits_128(square) / 2;
333 pair_shift = bit_pairs * 2;
334
335 for (i = 0; i <= bit_pairs; i++) {
336 /*
337 * Bring down the next pair of bits.
338 */
339 next_pair[0] = square[0];
340 next_pair[1] = square[1];
341 dt_shift_128(next_pair, -pair_shift);
342 next_pair[0] &= 0x3;
343 next_pair[1] = 0;
344
345 dt_shift_128(diff, 2);
346 dt_add_128(diff, next_pair, diff);
347
348 /*
349 * next_try = R << 2 + 1
350 */
351 next_try[0] = result[0];
352 next_try[1] = result[1];
353 dt_shift_128(next_try, 2);
354 dt_add_128(next_try, one, next_try);
355
356 if (dt_le_128(next_try, diff)) {
357 dt_subtract_128(diff, next_try, diff);
358 dt_shift_128(result, 1);
359 dt_add_128(result, one, result);
360 } else {
361 dt_shift_128(result, 1);
362 }
363
364 pair_shift -= 2;
365 }
366
367 assert(result[1] == 0);
368
369 return (result[0]);
370 }
371
372 uint64_t
373 dt_stddev(uint64_t *data, uint64_t normal)
374 {
375 uint64_t avg_of_squares[2];
376 uint64_t square_of_avg[2];
377 int64_t norm_avg;
378 uint64_t diff[2];
379
380 /*
381 * The standard approximation for standard deviation is
382 * sqrt(average(x**2) - average(x)**2), i.e. the square root
383 * of the average of the squares minus the square of the average.
384 */
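	/*
	 * As used here, data[0] is the count, data[1] is the sum, and
	 * data[2]..data[3] hold the 128-bit sum of squares (low word first);
	 * the divisions below turn those into the two averages.
	 */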
385 dt_divide_128(data + 2, normal, avg_of_squares);
386 dt_divide_128(avg_of_squares, data[0], avg_of_squares);
387
388 norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0];
389
390 if (norm_avg < 0)
391 norm_avg = -norm_avg;
392
393 dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg);
394
395 dt_subtract_128(avg_of_squares, square_of_avg, diff);
396
397 return (dt_sqrt_128(diff));
398 }
399
400 static int
401 dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last,
402 dtrace_bufdesc_t *buf, size_t offs)
403 {
404 dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd;
405 dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd;
406 char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub;
407 dtrace_flowkind_t flow = DTRACEFLOW_NONE;
408 const char *str = NULL;
409 static const char *e_str[2] = { " -> ", " => " };
410 static const char *r_str[2] = { " <- ", " <= " };
411 static const char *ent = "entry", *ret = "return";
412 static int entlen = 0, retlen = 0;
413 dtrace_epid_t next, id = epd->dtepd_epid;
414 int rval;
415
416 if (entlen == 0) {
417 assert(retlen == 0);
418 entlen = strlen(ent);
419 retlen = strlen(ret);
420 }
421
422 /*
423 * If the name of the probe is "entry" or ends with "-entry", we
424 * treat it as an entry; if it is "return" or ends with "-return",
425 * we treat it as a return. (This allows application-provided probes
426 * like "method-entry" or "function-entry" to participate in flow
427 * indentation -- without accidentally misinterpreting popular probe
428 * names like "carpentry", "gentry" or "Coventry".)
429 */
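	/*
	 * The second entry in e_str/r_str -- the "=>" and "<=" arrows -- is
	 * reserved for syscall probes; that is what indexing by the result
	 * of the strcmp() against "syscall" below selects.
	 */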
430 if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' &&
431 (sub == n || sub[-1] == '-')) {
432 flow = DTRACEFLOW_ENTRY;
433 str = e_str[strcmp(p, "syscall") == 0];
434 } else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' &&
435 (sub == n || sub[-1] == '-')) {
436 flow = DTRACEFLOW_RETURN;
437 str = r_str[strcmp(p, "syscall") == 0];
438 }
439
440 /*
441 * If we're going to indent this, we need to check the ID of our last
442 * call. If we're looking at the same probe ID but a different EPID,
443 * we _don't_ want to indent. (Yes, there are some minor holes in
444 * this scheme -- it's a heuristic.)
445 */
446 if (flow == DTRACEFLOW_ENTRY) {
447 if ((last != DTRACE_EPIDNONE && id != last &&
448 pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id))
449 flow = DTRACEFLOW_NONE;
450 }
451
452 /*
453 * If we're going to unindent this, it's more difficult to see if
454 * we don't actually want to unindent it -- we need to look at the
455 * _next_ EPID.
456 */
457 if (flow == DTRACEFLOW_RETURN) {
458 offs += epd->dtepd_size;
459
460 do {
461 if (offs >= buf->dtbd_size)
462 goto out;
463
464 next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs);
465
466 if (next == DTRACE_EPIDNONE)
467 offs += sizeof (id);
468 } while (next == DTRACE_EPIDNONE);
469
470 if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0)
471 return (rval);
472
473 if (next != id && npd->dtpd_id == pd->dtpd_id)
474 flow = DTRACEFLOW_NONE;
475 }
476
477 out:
478 if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) {
479 data->dtpda_prefix = str;
480 } else {
481 data->dtpda_prefix = "| ";
482 }
483
484 if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0)
485 data->dtpda_indent -= 2;
486
487 data->dtpda_flow = flow;
488
489 return (0);
490 }
491
492 static int
493 dt_nullprobe()
494 {
495 return (DTRACE_CONSUME_THIS);
496 }
497
498 static int
499 dt_nullrec()
500 {
501 return (DTRACE_CONSUME_NEXT);
502 }
503
504 static void
505 dt_quantize_total(dtrace_hdl_t *dtp, int64_t datum, long double *total)
506 {
507 long double val = dt_fabsl((long double)datum);
508
509 if (dtp->dt_options[DTRACEOPT_AGGZOOM] == DTRACEOPT_UNSET) {
510 *total += val;
511 return;
512 }
513
514 /*
515 * If we're zooming in on an aggregation, we want the height of the
516 * highest value to be approximately 95% of total bar height -- so we
517 * adjust up by the reciprocal of DTRACE_AGGZOOM_MAX when comparing to
518 * our highest value.
519 */
520 val *= 1 / DTRACE_AGGZOOM_MAX;
521
522 if (*total < val)
523 *total = val;
524 }
525
526 static int
527 dt_print_quanthdr(dtrace_hdl_t *dtp, FILE *fp, int width)
528 {
529 return (dt_printf(dtp, fp, "\n%*s %41s %-9s\n",
530 width ? width : 16, width ? "key" : "value",
531 "------------- Distribution -------------", "count"));
532 }
533
534 static int
535 dt_print_quanthdr_packed(dtrace_hdl_t *dtp, FILE *fp, int width,
536 const dtrace_aggdata_t *aggdata, dtrace_actkind_t action)
537 {
538 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin;
539 int minwidth, maxwidth, i;
540
541 assert(action == DTRACEAGG_QUANTIZE || action == DTRACEAGG_LQUANTIZE);
542
543 if (action == DTRACEAGG_QUANTIZE) {
544 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET)
545 min--;
546
547 if (max < DTRACE_QUANTIZE_NBUCKETS - 1)
548 max++;
549
550 minwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(min));
551 maxwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(max));
552 } else {
553 maxwidth = 8;
554 minwidth = maxwidth - 1;
555 max++;
556 }
557
558 if (dt_printf(dtp, fp, "\n%*s %*s .",
559 width, width > 0 ? "key" : "", minwidth, "min") < 0)
560 return (-1);
561
562 for (i = min; i <= max; i++) {
563 if (dt_printf(dtp, fp, "-") < 0)
564 return (-1);
565 }
566
567 return (dt_printf(dtp, fp, ". %*s | count\n", -maxwidth, "max"));
568 }
569
570 /*
571 * We use a subset of the Unicode Block Elements (U+2588 through U+258F,
572 * inclusive) to represent aggregations via UTF-8 -- which are expressed via
573 * 3-byte UTF-8 sequences.
574 */
575 #define DTRACE_AGGUTF8_FULL 0x2588
576 #define DTRACE_AGGUTF8_BASE 0x258f
577 #define DTRACE_AGGUTF8_LEVELS 8
578
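/*
 * These macros implement the standard three-byte UTF-8 encoding for code
 * points in the range U+0800 through U+FFFF; for example, U+2588 (FULL
 * BLOCK) encodes as the byte sequence 0xe2 0x96 0x88.
 */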
579 #define DTRACE_AGGUTF8_BYTE0(val) (0xe0 | ((val) >> 12))
580 #define DTRACE_AGGUTF8_BYTE1(val) (0x80 | (((val) >> 6) & 0x3f))
581 #define DTRACE_AGGUTF8_BYTE2(val) (0x80 | ((val) & 0x3f))
582
583 static int
584 dt_print_quantline_utf8(dtrace_hdl_t *dtp, FILE *fp, int64_t val,
585 uint64_t normal, long double total)
586 {
587 uint_t len = 40, i, whole, partial;
588 long double f = (dt_fabsl((long double)val) * len) / total;
589	const char *spaces = "                                        ";
590
591 whole = (uint_t)f;
592 partial = (uint_t)((f - (long double)(uint_t)f) *
593 (long double)DTRACE_AGGUTF8_LEVELS);
594
595 if (dt_printf(dtp, fp, "|") < 0)
596 return (-1);
597
598 for (i = 0; i < whole; i++) {
599 if (dt_printf(dtp, fp, "%c%c%c",
600 DTRACE_AGGUTF8_BYTE0(DTRACE_AGGUTF8_FULL),
601 DTRACE_AGGUTF8_BYTE1(DTRACE_AGGUTF8_FULL),
602 DTRACE_AGGUTF8_BYTE2(DTRACE_AGGUTF8_FULL)) < 0)
603 return (-1);
604 }
605
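	/*
	 * A non-zero "partial" counts eighths of a block; map it onto the
	 * left one-eighth through seven-eighths block elements (U+258F down
	 * to U+2589), which sit just below the full block at U+2588.
	 */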
606 if (partial != 0) {
607 partial = DTRACE_AGGUTF8_BASE - (partial - 1);
608
609 if (dt_printf(dtp, fp, "%c%c%c",
610 DTRACE_AGGUTF8_BYTE0(partial),
611 DTRACE_AGGUTF8_BYTE1(partial),
612 DTRACE_AGGUTF8_BYTE2(partial)) < 0)
613 return (-1);
614
615 i++;
616 }
617
618 return (dt_printf(dtp, fp, "%s %-9lld\n", spaces + i,
619 (long long)val / normal));
620 }
621
622 static int
623 dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val,
624 uint64_t normal, long double total, char positives, char negatives)
625 {
626 long double f;
627 uint_t depth, len = 40;
628
629 const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@";
630	const char *spaces = "                                        ";
631
632 assert(strlen(ats) == len && strlen(spaces) == len);
633 assert(!(total == 0 && (positives || negatives)));
634 assert(!(val < 0 && !negatives));
635 assert(!(val > 0 && !positives));
636 assert(!(val != 0 && total == 0));
637
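	/*
	 * In the ASCII cases below, "depth" is the number of columns of the
	 * bar field that this value fills: starting depth characters from
	 * the end of "ats" yields that many '@'s, and starting depth
	 * characters into "spaces" pads out the remainder of the field.
	 */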
638 if (!negatives) {
639 if (positives) {
640 if (dtp->dt_encoding == DT_ENCODING_UTF8) {
641 return (dt_print_quantline_utf8(dtp, fp, val,
642 normal, total));
643 }
644
645 f = (dt_fabsl((long double)val) * len) / total;
646 depth = (uint_t)(f + 0.5);
647 } else {
648 depth = 0;
649 }
650
651 return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth,
652 spaces + depth, (long long)val / normal));
653 }
654
655 if (!positives) {
656 f = (dt_fabsl((long double)val) * len) / total;
657 depth = (uint_t)(f + 0.5);
658
659 return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth,
660 ats + len - depth, (long long)val / normal));
661 }
662
663 /*
664 * If we're here, we have both positive and negative bucket values.
665 * To express this graphically, we're going to generate both positive
666 * and negative bars separated by a centerline. These bars are half
667 * the size of normal quantize()/lquantize() bars, so we divide the
668 * length in half before calculating the bar length.
669 */
670 len /= 2;
671 ats = &ats[len];
672 spaces = &spaces[len];
673
674 f = (dt_fabsl((long double)val) * len) / total;
675 depth = (uint_t)(f + 0.5);
676
677 if (val <= 0) {
678 return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth,
679 ats + len - depth, len, "", (long long)val / normal));
680 } else {
681 return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "",
682 ats + len - depth, spaces + depth,
683 (long long)val / normal));
684 }
685 }
686
687 /*
688 * As with UTF-8 printing of aggregations, we use a subset of the Unicode
689 * Block Elements (U+2581 through U+2588, inclusive) to represent our packed
690 * aggregation.
691 */
692 #define DTRACE_AGGPACK_BASE 0x2581
693 #define DTRACE_AGGPACK_LEVELS 8
694
695 static int
696 dt_print_packed(dtrace_hdl_t *dtp, FILE *fp,
697 long double datum, long double total)
698 {
699 static boolean_t utf8_checked = B_FALSE;
700 static boolean_t utf8;
701 char *ascii = "__xxxxXX";
702 char *neg = "vvvvVV";
703 unsigned int len;
704 long double val;
705
706 if (!utf8_checked) {
707 char *term;
708
709		/*
710		 * We want to determine if we can reasonably emit UTF-8 for our packed
711		 * aggregation.  To do this, we check for terminals known to be too
712		 * primitive to display UTF-8 and avoid emitting it on those.
713		 */
714 utf8_checked = B_TRUE;
715
716 if (dtp->dt_encoding == DT_ENCODING_ASCII)
717 utf8 = B_FALSE;
718 else if (dtp->dt_encoding == DT_ENCODING_UTF8)
719 utf8 = B_TRUE;
720		else if ((term = getenv("TERM")) != NULL &&
721		    (strcmp(term, "sun") == 0 ||
722		    strcmp(term, "sun-color") == 0 ||
723		    strcmp(term, "dumb") == 0))
724 utf8 = B_FALSE;
725 else
726 utf8 = B_TRUE;
727 }
728
729 if (datum == 0)
730 return (dt_printf(dtp, fp, " "));
731
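	/*
	 * Without UTF-8, each bucket is rendered as a single character from
	 * an intensity ramp -- "neg" for negative values, "ascii" for
	 * positive ones -- indexed by the bucket's share of the total.
	 */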
732 if (datum < 0) {
733 len = strlen(neg);
734 val = dt_fabsl(datum * (len - 1)) / total;
735 return (dt_printf(dtp, fp, "%c", neg[(uint_t)(val + 0.5)]));
736 }
737
738 if (utf8) {
739 int block = DTRACE_AGGPACK_BASE + (unsigned int)(((datum *
740 (DTRACE_AGGPACK_LEVELS - 1)) / total) + 0.5);
741
742 return (dt_printf(dtp, fp, "%c%c%c",
743 DTRACE_AGGUTF8_BYTE0(block),
744 DTRACE_AGGUTF8_BYTE1(block),
745 DTRACE_AGGUTF8_BYTE2(block)));
746 }
747
748 len = strlen(ascii);
749 val = (datum * (len - 1)) / total;
750 return (dt_printf(dtp, fp, "%c", ascii[(uint_t)(val + 0.5)]));
751 }
752
753 int
754 dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
755 size_t size, uint64_t normal)
756 {
757 const int64_t *data = addr;
758 int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1;
759 long double total = 0;
760 char positives = 0, negatives = 0;
761
762 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
763 return (dt_set_errno(dtp, EDT_DMISMATCH));
764
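	/*
	 * Trim the leading and trailing runs of empty buckets, leaving one
	 * empty bucket on each side of the data so the printed range is
	 * bracketed by zero counts.
	 */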
765 while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0)
766 first_bin++;
767
768 if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) {
769 /*
770 * There isn't any data. This is possible if the aggregation
771 * has been clear()'d or if negative increment values have been
772 * used. Regardless, we'll print the buckets around 0.
773 */
774 first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1;
775 last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1;
776 } else {
777 if (first_bin > 0)
778 first_bin--;
779
780 while (last_bin > 0 && data[last_bin] == 0)
781 last_bin--;
782
783 if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1)
784 last_bin++;
785 }
786
787 for (i = first_bin; i <= last_bin; i++) {
788 positives |= (data[i] > 0);
789 negatives |= (data[i] < 0);
790 dt_quantize_total(dtp, data[i], &total);
791 }
792
793 if (dt_print_quanthdr(dtp, fp, 0) < 0)
794 return (-1);
795
796 for (i = first_bin; i <= last_bin; i++) {
797 if (dt_printf(dtp, fp, "%16lld ",
798 (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0)
799 return (-1);
800
801 if (dt_print_quantline(dtp, fp, data[i], normal, total,
802 positives, negatives) < 0)
803 return (-1);
804 }
805
806 return (0);
807 }
808
809 int
810 dt_print_quantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
811 size_t size, const dtrace_aggdata_t *aggdata)
812 {
813 const int64_t *data = addr;
814 long double total = 0, count = 0;
815 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin, i;
816 int64_t minval, maxval;
817
818 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
819 return (dt_set_errno(dtp, EDT_DMISMATCH));
820
821 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET)
822 min--;
823
824 if (max < DTRACE_QUANTIZE_NBUCKETS - 1)
825 max++;
826
827 minval = DTRACE_QUANTIZE_BUCKETVAL(min);
828 maxval = DTRACE_QUANTIZE_BUCKETVAL(max);
829
830 if (dt_printf(dtp, fp, " %*lld :", dt_ndigits(minval),
831 (long long)minval) < 0)
832 return (-1);
833
834 for (i = min; i <= max; i++) {
835 dt_quantize_total(dtp, data[i], &total);
836 count += data[i];
837 }
838
839 for (i = min; i <= max; i++) {
840 if (dt_print_packed(dtp, fp, data[i], total) < 0)
841 return (-1);
842 }
843
844 if (dt_printf(dtp, fp, ": %*lld | %lld\n",
845 -dt_ndigits(maxval), (long long)maxval, (long long)count) < 0)
846 return (-1);
847
848 return (0);
849 }
850
851 int
852 dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
853 size_t size, uint64_t normal)
854 {
855 const int64_t *data = addr;
856 int i, first_bin, last_bin, base;
857 uint64_t arg;
858 long double total = 0;
859 uint16_t step, levels;
860 char positives = 0, negatives = 0;
861
862 if (size < sizeof (uint64_t))
863 return (dt_set_errno(dtp, EDT_DMISMATCH));
864
865 arg = *data++;
866 size -= sizeof (uint64_t);
867
868 base = DTRACE_LQUANTIZE_BASE(arg);
869 step = DTRACE_LQUANTIZE_STEP(arg);
870 levels = DTRACE_LQUANTIZE_LEVELS(arg);
871
872 first_bin = 0;
873 last_bin = levels + 1;
874
875 if (size != sizeof (uint64_t) * (levels + 2))
876 return (dt_set_errno(dtp, EDT_DMISMATCH));
877
878 while (first_bin <= levels + 1 && data[first_bin] == 0)
879 first_bin++;
880
881 if (first_bin > levels + 1) {
882 first_bin = 0;
883 last_bin = 2;
884 } else {
885 if (first_bin > 0)
886 first_bin--;
887
888 while (last_bin > 0 && data[last_bin] == 0)
889 last_bin--;
890
891 if (last_bin < levels + 1)
892 last_bin++;
893 }
894
895 for (i = first_bin; i <= last_bin; i++) {
896 positives |= (data[i] > 0);
897 negatives |= (data[i] < 0);
898 dt_quantize_total(dtp, data[i], &total);
899 }
900
901 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
902 "------------- Distribution -------------", "count") < 0)
903 return (-1);
904
905 for (i = first_bin; i <= last_bin; i++) {
906 char c[32];
907 int err;
908
909 if (i == 0) {
910 (void) snprintf(c, sizeof (c), "< %d", base);
911 err = dt_printf(dtp, fp, "%16s ", c);
912 } else if (i == levels + 1) {
913 (void) snprintf(c, sizeof (c), ">= %d",
914 base + (levels * step));
915 err = dt_printf(dtp, fp, "%16s ", c);
916 } else {
917 err = dt_printf(dtp, fp, "%16d ",
918 base + (i - 1) * step);
919 }
920
921 if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal,
922 total, positives, negatives) < 0)
923 return (-1);
924 }
925
926 return (0);
927 }
928
929 /*ARGSUSED*/
930 int
931 dt_print_lquantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
932 size_t size, const dtrace_aggdata_t *aggdata)
933 {
934 const int64_t *data = addr;
935 long double total = 0, count = 0;
936 int min, max, base, err;
937 uint64_t arg;
938 uint16_t step, levels;
939 char c[32];
940 unsigned int i;
941
942 if (size < sizeof (uint64_t))
943 return (dt_set_errno(dtp, EDT_DMISMATCH));
944
945 arg = *data++;
946 size -= sizeof (uint64_t);
947
948 base = DTRACE_LQUANTIZE_BASE(arg);
949 step = DTRACE_LQUANTIZE_STEP(arg);
950 levels = DTRACE_LQUANTIZE_LEVELS(arg);
951
952 if (size != sizeof (uint64_t) * (levels + 2))
953 return (dt_set_errno(dtp, EDT_DMISMATCH));
954
955 min = 0;
956 max = levels + 1;
957
958 if (min == 0) {
959 (void) snprintf(c, sizeof (c), "< %d", base);
960 err = dt_printf(dtp, fp, "%8s :", c);
961 } else {
962 err = dt_printf(dtp, fp, "%8d :", base + (min - 1) * step);
963 }
964
965 if (err < 0)
966 return (-1);
967
968 for (i = min; i <= max; i++) {
969 dt_quantize_total(dtp, data[i], &total);
970 count += data[i];
971 }
972
973 for (i = min; i <= max; i++) {
974 if (dt_print_packed(dtp, fp, data[i], total) < 0)
975 return (-1);
976 }
977
978 (void) snprintf(c, sizeof (c), ">= %d", base + (levels * step));
979 return (dt_printf(dtp, fp, ": %-8s | %lld\n", c, (long long)count));
980 }
981
982 int
983 dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
984 size_t size, uint64_t normal)
985 {
986 int i, first_bin, last_bin, bin = 1, order, levels;
987 uint16_t factor, low, high, nsteps;
988 const int64_t *data = addr;
989 int64_t value = 1, next, step;
990 char positives = 0, negatives = 0;
991 long double total = 0;
992 uint64_t arg;
993 char c[32];
994
995 if (size < sizeof (uint64_t))
996 return (dt_set_errno(dtp, EDT_DMISMATCH));
997
998 arg = *data++;
999 size -= sizeof (uint64_t);
1000
1001 factor = DTRACE_LLQUANTIZE_FACTOR(arg);
1002 low = DTRACE_LLQUANTIZE_LOW(arg);
1003 high = DTRACE_LLQUANTIZE_HIGH(arg);
1004 nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
1005
1006 /*
1007 * We don't expect to be handed invalid llquantize() parameters here,
1008 * but sanity check them (to a degree) nonetheless.
1009 */
1010 if (size > INT32_MAX || factor < 2 || low >= high ||
1011 nsteps == 0 || factor > nsteps)
1012 return (dt_set_errno(dtp, EDT_DMISMATCH));
1013
1014 levels = (int)size / sizeof (uint64_t);
1015
1016 first_bin = 0;
1017 last_bin = levels - 1;
1018
1019 while (first_bin < levels && data[first_bin] == 0)
1020 first_bin++;
1021
1022 if (first_bin == levels) {
1023 first_bin = 0;
1024 last_bin = 1;
1025 } else {
1026 if (first_bin > 0)
1027 first_bin--;
1028
1029 while (last_bin > 0 && data[last_bin] == 0)
1030 last_bin--;
1031
1032 if (last_bin < levels - 1)
1033 last_bin++;
1034 }
1035
1036 for (i = first_bin; i <= last_bin; i++) {
1037 positives |= (data[i] > 0);
1038 negatives |= (data[i] < 0);
1039 dt_quantize_total(dtp, data[i], &total);
1040 }
1041
1042 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
1043 "------------- Distribution -------------", "count") < 0)
1044 return (-1);
1045
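	/*
	 * Seed "value" at factor^low, the lower bound of the first
	 * logarithmic range.  Within each range, bucket boundaries advance
	 * by "step": the range's upper bound divided by nsteps, clamped to
	 * a minimum of 1.
	 */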
1046 for (order = 0; order < low; order++)
1047 value *= factor;
1048
1049 next = value * factor;
1050 step = next > nsteps ? next / nsteps : 1;
1051
1052 if (first_bin == 0) {
1053		(void) snprintf(c, sizeof (c), "< %lld", (long long)value);
1054
1055 if (dt_printf(dtp, fp, "%16s ", c) < 0)
1056 return (-1);
1057
1058 if (dt_print_quantline(dtp, fp, data[0], normal,
1059 total, positives, negatives) < 0)
1060 return (-1);
1061 }
1062
1063 while (order <= high) {
1064 if (bin >= first_bin && bin <= last_bin) {
1065 if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0)
1066 return (-1);
1067
1068 if (dt_print_quantline(dtp, fp, data[bin],
1069 normal, total, positives, negatives) < 0)
1070 return (-1);
1071 }
1072
1073 assert(value < next);
1074 bin++;
1075
1076 if ((value += step) != next)
1077 continue;
1078
1079 next = value * factor;
1080 step = next > nsteps ? next / nsteps : 1;
1081 order++;
1082 }
1083
1084 if (last_bin < bin)
1085 return (0);
1086
1087 assert(last_bin == bin);
1088	(void) snprintf(c, sizeof (c), ">= %lld", (long long)value);
1089
1090 if (dt_printf(dtp, fp, "%16s ", c) < 0)
1091 return (-1);
1092
1093 return (dt_print_quantline(dtp, fp, data[bin], normal,
1094 total, positives, negatives));
1095 }
1096
1097 /*ARGSUSED*/
1098 static int
1099 dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
1100 size_t size, uint64_t normal)
1101 {
1102 /* LINTED - alignment */
1103 int64_t *data = (int64_t *)addr;
1104
1105 return (dt_printf(dtp, fp, " %16lld", data[0] ?
1106 (long long)(data[1] / (int64_t)normal / data[0]) : 0));
1107 }
1108
1109 /*ARGSUSED*/
1110 static int
1111 dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
1112 size_t size, uint64_t normal)
1113 {
1114 /* LINTED - alignment */
1115 uint64_t *data = (uint64_t *)addr;
1116
1117 return (dt_printf(dtp, fp, " %16llu", data[0] ?
1118 (unsigned long long) dt_stddev(data, normal) : 0));
1119 }
1120
1121 /*ARGSUSED*/
1122 static int
1123 dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
1124 size_t nbytes, int width, int quiet, int forceraw)
1125 {
1126 /*
1127 * If the byte stream is a series of printable characters, followed by
1128	 * a terminating nul byte, we print it out as a string.  Otherwise, we
1129 * assume that it's something else and just print the bytes.
1130 */
1131 int i, j, margin = 5;
1132 char *c = (char *)addr;
1133
1134 if (nbytes == 0)
1135 return (0);
1136
1137 if (forceraw)
1138 goto raw;
1139
1140 if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET)
1141 goto raw;
1142
1143 for (i = 0; i < nbytes; i++) {
1144 /*
1145 * We define a "printable character" to be one for which
1146 * isprint(3C) returns non-zero, isspace(3C) returns non-zero,
1147 * or a character which is either backspace or the bell.
1148 * Backspace and the bell are regrettably special because
1149 * they fail the first two tests -- and yet they are entirely
1150 * printable. These are the only two control characters that
1151 * have meaning for the terminal and for which isprint(3C) and
1152 * isspace(3C) return 0.
1153 */
1154 if (isprint(c[i]) || isspace(c[i]) ||
1155 c[i] == '\b' || c[i] == '\a')
1156 continue;
1157
1158 if (c[i] == '\0' && i > 0) {
1159 /*
1160 * This looks like it might be a string. Before we
1161 * assume that it is indeed a string, check the
1162 * remainder of the byte range; if it contains
1163 * additional non-nul characters, we'll assume that
1164 * it's a binary stream that just happens to look like
1165 * a string, and we'll print out the individual bytes.
1166 */
1167 for (j = i + 1; j < nbytes; j++) {
1168 if (c[j] != '\0')
1169 break;
1170 }
1171
1172 if (j != nbytes)
1173 break;
1174
1175 if (quiet) {
1176 return (dt_printf(dtp, fp, "%s", c));
1177 } else {
1178 return (dt_printf(dtp, fp, " %s%*s",
1179 width < 0 ? " " : "", width, c));
1180 }
1181 }
1182
1183 break;
1184 }
1185
1186 if (i == nbytes) {
1187 /*
1188 * The byte range is all printable characters, but there is
1189 * no trailing nul byte. We'll assume that it's a string and
1190 * print it as such.
1191 */
1192 char *s = alloca(nbytes + 1);
1193 bcopy(c, s, nbytes);
1194 s[nbytes] = '\0';
1195 return (dt_printf(dtp, fp, " %-*s", width, s));
1196 }
1197
1198 raw:
1199 if (dt_printf(dtp, fp, "\n%*s ", margin, "") < 0)
1200 return (-1);
1201
1202 for (i = 0; i < 16; i++)
1203 if (dt_printf(dtp, fp, " %c", "0123456789abcdef"[i]) < 0)
1204 return (-1);
1205
1206 if (dt_printf(dtp, fp, " 0123456789abcdef\n") < 0)
1207 return (-1);
1208
1209
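	/*
	 * Each row of the dump shows the offset, up to sixteen bytes in hex,
	 * and the same bytes again with anything unprintable rendered as '.'.
	 */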
1210 for (i = 0; i < nbytes; i += 16) {
1211 if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0)
1212 return (-1);
1213
1214 for (j = i; j < i + 16 && j < nbytes; j++) {
1215 if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0)
1216 return (-1);
1217 }
1218
1219 while (j++ % 16) {
1220 if (dt_printf(dtp, fp, " ") < 0)
1221 return (-1);
1222 }
1223
1224 if (dt_printf(dtp, fp, " ") < 0)
1225 return (-1);
1226
1227 for (j = i; j < i + 16 && j < nbytes; j++) {
1228 if (dt_printf(dtp, fp, "%c",
1229 c[j] < ' ' || c[j] > '~' ? '.' : c[j]) < 0)
1230 return (-1);
1231 }
1232
1233 if (dt_printf(dtp, fp, "\n") < 0)
1234 return (-1);
1235 }
1236
1237 return (0);
1238 }
1239
1240 int
1241 dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
1242 caddr_t addr, int depth, int size)
1243 {
1244 dtrace_syminfo_t dts;
1245 GElf_Sym sym;
1246 int i, indent;
1247 char c[PATH_MAX * 2];
1248 uint64_t pc;
1249
1250 if (dt_printf(dtp, fp, "\n") < 0)
1251 return (-1);
1252
1253 if (format == NULL)
1254 format = "%s";
1255
1256 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
1257 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
1258 else
1259 indent = _dtrace_stkindent;
1260
1261 for (i = 0; i < depth; i++) {
1262 switch (size) {
1263 case sizeof (uint32_t):
1264 /* LINTED - alignment */
1265 pc = *((uint32_t *)addr);
1266 break;
1267
1268 case sizeof (uint64_t):
1269 /* LINTED - alignment */
1270 pc = *((uint64_t *)addr);
1271 break;
1272
1273 default:
1274 return (dt_set_errno(dtp, EDT_BADSTACKPC));
1275 }
1276
1277		if (pc == 0)
1278 break;
1279
1280 addr += size;
1281
1282 if (dt_printf(dtp, fp, "%*s", indent, "") < 0)
1283 return (-1);
1284
1285 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
1286 if (pc > sym.st_value) {
1287 (void) snprintf(c, sizeof (c), "%s`%s+0x%llx",
1288 dts.dts_object, dts.dts_name,
1289 pc - sym.st_value);
1290 } else {
1291 (void) snprintf(c, sizeof (c), "%s`%s",
1292 dts.dts_object, dts.dts_name);
1293 }
1294 } else {
1295 /*
1296 * We'll repeat the lookup, but this time we'll specify
1297 * a NULL GElf_Sym -- indicating that we're only
1298 * interested in the containing module.
1299 */
1300 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1301 (void) snprintf(c, sizeof (c), "%s`0x%llx",
1302 dts.dts_object, pc);
1303 } else {
1304 (void) snprintf(c, sizeof (c), "0x%llx", pc);
1305 }
1306 }
1307
1308 if (dt_printf(dtp, fp, format, c) < 0)
1309 return (-1);
1310
1311 if (dt_printf(dtp, fp, "\n") < 0)
1312 return (-1);
1313 }
1314
1315 return (0);
1316 }
1317
1318 int
1319 dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
1320 caddr_t addr, uint64_t arg)
1321 {
1322 /* LINTED - alignment */
1323 uint64_t *pc = (uint64_t *)addr;
1324 uint32_t depth = DTRACE_USTACK_NFRAMES(arg);
1325 uint32_t strsize = DTRACE_USTACK_STRSIZE(arg);
1326 const char *strbase = addr + (depth + 1) * sizeof (uint64_t);
1327 const char *str = strsize ? strbase : NULL;
1328 int err = 0;
1329
1330 char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2];
1331 struct ps_prochandle *P;
1332 GElf_Sym sym;
1333 int i, indent;
1334 pid_t pid;
1335
1336 if (depth == 0)
1337 return (0);
1338
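	/*
	 * The record is laid out as the pid, followed by "depth" program
	 * counter values and then (when strsize is non-zero) a table of
	 * NUL-terminated strings supplied by any ustack helper.
	 */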
1339 pid = (pid_t)*pc++;
1340
1341 if (dt_printf(dtp, fp, "\n") < 0)
1342 return (-1);
1343
1344 if (format == NULL)
1345 format = "%s";
1346
1347 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
1348 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
1349 else
1350 indent = _dtrace_stkindent;
1351
1352 /*
1353 * Ultimately, we need to add an entry point in the library vector for
1354 * determining <symbol, offset> from <pid, address>. For now, if
1355 * this is a vector open, we just print the raw address or string.
1356 */
1357 if (dtp->dt_vector == NULL)
1358 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
1359 else
1360 P = NULL;
1361
1362 if (P != NULL)
1363 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */
1364
1365	for (i = 0; i < depth && pc[i] != 0; i++) {
1366 const prmap_t *map;
1367
1368 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
1369 break;
1370
1371 if (P != NULL && Plookup_by_addr(P, pc[i],
1372 name, sizeof (name), &sym) == 0) {
1373 (void) Pobjname(P, pc[i], objname, sizeof (objname));
1374
1375 if (pc[i] > sym.st_value) {
1376 (void) snprintf(c, sizeof (c),
1377 "%s`%s+0x%llx", dt_basename(objname), name,
1378 (u_longlong_t)(pc[i] - sym.st_value));
1379 } else {
1380 (void) snprintf(c, sizeof (c),
1381 "%s`%s", dt_basename(objname), name);
1382 }
1383 } else if (str != NULL && str[0] != '\0' && str[0] != '@' &&
1384 (P == NULL || (map = Paddr_to_map(P, pc[i])) == NULL ||
1385 map->pr_mflags & MA_WRITE)) {
1386 /*
1387 * If the current string pointer in the string table
1388 * does not point to an empty string _and_ the program
1389 * counter falls in a writable region, we'll use the
1390 * string from the string table instead of the raw
1391 * address. This last condition is necessary because
1392 * some (broken) ustack helpers will return a string
1393 * even for a program counter that they can't
1394 * identify. If we have a string for a program
1395 * counter that falls in a segment that isn't
1396 * writable, we assume that we have fallen into this
1397 * case and we refuse to use the string. Finally,
1398 * note that if we could not grab the process (e.g.,
1399 * because it exited), the information from the helper
1400 * is better than nothing.
1401 */
1402 (void) snprintf(c, sizeof (c), "%s", str);
1403 } else {
1404 if (P != NULL && Pobjname(P, pc[i], objname,
1405 sizeof (objname)) != NULL) {
1406 (void) snprintf(c, sizeof (c), "%s`0x%llx",
1407 dt_basename(objname), (u_longlong_t)pc[i]);
1408 } else {
1409 (void) snprintf(c, sizeof (c), "0x%llx",
1410 (u_longlong_t)pc[i]);
1411 }
1412 }
1413
1414 if ((err = dt_printf(dtp, fp, format, c)) < 0)
1415 break;
1416
1417 if ((err = dt_printf(dtp, fp, "\n")) < 0)
1418 break;
1419
1420 if (str != NULL && str[0] == '@') {
1421 /*
1422 * If the first character of the string is an "at" sign,
1423 * then the string is inferred to be an annotation --
1424 * and it is printed out beneath the frame and offset
1425 * with brackets.
1426 */
1427 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
1428 break;
1429
1430 (void) snprintf(c, sizeof (c), " [ %s ]", &str[1]);
1431
1432 if ((err = dt_printf(dtp, fp, format, c)) < 0)
1433 break;
1434
1435 if ((err = dt_printf(dtp, fp, "\n")) < 0)
1436 break;
1437 }
1438
1439 if (str != NULL) {
1440 str += strlen(str) + 1;
1441 if (str - strbase >= strsize)
1442 str = NULL;
1443 }
1444 }
1445
1446 if (P != NULL) {
1447 dt_proc_unlock(dtp, P);
1448 dt_proc_release(dtp, P);
1449 }
1450
1451 return (err);
1452 }
1453
1454 static int
1455 dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act)
1456 {
1457 /* LINTED - alignment */
1458 uint64_t pid = ((uint64_t *)addr)[0];
1459 /* LINTED - alignment */
1460 uint64_t pc = ((uint64_t *)addr)[1];
1461 const char *format = " %-50s";
1462 char *s;
1463 int n, len = 256;
1464
1465 if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) {
1466 struct ps_prochandle *P;
1467
1468 if ((P = dt_proc_grab(dtp, pid,
1469 PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) {
1470 GElf_Sym sym;
1471
1472 dt_proc_lock(dtp, P);
1473
1474 if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0)
1475 pc = sym.st_value;
1476
1477 dt_proc_unlock(dtp, P);
1478 dt_proc_release(dtp, P);
1479 }
1480 }
1481
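	/*
	 * The loop below retries with a larger buffer whenever
	 * dtrace_uaddr2str() reports needing more room than we provided.
	 */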
1482 do {
1483 n = len;
1484 s = alloca(n);
1485 } while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n);
1486
1487 return (dt_printf(dtp, fp, format, s));
1488 }
1489
1490 int
1491 dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1492 {
1493 /* LINTED - alignment */
1494 uint64_t pid = ((uint64_t *)addr)[0];
1495 /* LINTED - alignment */
1496 uint64_t pc = ((uint64_t *)addr)[1];
1497 int err = 0;
1498
1499 char objname[PATH_MAX], c[PATH_MAX * 2];
1500 struct ps_prochandle *P;
1501
1502 if (format == NULL)
1503 format = " %-50s";
1504
1505 /*
1506 * See the comment in dt_print_ustack() for the rationale for
1507 * printing raw addresses in the vectored case.
1508 */
1509 if (dtp->dt_vector == NULL)
1510 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
1511 else
1512 P = NULL;
1513
1514 if (P != NULL)
1515 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */
1516
1517 if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != NULL) {
1518 (void) snprintf(c, sizeof (c), "%s", dt_basename(objname));
1519 } else {
1520 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
1521 }
1522
1523 err = dt_printf(dtp, fp, format, c);
1524
1525 if (P != NULL) {
1526 dt_proc_unlock(dtp, P);
1527 dt_proc_release(dtp, P);
1528 }
1529
1530 return (err);
1531 }
1532
1533 static int
1534 dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1535 {
1536 /* LINTED - alignment */
1537 uint64_t pc = *((uint64_t *)addr);
1538 dtrace_syminfo_t dts;
1539 GElf_Sym sym;
1540 char c[PATH_MAX * 2];
1541
1542 if (format == NULL)
1543 format = " %-50s";
1544
1545 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
1546 (void) snprintf(c, sizeof (c), "%s`%s",
1547 dts.dts_object, dts.dts_name);
1548 } else {
1549 /*
1550 * We'll repeat the lookup, but this time we'll specify a
1551 * NULL GElf_Sym -- indicating that we're only interested in
1552 * the containing module.
1553 */
1554 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1555 (void) snprintf(c, sizeof (c), "%s`0x%llx",
1556 dts.dts_object, (u_longlong_t)pc);
1557 } else {
1558 (void) snprintf(c, sizeof (c), "0x%llx",
1559 (u_longlong_t)pc);
1560 }
1561 }
1562
1563 if (dt_printf(dtp, fp, format, c) < 0)
1564 return (-1);
1565
1566 return (0);
1567 }
1568
1569 int
1570 dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1571 {
1572 /* LINTED - alignment */
1573 uint64_t pc = *((uint64_t *)addr);
1574 dtrace_syminfo_t dts;
1575 char c[PATH_MAX * 2];
1576
1577 if (format == NULL)
1578 format = " %-50s";
1579
1580 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1581 (void) snprintf(c, sizeof (c), "%s", dts.dts_object);
1582 } else {
1583 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
1584 }
1585
1586 if (dt_printf(dtp, fp, format, c) < 0)
1587 return (-1);
1588
1589 return (0);
1590 }
1591
1592 typedef struct dt_normal {
1593 dtrace_aggvarid_t dtnd_id;
1594 uint64_t dtnd_normal;
1595 } dt_normal_t;
1596
1597 static int
1598 dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
1599 {
1600 dt_normal_t *normal = arg;
1601 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1602 dtrace_aggvarid_t id = normal->dtnd_id;
1603
1604 if (agg->dtagd_nrecs == 0)
1605 return (DTRACE_AGGWALK_NEXT);
1606
1607 if (agg->dtagd_varid != id)
1608 return (DTRACE_AGGWALK_NEXT);
1609
1610 ((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal;
1611 return (DTRACE_AGGWALK_NORMALIZE);
1612 }
1613
1614 static int
1615 dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
1616 {
1617 dt_normal_t normal;
1618 caddr_t addr;
1619
1620 /*
1621 * We (should) have two records: the aggregation ID followed by the
1622 * normalization value.
1623 */
1624 addr = base + rec->dtrd_offset;
1625
1626 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
1627 return (dt_set_errno(dtp, EDT_BADNORMAL));
1628
1629 /* LINTED - alignment */
1630 normal.dtnd_id = *((dtrace_aggvarid_t *)addr);
1631 rec++;
1632
1633 if (rec->dtrd_action != DTRACEACT_LIBACT)
1634 return (dt_set_errno(dtp, EDT_BADNORMAL));
1635
1636 if (rec->dtrd_arg != DT_ACT_NORMALIZE)
1637 return (dt_set_errno(dtp, EDT_BADNORMAL));
1638
1639 addr = base + rec->dtrd_offset;
1640
1641 switch (rec->dtrd_size) {
1642 case sizeof (uint64_t):
1643 /* LINTED - alignment */
1644 normal.dtnd_normal = *((uint64_t *)addr);
1645 break;
1646 case sizeof (uint32_t):
1647 /* LINTED - alignment */
1648 normal.dtnd_normal = *((uint32_t *)addr);
1649 break;
1650 case sizeof (uint16_t):
1651 /* LINTED - alignment */
1652 normal.dtnd_normal = *((uint16_t *)addr);
1653 break;
1654 case sizeof (uint8_t):
1655 normal.dtnd_normal = *((uint8_t *)addr);
1656 break;
1657 default:
1658 return (dt_set_errno(dtp, EDT_BADNORMAL));
1659 }
1660
1661 (void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal);
1662
1663 return (0);
1664 }
1665
1666 static int
1667 dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
1668 {
1669 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1670 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);
1671
1672 if (agg->dtagd_nrecs == 0)
1673 return (DTRACE_AGGWALK_NEXT);
1674
1675 if (agg->dtagd_varid != id)
1676 return (DTRACE_AGGWALK_NEXT);
1677
1678 return (DTRACE_AGGWALK_DENORMALIZE);
1679 }
1680
1681 static int
1682 dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg)
1683 {
1684 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1685 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);
1686
1687 if (agg->dtagd_nrecs == 0)
1688 return (DTRACE_AGGWALK_NEXT);
1689
1690 if (agg->dtagd_varid != id)
1691 return (DTRACE_AGGWALK_NEXT);
1692
1693 return (DTRACE_AGGWALK_CLEAR);
1694 }
1695
1696 typedef struct dt_trunc {
1697 dtrace_aggvarid_t dttd_id;
1698 uint64_t dttd_remaining;
1699 } dt_trunc_t;
1700
1701 static int
1702 dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg)
1703 {
1704 dt_trunc_t *trunc = arg;
1705 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1706 dtrace_aggvarid_t id = trunc->dttd_id;
1707
1708 if (agg->dtagd_nrecs == 0)
1709 return (DTRACE_AGGWALK_NEXT);
1710
1711 if (agg->dtagd_varid != id)
1712 return (DTRACE_AGGWALK_NEXT);
1713
1714 if (trunc->dttd_remaining == 0)
1715 return (DTRACE_AGGWALK_REMOVE);
1716
1717 trunc->dttd_remaining--;
1718 return (DTRACE_AGGWALK_NEXT);
1719 }
1720
1721 static int
1722 dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
1723 {
1724 dt_trunc_t trunc;
1725 caddr_t addr;
1726 int64_t remaining;
1727 int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *);
1728
1729 /*
1730 * We (should) have two records: the aggregation ID followed by the
1731 * number of aggregation entries after which the aggregation is to be
1732 * truncated.
1733 */
1734 addr = base + rec->dtrd_offset;
1735
1736 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
1737 return (dt_set_errno(dtp, EDT_BADTRUNC));
1738
1739 /* LINTED - alignment */
1740 trunc.dttd_id = *((dtrace_aggvarid_t *)addr);
1741 rec++;
1742
1743 if (rec->dtrd_action != DTRACEACT_LIBACT)
1744 return (dt_set_errno(dtp, EDT_BADTRUNC));
1745
1746 if (rec->dtrd_arg != DT_ACT_TRUNC)
1747 return (dt_set_errno(dtp, EDT_BADTRUNC));
1748
1749 addr = base + rec->dtrd_offset;
1750
1751 switch (rec->dtrd_size) {
1752 case sizeof (uint64_t):
1753 /* LINTED - alignment */
1754 remaining = *((int64_t *)addr);
1755 break;
1756 case sizeof (uint32_t):
1757 /* LINTED - alignment */
1758 remaining = *((int32_t *)addr);
1759 break;
1760 case sizeof (uint16_t):
1761 /* LINTED - alignment */
1762 remaining = *((int16_t *)addr);
1763 break;
1764 case sizeof (uint8_t):
1765 remaining = *((int8_t *)addr);
1766 break;
1767 default:
1768 return (dt_set_errno(dtp, EDT_BADNORMAL));
1769 }
1770
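	/*
	 * A negative count denotes trunc() with a negative argument, which
	 * keeps the lowest-valued entries: we therefore walk the aggregation
	 * sorted by value from the bottom.  Otherwise we walk from the top,
	 * keeping the highest-valued entries.
	 */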
1771 if (remaining < 0) {
1772 func = dtrace_aggregate_walk_valsorted;
1773 remaining = -remaining;
1774 } else {
1775 func = dtrace_aggregate_walk_valrevsorted;
1776 }
1777
1778 assert(remaining >= 0);
1779 trunc.dttd_remaining = remaining;
1780
1781 (void) func(dtp, dt_trunc_agg, &trunc);
1782
1783 return (0);
1784 }
1785
1786 static int
1787 dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec,
1788 caddr_t addr, size_t size, const dtrace_aggdata_t *aggdata,
1789 uint64_t normal, dt_print_aggdata_t *pd)
1790 {
1791 int err, width;
1792 dtrace_actkind_t act = rec->dtrd_action;
1793 boolean_t packed = pd->dtpa_agghist || pd->dtpa_aggpack;
1794 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1795
1796 static struct {
1797 size_t size;
1798 int width;
1799 int packedwidth;
1800 } *fmt, fmttab[] = {
1801 { sizeof (uint8_t), 3, 3 },
1802 { sizeof (uint16_t), 5, 5 },
1803 { sizeof (uint32_t), 8, 8 },
1804 { sizeof (uint64_t), 16, 16 },
1805 { 0, -50, 16 }
1806 };
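	/*
	 * The zero-sized entry above is the catch-all: any record size that
	 * isn't one of the integer widths falls through to it and is printed
	 * with a width of -50 (left-justified) or, when packed, 16.
	 */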
1807
1808 if (packed && pd->dtpa_agghisthdr != agg->dtagd_varid) {
1809 dtrace_recdesc_t *r;
1810
1811 width = 0;
1812
1813		/*
1814		 * To print our quantization header for either an agghist or
1815		 * aggpack aggregation, we need to iterate through all of our
1816		 * records to determine their width.
1817		 */
1818 for (r = rec; !DTRACEACT_ISAGG(r->dtrd_action); r++) {
1819 for (fmt = fmttab; fmt->size &&
1820 fmt->size != r->dtrd_size; fmt++)
1821 continue;
1822
1823 width += fmt->packedwidth + 1;
1824 }
1825
1826 if (pd->dtpa_agghist) {
1827 if (dt_print_quanthdr(dtp, fp, width) < 0)
1828 return (-1);
1829 } else {
1830 if (dt_print_quanthdr_packed(dtp, fp,
1831 width, aggdata, r->dtrd_action) < 0)
1832 return (-1);
1833 }
1834
1835 pd->dtpa_agghisthdr = agg->dtagd_varid;
1836 }
1837
1838 if (pd->dtpa_agghist && DTRACEACT_ISAGG(act)) {
1839 char positives = aggdata->dtada_flags & DTRACE_A_HASPOSITIVES;
1840 char negatives = aggdata->dtada_flags & DTRACE_A_HASNEGATIVES;
1841 int64_t val;
1842
1843 assert(act == DTRACEAGG_SUM || act == DTRACEAGG_COUNT);
1844 val = (long long)*((uint64_t *)addr);
1845
1846 if (dt_printf(dtp, fp, " ") < 0)
1847 return (-1);
1848
1849 return (dt_print_quantline(dtp, fp, val, normal,
1850 aggdata->dtada_total, positives, negatives));
1851 }
1852
1853 if (pd->dtpa_aggpack && DTRACEACT_ISAGG(act)) {
1854 switch (act) {
1855 case DTRACEAGG_QUANTIZE:
1856 return (dt_print_quantize_packed(dtp,
1857 fp, addr, size, aggdata));
1858 case DTRACEAGG_LQUANTIZE:
1859 return (dt_print_lquantize_packed(dtp,
1860 fp, addr, size, aggdata));
1861 default:
1862 break;
1863 }
1864 }
1865
1866 switch (act) {
1867 case DTRACEACT_STACK:
1868 return (dt_print_stack(dtp, fp, NULL, addr,
1869 rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg));
1870
1871 case DTRACEACT_USTACK:
1872 case DTRACEACT_JSTACK:
1873 return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg));
1874
1875 case DTRACEACT_USYM:
1876 case DTRACEACT_UADDR:
1877 return (dt_print_usym(dtp, fp, addr, act));
1878
1879 case DTRACEACT_UMOD:
1880 return (dt_print_umod(dtp, fp, NULL, addr));
1881
1882 case DTRACEACT_SYM:
1883 return (dt_print_sym(dtp, fp, NULL, addr));
1884
1885 case DTRACEACT_MOD:
1886 return (dt_print_mod(dtp, fp, NULL, addr));
1887
1888 case DTRACEAGG_QUANTIZE:
1889 return (dt_print_quantize(dtp, fp, addr, size, normal));
1890
1891 case DTRACEAGG_LQUANTIZE:
1892 return (dt_print_lquantize(dtp, fp, addr, size, normal));
1893
1894 case DTRACEAGG_LLQUANTIZE:
1895 return (dt_print_llquantize(dtp, fp, addr, size, normal));
1896
1897 case DTRACEAGG_AVG:
1898 return (dt_print_average(dtp, fp, addr, size, normal));
1899
1900 case DTRACEAGG_STDDEV:
1901 return (dt_print_stddev(dtp, fp, addr, size, normal));
1902
1903 default:
1904 break;
1905 }
1906
1907 for (fmt = fmttab; fmt->size && fmt->size != size; fmt++)
1908 continue;
1909
1910 width = packed ? fmt->packedwidth : fmt->width;
1911
1912 switch (size) {
1913 case sizeof (uint64_t):
1914 err = dt_printf(dtp, fp, " %*lld", width,
1915 /* LINTED - alignment */
1916 (long long)*((uint64_t *)addr) / normal);
1917 break;
1918 case sizeof (uint32_t):
1919 /* LINTED - alignment */
1920 err = dt_printf(dtp, fp, " %*d", width, *((uint32_t *)addr) /
1921 (uint32_t)normal);
1922 break;
1923 case sizeof (uint16_t):
1924 /* LINTED - alignment */
1925 err = dt_printf(dtp, fp, " %*d", width, *((uint16_t *)addr) /
1926 (uint32_t)normal);
1927 break;
1928 case sizeof (uint8_t):
1929 err = dt_printf(dtp, fp, " %*d", width, *((uint8_t *)addr) /
1930 (uint32_t)normal);
1931 break;
1932 default:
1933 err = dt_print_bytes(dtp, fp, addr, size, width, 0, 0);
1934 break;
1935 }
1936
1937 return (err);
1938 }
1939
1940 int
1941 dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg)
1942 {
1943 int i, aggact = 0;
1944 dt_print_aggdata_t *pd = arg;
1945 const dtrace_aggdata_t *aggdata = aggsdata[0];
1946 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1947 FILE *fp = pd->dtpa_fp;
1948 dtrace_hdl_t *dtp = pd->dtpa_dtp;
1949 dtrace_recdesc_t *rec;
1950 dtrace_actkind_t act;
1951 caddr_t addr;
1952 size_t size;
1953
1954 pd->dtpa_agghist = (aggdata->dtada_flags & DTRACE_A_TOTAL);
1955 pd->dtpa_aggpack = (aggdata->dtada_flags & DTRACE_A_MINMAXBIN);
1956
1957 /*
1958 * Iterate over each record description in the key, printing the traced
1959 * data, skipping the first datum (the tuple member created by the
1960 * compiler).
1961 */
1962 for (i = 1; i < agg->dtagd_nrecs; i++) {
1963 rec = &agg->dtagd_rec[i];
1964 act = rec->dtrd_action;
1965 addr = aggdata->dtada_data + rec->dtrd_offset;
1966 size = rec->dtrd_size;
1967
1968 if (DTRACEACT_ISAGG(act)) {
1969 aggact = i;
1970 break;
1971 }
1972
1973 if (dt_print_datum(dtp, fp, rec, addr,
1974 size, aggdata, 1, pd) < 0)
1975 return (-1);
1976
1977 if (dt_buffered_flush(dtp, NULL, rec, aggdata,
1978 DTRACE_BUFDATA_AGGKEY) < 0)
1979 return (-1);
1980 }
1981
1982 assert(aggact != 0);
1983
1984 for (i = (naggvars == 1 ? 0 : 1); i < naggvars; i++) {
1985 uint64_t normal;
1986
1987 aggdata = aggsdata[i];
1988 agg = aggdata->dtada_desc;
1989 rec = &agg->dtagd_rec[aggact];
1990 act = rec->dtrd_action;
1991 addr = aggdata->dtada_data + rec->dtrd_offset;
1992 size = rec->dtrd_size;
1993
1994 assert(DTRACEACT_ISAGG(act));
1995 normal = aggdata->dtada_normal;
1996
1997 if (dt_print_datum(dtp, fp, rec, addr,
1998 size, aggdata, normal, pd) < 0)
1999 return (-1);
2000
2001 if (dt_buffered_flush(dtp, NULL, rec, aggdata,
2002 DTRACE_BUFDATA_AGGVAL) < 0)
2003 return (-1);
2004
2005 if (!pd->dtpa_allunprint)
2006 agg->dtagd_flags |= DTRACE_AGD_PRINTED;
2007 }
2008
2009 if (!pd->dtpa_agghist && !pd->dtpa_aggpack) {
2010 if (dt_printf(dtp, fp, "\n") < 0)
2011 return (-1);
2012 }
2013
2014 if (dt_buffered_flush(dtp, NULL, NULL, aggdata,
2015 DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0)
2016 return (-1);
2017
2018 return (0);
2019 }
2020
2021 int
2022 dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg)
2023 {
2024 dt_print_aggdata_t *pd = arg;
2025 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
2026 dtrace_aggvarid_t aggvarid = pd->dtpa_id;
2027
2028 if (pd->dtpa_allunprint) {
2029 if (agg->dtagd_flags & DTRACE_AGD_PRINTED)
2030 return (0);
2031 } else {
2032 /*
2033 * If we're not printing all unprinted aggregations, then the
2034 * aggregation variable ID denotes a specific aggregation
2035 * variable that we should print -- skip any other aggregations
2036 * that we encounter.
2037 */
2038 if (agg->dtagd_nrecs == 0)
2039 return (0);
2040
2041 if (aggvarid != agg->dtagd_varid)
2042 return (0);
2043 }
2044
2045 return (dt_print_aggs(&aggdata, 1, arg));
2046 }
2047
2048 int
2049 dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data,
2050 const char *option, const char *value)
2051 {
2052 int len, rval;
2053 char *msg;
2054 const char *errstr;
2055 dtrace_setoptdata_t optdata;
2056
2057 bzero(&optdata, sizeof (optdata));
2058 (void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval);
2059
2060 if (dtrace_setopt(dtp, option, value) == 0) {
2061 (void) dtrace_getopt(dtp, option, &optdata.dtsda_newval);
2062 optdata.dtsda_probe = data;
2063 optdata.dtsda_option = option;
2064 optdata.dtsda_handle = dtp;
2065
2066 if ((rval = dt_handle_setopt(dtp, &optdata)) != 0)
2067 return (rval);
2068
2069 return (0);
2070 }
2071
2072 errstr = dtrace_errmsg(dtp, dtrace_errno(dtp));
2073 len = strlen(option) + strlen(value) + strlen(errstr) + 80;
2074 msg = alloca(len);
2075
2076 (void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n",
2077 option, value, errstr);
2078
2079 if ((rval = dt_handle_liberr(dtp, data, msg)) == 0)
2080 return (0);
2081
2082 return (rval);
2083 }
2084
2085 static int
2086 dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu,
2087 dtrace_bufdesc_t *buf, boolean_t just_one,
2088 dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg)
2089 {
2090 dtrace_epid_t id;
2091 size_t offs;
2092 int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET);
2093 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
2094 int rval, i, n;
2095 uint64_t tracememsize = 0;
2096 dtrace_probedata_t data;
2097 uint64_t drops;
2098
2099 bzero(&data, sizeof (data));
2100 data.dtpda_handle = dtp;
2101 data.dtpda_cpu = cpu;
2102 data.dtpda_flow = dtp->dt_flow;
2103 data.dtpda_indent = dtp->dt_indent;
2104 data.dtpda_prefix = dtp->dt_prefix;
2105
2106 for (offs = buf->dtbd_oldest; offs < buf->dtbd_size; ) {
2107 dtrace_eprobedesc_t *epd;
2108
2109 /*
2110 * We're guaranteed to have an ID.
2111 */
2112 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs);
2113
2114 if (id == DTRACE_EPIDNONE) {
2115 /*
2116 * This is filler to ensure proper alignment of the
2117 * next record; we simply ignore it.
2118 */
2119 offs += sizeof (id);
2120 continue;
2121 }
2122
2123 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc,
2124 &data.dtpda_pdesc)) != 0)
2125 return (rval);
2126
2127 epd = data.dtpda_edesc;
2128 data.dtpda_data = buf->dtbd_data + offs;
2129
2130 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) {
2131 rval = dt_handle(dtp, &data);
2132
2133 if (rval == DTRACE_CONSUME_NEXT)
2134 goto nextepid;
2135
2136 if (rval == DTRACE_CONSUME_ERROR)
2137 return (-1);
2138 }
2139
2140 if (flow)
2141 (void) dt_flowindent(dtp, &data, dtp->dt_last_epid,
2142 buf, offs);
2143
2144 rval = (*efunc)(&data, arg);
2145
2146 if (flow) {
2147 if (data.dtpda_flow == DTRACEFLOW_ENTRY)
2148 data.dtpda_indent += 2;
2149 }
2150
2151 if (rval == DTRACE_CONSUME_NEXT)
2152 goto nextepid;
2153
2154 if (rval == DTRACE_CONSUME_ABORT)
2155 return (dt_set_errno(dtp, EDT_DIRABORT));
2156
2157 if (rval != DTRACE_CONSUME_THIS)
2158 return (dt_set_errno(dtp, EDT_BADRVAL));
2159
2160 for (i = 0; i < epd->dtepd_nrecs; i++) {
2161 caddr_t addr;
2162 dtrace_recdesc_t *rec = &epd->dtepd_rec[i];
2163 dtrace_actkind_t act = rec->dtrd_action;
2164
2165 data.dtpda_data = buf->dtbd_data + offs +
2166 rec->dtrd_offset;
2167 addr = data.dtpda_data;
2168
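/*
 * Library-controlled actions (clear(), denormalize(), ftruncate(),
 * normalize(), setopt(), trunc()) never reach the record callback; they
 * are consumed here.
 */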
2169 if (act == DTRACEACT_LIBACT) {
2170 uint64_t arg = rec->dtrd_arg;
2171 dtrace_aggvarid_t id;
2172
2173 switch (arg) {
2174 case DT_ACT_CLEAR:
2175 /* LINTED - alignment */
2176 id = *((dtrace_aggvarid_t *)addr);
2177 (void) dtrace_aggregate_walk(dtp,
2178 dt_clear_agg, &id);
2179 continue;
2180
2181 case DT_ACT_DENORMALIZE:
2182 /* LINTED - alignment */
2183 id = *((dtrace_aggvarid_t *)addr);
2184 (void) dtrace_aggregate_walk(dtp,
2185 dt_denormalize_agg, &id);
2186 continue;
2187
2188 case DT_ACT_FTRUNCATE:
2189 if (fp == NULL)
2190 continue;
2191
2192 (void) fflush(fp);
2193 (void) ftruncate(fileno(fp), 0);
2194 (void) fseeko(fp, 0, SEEK_SET);
2195 continue;
2196
2197 case DT_ACT_NORMALIZE:
2198 if (i == epd->dtepd_nrecs - 1)
2199 return (dt_set_errno(dtp,
2200 EDT_BADNORMAL));
2201
2202 if (dt_normalize(dtp,
2203 buf->dtbd_data + offs, rec) != 0)
2204 return (-1);
2205
2206 i++;
2207 continue;
2208
2209 case DT_ACT_SETOPT: {
2210 uint64_t *opts = dtp->dt_options;
2211 dtrace_recdesc_t *valrec;
2212 uint32_t valsize;
2213 caddr_t val;
2214 int rv;
2215
2216 if (i == epd->dtepd_nrecs - 1) {
2217 return (dt_set_errno(dtp,
2218 EDT_BADSETOPT));
2219 }
2220
2221 valrec = &epd->dtepd_rec[++i];
2222 valsize = valrec->dtrd_size;
2223
2224 if (valrec->dtrd_action != act ||
2225 valrec->dtrd_arg != arg) {
2226 return (dt_set_errno(dtp,
2227 EDT_BADSETOPT));
2228 }
2229
2230 if (valsize > sizeof (uint64_t)) {
2231 val = buf->dtbd_data + offs +
2232 valrec->dtrd_offset;
2233 } else {
2234 val = "1";
2235 }
2236
2237 rv = dt_setopt(dtp, &data, addr, val);
2238
2239 if (rv != 0)
2240 return (-1);
2241
2242 flow = (opts[DTRACEOPT_FLOWINDENT] !=
2243 DTRACEOPT_UNSET);
2244 quiet = (opts[DTRACEOPT_QUIET] !=
2245 DTRACEOPT_UNSET);
2246
2247 continue;
2248 }
2249
2250 case DT_ACT_TRUNC:
2251 if (i == epd->dtepd_nrecs - 1)
2252 return (dt_set_errno(dtp,
2253 EDT_BADTRUNC));
2254
2255 if (dt_trunc(dtp,
2256 buf->dtbd_data + offs, rec) != 0)
2257 return (-1);
2258
2259 i++;
2260 continue;
2261
2262 default:
2263 continue;
2264 }
2265 }
2266
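/*
 * A tracemem() size record: remember the dynamically traced size so that
 * the following tracemem() record is printed with at most that many bytes.
 */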
2267 if (act == DTRACEACT_TRACEMEM_DYNSIZE &&
2268 rec->dtrd_size == sizeof (uint64_t)) {
2269 /* LINTED - alignment */
2270 tracememsize = *((unsigned long long *)addr);
2271 continue;
2272 }
2273
2274 rval = (*rfunc)(&data, rec, arg);
2275
2276 if (rval == DTRACE_CONSUME_NEXT)
2277 continue;
2278
2279 if (rval == DTRACE_CONSUME_ABORT)
2280 return (dt_set_errno(dtp, EDT_DIRABORT));
2281
2282 if (rval != DTRACE_CONSUME_THIS)
2283 return (dt_set_errno(dtp, EDT_BADRVAL));
2284
2285 if (act == DTRACEACT_STACK) {
2286 int depth = rec->dtrd_arg;
2287
2288 if (dt_print_stack(dtp, fp, NULL, addr, depth,
2289 rec->dtrd_size / depth) < 0)
2290 return (-1);
2291 goto nextrec;
2292 }
2293
2294 if (act == DTRACEACT_USTACK ||
2295 act == DTRACEACT_JSTACK) {
2296 if (dt_print_ustack(dtp, fp, NULL,
2297 addr, rec->dtrd_arg) < 0)
2298 return (-1);
2299 goto nextrec;
2300 }
2301
2302 if (act == DTRACEACT_SYM) {
2303 if (dt_print_sym(dtp, fp, NULL, addr) < 0)
2304 return (-1);
2305 goto nextrec;
2306 }
2307
2308 if (act == DTRACEACT_MOD) {
2309 if (dt_print_mod(dtp, fp, NULL, addr) < 0)
2310 return (-1);
2311 goto nextrec;
2312 }
2313
2314 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) {
2315 if (dt_print_usym(dtp, fp, addr, act) < 0)
2316 return (-1);
2317 goto nextrec;
2318 }
2319
2320 if (act == DTRACEACT_UMOD) {
2321 if (dt_print_umod(dtp, fp, NULL, addr) < 0)
2322 return (-1);
2323 goto nextrec;
2324 }
2325
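/*
 * printf()-like actions (printf(), printa(), system() and freopen()) have
 * an associated format string; the formatting function may consume
 * additional value records, which we skip over below.
 */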
2326 if (DTRACEACT_ISPRINTFLIKE(act)) {
2327 void *fmtdata;
2328 int (*func)(dtrace_hdl_t *, FILE *, void *,
2329 const dtrace_probedata_t *,
2330 const dtrace_recdesc_t *, uint_t,
2331 const void *buf, size_t);
2332
2333 if ((fmtdata = dt_format_lookup(dtp,
2334 rec->dtrd_format)) == NULL)
2335 goto nofmt;
2336
2337 switch (act) {
2338 case DTRACEACT_PRINTF:
2339 func = dtrace_fprintf;
2340 break;
2341 case DTRACEACT_PRINTA:
2342 func = dtrace_fprinta;
2343 break;
2344 case DTRACEACT_SYSTEM:
2345 func = dtrace_system;
2346 break;
2347 case DTRACEACT_FREOPEN:
2348 func = dtrace_freopen;
2349 break;
2350 }
2351
2352 n = (*func)(dtp, fp, fmtdata, &data,
2353 rec, epd->dtepd_nrecs - i,
2354 (uchar_t *)buf->dtbd_data + offs,
2355 buf->dtbd_size - offs);
2356
2357 if (n < 0)
2358 return (-1); /* errno is set for us */
2359
2360 if (n > 0)
2361 i += n - 1;
2362 goto nextrec;
2363 }
2364
2365 /*
2366 * If this is a DIF expression, and the record has a
2367 * format set, this indicates we have a CTF type name
2368 * associated with the data and we should try to print
2369 * it out by type.
2370 */
2371 if (act == DTRACEACT_DIFEXPR) {
2372 const char *strdata = dt_strdata_lookup(dtp,
2373 rec->dtrd_format);
2374 if (strdata != NULL) {
2375 n = dtrace_print(dtp, fp, strdata,
2376 addr, rec->dtrd_size);
2377
2378 /*
2379 * dtrace_print() will return -1 on
2380 * error, or return the number of bytes
2381 * consumed. It will return 0 if the
2382 * type couldn't be determined, and we
2383 * should fall through to the normal
2384 * trace method.
2385 */
2386 if (n < 0)
2387 return (-1);
2388
2389 if (n > 0)
2390 goto nextrec;
2391 }
2392 }
2393
2394 nofmt:
2395 if (act == DTRACEACT_PRINTA) {
2396 dt_print_aggdata_t pd;
2397 dtrace_aggvarid_t *aggvars;
2398 int j, naggvars = 0;
2399 size_t size = ((epd->dtepd_nrecs - i) *
2400 sizeof (dtrace_aggvarid_t));
2401
2402 if ((aggvars = dt_alloc(dtp, size)) == NULL)
2403 return (-1);
2404
2405 /*
2406 * This might be a printa() with multiple
2407 * aggregation variables. We need to scan
2408 * forward through the records until we find
2409 * a record from a different statement.
2410 */
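/*
 * For example, printa(@a, @b) emits one PRINTA record per aggregation,
 * all sharing the same dtrd_uarg; we gather each record's aggregation
 * variable ID into aggvars.
 */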
2411 for (j = i; j < epd->dtepd_nrecs; j++) {
2412 dtrace_recdesc_t *nrec;
2413 caddr_t naddr;
2414
2415 nrec = &epd->dtepd_rec[j];
2416
2417 if (nrec->dtrd_uarg != rec->dtrd_uarg)
2418 break;
2419
2420 if (nrec->dtrd_action != act) {
2421 return (dt_set_errno(dtp,
2422 EDT_BADAGG));
2423 }
2424
2425 naddr = buf->dtbd_data + offs +
2426 nrec->dtrd_offset;
2427
2428 aggvars[naggvars++] =
2429 /* LINTED - alignment */
2430 *((dtrace_aggvarid_t *)naddr);
2431 }
2432
2433 i = j - 1;
2434 bzero(&pd, sizeof (pd));
2435 pd.dtpa_dtp = dtp;
2436 pd.dtpa_fp = fp;
2437
2438 assert(naggvars >= 1);
2439
2440 if (naggvars == 1) {
2441 pd.dtpa_id = aggvars[0];
2442 dt_free(dtp, aggvars);
2443
2444 if (dt_printf(dtp, fp, "\n") < 0 ||
2445 dtrace_aggregate_walk_sorted(dtp,
2446 dt_print_agg, &pd) < 0)
2447 return (-1);
2448 goto nextrec;
2449 }
2450
2451 if (dt_printf(dtp, fp, "\n") < 0 ||
2452 dtrace_aggregate_walk_joined(dtp, aggvars,
2453 naggvars, dt_print_aggs, &pd) < 0) {
2454 dt_free(dtp, aggvars);
2455 return (-1);
2456 }
2457
2458 dt_free(dtp, aggvars);
2459 goto nextrec;
2460 }
2461
2462 if (act == DTRACEACT_TRACEMEM) {
2463 if (tracememsize == 0 ||
2464 tracememsize > rec->dtrd_size) {
2465 tracememsize = rec->dtrd_size;
2466 }
2467
2468 n = dt_print_bytes(dtp, fp, addr,
2469 tracememsize, -33, quiet, 1);
2470
2471 tracememsize = 0;
2472
2473 if (n < 0)
2474 return (-1);
2475
2476 goto nextrec;
2477 }
2478
2479 switch (rec->dtrd_size) {
2480 case sizeof (uint64_t):
2481 n = dt_printf(dtp, fp,
2482 quiet ? "%lld" : " %16lld",
2483 /* LINTED - alignment */
2484 *((unsigned long long *)addr));
2485 break;
2486 case sizeof (uint32_t):
2487 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d",
2488 /* LINTED - alignment */
2489 *((uint32_t *)addr));
2490 break;
2491 case sizeof (uint16_t):
2492 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d",
2493 /* LINTED - alignment */
2494 *((uint16_t *)addr));
2495 break;
2496 case sizeof (uint8_t):
2497 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d",
2498 *((uint8_t *)addr));
2499 break;
2500 default:
2501 n = dt_print_bytes(dtp, fp, addr,
2502 rec->dtrd_size, -33, quiet, 0);
2503 break;
2504 }
2505
2506 if (n < 0)
2507 return (-1); /* errno is set for us */
2508
2509 nextrec:
2510 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0)
2511 return (-1); /* errno is set for us */
2512 }
2513
2514 /*
2515 * Call the record callback with a NULL record to indicate
2516 * that we're done processing this EPID.
2517 */
2518 rval = (*rfunc)(&data, NULL, arg);
2519 nextepid:
2520 offs += epd->dtepd_size;
2521 dtp->dt_last_epid = id;
2522 if (just_one) {
2523 buf->dtbd_oldest = offs;
2524 break;
2525 }
2526 }
2527
2528 dtp->dt_flow = data.dtpda_flow;
2529 dtp->dt_indent = data.dtpda_indent;
2530 dtp->dt_prefix = data.dtpda_prefix;
2531
2532 if ((drops = buf->dtbd_drops) == 0)
2533 return (0);
2534
2535 /*
2536 * Explicitly zero the drops to prevent us from processing them again.
2537 */
2538 buf->dtbd_drops = 0;
2539
2540 return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops));
2541 }
2542
2543 /*
2544 * Reduce memory usage by shrinking the buffer if it is less than half full.
2545 * Note, we need to preserve the alignment of the data at dtbd_oldest, which is
2546 * only 4-byte aligned.
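 * For example, if dtbd_oldest is 0x1004, misalign is 4: we allocate
 * (used + 4) bytes, zero the first 4, and copy the live data in at offset
 * 4 so that it keeps its phase relative to an 8-byte boundary.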
2547 */
2548 static void
2549 dt_realloc_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf, int cursize)
2550 {
2551 uint64_t used = buf->dtbd_size - buf->dtbd_oldest;
2552 if (used < cursize / 2) {
2553 int misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
2554 char *newdata = dt_alloc(dtp, used + misalign);
2555 if (newdata == NULL)
2556 return;
2557 bzero(newdata, misalign);
2558 bcopy(buf->dtbd_data + buf->dtbd_oldest,
2559 newdata + misalign, used);
2560 dt_free(dtp, buf->dtbd_data);
2561 buf->dtbd_oldest = misalign;
2562 buf->dtbd_size = used + misalign;
2563 buf->dtbd_data = newdata;
2564 }
2565 }
2566
2567 /*
2568 * If the ring buffer has wrapped, the data is not in order. Rearrange it
2569 * so that it is. Note, we need to preserve the alignment of the data at
2570 * dtbd_oldest, which is only 4-byte aligned.
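 * For example, a wrapped buffer laid out as [ newest data | oldest data ]
 * is copied into a fresh allocation as [ pad | oldest data | newest data ],
 * where the pad preserves the 8-byte phase of the data at dtbd_oldest.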
2571 */
2572 static int
2573 dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
2574 {
2575 int misalign;
2576 char *newdata, *ndp;
2577
2578 if (buf->dtbd_oldest == 0)
2579 return (0);
2580
2581 misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
2582 newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign);
2583
2584 if (newdata == NULL)
2585 return (-1);
2586
2587 assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1)));
2588
2589 bzero(ndp, misalign);
2590 ndp += misalign;
2591
2592 bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp,
2593 buf->dtbd_size - buf->dtbd_oldest);
2594 ndp += buf->dtbd_size - buf->dtbd_oldest;
2595
2596 bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest);
2597
2598 dt_free(dtp, buf->dtbd_data);
2599 buf->dtbd_oldest = 0;
2600 buf->dtbd_data = newdata;
2601 buf->dtbd_size += misalign;
2602
2603 return (0);
2604 }
2605
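/*
 * Free a snapshot buffer and its data.
 */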
2606 static void
2607 dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
2608 {
2609 dt_free(dtp, buf->dtbd_data);
2610 dt_free(dtp, buf);
2611 }
2612
2613 /*
2614 * Returns 0 on success, in which case *bufp is set to the snapshotted
2615 * buffer, or set to NULL if there is no data for this CPU.
2616 * Returns -1 on failure and sets dt_errno.
2617 */
2618 static int
2619 dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp)
2620 {
2621 dtrace_optval_t size;
2622 dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf));
2623 int error;
2624
2625 if (buf == NULL)
2626 return (-1);
2627
2628 (void) dtrace_getopt(dtp, "bufsize", &size);
2629 buf->dtbd_data = dt_alloc(dtp, size);
2630 if (buf->dtbd_data == NULL) {
2631 dt_free(dtp, buf);
2632 return (-1);
2633 }
2634 buf->dtbd_size = size;
2635 buf->dtbd_cpu = cpu;
2636
2637 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
2638 dt_put_buf(dtp, buf);
2639 /*
2640 * If we failed with ENOENT, it may be because the
2641 * CPU was unconfigured -- this is okay. Any other
2642 * error, however, is unexpected.
2643 */
2644 if (errno == ENOENT) {
2645 *bufp = NULL;
2646 return (0);
2647 }
2648
2649 return (dt_set_errno(dtp, errno));
2650 }
2651
2652 error = dt_unring_buf(dtp, buf);
2653 if (error != 0) {
2654 dt_put_buf(dtp, buf);
2655 return (error);
2656 }
2657 dt_realloc_buf(dtp, buf, size);
2658
2659 *bufp = buf;
2660 return (0);
2661 }
2662
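/*
 * State used to interpose on the probe, record and error callbacks while
 * the buffer containing the BEGIN probe is consumed in two passes: first
 * BEGIN-only, then everything except BEGIN.
 */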
2663 typedef struct dt_begin {
2664 dtrace_consume_probe_f *dtbgn_probefunc;
2665 dtrace_consume_rec_f *dtbgn_recfunc;
2666 void *dtbgn_arg;
2667 dtrace_handle_err_f *dtbgn_errhdlr;
2668 void *dtbgn_errarg;
2669 int dtbgn_beginonly;
2670 } dt_begin_t;
2671
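/*
 * Probe interposition function for the BEGIN buffer: in the BEGIN-only
 * pass we forward only dtrace:::BEGIN records to the caller's probe
 * callback; in the second pass we forward everything except BEGIN.
 */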
2672 static int
2673 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg)
2674 {
2675 dt_begin_t *begin = arg;
2676 dtrace_probedesc_t *pd = data->dtpda_pdesc;
2677
2678 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
2679 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);
2680
2681 if (begin->dtbgn_beginonly) {
2682 if (!(r1 && r2))
2683 return (DTRACE_CONSUME_NEXT);
2684 } else {
2685 if (r1 && r2)
2686 return (DTRACE_CONSUME_NEXT);
2687 }
2688
2689 /*
2690 * We have a record that we're interested in. Now call the underlying
2691 * probe function...
2692 */
2693 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg));
2694 }
2695
2696 static int
2697 dt_consume_begin_record(const dtrace_probedata_t *data,
2698 const dtrace_recdesc_t *rec, void *arg)
2699 {
2700 dt_begin_t *begin = arg;
2701
2702 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg));
2703 }
2704
2705 static int
2706 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg)
2707 {
2708 dt_begin_t *begin = (dt_begin_t *)arg;
2709 dtrace_probedesc_t *pd = data->dteda_pdesc;
2710
2711 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
2712 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);
2713
2714 if (begin->dtbgn_beginonly) {
2715 if (!(r1 && r2))
2716 return (DTRACE_HANDLE_OK);
2717 } else {
2718 if (r1 && r2)
2719 return (DTRACE_HANDLE_OK);
2720 }
2721
2722 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg));
2723 }
2724
2725 static int
2726 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp,
2727 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
2728 {
2729 /*
2730 * There's this idea that the BEGIN probe should be processed before
2731 * everything else, and that the END probe should be processed after
2732 * everything else. In the common case, this is pretty easy to deal
2733 * with. However, a situation may arise where the BEGIN enabling and
2734 * END enabling are on the same CPU, and some enabling in the middle
2735 * occurred on a different CPU. To deal with this (blech!) we need to
2736 * consume the BEGIN buffer up until the end of the BEGIN probe, and
2737 * then set it aside. We will then process every other CPU, and then
2738 * we'll return to the BEGIN CPU and process the rest of the data
2739 * (which will inevitably include the END probe, if any). Making this
2740 * even more complicated (!) is the library's ERROR enabling. Because
2741 * this enabling is processed before we even get into the consume
2742 * callback, any ERROR firing would result in the library's ERROR enabling
2743 * being processed twice -- once in our first pass (for BEGIN probes),
2744 * and again in our second pass (for everything but BEGIN probes). To
2745 * deal with this, we interpose on the ERROR handler to assure that we
2746 * only process ERROR enablings induced by BEGIN enablings in the
2747 * first pass, and that we only process ERROR enablings _not_ induced
2748 * by BEGIN enablings in the second pass.
2749 */
2750
2751 dt_begin_t begin;
2752 processorid_t cpu = dtp->dt_beganon;
2753 int rval, i;
2754 static int max_ncpus;
2755 dtrace_bufdesc_t *buf;
2756
2757 dtp->dt_beganon = -1;
2758
2759 if (dt_get_buf(dtp, cpu, &buf) != 0)
2760 return (-1);
2761 if (buf == NULL)
2762 return (0);
2763
2764 if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) {
2765 /*
2766 * This is the simple case. We're either not stopped, or if
2767 * we are, we actually processed any END probes on another
2768 * CPU. We can simply consume this buffer and return.
2769 */
2770 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2771 pf, rf, arg);
2772 dt_put_buf(dtp, buf);
2773 return (rval);
2774 }
2775
2776 begin.dtbgn_probefunc = pf;
2777 begin.dtbgn_recfunc = rf;
2778 begin.dtbgn_arg = arg;
2779 begin.dtbgn_beginonly = 1;
2780
2781 /*
2782 * We need to interpose on the ERROR handler to be sure that we
2783 * only process ERRORs induced by BEGIN.
2784 */
2785 begin.dtbgn_errhdlr = dtp->dt_errhdlr;
2786 begin.dtbgn_errarg = dtp->dt_errarg;
2787 dtp->dt_errhdlr = dt_consume_begin_error;
2788 dtp->dt_errarg = &begin;
2789
2790 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2791 dt_consume_begin_probe, dt_consume_begin_record, &begin);
2792
2793 dtp->dt_errhdlr = begin.dtbgn_errhdlr;
2794 dtp->dt_errarg = begin.dtbgn_errarg;
2795
2796 if (rval != 0) {
2797 dt_put_buf(dtp, buf);
2798 return (rval);
2799 }
2800
2801 if (max_ncpus == 0)
2802 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
2803
2804 for (i = 0; i < max_ncpus; i++) {
2805 dtrace_bufdesc_t *nbuf;
2806 if (i == cpu)
2807 continue;
2808
2809 if (dt_get_buf(dtp, i, &nbuf) != 0) {
2810 dt_put_buf(dtp, buf);
2811 return (-1);
2812 }
2813 if (nbuf == NULL)
2814 continue;
2815
2816 rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE,
2817 pf, rf, arg);
2818 dt_put_buf(dtp, nbuf);
2819 if (rval != 0) {
2820 dt_put_buf(dtp, buf);
2821 return (rval);
2822 }
2823 }
2824
2825 /*
2826 * Okay -- we're done with the other buffers. Now we want to
2827 * reconsume the first buffer -- but this time we're looking for
2828 * everything _but_ BEGIN. And of course, in order to only consume
2829 * those ERRORs _not_ associated with BEGIN, we need to reinstall our
2830 * ERROR interposition function...
2831 */
2832 begin.dtbgn_beginonly = 0;
2833
2834 assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr);
2835 assert(begin.dtbgn_errarg == dtp->dt_errarg);
2836 dtp->dt_errhdlr = dt_consume_begin_error;
2837 dtp->dt_errarg = &begin;
2838
2839 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2840 dt_consume_begin_probe, dt_consume_begin_record, &begin);
2841
2842 dtp->dt_errhdlr = begin.dtbgn_errhdlr;
2843 dtp->dt_errarg = begin.dtbgn_errarg;
2844
2845 return (rval);
2846 }
2847
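/*
 * Priority queue value function: return the timestamp of the oldest record
 * remaining in a buffer, skipping EPIDNONE alignment filler, or the time at
 * which the buffer was snapshotted if no records remain.
 */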
2848 /* ARGSUSED */
2849 static uint64_t
2850 dt_buf_oldest(void *elem, void *arg)
2851 {
2852 dtrace_bufdesc_t *buf = elem;
2853 size_t offs = buf->dtbd_oldest;
2854
2855 while (offs < buf->dtbd_size) {
2856 dtrace_rechdr_t *dtrh =
2857 /* LINTED - alignment */
2858 (dtrace_rechdr_t *)(buf->dtbd_data + offs);
2859 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2860 offs += sizeof (dtrace_epid_t);
2861 } else {
2862 return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh));
2863 }
2864 }
2865
2866 /* There are no records left; use the time the buffer was retrieved. */
2867 return (buf->dtbd_timestamp);
2868 }
2869
2870 int
2871 dtrace_consume(dtrace_hdl_t *dtp, FILE *fp,
2872 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
2873 {
2874 dtrace_optval_t size;
2875 static int max_ncpus;
2876 int i, rval;
2877 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE];
2878 hrtime_t now = gethrtime();
2879
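/*
 * Rate-limit consumption to the switchrate interval: if not enough time
 * has passed since the last buffer switch, return without consuming.
 */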
2880 if (dtp->dt_lastswitch != 0) {
2881 if (now - dtp->dt_lastswitch < interval)
2882 return (0);
2883
2884 dtp->dt_lastswitch += interval;
2885 } else {
2886 dtp->dt_lastswitch = now;
2887 }
2888
2889 if (!dtp->dt_active)
2890 return (dt_set_errno(dtp, EINVAL));
2891
2892 if (max_ncpus == 0)
2893 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
2894
2895 if (pf == NULL)
2896 pf = (dtrace_consume_probe_f *)dt_nullprobe;
2897
2898 if (rf == NULL)
2899 rf = (dtrace_consume_rec_f *)dt_nullrec;
2900
2901 if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) {
2902 /*
2903 * The output will not be in the order it was traced. Rather,
2904 * we will consume all of the data from each CPU's buffer in
2905 * turn. We apply special handling for the records from BEGIN
2906 * and END probes so that they are consumed first and last,
2907 * respectively.
2908 *
2909 * If we have just begun, we want to first process the CPU that
2910 * executed the BEGIN probe (if any).
2911 */
2912 if (dtp->dt_active && dtp->dt_beganon != -1 &&
2913 (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0)
2914 return (rval);
2915
2916 for (i = 0; i < max_ncpus; i++) {
2917 dtrace_bufdesc_t *buf;
2918
2919 /*
2920 * If we have stopped, we want to process the CPU on
2921 * which the END probe was processed only _after_ we
2922 * have processed everything else.
2923 */
2924 if (dtp->dt_stopped && (i == dtp->dt_endedon))
2925 continue;
2926
2927 if (dt_get_buf(dtp, i, &buf) != 0)
2928 return (-1);
2929 if (buf == NULL)
2930 continue;
2931
2932 dtp->dt_flow = 0;
2933 dtp->dt_indent = 0;
2934 dtp->dt_prefix = NULL;
2935 rval = dt_consume_cpu(dtp, fp, i,
2936 buf, B_FALSE, pf, rf, arg);
2937 dt_put_buf(dtp, buf);
2938 if (rval != 0)
2939 return (rval);
2940 }
2941 if (dtp->dt_stopped) {
2942 dtrace_bufdesc_t *buf;
2943
2944 if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0)
2945 return (-1);
2946 if (buf == NULL)
2947 return (0);
2948
2949 rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon,
2950 buf, B_FALSE, pf, rf, arg);
2951 dt_put_buf(dtp, buf);
2952 return (rval);
2953 }
2954 } else {
2955 /*
2956 * The output will be in the order it was traced (or for
2957 * speculations, when it was committed). We retrieve a buffer
2958 * from each CPU and put it into a priority queue, which sorts
2959 * based on the first entry in the buffer. This is sufficient
2960 * because entries within a buffer are already sorted.
2961 *
2962 * We then consume records one at a time, always consuming the
2963 * oldest record, as determined by the priority queue. When
2964 * we reach the end of the time covered by these buffers,
2965 * we need to stop and retrieve more records on the next pass.
2966 * The kernel tells us the time covered by each buffer, in
2967 * dtbd_timestamp. The first buffer's timestamp tells us the
2968 * time covered by all buffers, as subsequently retrieved
2969 * buffers will cover up to a more recent time.
2970 */
2971
2972 uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t));
2973 uint64_t first_timestamp = 0;
2974 uint_t cookie = 0;
2975 dtrace_bufdesc_t *buf;
2976
2977 bzero(drops, max_ncpus * sizeof (uint64_t));
2978
2979 if (dtp->dt_bufq == NULL) {
2980 dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2,
2981 dt_buf_oldest, NULL);
2982 if (dtp->dt_bufq == NULL) /* ENOMEM */
2983 return (-1);
2984 }
2985
2986 /* Retrieve data from each CPU. */
2987 (void) dtrace_getopt(dtp, "bufsize", &size);
2988 for (i = 0; i < max_ncpus; i++) {
2989 dtrace_bufdesc_t *buf;
2990
2991 if (dt_get_buf(dtp, i, &buf) != 0)
2992 return (-1);
2993 if (buf != NULL) {
2994 if (first_timestamp == 0)
2995 first_timestamp = buf->dtbd_timestamp;
2996 assert(buf->dtbd_timestamp >= first_timestamp);
2997
2998 dt_pq_insert(dtp->dt_bufq, buf);
2999 drops[i] = buf->dtbd_drops;
3000 buf->dtbd_drops = 0;
3001 }
3002 }
3003
3004 /* Consume records. */
3005 for (;;) {
3006 dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq);
3007 uint64_t timestamp;
3008
3009 if (buf == NULL)
3010 break;
3011
3012 timestamp = dt_buf_oldest(buf, dtp);
3013 assert(timestamp >= dtp->dt_last_timestamp);
3014 dtp->dt_last_timestamp = timestamp;
3015
3016 if (timestamp == buf->dtbd_timestamp) {
3017 /*
3018 * We've reached the end of the time covered
3019 * by this buffer. If this is the oldest
3020 * buffer, we must do another pass
3021 * to retrieve more data.
3022 */
3023 dt_put_buf(dtp, buf);
3024 if (timestamp == first_timestamp &&
3025 !dtp->dt_stopped)
3026 break;
3027 continue;
3028 }
3029
3030 if ((rval = dt_consume_cpu(dtp, fp,
3031 buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0)
3032 return (rval);
3033 dt_pq_insert(dtp->dt_bufq, buf);
3034 }
3035
3036 /* Consume drops. */
3037 for (i = 0; i < max_ncpus; i++) {
3038 if (drops[i] != 0) {
3039 int error = dt_handle_cpudrop(dtp, i,
3040 DTRACEDROP_PRINCIPAL, drops[i]);
3041 if (error != 0)
3042 return (error);
3043 }
3044 }
3045
3046 /*
3047 * Reduce memory usage by re-allocating smaller buffers
3048 * for the "remnants".
3049 */
3050 while ((buf = dt_pq_walk(dtp->dt_bufq, &cookie)) != NULL)
3051 dt_realloc_buf(dtp, buf, buf->dtbd_size);
3052 }
3053
3054 return (0);
3055 }