7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/dtrace/dtrace.c
+++ new/usr/src/uts/common/dtrace/dtrace.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2016, Joyent, Inc. All rights reserved.
25 25 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * DTrace - Dynamic Tracing for Solaris
30 30 *
31 31 * This is the implementation of the Solaris Dynamic Tracing framework
32 32 * (DTrace). The user-visible interface to DTrace is described at length in
33 33 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
34 34 * library, the in-kernel DTrace framework, and the DTrace providers are
35 35 * described in the block comments in the <sys/dtrace.h> header file. The
36 36 * internal architecture of DTrace is described in the block comments in the
37 37 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
38 38 * implementation very much assume mastery of all of these sources; if one has
39 39 * an unanswered question about the implementation, one should consult them
40 40 * first.
41 41 *
42 42 * The functions here are ordered roughly as follows:
43 43 *
44 44 * - Probe context functions
45 45 * - Probe hashing functions
46 46 * - Non-probe context utility functions
47 47 * - Matching functions
48 48 * - Provider-to-Framework API functions
49 49 * - Probe management functions
50 50 * - DIF object functions
51 51 * - Format functions
52 52 * - Predicate functions
53 53 * - ECB functions
54 54 * - Buffer functions
55 55 * - Enabling functions
56 56 * - DOF functions
57 57 * - Anonymous enabling functions
58 58 * - Consumer state functions
59 59 * - Helper functions
60 60 * - Hook functions
61 61 * - Driver cookbook functions
62 62 *
63 63 * Each group of functions begins with a block comment labelled the "DTrace
64 64 * [Group] Functions", allowing one to find each block by searching forward
65 65 * on capital-f functions.
66 66 */
67 67 #include <sys/errno.h>
68 68 #include <sys/stat.h>
69 69 #include <sys/modctl.h>
70 70 #include <sys/conf.h>
71 71 #include <sys/systm.h>
72 72 #include <sys/ddi.h>
73 73 #include <sys/sunddi.h>
74 74 #include <sys/cpuvar.h>
75 75 #include <sys/kmem.h>
76 76 #include <sys/strsubr.h>
77 77 #include <sys/sysmacros.h>
78 78 #include <sys/dtrace_impl.h>
79 79 #include <sys/atomic.h>
80 80 #include <sys/cmn_err.h>
81 81 #include <sys/mutex_impl.h>
82 82 #include <sys/rwlock_impl.h>
83 83 #include <sys/ctf_api.h>
84 84 #include <sys/panic.h>
85 85 #include <sys/priv_impl.h>
86 86 #include <sys/policy.h>
87 87 #include <sys/cred_impl.h>
88 88 #include <sys/procfs_isa.h>
89 89 #include <sys/taskq.h>
90 90 #include <sys/mkdev.h>
91 91 #include <sys/kdi.h>
92 92 #include <sys/zone.h>
93 93 #include <sys/socket.h>
94 94 #include <netinet/in.h>
95 95 #include "strtolctype.h"
96 96
97 97 /*
98 98 * DTrace Tunable Variables
99 99 *
100 100 * The following variables may be tuned by adding a line to /etc/system that
101 101 * includes both the name of the DTrace module ("dtrace") and the name of the
102 102 * variable. For example:
103 103 *
104 104 * set dtrace:dtrace_destructive_disallow = 1
105 105 *
106 106 * In general, the only variables that one should be tuning this way are those
107 107 * that affect system-wide DTrace behavior, and for which the default behavior
108 108 * is undesirable. Most of these variables are tunable on a per-consumer
109 109 * basis using DTrace options, and need not be tuned on a system-wide basis.
110 110 * When tuning these variables, avoid pathological values; while some attempt
111 111 * is made to verify the integrity of these variables, they are not considered
112 112 * part of the supported interface to DTrace, and they are therefore not
113 113 * checked comprehensively. Further, these variables should not be tuned
114 114 * dynamically via "mdb -kw" or other means; they should only be tuned via
115 115 * /etc/system.
116 116 */
117 117 int dtrace_destructive_disallow = 0;
118 118 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
119 119 size_t dtrace_difo_maxsize = (256 * 1024);
120 120 dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024);
121 121 size_t dtrace_statvar_maxsize = (16 * 1024);
122 122 size_t dtrace_actions_max = (16 * 1024);
123 123 size_t dtrace_retain_max = 1024;
124 124 dtrace_optval_t dtrace_helper_actions_max = 1024;
125 125 dtrace_optval_t dtrace_helper_providers_max = 32;
126 126 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
127 127 size_t dtrace_strsize_default = 256;
128 128 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
129 129 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
130 130 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
131 131 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
132 132 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
133 133 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
134 134 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
135 135 dtrace_optval_t dtrace_nspec_default = 1;
136 136 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
137 137 dtrace_optval_t dtrace_stackframes_default = 20;
138 138 dtrace_optval_t dtrace_ustackframes_default = 20;
139 139 dtrace_optval_t dtrace_jstackframes_default = 50;
140 140 dtrace_optval_t dtrace_jstackstrsize_default = 512;
141 141 int dtrace_msgdsize_max = 128;
142 142 hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */
143 143 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
144 144 int dtrace_devdepth_max = 32;
145 145 int dtrace_err_verbose;
146 146 hrtime_t dtrace_deadman_interval = NANOSEC;
147 147 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
148 148 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
149 149 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
150 150
151 151 /*
152 152 * DTrace External Variables
153 153 *
154 154 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
155 155 * available to DTrace consumers via the backtick (`) syntax. One of these,
156 156 * dtrace_zero, is made deliberately so: it is provided as a source of
157 157 * well-known, zero-filled memory. While this variable is not documented,
158 158 * it is used by some translators as an implementation detail.
159 159 */
160 160 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
161 161
162 162 /*
163 163 * DTrace Internal Variables
164 164 */
165 165 static dev_info_t *dtrace_devi; /* device info */
166 166 static vmem_t *dtrace_arena; /* probe ID arena */
167 167 static vmem_t *dtrace_minor; /* minor number arena */
168 168 static taskq_t *dtrace_taskq; /* task queue */
169 169 static dtrace_probe_t **dtrace_probes; /* array of all probes */
170 170 static int dtrace_nprobes; /* number of probes */
171 171 static dtrace_provider_t *dtrace_provider; /* provider list */
172 172 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
173 173 static int dtrace_opens; /* number of opens */
174 174 static int dtrace_helpers; /* number of helpers */
175 175 static int dtrace_getf; /* number of unpriv getf()s */
176 176 static void *dtrace_softstate; /* softstate pointer */
177 177 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
178 178 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
179 179 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
180 180 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
181 181 static int dtrace_toxranges; /* number of toxic ranges */
182 182 static int dtrace_toxranges_max; /* size of toxic range array */
183 183 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
184 184 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
185 185 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
186 186 static kthread_t *dtrace_panicked; /* panicking thread */
187 187 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
188 188 static dtrace_genid_t dtrace_probegen; /* current probe generation */
189 189 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
190 190 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
191 191 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
192 192 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
193 193 static int dtrace_dynvar_failclean; /* dynvars failed to clean */
194 194
195 195 /*
196 196 * DTrace Locking
197 197 * DTrace is protected by three (relatively coarse-grained) locks:
198 198 *
199 199 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
200 200 * including enabling state, probes, ECBs, consumer state, helper state,
201 201 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
202 202 * probe context is lock-free -- synchronization is handled via the
203 203 * dtrace_sync() cross call mechanism.
204 204 *
205 205 * (2) dtrace_provider_lock is required when manipulating provider state, or
206 206 * when provider state must be held constant.
207 207 *
208 208 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
209 209 * when meta provider state must be held constant.
210 210 *
211 211 * The lock ordering between these three locks is dtrace_meta_lock before
212 212 * dtrace_provider_lock before dtrace_lock. (In particular, there are
213 213 * several places where dtrace_provider_lock is held by the framework as it
214 214 * calls into the providers -- which then call back into the framework,
215 215 * grabbing dtrace_lock.)
216 216 *
217 217 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
218 218 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
219 219 * role as a coarse-grained lock; it is acquired before both of these locks.
220 220 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
221 221 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
222 222 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
223 223 * acquired _between_ dtrace_provider_lock and dtrace_lock.
224 224 */
225 225 static kmutex_t dtrace_lock; /* probe state lock */
226 226 static kmutex_t dtrace_provider_lock; /* provider state lock */
227 227 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
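As a minimal sketch of the ordering described above (illustrative only; real
call paths acquire only the locks they actually need), a hypothetical
framework path would nest the acquisitions like this:

	mutex_enter(&dtrace_meta_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	/* ... manipulate framework, provider and meta provider state ... */
	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&dtrace_meta_lock);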
228 228
229 229 /*
230 230 * DTrace Provider Variables
231 231 *
232 232 * These are the variables relating to DTrace as a provider (that is, the
233 233 * provider of the BEGIN, END, and ERROR probes).
234 234 */
235 235 static dtrace_pattr_t dtrace_provider_attr = {
236 236 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
237 237 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
238 238 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
239 239 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
240 240 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
241 241 };
242 242
243 243 static void
244 244 dtrace_nullop(void)
245 245 {}
246 246
247 247 static int
248 248 dtrace_enable_nullop(void)
249 249 {
250 250 return (0);
251 251 }
252 252
253 253 static dtrace_pops_t dtrace_provider_ops = {
254 254 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
255 255 (void (*)(void *, struct modctl *))dtrace_nullop,
256 256 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
257 257 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
258 258 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
259 259 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
260 260 NULL,
261 261 NULL,
262 262 NULL,
263 263 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
264 264 };
265 265
266 266 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
267 267 static dtrace_id_t dtrace_probeid_end; /* special END probe */
268 268 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
269 269
270 270 /*
271 271 * DTrace Helper Tracing Variables
272 272 *
273 273 * These variables should be set dynamically to enable helper tracing. The
274 274 * only variables that should be set are dtrace_helptrace_enable (which should
275 275 * be set to a non-zero value to allocate helper tracing buffers on the next
276 276 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
277 277 * non-zero value to deallocate helper tracing buffers on the next close of
278 278 * /dev/dtrace). When (and only when) helper tracing is disabled, the
279 279 * buffer size may also be set via dtrace_helptrace_bufsize.
280 280 */
281 281 int dtrace_helptrace_enable = 0;
282 282 int dtrace_helptrace_disable = 0;
283 283 int dtrace_helptrace_bufsize = 16 * 1024 * 1024;
284 284 uint32_t dtrace_helptrace_nlocals;
285 285 static dtrace_helptrace_t *dtrace_helptrace_buffer;
286 286 static uint32_t dtrace_helptrace_next = 0;
287 287 static int dtrace_helptrace_wrapped = 0;
288 288
289 289 /*
290 290 * DTrace Error Hashing
291 291 *
 292  292  * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
293 293 * table. This is very useful for checking coverage of tests that are
294 294 * expected to induce DIF or DOF processing errors, and may be useful for
 295  295  * debugging problems in the DIF code generator or in DOF generation. The
296 296 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
297 297 */
298 298 #ifdef DEBUG
299 299 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
300 300 static const char *dtrace_errlast;
301 301 static kthread_t *dtrace_errthread;
302 302 static kmutex_t dtrace_errlock;
303 303 #endif
304 304
305 305 /*
306 306 * DTrace Macros and Constants
307 307 *
308 308 * These are various macros that are useful in various spots in the
309 309 * implementation, along with a few random constants that have no meaning
310 310 * outside of the implementation. There is no real structure to this cpp
311 311 * mishmash -- but is there ever?
312 312 */
313 313 #define DTRACE_HASHSTR(hash, probe) \
314 314 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
315 315
316 316 #define DTRACE_HASHNEXT(hash, probe) \
317 317 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
318 318
319 319 #define DTRACE_HASHPREV(hash, probe) \
320 320 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
321 321
322 322 #define DTRACE_HASHEQ(hash, lhs, rhs) \
323 323 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
324 324 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
325 325
326 326 #define DTRACE_AGGHASHSIZE_SLEW 17
327 327
328 328 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
329 329
330 330 /*
331 331 * The key for a thread-local variable consists of the lower 61 bits of the
332 332 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
333 333 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
334 334 * equal to a variable identifier. This is necessary (but not sufficient) to
335 335 * assure that global associative arrays never collide with thread-local
336 336 * variables. To guarantee that they cannot collide, we must also define the
337 337 * order for keying dynamic variables. That order is:
338 338 *
339 339 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
340 340 *
341 341 * Because the variable-key and the tls-key are in orthogonal spaces, there is
342 342 * no way for a global variable key signature to match a thread-local key
343 343 * signature.
344 344 */
345 345 #define DTRACE_TLS_THRKEY(where) { \
346 346 uint_t intr = 0; \
347 347 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
348 348 for (; actv; actv >>= 1) \
349 349 intr++; \
350 350 ASSERT(intr < (1 << 3)); \
351 351 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
352 352 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
353 353 }
354 354
355 355 #define DT_BSWAP_8(x) ((x) & 0xff)
356 356 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
357 357 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
358 358 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
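For example, DT_BSWAP_32(0x12345678) evaluates to 0x78563412: each wider swap
applies the next-narrower swap to the two halves of its argument and exchanges
them.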
359 359
360 360 #define DT_MASK_LO 0x00000000FFFFFFFFULL
361 361
362 362 #define DTRACE_STORE(type, tomax, offset, what) \
363 363 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
364 364
365 365 #ifndef __x86
366 366 #define DTRACE_ALIGNCHECK(addr, size, flags) \
367 367 if (addr & (size - 1)) { \
368 368 *flags |= CPU_DTRACE_BADALIGN; \
369 369 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
370 370 return (0); \
371 371 }
372 372 #else
373 373 #define DTRACE_ALIGNCHECK(addr, size, flags)
374 374 #endif
375 375
376 376 /*
377 377 * Test whether a range of memory starting at testaddr of size testsz falls
378 378 * within the range of memory described by addr, sz. We take care to avoid
379 379 * problems with overflow and underflow of the unsigned quantities, and
380 380 * disallow all negative sizes. Ranges of size 0 are allowed.
381 381 */
382 382 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
383 383 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
384 384 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
385 385 (testaddr) + (testsz) >= (testaddr))
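A worked example of the overflow handling (the addresses are illustrative):
for a 256-byte region with baseaddr 0xff00 and basesz 0x100, the first clause
covers both bounds with a single unsigned comparison and never computes
baseaddr + basesz (which could itself wrap for a region at the very top of
the address space), while the final clause rejects any (testaddr, testsz)
pair whose sum wraps:

	ASSERT(DTRACE_INRANGE((uintptr_t)0xff80, 0x10, 0xff00, 0x100));
	ASSERT(!DTRACE_INRANGE((uintptr_t)0x10, 0x10, 0xff00, 0x100));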
386 386
387 387 #define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \
388 388 do { \
389 389 if ((remp) != NULL) { \
390 390 *(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \
391 391 } \
392 392 _NOTE(CONSTCOND) } while (0)
393 393
394 394
395 395 /*
396 396 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
397 397 * alloc_sz on the righthand side of the comparison in order to avoid overflow
398 398 * or underflow in the comparison with it. This is simpler than the INRANGE
399 399 * check above, because we know that the dtms_scratch_ptr is valid in the
400 400 * range. Allocations of size zero are allowed.
401 401 */
402 402 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
403 403 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
404 404 (mstate)->dtms_scratch_ptr >= (alloc_sz))
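Worked example (illustrative numbers): with a 64-byte scratch region and
dtms_scratch_ptr already advanced 60 bytes past dtms_scratch_base, the
left-hand side evaluates to 4, so any alloc_sz larger than 4 -- including a
pathological value such as (size_t)-1 -- simply fails the comparison, whereas
adding alloc_sz to dtms_scratch_ptr first could wrap and appear to fit.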
405 405
406 406 #define DTRACE_LOADFUNC(bits) \
407 407 /*CSTYLED*/ \
408 408 uint##bits##_t \
409 409 dtrace_load##bits(uintptr_t addr) \
410 410 { \
411 411 size_t size = bits / NBBY; \
412 412 /*CSTYLED*/ \
413 413 uint##bits##_t rval; \
414 414 int i; \
415 415 volatile uint16_t *flags = (volatile uint16_t *) \
416 416 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
417 417 \
418 418 DTRACE_ALIGNCHECK(addr, size, flags); \
419 419 \
420 420 for (i = 0; i < dtrace_toxranges; i++) { \
421 421 if (addr >= dtrace_toxrange[i].dtt_limit) \
422 422 continue; \
423 423 \
424 424 if (addr + size <= dtrace_toxrange[i].dtt_base) \
425 425 continue; \
426 426 \
427 427 /* \
428 428 * This address falls within a toxic region; return 0. \
429 429 */ \
430 430 *flags |= CPU_DTRACE_BADADDR; \
431 431 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
432 432 return (0); \
433 433 } \
434 434 \
435 435 *flags |= CPU_DTRACE_NOFAULT; \
436 436 /*CSTYLED*/ \
437 437 rval = *((volatile uint##bits##_t *)addr); \
438 438 *flags &= ~CPU_DTRACE_NOFAULT; \
439 439 \
440 440 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
441 441 }
442 442
443 443 #ifdef _LP64
444 444 #define dtrace_loadptr dtrace_load64
445 445 #else
446 446 #define dtrace_loadptr dtrace_load32
447 447 #endif
448 448
449 449 #define DTRACE_DYNHASH_FREE 0
450 450 #define DTRACE_DYNHASH_SINK 1
451 451 #define DTRACE_DYNHASH_VALID 2
452 452
453 453 #define DTRACE_MATCH_FAIL -1
454 454 #define DTRACE_MATCH_NEXT 0
455 455 #define DTRACE_MATCH_DONE 1
456 456 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
457 457 #define DTRACE_STATE_ALIGN 64
458 458
459 459 #define DTRACE_FLAGS2FLT(flags) \
460 460 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
461 461 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
462 462 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
463 463 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
464 464 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
465 465 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
466 466 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
467 467 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
468 468 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
469 469 DTRACEFLT_UNKNOWN)
470 470
471 471 #define DTRACEACT_ISSTRING(act) \
472 472 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
473 473 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
474 474
475 475 static size_t dtrace_strlen(const char *, size_t);
476 476 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
477 477 static void dtrace_enabling_provide(dtrace_provider_t *);
478 478 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
479 479 static void dtrace_enabling_matchall(void);
480 480 static void dtrace_enabling_reap(void);
481 481 static dtrace_state_t *dtrace_anon_grab(void);
482 482 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
483 483 dtrace_state_t *, uint64_t, uint64_t);
484 484 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
485 485 static void dtrace_buffer_drop(dtrace_buffer_t *);
486 486 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
487 487 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
488 488 dtrace_state_t *, dtrace_mstate_t *);
489 489 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
490 490 dtrace_optval_t);
491 491 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
492 492 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
493 493 static int dtrace_priv_proc(dtrace_state_t *, dtrace_mstate_t *);
494 494 static void dtrace_getf_barrier(void);
495 495 static int dtrace_canload_remains(uint64_t, size_t, size_t *,
496 496 dtrace_mstate_t *, dtrace_vstate_t *);
497 497 static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
498 498 dtrace_mstate_t *, dtrace_vstate_t *);
499 499
500 500 /*
501 501 * DTrace Probe Context Functions
502 502 *
503 503 * These functions are called from probe context. Because probe context is
 504  504  * any context in which C may be called, arbitrary locks may be held,
505 505 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
506 506 * As a result, functions called from probe context may only call other DTrace
507 507 * support functions -- they may not interact at all with the system at large.
508 508 * (Note that the ASSERT macro is made probe-context safe by redefining it in
509 509 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
510 510 * loads are to be performed from probe context, they _must_ be in terms of
511 511 * the safe dtrace_load*() variants.
512 512 *
513 513 * Some functions in this block are not actually called from probe context;
514 514 * for these functions, there will be a comment above the function reading
515 515 * "Note: not called from probe context."
516 516 */
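As a minimal sketch of that last rule (illustrative only; "addr" stands for an
assumed DIF-supplied uintptr_t and is not a variable in this file), a direct
*(uint8_t *)addr dereference is unsafe in probe context, but the generated
safe loader absorbs any fault:

	uint8_t b = dtrace_load8(addr);	/* a bad addr merely sets CPU_DTRACE_FAULT */
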
517 517 void
518 518 dtrace_panic(const char *format, ...)
519 519 {
520 520 va_list alist;
521 521
522 522 va_start(alist, format);
523 523 dtrace_vpanic(format, alist);
524 524 va_end(alist);
525 525 }
526 526
527 527 int
528 528 dtrace_assfail(const char *a, const char *f, int l)
529 529 {
530 530 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
531 531
532 532 /*
533 533 * We just need something here that even the most clever compiler
534 534 * cannot optimize away.
535 535 */
536 536 return (a[(uintptr_t)f]);
537 537 }
538 538
539 539 /*
540 540 * Atomically increment a specified error counter from probe context.
541 541 */
542 542 static void
543 543 dtrace_error(uint32_t *counter)
544 544 {
545 545 /*
546 546 * Most counters stored to in probe context are per-CPU counters.
547 547 * However, there are some error conditions that are sufficiently
548 548 * arcane that they don't merit per-CPU storage. If these counters
549 549 * are incremented concurrently on different CPUs, scalability will be
550 550 * adversely affected -- but we don't expect them to be white-hot in a
551 551 * correctly constructed enabling...
552 552 */
553 553 uint32_t oval, nval;
554 554
555 555 do {
556 556 oval = *counter;
557 557
558 558 if ((nval = oval + 1) == 0) {
559 559 /*
560 560 * If the counter would wrap, set it to 1 -- assuring
561 561 * that the counter is never zero when we have seen
562 562 * errors. (The counter must be 32-bits because we
563 563 * aren't guaranteed a 64-bit compare&swap operation.)
564 564 * To save this code both the infamy of being fingered
565 565 * by a priggish news story and the indignity of being
566 566 * the target of a neo-puritan witch trial, we're
567 567 * carefully avoiding any colorful description of the
568 568 * likelihood of this condition -- but suffice it to
569 569 * say that it is only slightly more likely than the
570 570 * overflow of predicate cache IDs, as discussed in
571 571 * dtrace_predicate_create().
572 572 */
573 573 nval = 1;
574 574 }
575 575 } while (dtrace_cas32(counter, oval, nval) != oval);
576 576 }
577 577
578 578 /*
579 579 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
580 580 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
581 581 */
582 582 /* BEGIN CSTYLED */
583 583 DTRACE_LOADFUNC(8)
584 584 DTRACE_LOADFUNC(16)
585 585 DTRACE_LOADFUNC(32)
586 586 DTRACE_LOADFUNC(64)
587 587 /* END CSTYLED */
588 588
589 589 static int
590 590 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
591 591 {
592 592 if (dest < mstate->dtms_scratch_base)
593 593 return (0);
594 594
595 595 if (dest + size < dest)
596 596 return (0);
597 597
598 598 if (dest + size > mstate->dtms_scratch_ptr)
599 599 return (0);
600 600
601 601 return (1);
602 602 }
603 603
604 604 static int
605 605 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
606 606 dtrace_statvar_t **svars, int nsvars)
607 607 {
608 608 int i;
609 609 size_t maxglobalsize, maxlocalsize;
610 610
611 611 if (nsvars == 0)
612 612 return (0);
613 613
614 614 maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
615 615 maxlocalsize = maxglobalsize * NCPU;
616 616
617 617 for (i = 0; i < nsvars; i++) {
618 618 dtrace_statvar_t *svar = svars[i];
619 619 uint8_t scope;
620 620 size_t size;
621 621
622 622 if (svar == NULL || (size = svar->dtsv_size) == 0)
623 623 continue;
624 624
625 625 scope = svar->dtsv_var.dtdv_scope;
626 626
627 627 /*
628 628 * We verify that our size is valid in the spirit of providing
629 629 * defense in depth: we want to prevent attackers from using
630 630 * DTrace to escalate an orthogonal kernel heap corruption bug
631 631 * into the ability to store to arbitrary locations in memory.
632 632 */
633 633 VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
634 634 (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));
635 635
636 636 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data,
637 637 svar->dtsv_size)) {
638 638 DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
639 639 svar->dtsv_size);
640 640 return (1);
641 641 }
642 642 }
643 643
644 644 return (0);
645 645 }
646 646
647 647 /*
648 648 * Check to see if the address is within a memory region to which a store may
649 649 * be issued. This includes the DTrace scratch areas, and any DTrace variable
650 650 * region. The caller of dtrace_canstore() is responsible for performing any
651 651 * alignment checks that are needed before stores are actually executed.
652 652 */
653 653 static int
654 654 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
655 655 dtrace_vstate_t *vstate)
656 656 {
657 657 return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
658 658 }
659 659
660 660 /*
661 661 * Implementation of dtrace_canstore which communicates the upper bound of the
662 662 * allowed memory region.
663 663 */
664 664 static int
665 665 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
666 666 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
667 667 {
668 668 /*
669 669 * First, check to see if the address is in scratch space...
670 670 */
671 671 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
672 672 mstate->dtms_scratch_size)) {
673 673 DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
674 674 mstate->dtms_scratch_size);
675 675 return (1);
676 676 }
677 677
678 678 /*
679 679 * Now check to see if it's a dynamic variable. This check will pick
680 680 * up both thread-local variables and any global dynamically-allocated
681 681 * variables.
682 682 */
683 683 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
684 684 vstate->dtvs_dynvars.dtds_size)) {
685 685 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
686 686 uintptr_t base = (uintptr_t)dstate->dtds_base +
687 687 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
688 688 uintptr_t chunkoffs;
689 689 dtrace_dynvar_t *dvar;
690 690
691 691 /*
692 692 * Before we assume that we can store here, we need to make
693 693 * sure that it isn't in our metadata -- storing to our
694 694 * dynamic variable metadata would corrupt our state. For
695 695 * the range to not include any dynamic variable metadata,
696 696 * it must:
697 697 *
698 698 * (1) Start above the hash table that is at the base of
699 699 * the dynamic variable space
700 700 *
701 701 * (2) Have a starting chunk offset that is beyond the
702 702 * dtrace_dynvar_t that is at the base of every chunk
703 703 *
704 704 * (3) Not span a chunk boundary
705 705 *
706 706 * (4) Not be in the tuple space of a dynamic variable
707 707 *
708 708 */
709 709 if (addr < base)
710 710 return (0);
711 711
712 712 chunkoffs = (addr - base) % dstate->dtds_chunksize;
713 713
714 714 if (chunkoffs < sizeof (dtrace_dynvar_t))
715 715 return (0);
716 716
717 717 if (chunkoffs + sz > dstate->dtds_chunksize)
718 718 return (0);
719 719
720 720 dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);
721 721
722 722 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
723 723 return (0);
724 724
725 725 if (chunkoffs < sizeof (dtrace_dynvar_t) +
726 726 ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
727 727 return (0);
728 728
729 729 DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize);
730 730 return (1);
731 731 }
732 732
733 733 /*
734 734 * Finally, check the static local and global variables. These checks
735 735 * take the longest, so we perform them last.
736 736 */
737 737 if (dtrace_canstore_statvar(addr, sz, remain,
738 738 vstate->dtvs_locals, vstate->dtvs_nlocals))
739 739 return (1);
740 740
741 741 if (dtrace_canstore_statvar(addr, sz, remain,
742 742 vstate->dtvs_globals, vstate->dtvs_nglobals))
743 743 return (1);
744 744
745 745 return (0);
746 746 }
747 747
748 748
749 749 /*
750 750 * Convenience routine to check to see if the address is within a memory
751 751 * region in which a load may be issued given the user's privilege level;
752 752 * if not, it sets the appropriate error flags and loads 'addr' into the
753 753 * illegal value slot.
754 754 *
755 755 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
756 756 * appropriate memory access protection.
757 757 */
758 758 static int
759 759 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
760 760 dtrace_vstate_t *vstate)
761 761 {
762 762 return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
763 763 }
764 764
765 765 /*
766 766 * Implementation of dtrace_canload which communicates the upper bound of the
767 767 * allowed memory region.
768 768 */
769 769 static int
770 770 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
771 771 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
772 772 {
773 773 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
774 774 file_t *fp;
775 775
776 776 /*
777 777 * If we hold the privilege to read from kernel memory, then
778 778 * everything is readable.
779 779 */
780 780 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
781 781 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
782 782 return (1);
783 783 }
784 784
785 785 /*
786 786 * You can obviously read that which you can store.
787 787 */
788 788 if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
789 789 return (1);
790 790
791 791 /*
792 792 * We're allowed to read from our own string table.
793 793 */
794 794 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
795 795 mstate->dtms_difo->dtdo_strlen)) {
796 796 DTRACE_RANGE_REMAIN(remain, addr,
797 797 mstate->dtms_difo->dtdo_strtab,
798 798 mstate->dtms_difo->dtdo_strlen);
799 799 return (1);
800 800 }
801 801
802 802 if (vstate->dtvs_state != NULL &&
803 803 dtrace_priv_proc(vstate->dtvs_state, mstate)) {
804 804 proc_t *p;
805 805
806 806 /*
807 807 * When we have privileges to the current process, there are
808 808 * several context-related kernel structures that are safe to
809 809 * read, even absent the privilege to read from kernel memory.
810 810 * These reads are safe because these structures contain only
811 811 * state that (1) we're permitted to read, (2) is harmless or
812 812 * (3) contains pointers to additional kernel state that we're
813 813 * not permitted to read (and as such, do not present an
814 814 * opportunity for privilege escalation). Finally (and
815 815 * critically), because of the nature of their relation with
816 816 * the current thread context, the memory associated with these
817 817 * structures cannot change over the duration of probe context,
818 818 * and it is therefore impossible for this memory to be
819 819 * deallocated and reallocated as something else while it's
820 820 * being operated upon.
821 821 */
822 822 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) {
823 823 DTRACE_RANGE_REMAIN(remain, addr, curthread,
824 824 sizeof (kthread_t));
825 825 return (1);
826 826 }
827 827
828 828 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
829 829 sz, curthread->t_procp, sizeof (proc_t))) {
830 830 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp,
831 831 sizeof (proc_t));
832 832 return (1);
833 833 }
834 834
835 835 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
836 836 curthread->t_cred, sizeof (cred_t))) {
837 837 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred,
838 838 sizeof (cred_t));
839 839 return (1);
840 840 }
841 841
842 842 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
843 843 &(p->p_pidp->pid_id), sizeof (pid_t))) {
844 844 DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id),
845 845 sizeof (pid_t));
846 846 return (1);
847 847 }
848 848
849 849 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
850 850 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
851 851 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu,
852 852 offsetof(cpu_t, cpu_pause_thread));
853 853 return (1);
854 854 }
855 855 }
856 856
857 857 if ((fp = mstate->dtms_getf) != NULL) {
858 858 uintptr_t psz = sizeof (void *);
859 859 vnode_t *vp;
860 860 vnodeops_t *op;
861 861
862 862 /*
863 863 * When getf() returns a file_t, the enabling is implicitly
864 864 * granted the (transient) right to read the returned file_t
865 865 * as well as the v_path and v_op->vnop_name of the underlying
866 866 * vnode. These accesses are allowed after a successful
867 867 * getf() because the members that they refer to cannot change
868 868 * once set -- and the barrier logic in the kernel's closef()
 869  869  * path assures that the file_t and its referenced vnode_t
 870  870  * cannot themselves be stale (that is, it is impossible for
871 871 * either dtms_getf itself or its f_vnode member to reference
872 872 * freed memory).
873 873 */
874 874 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) {
875 875 DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t));
876 876 return (1);
877 877 }
878 878
879 879 if ((vp = fp->f_vnode) != NULL) {
880 880 size_t slen;
881 881
882 882 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) {
883 883 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path,
884 884 psz);
885 885 return (1);
886 886 }
887 887
888 888 slen = strlen(vp->v_path) + 1;
889 889 if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) {
890 890 DTRACE_RANGE_REMAIN(remain, addr, vp->v_path,
891 891 slen);
892 892 return (1);
893 893 }
894 894
895 895 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) {
896 896 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op,
897 897 psz);
898 898 return (1);
899 899 }
900 900
901 901 if ((op = vp->v_op) != NULL &&
902 902 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
903 903 DTRACE_RANGE_REMAIN(remain, addr,
904 904 &op->vnop_name, psz);
905 905 return (1);
906 906 }
907 907
908 908 if (op != NULL && op->vnop_name != NULL &&
909 909 DTRACE_INRANGE(addr, sz, op->vnop_name,
910 910 (slen = strlen(op->vnop_name) + 1))) {
911 911 DTRACE_RANGE_REMAIN(remain, addr,
912 912 op->vnop_name, slen);
913 913 return (1);
914 914 }
915 915 }
916 916 }
917 917
918 918 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
919 919 *illval = addr;
920 920 return (0);
921 921 }
922 922
923 923 /*
924 924 * Convenience routine to check to see if a given string is within a memory
925 925 * region in which a load may be issued given the user's privilege level;
926 926 * this exists so that we don't need to issue unnecessary dtrace_strlen()
927 927 * calls in the event that the user has all privileges.
928 928 */
929 929 static int
930 930 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
931 931 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
932 932 {
933 933 size_t rsize;
934 934
935 935 /*
936 936 * If we hold the privilege to read from kernel memory, then
937 937 * everything is readable.
938 938 */
939 939 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
940 940 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
941 941 return (1);
942 942 }
943 943
944 944 /*
945 945 * Even if the caller is uninterested in querying the remaining valid
946 946 * range, it is required to ensure that the access is allowed.
947 947 */
948 948 if (remain == NULL) {
949 949 remain = &rsize;
950 950 }
951 951 if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
952 952 size_t strsz;
953 953 /*
954 954 * Perform the strlen after determining the length of the
955 955 * memory region which is accessible. This prevents timing
956 956 * information from being used to find NULs in memory which is
957 957 * not accessible to the caller.
958 958 */
959 959 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
960 960 MIN(sz, *remain));
961 961 if (strsz <= *remain) {
962 962 return (1);
963 963 }
964 964 }
965 965
966 966 return (0);
967 967 }
968 968
969 969 /*
970 970 * Convenience routine to check to see if a given variable is within a memory
971 971 * region in which a load may be issued given the user's privilege level.
972 972 */
973 973 static int
974 974 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
975 975 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
976 976 {
977 977 size_t sz;
978 978 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
979 979
980 980 /*
981 981 * Calculate the max size before performing any checks since even
982 982 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
983 983 * return the max length via 'remain'.
984 984 */
985 985 if (type->dtdt_kind == DIF_TYPE_STRING) {
986 986 dtrace_state_t *state = vstate->dtvs_state;
987 987
988 988 if (state != NULL) {
989 989 sz = state->dts_options[DTRACEOPT_STRSIZE];
990 990 } else {
991 991 /*
992 992 * In helper context, we have a NULL state; fall back
993 993 * to using the system-wide default for the string size
994 994 * in this case.
995 995 */
996 996 sz = dtrace_strsize_default;
997 997 }
998 998 } else {
999 999 sz = type->dtdt_size;
1000 1000 }
1001 1001
1002 1002 /*
1003 1003 * If we hold the privilege to read from kernel memory, then
1004 1004 * everything is readable.
1005 1005 */
1006 1006 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1007 1007 DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
1008 1008 return (1);
1009 1009 }
1010 1010
1011 1011 if (type->dtdt_kind == DIF_TYPE_STRING) {
1012 1012 return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
1013 1013 vstate));
1014 1014 }
1015 1015 return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
1016 1016 vstate));
1017 1017 }
1018 1018
1019 1019 /*
1020 1020 * Convert a string to a signed integer using safe loads.
1021 1021 *
1022 1022 * NOTE: This function uses various macros from strtolctype.h to manipulate
1023 1023 * digit values, etc -- these have all been checked to ensure they make
1024 1024 * no additional function calls.
1025 1025 */
1026 1026 static int64_t
1027 1027 dtrace_strtoll(char *input, int base, size_t limit)
1028 1028 {
1029 1029 uintptr_t pos = (uintptr_t)input;
1030 1030 int64_t val = 0;
1031 1031 int x;
1032 1032 boolean_t neg = B_FALSE;
1033 1033 char c, cc, ccc;
1034 1034 uintptr_t end = pos + limit;
1035 1035
1036 1036 /*
1037 1037 * Consume any whitespace preceding digits.
1038 1038 */
1039 1039 while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
1040 1040 pos++;
1041 1041
1042 1042 /*
1043 1043 * Handle an explicit sign if one is present.
1044 1044 */
1045 1045 if (c == '-' || c == '+') {
1046 1046 if (c == '-')
1047 1047 neg = B_TRUE;
1048 1048 c = dtrace_load8(++pos);
1049 1049 }
1050 1050
1051 1051 /*
1052 1052 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
1053 1053 * if present.
1054 1054 */
1055 1055 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
1056 1056 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
1057 1057 pos += 2;
1058 1058 c = ccc;
1059 1059 }
1060 1060
1061 1061 /*
1062 1062 * Read in contiguous digits until the first non-digit character.
1063 1063 */
1064 1064 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
1065 1065 c = dtrace_load8(++pos))
1066 1066 val = val * base + x;
1067 1067
1068 1068 return (neg ? -val : val);
1069 1069 }
1070 1070
1071 1071 /*
1072 1072 * Compare two strings using safe loads.
1073 1073 */
1074 1074 static int
1075 1075 dtrace_strncmp(char *s1, char *s2, size_t limit)
1076 1076 {
1077 1077 uint8_t c1, c2;
1078 1078 volatile uint16_t *flags;
1079 1079
1080 1080 if (s1 == s2 || limit == 0)
1081 1081 return (0);
1082 1082
1083 1083 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1084 1084
1085 1085 do {
1086 1086 if (s1 == NULL) {
1087 1087 c1 = '\0';
1088 1088 } else {
1089 1089 c1 = dtrace_load8((uintptr_t)s1++);
1090 1090 }
1091 1091
1092 1092 if (s2 == NULL) {
1093 1093 c2 = '\0';
1094 1094 } else {
1095 1095 c2 = dtrace_load8((uintptr_t)s2++);
1096 1096 }
1097 1097
1098 1098 if (c1 != c2)
1099 1099 return (c1 - c2);
1100 1100 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
1101 1101
1102 1102 return (0);
1103 1103 }
1104 1104
1105 1105 /*
1106 1106 * Compute strlen(s) for a string using safe memory accesses. The additional
1107 1107  * lim parameter is used to specify a maximum length to ensure completion.
1108 1108 */
1109 1109 static size_t
1110 1110 dtrace_strlen(const char *s, size_t lim)
1111 1111 {
1112 1112 uint_t len;
1113 1113
1114 1114 for (len = 0; len != lim; len++) {
1115 1115 if (dtrace_load8((uintptr_t)s++) == '\0')
1116 1116 break;
1117 1117 }
1118 1118
1119 1119 return (len);
1120 1120 }
1121 1121
1122 1122 /*
1123 1123 * Check if an address falls within a toxic region.
1124 1124 */
1125 1125 static int
1126 1126 dtrace_istoxic(uintptr_t kaddr, size_t size)
1127 1127 {
1128 1128 uintptr_t taddr, tsize;
1129 1129 int i;
1130 1130
1131 1131 for (i = 0; i < dtrace_toxranges; i++) {
1132 1132 taddr = dtrace_toxrange[i].dtt_base;
1133 1133 tsize = dtrace_toxrange[i].dtt_limit - taddr;
1134 1134
1135 1135 if (kaddr - taddr < tsize) {
1136 1136 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1137 1137 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
1138 1138 return (1);
1139 1139 }
1140 1140
1141 1141 if (taddr - kaddr < size) {
1142 1142 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1143 1143 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
1144 1144 return (1);
1145 1145 }
1146 1146 }
1147 1147
1148 1148 return (0);
1149 1149 }
1150 1150
1151 1151 /*
1152 1152 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
1153 1153 * memory specified by the DIF program. The dst is assumed to be safe memory
1154 1154 * that we can store to directly because it is managed by DTrace. As with
1155 1155 * standard bcopy, overlapping copies are handled properly.
1156 1156 */
1157 1157 static void
1158 1158 dtrace_bcopy(const void *src, void *dst, size_t len)
1159 1159 {
1160 1160 if (len != 0) {
1161 1161 uint8_t *s1 = dst;
1162 1162 const uint8_t *s2 = src;
1163 1163
1164 1164 if (s1 <= s2) {
1165 1165 do {
1166 1166 *s1++ = dtrace_load8((uintptr_t)s2++);
1167 1167 } while (--len != 0);
1168 1168 } else {
1169 1169 s2 += len;
1170 1170 s1 += len;
1171 1171
1172 1172 do {
1173 1173 *--s1 = dtrace_load8((uintptr_t)--s2);
1174 1174 } while (--len != 0);
1175 1175 }
1176 1176 }
1177 1177 }
1178 1178
1179 1179 /*
1180 1180 * Copy src to dst using safe memory accesses, up to either the specified
1181 1181 * length, or the point that a nul byte is encountered. The src is assumed to
1182 1182 * be unsafe memory specified by the DIF program. The dst is assumed to be
1183 1183 * safe memory that we can store to directly because it is managed by DTrace.
1184 1184 * Unlike dtrace_bcopy(), overlapping regions are not handled.
1185 1185 */
1186 1186 static void
1187 1187 dtrace_strcpy(const void *src, void *dst, size_t len)
1188 1188 {
1189 1189 if (len != 0) {
1190 1190 uint8_t *s1 = dst, c;
1191 1191 const uint8_t *s2 = src;
1192 1192
1193 1193 do {
1194 1194 *s1++ = c = dtrace_load8((uintptr_t)s2++);
1195 1195 } while (--len != 0 && c != '\0');
1196 1196 }
1197 1197 }
1198 1198
1199 1199 /*
1200 1200 * Copy src to dst, deriving the size and type from the specified (BYREF)
1201 1201 * variable type. The src is assumed to be unsafe memory specified by the DIF
1202 1202 * program. The dst is assumed to be DTrace variable memory that is of the
1203 1203  * specified type; we assume that we can store to it directly.
1204 1204 */
1205 1205 static void
1206 1206 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
1207 1207 {
1208 1208 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1209 1209
1210 1210 if (type->dtdt_kind == DIF_TYPE_STRING) {
1211 1211 dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
1212 1212 } else {
1213 1213 dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
1214 1214 }
1215 1215 }
1216 1216
1217 1217 /*
1218 1218 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1219 1219 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1220 1220 * safe memory that we can access directly because it is managed by DTrace.
1221 1221 */
1222 1222 static int
1223 1223 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1224 1224 {
1225 1225 volatile uint16_t *flags;
1226 1226
1227 1227 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1228 1228
1229 1229 if (s1 == s2)
1230 1230 return (0);
1231 1231
1232 1232 if (s1 == NULL || s2 == NULL)
1233 1233 return (1);
1234 1234
1235 1235 if (s1 != s2 && len != 0) {
1236 1236 const uint8_t *ps1 = s1;
1237 1237 const uint8_t *ps2 = s2;
1238 1238
1239 1239 do {
1240 1240 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1241 1241 return (1);
1242 1242 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1243 1243 }
1244 1244 return (0);
1245 1245 }
1246 1246
1247 1247 /*
1248 1248 * Zero the specified region using a simple byte-by-byte loop. Note that this
1249 1249 * is for safe DTrace-managed memory only.
1250 1250 */
1251 1251 static void
1252 1252 dtrace_bzero(void *dst, size_t len)
1253 1253 {
1254 1254 uchar_t *cp;
1255 1255
1256 1256 for (cp = dst; len != 0; len--)
1257 1257 *cp++ = 0;
1258 1258 }
1259 1259
1260 1260 static void
1261 1261 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1262 1262 {
1263 1263 uint64_t result[2];
1264 1264
1265 1265 result[0] = addend1[0] + addend2[0];
1266 1266 result[1] = addend1[1] + addend2[1] +
1267 1267 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1268 1268
1269 1269 sum[0] = result[0];
1270 1270 sum[1] = result[1];
1271 1271 }
1272 1272
1273 1273 /*
1274 1274 * Shift the 128-bit value in a by b. If b is positive, shift left.
1275 1275 * If b is negative, shift right.
1276 1276 */
1277 1277 static void
1278 1278 dtrace_shift_128(uint64_t *a, int b)
1279 1279 {
1280 1280 uint64_t mask;
1281 1281
1282 1282 if (b == 0)
1283 1283 return;
1284 1284
1285 1285 if (b < 0) {
1286 1286 b = -b;
1287 1287 if (b >= 64) {
1288 1288 a[0] = a[1] >> (b - 64);
1289 1289 a[1] = 0;
1290 1290 } else {
1291 1291 a[0] >>= b;
1292 1292 mask = 1LL << (64 - b);
1293 1293 mask -= 1;
1294 1294 a[0] |= ((a[1] & mask) << (64 - b));
1295 1295 a[1] >>= b;
1296 1296 }
1297 1297 } else {
1298 1298 if (b >= 64) {
1299 1299 a[1] = a[0] << (b - 64);
1300 1300 a[0] = 0;
1301 1301 } else {
1302 1302 a[1] <<= b;
1303 1303 mask = a[0] >> (64 - b);
1304 1304 a[1] |= mask;
1305 1305 a[0] <<= b;
1306 1306 }
1307 1307 }
1308 1308 }
1309 1309
1310 1310 /*
1311 1311 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1312 1312 * use native multiplication on those, and then re-combine into the
1313 1313 * resulting 128-bit value.
1314 1314 *
1315 1315 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1316 1316 * hi1 * hi2 << 64 +
1317 1317 * hi1 * lo2 << 32 +
1318 1318 * hi2 * lo1 << 32 +
1319 1319 * lo1 * lo2
1320 1320 */
1321 1321 static void
1322 1322 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1323 1323 {
1324 1324 uint64_t hi1, hi2, lo1, lo2;
1325 1325 uint64_t tmp[2];
1326 1326
1327 1327 hi1 = factor1 >> 32;
1328 1328 hi2 = factor2 >> 32;
1329 1329
1330 1330 lo1 = factor1 & DT_MASK_LO;
1331 1331 lo2 = factor2 & DT_MASK_LO;
1332 1332
1333 1333 product[0] = lo1 * lo2;
1334 1334 product[1] = hi1 * hi2;
1335 1335
1336 1336 tmp[0] = hi1 * lo2;
1337 1337 tmp[1] = 0;
1338 1338 dtrace_shift_128(tmp, 32);
1339 1339 dtrace_add_128(product, tmp, product);
1340 1340
1341 1341 tmp[0] = hi2 * lo1;
1342 1342 tmp[1] = 0;
1343 1343 dtrace_shift_128(tmp, 32);
1344 1344 dtrace_add_128(product, tmp, product);
1345 1345 }
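As a quick sanity check of the decomposition above (illustrative): multiplying
factor1 = factor2 = 0x100000000 (2^32) gives hi1 = hi2 = 1 and lo1 = lo2 = 0,
so both cross terms vanish and the routine yields product[1] = 1 and
product[0] = 0, i.e. 2^64, as expected.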
1346 1346
1347 1347 /*
1348 1348 * This privilege check should be used by actions and subroutines to
1349 1349 * verify that the user credentials of the process that enabled the
1350 1350 * invoking ECB match the target credentials
1351 1351 */
1352 1352 static int
1353 1353 dtrace_priv_proc_common_user(dtrace_state_t *state)
1354 1354 {
1355 1355 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1356 1356
1357 1357 /*
1358 1358 * We should always have a non-NULL state cred here, since if cred
1359 1359 * is null (anonymous tracing), we fast-path bypass this routine.
1360 1360 */
1361 1361 ASSERT(s_cr != NULL);
1362 1362
1363 1363 if ((cr = CRED()) != NULL &&
1364 1364 s_cr->cr_uid == cr->cr_uid &&
1365 1365 s_cr->cr_uid == cr->cr_ruid &&
1366 1366 s_cr->cr_uid == cr->cr_suid &&
1367 1367 s_cr->cr_gid == cr->cr_gid &&
1368 1368 s_cr->cr_gid == cr->cr_rgid &&
1369 1369 s_cr->cr_gid == cr->cr_sgid)
1370 1370 return (1);
1371 1371
1372 1372 return (0);
1373 1373 }
1374 1374
1375 1375 /*
1376 1376 * This privilege check should be used by actions and subroutines to
1377 1377 * verify that the zone of the process that enabled the invoking ECB
1378 1378 * matches the target credentials
1379 1379 */
1380 1380 static int
1381 1381 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1382 1382 {
1383 1383 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1384 1384
1385 1385 /*
1386 1386 * We should always have a non-NULL state cred here, since if cred
1387 1387 * is null (anonymous tracing), we fast-path bypass this routine.
1388 1388 */
1389 1389 ASSERT(s_cr != NULL);
1390 1390
1391 1391 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
1392 1392 return (1);
1393 1393
1394 1394 return (0);
1395 1395 }
1396 1396
1397 1397 /*
1398 1398 * This privilege check should be used by actions and subroutines to
1399 1399 * verify that the process has not setuid or changed credentials.
1400 1400 */
1401 1401 static int
1402 1402 dtrace_priv_proc_common_nocd()
1403 1403 {
1404 1404 proc_t *proc;
1405 1405
1406 1406 if ((proc = ttoproc(curthread)) != NULL &&
1407 1407 !(proc->p_flag & SNOCD))
1408 1408 return (1);
1409 1409
1410 1410 return (0);
1411 1411 }
1412 1412
1413 1413 static int
1414 1414 dtrace_priv_proc_destructive(dtrace_state_t *state, dtrace_mstate_t *mstate)
1415 1415 {
1416 1416 int action = state->dts_cred.dcr_action;
1417 1417
1418 1418 if (!(mstate->dtms_access & DTRACE_ACCESS_PROC))
1419 1419 goto bad;
1420 1420
1421 1421 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1422 1422 dtrace_priv_proc_common_zone(state) == 0)
1423 1423 goto bad;
1424 1424
1425 1425 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1426 1426 dtrace_priv_proc_common_user(state) == 0)
1427 1427 goto bad;
1428 1428
1429 1429 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1430 1430 dtrace_priv_proc_common_nocd() == 0)
1431 1431 goto bad;
1432 1432
1433 1433 return (1);
1434 1434
1435 1435 bad:
1436 1436 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1437 1437
1438 1438 return (0);
1439 1439 }
1440 1440
1441 1441 static int
1442 1442 dtrace_priv_proc_control(dtrace_state_t *state, dtrace_mstate_t *mstate)
1443 1443 {
1444 1444 if (mstate->dtms_access & DTRACE_ACCESS_PROC) {
1445 1445 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1446 1446 return (1);
1447 1447
1448 1448 if (dtrace_priv_proc_common_zone(state) &&
1449 1449 dtrace_priv_proc_common_user(state) &&
1450 1450 dtrace_priv_proc_common_nocd())
1451 1451 return (1);
1452 1452 }
1453 1453
1454 1454 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1455 1455
1456 1456 return (0);
1457 1457 }
1458 1458
1459 1459 static int
1460 1460 dtrace_priv_proc(dtrace_state_t *state, dtrace_mstate_t *mstate)
1461 1461 {
1462 1462 if ((mstate->dtms_access & DTRACE_ACCESS_PROC) &&
1463 1463 (state->dts_cred.dcr_action & DTRACE_CRA_PROC))
1464 1464 return (1);
1465 1465
1466 1466 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1467 1467
1468 1468 return (0);
1469 1469 }
1470 1470
1471 1471 static int
1472 1472 dtrace_priv_kernel(dtrace_state_t *state)
1473 1473 {
1474 1474 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1475 1475 return (1);
1476 1476
1477 1477 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1478 1478
1479 1479 return (0);
1480 1480 }
1481 1481
1482 1482 static int
1483 1483 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1484 1484 {
1485 1485 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1486 1486 return (1);
1487 1487
1488 1488 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1489 1489
1490 1490 return (0);
1491 1491 }
1492 1492
1493 1493 /*
1494 1494 * Determine if the dte_cond of the specified ECB allows for processing of
1495 1495 * the current probe to continue. Note that this routine may allow continued
1496 1496 * processing, but with access(es) stripped from the mstate's dtms_access
1497 1497 * field.
1498 1498 */
1499 1499 static int
1500 1500 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1501 1501 dtrace_ecb_t *ecb)
1502 1502 {
1503 1503 dtrace_probe_t *probe = ecb->dte_probe;
1504 1504 dtrace_provider_t *prov = probe->dtpr_provider;
1505 1505 dtrace_pops_t *pops = &prov->dtpv_pops;
1506 1506 int mode = DTRACE_MODE_NOPRIV_DROP;
1507 1507
1508 1508 ASSERT(ecb->dte_cond);
1509 1509
1510 1510 if (pops->dtps_mode != NULL) {
1511 1511 mode = pops->dtps_mode(prov->dtpv_arg,
1512 1512 probe->dtpr_id, probe->dtpr_arg);
1513 1513
1514 1514 ASSERT(mode & (DTRACE_MODE_USER | DTRACE_MODE_KERNEL));
1515 1515 ASSERT(mode & (DTRACE_MODE_NOPRIV_RESTRICT |
1516 1516 DTRACE_MODE_NOPRIV_DROP));
1517 1517 }
1518 1518
1519 1519 /*
1520 1520 * If the dte_cond bits indicate that this consumer is only allowed to
1521 1521 * see user-mode firings of this probe, check that the probe was fired
1522 1522 * while in a user context. If that's not the case, use the policy
1523 1523 * specified by the provider to determine if we drop the probe or
1524 1524 * merely restrict operation.
1525 1525 */
1526 1526 if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1527 1527 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1528 1528
1529 1529 if (!(mode & DTRACE_MODE_USER)) {
1530 1530 if (mode & DTRACE_MODE_NOPRIV_DROP)
1531 1531 return (0);
1532 1532
1533 1533 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1534 1534 }
1535 1535 }
1536 1536
1537 1537 /*
1538 1538 * This is more subtle than it looks. We have to be absolutely certain
1539 1539 * that CRED() isn't going to change out from under us so it's only
1540 1540 * legit to examine that structure if we're in constrained situations.
1541 1541 * Currently, the only time we'll make this check is if a non-super-user
1542 1542 * has enabled the profile or syscall providers -- providers that
1543 1543 * allow visibility of all processes. For the profile case, the check
1544 1544 * above will ensure that we're examining a user context.
1545 1545 */
1546 1546 if (ecb->dte_cond & DTRACE_COND_OWNER) {
1547 1547 cred_t *cr;
1548 1548 cred_t *s_cr = state->dts_cred.dcr_cred;
1549 1549 proc_t *proc;
1550 1550
1551 1551 ASSERT(s_cr != NULL);
1552 1552
1553 1553 if ((cr = CRED()) == NULL ||
1554 1554 s_cr->cr_uid != cr->cr_uid ||
1555 1555 s_cr->cr_uid != cr->cr_ruid ||
1556 1556 s_cr->cr_uid != cr->cr_suid ||
1557 1557 s_cr->cr_gid != cr->cr_gid ||
1558 1558 s_cr->cr_gid != cr->cr_rgid ||
1559 1559 s_cr->cr_gid != cr->cr_sgid ||
1560 1560 (proc = ttoproc(curthread)) == NULL ||
1561 1561 (proc->p_flag & SNOCD)) {
1562 1562 if (mode & DTRACE_MODE_NOPRIV_DROP)
1563 1563 return (0);
1564 1564
1565 1565 mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1566 1566 }
1567 1567 }
1568 1568
1569 1569 /*
1570 1570 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1571 1571 * in our zone, check to see if our mode policy is to restrict rather
1572 1572 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1573 1573 * and DTRACE_ACCESS_ARGS
1574 1574 */
1575 1575 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1576 1576 cred_t *cr;
1577 1577 cred_t *s_cr = state->dts_cred.dcr_cred;
1578 1578
1579 1579 ASSERT(s_cr != NULL);
1580 1580
1581 1581 if ((cr = CRED()) == NULL ||
1582 1582 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1583 1583 if (mode & DTRACE_MODE_NOPRIV_DROP)
1584 1584 return (0);
1585 1585
1586 1586 mstate->dtms_access &=
1587 1587 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1588 1588 }
1589 1589 }
1590 1590
1591 1591 /*
1592 1592 * By merits of being in this code path at all, we have limited
1593 1593 * privileges. If the provider has indicated that limited privileges
1594 1594 * are to denote restricted operation, strip off the ability to access
1595 1595 * arguments.
1596 1596 */
1597 1597 if (mode & DTRACE_MODE_LIMITEDPRIV_RESTRICT)
1598 1598 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1599 1599
1600 1600 return (1);
1601 1601 }
1602 1602
1603 1603 /*
1604 1604 * Note: not called from probe context. This function is called
1605 1605 * asynchronously (and at a regular interval) from outside of probe context to
1606 1606 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1607 1607 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1608 1608 */
1609 1609 void
1610 1610 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1611 1611 {
1612 1612 dtrace_dynvar_t *dirty;
1613 1613 dtrace_dstate_percpu_t *dcpu;
1614 1614 dtrace_dynvar_t **rinsep;
1615 1615 int i, j, work = 0;
1616 1616
1617 1617 for (i = 0; i < NCPU; i++) {
1618 1618 dcpu = &dstate->dtds_percpu[i];
1619 1619 rinsep = &dcpu->dtdsc_rinsing;
1620 1620
1621 1621 /*
1622 1622 * If the dirty list is NULL, there is no dirty work to do.
1623 1623 */
1624 1624 if (dcpu->dtdsc_dirty == NULL)
1625 1625 continue;
1626 1626
1627 1627 if (dcpu->dtdsc_rinsing != NULL) {
1628 1628 /*
1629 1629 * If the rinsing list is non-NULL, then it is because
1630 1630 * this CPU was selected to accept another CPU's
1631 1631 * dirty list -- and since that time, dirty buffers
1632 1632 * have accumulated. This is a highly unlikely
1633 1633 * condition, but we choose to ignore the dirty
1634 1634 * buffers -- they'll be picked up in a future cleanse.
1635 1635 */
1636 1636 continue;
1637 1637 }
1638 1638
1639 1639 if (dcpu->dtdsc_clean != NULL) {
1640 1640 /*
1641 1641 * If the clean list is non-NULL, then we're in a
1642 1642 * situation where a CPU has done deallocations (we
1643 1643 * have a non-NULL dirty list) but no allocations (we
1644 1644 * also have a non-NULL clean list). We can't simply
1645 1645 * move the dirty list into the clean list on this
1646 1646 * CPU, yet we also don't want to allow this condition
1647 1647 * to persist, lest a short clean list prevent a
1648 1648 * massive dirty list from being cleaned (which in
1649 1649 * turn could lead to otherwise avoidable dynamic
1650 1650 * drops). To deal with this, we look for some CPU
1651 1651 * with a NULL clean list, NULL dirty list, and NULL
1652 1652 * rinsing list -- and then we borrow this CPU to
1653 1653 * rinse our dirty list.
1654 1654 */
1655 1655 for (j = 0; j < NCPU; j++) {
1656 1656 dtrace_dstate_percpu_t *rinser;
1657 1657
1658 1658 rinser = &dstate->dtds_percpu[j];
1659 1659
1660 1660 if (rinser->dtdsc_rinsing != NULL)
1661 1661 continue;
1662 1662
1663 1663 if (rinser->dtdsc_dirty != NULL)
1664 1664 continue;
1665 1665
1666 1666 if (rinser->dtdsc_clean != NULL)
1667 1667 continue;
1668 1668
1669 1669 rinsep = &rinser->dtdsc_rinsing;
1670 1670 break;
1671 1671 }
1672 1672
1673 1673 if (j == NCPU) {
1674 1674 /*
1675 1675 * We were unable to find another CPU that
1676 1676 * could accept this dirty list -- we are
1677 1677 * therefore unable to clean it now.
1678 1678 */
1679 1679 dtrace_dynvar_failclean++;
1680 1680 continue;
1681 1681 }
1682 1682 }
1683 1683
1684 1684 work = 1;
1685 1685
1686 1686 /*
1687 1687 * Atomically move the dirty list aside.
1688 1688 */
1689 1689 do {
1690 1690 dirty = dcpu->dtdsc_dirty;
1691 1691
1692 1692 /*
1693 1693 * Before we zap the dirty list, set the rinsing list.
1694 1694 * (This allows for a potential assertion in
1695 1695 * dtrace_dynvar(): if a free dynamic variable appears
1696 1696 * on a hash chain, either the dirty list or the
1697 1697 * rinsing list for some CPU must be non-NULL.)
1698 1698 */
1699 1699 *rinsep = dirty;
1700 1700 dtrace_membar_producer();
1701 1701 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1702 1702 dirty, NULL) != dirty);
1703 1703 }
1704 1704
1705 1705 if (!work) {
1706 1706 /*
1707 1707 * We have no work to do; we can simply return.
1708 1708 */
1709 1709 return;
1710 1710 }
1711 1711
1712 1712 dtrace_sync();
1713 1713
1714 1714 for (i = 0; i < NCPU; i++) {
1715 1715 dcpu = &dstate->dtds_percpu[i];
1716 1716
1717 1717 if (dcpu->dtdsc_rinsing == NULL)
1718 1718 continue;
1719 1719
1720 1720 /*
1721 1721 * We are now guaranteed that no hash chain contains a pointer
1722 1722 * into this dirty list; we can make it clean.
1723 1723 */
1724 1724 ASSERT(dcpu->dtdsc_clean == NULL);
1725 1725 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1726 1726 dcpu->dtdsc_rinsing = NULL;
1727 1727 }
1728 1728
1729 1729 /*
1730 1730 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1731 1731 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1732 1732 * This prevents a race whereby a CPU incorrectly decides that
1733 1733 * the state should be something other than DTRACE_DSTATE_CLEAN
1734 1734 * after dtrace_dynvar_clean() has completed.
1735 1735 */
1736 1736 dtrace_sync();
1737 1737
1738 1738 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1739 1739 }
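
The "atomically move the dirty list aside" step above is the classic compare-and-swap retry idiom: publish the entire chain on the rinsing pointer, then swing the dirty head to NULL only if it has not changed in the meantime. A minimal userland sketch of that idiom, using C11 atomics in place of dtrace_casptr() and omitting the explicit producer barrier, might look like the following (struct node and detach_list are illustrative names, not part of this file):

#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *next;
};

/*
 * Illustrative sketch only: detach an entire singly-linked list from an
 * atomically updated head, publishing it through *rinsep first so that a
 * concurrent walker can always find the chain somewhere.
 */
static struct node *
detach_list(_Atomic(struct node *) *dirty, struct node **rinsep)
{
	struct node *head;

	do {
		head = atomic_load(dirty);
		*rinsep = head;		/* publish before attempting the swap */
	} while (!atomic_compare_exchange_weak(dirty, &head, NULL));

	return (head);
}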
1740 1740
1741 1741 /*
1742 1742 * Depending on the value of the op parameter, this function looks-up,
1743 1743 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1744 1744 * allocation is requested, this function will return a pointer to a
1745 1745 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1746 1746 * variable can be allocated. If NULL is returned, the appropriate counter
1747 1747 * will be incremented.
1748 1748 */
1749 1749 dtrace_dynvar_t *
1750 1750 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1751 1751 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1752 1752 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1753 1753 {
1754 1754 uint64_t hashval = DTRACE_DYNHASH_VALID;
1755 1755 dtrace_dynhash_t *hash = dstate->dtds_hash;
1756 1756 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1757 1757 processorid_t me = CPU->cpu_id, cpu = me;
1758 1758 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1759 1759 size_t bucket, ksize;
1760 1760 size_t chunksize = dstate->dtds_chunksize;
1761 1761 uintptr_t kdata, lock, nstate;
1762 1762 uint_t i;
1763 1763
1764 1764 ASSERT(nkeys != 0);
1765 1765
1766 1766 /*
1767 1767 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1768 1768 * algorithm. For the by-value portions, we perform the algorithm in
1769 1769 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1770 1770 * bit, and seems to have only a minute effect on distribution. For
1771 1771 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1772 1772 * over each referenced byte. It's painful to do this, but it's much
1773 1773 * better than pathological hash distribution. The efficacy of the
1774 1774 * hashing algorithm (and a comparison with other algorithms) may be
1775 1775 * found by running the ::dtrace_dynstat MDB dcmd.
1776 1776 */
1777 1777 for (i = 0; i < nkeys; i++) {
1778 1778 if (key[i].dttk_size == 0) {
1779 1779 uint64_t val = key[i].dttk_value;
1780 1780
1781 1781 hashval += (val >> 48) & 0xffff;
1782 1782 hashval += (hashval << 10);
1783 1783 hashval ^= (hashval >> 6);
1784 1784
1785 1785 hashval += (val >> 32) & 0xffff;
1786 1786 hashval += (hashval << 10);
1787 1787 hashval ^= (hashval >> 6);
1788 1788
1789 1789 hashval += (val >> 16) & 0xffff;
1790 1790 hashval += (hashval << 10);
1791 1791 hashval ^= (hashval >> 6);
1792 1792
1793 1793 hashval += val & 0xffff;
1794 1794 hashval += (hashval << 10);
1795 1795 hashval ^= (hashval >> 6);
1796 1796 } else {
1797 1797 /*
1798 1798 * This is incredibly painful, but it beats the hell
1799 1799 * out of the alternative.
1800 1800 */
1801 1801 uint64_t j, size = key[i].dttk_size;
1802 1802 uintptr_t base = (uintptr_t)key[i].dttk_value;
1803 1803
1804 1804 if (!dtrace_canload(base, size, mstate, vstate))
1805 1805 break;
1806 1806
1807 1807 for (j = 0; j < size; j++) {
1808 1808 hashval += dtrace_load8(base + j);
1809 1809 hashval += (hashval << 10);
1810 1810 hashval ^= (hashval >> 6);
1811 1811 }
1812 1812 }
1813 1813 }
1814 1814
1815 1815 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1816 1816 return (NULL);
1817 1817
1818 1818 hashval += (hashval << 3);
1819 1819 hashval ^= (hashval >> 11);
1820 1820 hashval += (hashval << 15);
1821 1821
1822 1822 /*
1823 1823 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1824 1824 * comes out to be one of our two sentinel hash values. If this
1825 1825 * actually happens, we set the hashval to be a value known to be a
1826 1826 * non-sentinel value.
1827 1827 */
1828 1828 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1829 1829 hashval = DTRACE_DYNHASH_VALID;
1830 1830
1831 1831 /*
1832 1832 * Yes, it's painful to do a divide here. If the cycle count becomes
1833 1833 * important here, tricks can be pulled to reduce it. (However, it's
1834 1834 * critical that hash collisions be kept to an absolute minimum;
1835 1835 * they're much more painful than a divide.) It's better to have a
1836 1836 * solution that generates few collisions and still keeps things
1837 1837 * relatively simple.
1838 1838 */
1839 1839 bucket = hashval % dstate->dtds_hashsize;
1840 1840
1841 1841 if (op == DTRACE_DYNVAR_DEALLOC) {
1842 1842 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1843 1843
1844 1844 for (;;) {
1845 1845 while ((lock = *lockp) & 1)
1846 1846 continue;
1847 1847
1848 1848 if (dtrace_casptr((void *)lockp,
1849 1849 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1850 1850 break;
1851 1851 }
1852 1852
1853 1853 dtrace_membar_producer();
1854 1854 }
1855 1855
1856 1856 top:
1857 1857 prev = NULL;
1858 1858 lock = hash[bucket].dtdh_lock;
1859 1859
1860 1860 dtrace_membar_consumer();
1861 1861
1862 1862 start = hash[bucket].dtdh_chain;
1863 1863 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1864 1864 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1865 1865 op != DTRACE_DYNVAR_DEALLOC));
1866 1866
1867 1867 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1868 1868 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1869 1869 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1870 1870
1871 1871 if (dvar->dtdv_hashval != hashval) {
1872 1872 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1873 1873 /*
1874 1874 * We've reached the sink, and therefore the
1875 1875 * end of the hash chain; we can kick out of
1876 1876 * the loop knowing that we have seen a valid
1877 1877 * snapshot of state.
1878 1878 */
1879 1879 ASSERT(dvar->dtdv_next == NULL);
1880 1880 ASSERT(dvar == &dtrace_dynhash_sink);
1881 1881 break;
1882 1882 }
1883 1883
1884 1884 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1885 1885 /*
1886 1886 * We've gone off the rails: somewhere along
1887 1887 * the line, one of the members of this hash
1888 1888 * chain was deleted. Note that we could also
1889 1889 * detect this by simply letting this loop run
1890 1890 * to completion, as we would eventually hit
1891 1891 * the end of the dirty list. However, we
1892 1892 * want to avoid running the length of the
1893 1893 * dirty list unnecessarily (it might be quite
1894 1894 * long), so we catch this as early as
1895 1895 * possible by detecting the hash marker. In
1896 1896 * this case, we simply set dvar to NULL and
1897 1897 * break; the conditional after the loop will
1898 1898 * send us back to top.
1899 1899 */
1900 1900 dvar = NULL;
1901 1901 break;
1902 1902 }
1903 1903
1904 1904 goto next;
1905 1905 }
1906 1906
1907 1907 if (dtuple->dtt_nkeys != nkeys)
1908 1908 goto next;
1909 1909
1910 1910 for (i = 0; i < nkeys; i++, dkey++) {
1911 1911 if (dkey->dttk_size != key[i].dttk_size)
1912 1912 goto next; /* size or type mismatch */
1913 1913
1914 1914 if (dkey->dttk_size != 0) {
1915 1915 if (dtrace_bcmp(
1916 1916 (void *)(uintptr_t)key[i].dttk_value,
1917 1917 (void *)(uintptr_t)dkey->dttk_value,
1918 1918 dkey->dttk_size))
1919 1919 goto next;
1920 1920 } else {
1921 1921 if (dkey->dttk_value != key[i].dttk_value)
1922 1922 goto next;
1923 1923 }
1924 1924 }
1925 1925
1926 1926 if (op != DTRACE_DYNVAR_DEALLOC)
1927 1927 return (dvar);
1928 1928
1929 1929 ASSERT(dvar->dtdv_next == NULL ||
1930 1930 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1931 1931
1932 1932 if (prev != NULL) {
1933 1933 ASSERT(hash[bucket].dtdh_chain != dvar);
1934 1934 ASSERT(start != dvar);
1935 1935 ASSERT(prev->dtdv_next == dvar);
1936 1936 prev->dtdv_next = dvar->dtdv_next;
1937 1937 } else {
1938 1938 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1939 1939 start, dvar->dtdv_next) != start) {
1940 1940 /*
1941 1941 * We have failed to atomically swing the
1942 1942 * hash table head pointer, presumably because
1943 1943 * of a conflicting allocation on another CPU.
1944 1944 * We need to reread the hash chain and try
1945 1945 * again.
1946 1946 */
1947 1947 goto top;
1948 1948 }
1949 1949 }
1950 1950
1951 1951 dtrace_membar_producer();
1952 1952
1953 1953 /*
1954 1954 * Now set the hash value to indicate that it's free.
1955 1955 */
1956 1956 ASSERT(hash[bucket].dtdh_chain != dvar);
1957 1957 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1958 1958
1959 1959 dtrace_membar_producer();
1960 1960
1961 1961 /*
1962 1962 * Set the next pointer to point at the dirty list, and
1963 1963 * atomically swing the dirty pointer to the newly freed dvar.
1964 1964 */
1965 1965 do {
1966 1966 next = dcpu->dtdsc_dirty;
1967 1967 dvar->dtdv_next = next;
1968 1968 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1969 1969
1970 1970 /*
1971 1971 * Finally, unlock this hash bucket.
1972 1972 */
1973 1973 ASSERT(hash[bucket].dtdh_lock == lock);
1974 1974 ASSERT(lock & 1);
1975 1975 hash[bucket].dtdh_lock++;
1976 1976
1977 1977 return (NULL);
1978 1978 next:
1979 1979 prev = dvar;
1980 1980 continue;
1981 1981 }
1982 1982
1983 1983 if (dvar == NULL) {
1984 1984 /*
1985 1985 * If dvar is NULL, it is because we went off the rails:
1986 1986 * one of the elements that we traversed in the hash chain
1987 1987 * was deleted while we were traversing it. In this case,
1988 1988 * we assert that we aren't doing a dealloc (deallocs lock
1989 1989 * the hash bucket to prevent themselves from racing with
1990 1990 * one another), and retry the hash chain traversal.
1991 1991 */
1992 1992 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1993 1993 goto top;
1994 1994 }
1995 1995
1996 1996 if (op != DTRACE_DYNVAR_ALLOC) {
1997 1997 /*
1998 1998 * If we are not to allocate a new variable, we want to
1999 1999 * return NULL now. Before we return, check that the value
2000 2000 * of the lock word hasn't changed. If it has, we may have
2001 2001 * seen an inconsistent snapshot.
2002 2002 */
2003 2003 if (op == DTRACE_DYNVAR_NOALLOC) {
2004 2004 if (hash[bucket].dtdh_lock != lock)
2005 2005 goto top;
2006 2006 } else {
2007 2007 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
2008 2008 ASSERT(hash[bucket].dtdh_lock == lock);
2009 2009 ASSERT(lock & 1);
2010 2010 hash[bucket].dtdh_lock++;
2011 2011 }
2012 2012
2013 2013 return (NULL);
2014 2014 }
2015 2015
2016 2016 /*
2017 2017 * We need to allocate a new dynamic variable. The size we need is the
2018 2018 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
2019 2019 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
2020 2020 * the size of any referred-to data (dsize). We then round the final
2021 2021 * size up to the chunksize for allocation.
2022 2022 */
2023 2023 for (ksize = 0, i = 0; i < nkeys; i++)
2024 2024 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
2025 2025
2026 2026 /*
2027 2027 * This should be pretty much impossible, but could happen if, say,
2028 2028 * strange DIF specified the tuple. Ideally, this should be an
2029 2029 * assertion and not an error condition -- but that requires that the
2030 2030 * chunksize calculation in dtrace_difo_chunksize() be absolutely
2031 2031 * bullet-proof. (That is, it must not be able to be fooled by
2032 2032 * malicious DIF.) Given the lack of backwards branches in DIF,
2033 2033 * solving this would presumably not amount to solving the Halting
2034 2034 * Problem -- but it still seems awfully hard.
2035 2035 */
2036 2036 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
2037 2037 ksize + dsize > chunksize) {
2038 2038 dcpu->dtdsc_drops++;
2039 2039 return (NULL);
2040 2040 }
2041 2041
2042 2042 nstate = DTRACE_DSTATE_EMPTY;
2043 2043
2044 2044 do {
2045 2045 retry:
2046 2046 free = dcpu->dtdsc_free;
2047 2047
2048 2048 if (free == NULL) {
2049 2049 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
2050 2050 void *rval;
2051 2051
2052 2052 if (clean == NULL) {
2053 2053 /*
2054 2054 * We're out of dynamic variable space on
2055 2055 * this CPU. Unless we have tried all CPUs,
2056 2056 * we'll try to allocate from a different
2057 2057 * CPU.
2058 2058 */
2059 2059 switch (dstate->dtds_state) {
2060 2060 case DTRACE_DSTATE_CLEAN: {
2061 2061 void *sp = &dstate->dtds_state;
2062 2062
2063 2063 if (++cpu >= NCPU)
2064 2064 cpu = 0;
2065 2065
2066 2066 if (dcpu->dtdsc_dirty != NULL &&
2067 2067 nstate == DTRACE_DSTATE_EMPTY)
2068 2068 nstate = DTRACE_DSTATE_DIRTY;
2069 2069
2070 2070 if (dcpu->dtdsc_rinsing != NULL)
2071 2071 nstate = DTRACE_DSTATE_RINSING;
2072 2072
2073 2073 dcpu = &dstate->dtds_percpu[cpu];
2074 2074
2075 2075 if (cpu != me)
2076 2076 goto retry;
2077 2077
2078 2078 (void) dtrace_cas32(sp,
2079 2079 DTRACE_DSTATE_CLEAN, nstate);
2080 2080
2081 2081 /*
2082 2082 * To increment the correct bean
2083 2083 * counter, take another lap.
2084 2084 */
2085 2085 goto retry;
2086 2086 }
2087 2087
2088 2088 case DTRACE_DSTATE_DIRTY:
2089 2089 dcpu->dtdsc_dirty_drops++;
2090 2090 break;
2091 2091
2092 2092 case DTRACE_DSTATE_RINSING:
2093 2093 dcpu->dtdsc_rinsing_drops++;
2094 2094 break;
2095 2095
2096 2096 case DTRACE_DSTATE_EMPTY:
2097 2097 dcpu->dtdsc_drops++;
2098 2098 break;
2099 2099 }
2100 2100
2101 2101 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2102 2102 return (NULL);
2103 2103 }
2104 2104
2105 2105 /*
2106 2106 * The clean list appears to be non-empty. We want to
2107 2107 * move the clean list to the free list; we start by
2108 2108 * moving the clean pointer aside.
2109 2109 */
2110 2110 if (dtrace_casptr(&dcpu->dtdsc_clean,
2111 2111 clean, NULL) != clean) {
2112 2112 /*
2113 2113 * We are in one of two situations:
2114 2114 *
2115 2115 * (a) The clean list was switched to the
2116 2116 * free list by another CPU.
2117 2117 *
2118 2118 * (b) The clean list was added to by the
2119 2119 * cleansing cyclic.
2120 2120 *
2121 2121 * In either of these situations, we can
2122 2122 * just reattempt the free list allocation.
2123 2123 */
2124 2124 goto retry;
2125 2125 }
2126 2126
2127 2127 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2128 2128
2129 2129 /*
2130 2130 * Now we'll move the clean list to our free list.
2131 2131 * It's impossible for this to fail: the only way
2132 2132 * the free list can be updated is through this
2133 2133 * code path, and only one CPU can own the clean list.
2134 2134 * Thus, it would only be possible for this to fail if
2135 2135 * this code were racing with dtrace_dynvar_clean().
2136 2136 * (That is, if dtrace_dynvar_clean() updated the clean
2137 2137 * list, and we ended up racing to update the free
2138 2138 * list.) This race is prevented by the dtrace_sync()
2139 2139 * in dtrace_dynvar_clean() -- which flushes the
2140 2140 * owners of the clean lists out before resetting
2141 2141 * the clean lists.
2142 2142 */
2143 2143 dcpu = &dstate->dtds_percpu[me];
2144 2144 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2145 2145 ASSERT(rval == NULL);
2146 2146 goto retry;
2147 2147 }
2148 2148
2149 2149 dvar = free;
2150 2150 new_free = dvar->dtdv_next;
2151 2151 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2152 2152
2153 2153 /*
2154 2154 * We have now allocated a new chunk. We copy the tuple keys into the
2155 2155 * tuple array and copy any referenced key data into the data space
2156 2156 * following the tuple array. As we do this, we relocate dttk_value
2157 2157 * in the final tuple to point to the key data address in the chunk.
2158 2158 */
2159 2159 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2160 2160 dvar->dtdv_data = (void *)(kdata + ksize);
2161 2161 dvar->dtdv_tuple.dtt_nkeys = nkeys;
2162 2162
2163 2163 for (i = 0; i < nkeys; i++) {
2164 2164 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2165 2165 size_t kesize = key[i].dttk_size;
2166 2166
2167 2167 if (kesize != 0) {
2168 2168 dtrace_bcopy(
2169 2169 (const void *)(uintptr_t)key[i].dttk_value,
2170 2170 (void *)kdata, kesize);
2171 2171 dkey->dttk_value = kdata;
2172 2172 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2173 2173 } else {
2174 2174 dkey->dttk_value = key[i].dttk_value;
2175 2175 }
2176 2176
2177 2177 dkey->dttk_size = kesize;
2178 2178 }
2179 2179
2180 2180 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2181 2181 dvar->dtdv_hashval = hashval;
2182 2182 dvar->dtdv_next = start;
2183 2183
2184 2184 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2185 2185 return (dvar);
2186 2186
2187 2187 /*
2188 2188 * The cas has failed. Either another CPU is adding an element to
2189 2189 * this hash chain, or another CPU is deleting an element from this
2190 2190 * hash chain. The simplest way to deal with both of these cases
2191 2191 * (though not necessarily the most efficient) is to free our
2192 2192 * allocated block and re-attempt it all. Note that the free is
2193 2193 * to the dirty list and _not_ to the free list. This is to prevent
2194 2194 * races with allocators, above.
2195 2195 */
2196 2196 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2197 2197
2198 2198 dtrace_membar_producer();
2199 2199
2200 2200 do {
2201 2201 free = dcpu->dtdsc_dirty;
2202 2202 dvar->dtdv_next = free;
2203 2203 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2204 2204
2205 2205 goto top;
2206 2206 }
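
The by-value hashing performed above on zero-sized keys is Jenkins' "One-at-a-time" mix, folded over four 16-bit chunks and finished with the usual avalanche steps. Pulled out of context, a standalone sketch (oaat_hash64 is a made-up name, and the seed merely stands in for DTRACE_DYNHASH_VALID) would be:

#include <stdint.h>

/*
 * Illustrative sketch of the "One-at-a-time" mixing applied to a by-value
 * key: four 16-bit chunks, most significant first, then the final avalanche.
 */
static uint64_t
oaat_hash64(uint64_t val)
{
	uint64_t hashval = 2;	/* stands in for DTRACE_DYNHASH_VALID (value assumed) */
	int shift;

	for (shift = 48; shift >= 0; shift -= 16) {
		hashval += (val >> shift) & 0xffff;
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	return (hashval);
}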
2207 2207
2208 2208 /*ARGSUSED*/
2209 2209 static void
2210 2210 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2211 2211 {
2212 2212 if ((int64_t)nval < (int64_t)*oval)
2213 2213 *oval = nval;
2214 2214 }
2215 2215
2216 2216 /*ARGSUSED*/
2217 2217 static void
2218 2218 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2219 2219 {
2220 2220 if ((int64_t)nval > (int64_t)*oval)
2221 2221 *oval = nval;
2222 2222 }
2223 2223
2224 2224 static void
2225 2225 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2226 2226 {
2227 2227 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2228 2228 int64_t val = (int64_t)nval;
2229 2229
2230 2230 if (val < 0) {
2231 2231 for (i = 0; i < zero; i++) {
2232 2232 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2233 2233 quanta[i] += incr;
2234 2234 return;
2235 2235 }
2236 2236 }
2237 2237 } else {
2238 2238 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2239 2239 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2240 2240 quanta[i - 1] += incr;
2241 2241 return;
2242 2242 }
2243 2243 }
2244 2244
2245 2245 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2246 2246 return;
2247 2247 }
2248 2248
2249 2249 ASSERT(0);
2250 2250 }
2251 2251
2252 2252 static void
2253 2253 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2254 2254 {
2255 2255 uint64_t arg = *lquanta++;
2256 2256 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2257 2257 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2258 2258 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2259 2259 int32_t val = (int32_t)nval, level;
2260 2260
2261 2261 ASSERT(step != 0);
2262 2262 ASSERT(levels != 0);
2263 2263
2264 2264 if (val < base) {
2265 2265 /*
2266 2266 * This is an underflow.
2267 2267 */
2268 2268 lquanta[0] += incr;
2269 2269 return;
2270 2270 }
2271 2271
2272 2272 level = (val - base) / step;
2273 2273
2274 2274 if (level < levels) {
2275 2275 lquanta[level + 1] += incr;
2276 2276 return;
2277 2277 }
2278 2278
2279 2279 /*
2280 2280 * This is an overflow.
2281 2281 */
2282 2282 lquanta[levels + 1] += incr;
2283 2283 }
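
The bucket arithmetic above is easy to check with a worked example: everything below base falls into slot 0, slot levels + 1 catches the overflow, and each in-range level is offset by one. A hypothetical helper (lquantize_bucket is not in the source) that returns the index dtrace_aggregate_lquantize() would increment:

#include <stdint.h>

/*
 * Illustrative sketch: the lquanta[] index selected by the routine above,
 * given the decoded base/step/levels parameters.
 */
static int
lquantize_bucket(int32_t base, uint16_t step, uint16_t levels, uint64_t nval)
{
	int32_t val = (int32_t)nval, level;

	if (val < base)
		return (0);			/* underflow bucket */

	level = (val - base) / step;

	if (level < levels)
		return (level + 1);

	return (levels + 1);			/* overflow bucket */
}

/*
 * For example, with base = 0, step = 10 and levels = 10:
 *	lquantize_bucket(0, 10, 10, 37) == 4	(the 30..39 bucket)
 *	lquantize_bucket(0, 10, 10, 250) == 11	(overflow)
 */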
2284 2284
2285 2285 static int
2286 2286 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2287 2287 uint16_t high, uint16_t nsteps, int64_t value)
2288 2288 {
2289 2289 int64_t this = 1, last, next;
2290 2290 int base = 1, order;
2291 2291
2292 2292 ASSERT(factor <= nsteps);
2293 2293 ASSERT(nsteps % factor == 0);
2294 2294
2295 2295 for (order = 0; order < low; order++)
2296 2296 this *= factor;
2297 2297
2298 2298 /*
2299 2299 * If our value is less than our factor taken to the power of the
2300 2300 * low order of magnitude, it goes into the zeroth bucket.
2301 2301 */
2302 2302 if (value < (last = this))
2303 2303 return (0);
2304 2304
2305 2305 for (this *= factor; order <= high; order++) {
2306 2306 int nbuckets = this > nsteps ? nsteps : this;
2307 2307
2308 2308 if ((next = this * factor) < this) {
2309 2309 /*
2310 2310 * We should not generally get log/linear quantizations
2311 2311 * with a high magnitude that allows 64-bits to
2312 2312 * overflow, but we nonetheless protect against this
2313 2313 * by explicitly checking for overflow, and clamping
2314 2314 * our value accordingly.
2315 2315 */
2316 2316 value = this - 1;
2317 2317 }
2318 2318
2319 2319 if (value < this) {
2320 2320 /*
2321 2321 * If our value lies within this order of magnitude,
2322 2322 * determine its position by taking the offset within
2323 2323 * the order of magnitude, dividing by the bucket
2324 2324 * width, and adding to our (accumulated) base.
2325 2325 */
2326 2326 return (base + (value - last) / (this / nbuckets));
2327 2327 }
2328 2328
2329 2329 base += nbuckets - (nbuckets / factor);
2330 2330 last = this;
2331 2331 this = next;
2332 2332 }
2333 2333
2334 2334 /*
2335 2335 * Our value is greater than or equal to our factor taken to the
2336 2336 * power of one plus the high magnitude -- return the top bucket.
2337 2337 */
2338 2338 return (base);
2339 2339 }
2340 2340
2341 2341 static void
2342 2342 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2343 2343 {
2344 2344 uint64_t arg = *llquanta++;
2345 2345 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2346 2346 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2347 2347 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2348 2348 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2349 2349
2350 2350 llquanta[dtrace_aggregate_llquantize_bucket(factor,
2351 2351 low, high, nsteps, nval)] += incr;
2352 2352 }
2353 2353
2354 2354 /*ARGSUSED*/
2355 2355 static void
2356 2356 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2357 2357 {
2358 2358 data[0]++;
2359 2359 data[1] += nval;
2360 2360 }
2361 2361
2362 2362 /*ARGSUSED*/
2363 2363 static void
2364 2364 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2365 2365 {
2366 2366 int64_t snval = (int64_t)nval;
2367 2367 uint64_t tmp[2];
2368 2368
2369 2369 data[0]++;
2370 2370 data[1] += nval;
2371 2371
2372 2372 /*
2373 2373 * What we want to say here is:
2374 2374 *
2375 2375 * data[2] += nval * nval;
2376 2376 *
2377 2377 * But given that nval is 64-bit, we could easily overflow, so
2378 2378 * we do this as 128-bit arithmetic.
2379 2379 */
2380 2380 if (snval < 0)
2381 2381 snval = -snval;
2382 2382
2383 2383 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2384 2384 dtrace_add_128(data + 2, tmp, data + 2);
2385 2385 }
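
The three running quantities kept here -- count in data[0], sum in data[1] and a 128-bit sum of squares in data[2..3] -- are sufficient for a consumer to recover the standard deviation as sqrt(E[x^2] - E[x]^2) after the fact. A hedged sketch of that last step, assuming for simplicity that the sum of squares still fits in 64 bits so the 128-bit halves can be collapsed (stddev_from_moments is an illustrative name, not an interface of the framework):

#include <stdint.h>
#include <math.h>

/*
 * Illustrative sketch: population standard deviation from the accumulated
 * moments, i.e. sqrt(E[x^2] - E[x]^2).
 */
static double
stddev_from_moments(uint64_t count, int64_t sum, uint64_t sum_of_squares)
{
	double n = (double)count;
	double mean = (double)sum / n;

	return (sqrt((double)sum_of_squares / n - mean * mean));
}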
2386 2386
2387 2387 /*ARGSUSED*/
2388 2388 static void
2389 2389 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2390 2390 {
2391 2391 *oval = *oval + 1;
2392 2392 }
2393 2393
2394 2394 /*ARGSUSED*/
2395 2395 static void
2396 2396 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2397 2397 {
2398 2398 *oval += nval;
2399 2399 }
2400 2400
2401 2401 /*
2402 2402 * Aggregate given the tuple in the principal data buffer, and the aggregating
2403 2403 * action denoted by the specified dtrace_aggregation_t. The aggregation
2404 2404 * buffer is specified as the buf parameter. This routine does not return
2405 2405 * failure; if there is no space in the aggregation buffer, the data will be
2406 2406 * dropped, and a corresponding counter incremented.
2407 2407 */
2408 2408 static void
2409 2409 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2410 2410 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2411 2411 {
2412 2412 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2413 2413 uint32_t i, ndx, size, fsize;
2414 2414 uint32_t align = sizeof (uint64_t) - 1;
2415 2415 dtrace_aggbuffer_t *agb;
2416 2416 dtrace_aggkey_t *key;
2417 2417 uint32_t hashval = 0, limit, isstr;
2418 2418 caddr_t tomax, data, kdata;
2419 2419 dtrace_actkind_t action;
2420 2420 dtrace_action_t *act;
2421 2421 uintptr_t offs;
2422 2422
2423 2423 if (buf == NULL)
2424 2424 return;
2425 2425
2426 2426 if (!agg->dtag_hasarg) {
2427 2427 /*
2428 2428 * Currently, only quantize() and lquantize() take additional
2429 2429 * arguments, and they have the same semantics: an increment
2430 2430 * value that defaults to 1 when not present. If additional
2431 2431 * aggregating actions take arguments, the setting of the
2432 2432 * default argument value will presumably have to become more
2433 2433 * sophisticated...
2434 2434 */
2435 2435 arg = 1;
2436 2436 }
2437 2437
2438 2438 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2439 2439 size = rec->dtrd_offset - agg->dtag_base;
2440 2440 fsize = size + rec->dtrd_size;
2441 2441
2442 2442 ASSERT(dbuf->dtb_tomax != NULL);
2443 2443 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2444 2444
2445 2445 if ((tomax = buf->dtb_tomax) == NULL) {
2446 2446 dtrace_buffer_drop(buf);
2447 2447 return;
2448 2448 }
2449 2449
2450 2450 /*
2451 2451 * The metastructure is always at the bottom of the buffer.
2452 2452 */
2453 2453 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2454 2454 sizeof (dtrace_aggbuffer_t));
2455 2455
2456 2456 if (buf->dtb_offset == 0) {
2457 2457 /*
2458 2458 * We just kludge up approximately 1/8th of the size to be
2459 2459 * buckets. If this guess ends up being routinely
2460 2460 * off-the-mark, we may need to dynamically readjust this
2461 2461 * based on past performance.
2462 2462 */
2463 2463 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2464 2464
2465 2465 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2466 2466 (uintptr_t)tomax || hashsize == 0) {
2467 2467 /*
2468 2468 * We've been given a ludicrously small buffer;
2469 2469 * increment our drop count and leave.
2470 2470 */
2471 2471 dtrace_buffer_drop(buf);
2472 2472 return;
2473 2473 }
2474 2474
2475 2475 /*
2476 2476 * And now, a pathetic attempt to get an odd (or
2477 2477 * perchance, a prime) hash size for better hash distribution.
2478 2478 */
2479 2479 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2480 2480 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2481 2481
2482 2482 agb->dtagb_hashsize = hashsize;
2483 2483 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2484 2484 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2485 2485 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2486 2486
2487 2487 for (i = 0; i < agb->dtagb_hashsize; i++)
2488 2488 agb->dtagb_hash[i] = NULL;
2489 2489 }
2490 2490
2491 2491 ASSERT(agg->dtag_first != NULL);
2492 2492 ASSERT(agg->dtag_first->dta_intuple);
2493 2493
2494 2494 /*
2495 2495 * Calculate the hash value based on the key. Note that we _don't_
2496 2496 * include the aggid in the hashing (but we will store it as part of
2497 2497 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2498 2498 * algorithm: a simple, quick algorithm that has no known funnels, and
2499 2499 * gets good distribution in practice. The efficacy of the hashing
2500 2500 * algorithm (and a comparison with other algorithms) may be found by
2501 2501 * running the ::dtrace_aggstat MDB dcmd.
2502 2502 */
2503 2503 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2504 2504 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2505 2505 limit = i + act->dta_rec.dtrd_size;
2506 2506 ASSERT(limit <= size);
2507 2507 isstr = DTRACEACT_ISSTRING(act);
2508 2508
2509 2509 for (; i < limit; i++) {
2510 2510 hashval += data[i];
2511 2511 hashval += (hashval << 10);
2512 2512 hashval ^= (hashval >> 6);
2513 2513
2514 2514 if (isstr && data[i] == '\0')
2515 2515 break;
2516 2516 }
2517 2517 }
2518 2518
2519 2519 hashval += (hashval << 3);
2520 2520 hashval ^= (hashval >> 11);
2521 2521 hashval += (hashval << 15);
2522 2522
2523 2523 /*
2524 2524 * Yes, the divide here is expensive -- but it's generally the least
2525 2525 * of the performance issues given the amount of data that we iterate
2526 2526 * over to compute hash values, compare data, etc.
2527 2527 */
2528 2528 ndx = hashval % agb->dtagb_hashsize;
2529 2529
2530 2530 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2531 2531 ASSERT((caddr_t)key >= tomax);
2532 2532 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2533 2533
2534 2534 if (hashval != key->dtak_hashval || key->dtak_size != size)
2535 2535 continue;
2536 2536
2537 2537 kdata = key->dtak_data;
2538 2538 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2539 2539
2540 2540 for (act = agg->dtag_first; act->dta_intuple;
2541 2541 act = act->dta_next) {
2542 2542 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2543 2543 limit = i + act->dta_rec.dtrd_size;
2544 2544 ASSERT(limit <= size);
2545 2545 isstr = DTRACEACT_ISSTRING(act);
2546 2546
2547 2547 for (; i < limit; i++) {
2548 2548 if (kdata[i] != data[i])
2549 2549 goto next;
2550 2550
2551 2551 if (isstr && data[i] == '\0')
2552 2552 break;
2553 2553 }
2554 2554 }
2555 2555
2556 2556 if (action != key->dtak_action) {
2557 2557 /*
2558 2558 * We are aggregating on the same value in the same
2559 2559 * aggregation with two different aggregating actions.
2560 2560 * (This should have been picked up in the compiler,
2561 2561 * so we may be dealing with errant or devious DIF.)
2562 2562 * This is an error condition; we indicate as much,
2563 2563 * and return.
2564 2564 */
2565 2565 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2566 2566 return;
2567 2567 }
2568 2568
2569 2569 /*
2570 2570 * This is a hit: we need to apply the aggregator to
2571 2571 * the value at this key.
2572 2572 */
2573 2573 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2574 2574 return;
2575 2575 next:
2576 2576 continue;
2577 2577 }
2578 2578
2579 2579 /*
2580 2580 * We didn't find it. We need to allocate some zero-filled space,
2581 2581 * link it into the hash table appropriately, and apply the aggregator
2582 2582 * to the (zero-filled) value.
2583 2583 */
2584 2584 offs = buf->dtb_offset;
2585 2585 while (offs & (align - 1))
2586 2586 offs += sizeof (uint32_t);
2587 2587
2588 2588 /*
2589 2589 * If we don't have enough room to both allocate a new key _and_
2590 2590 * its associated data, increment the drop count and return.
2591 2591 */
2592 2592 if ((uintptr_t)tomax + offs + fsize >
2593 2593 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2594 2594 dtrace_buffer_drop(buf);
2595 2595 return;
2596 2596 }
2597 2597
2598 2598 /*CONSTCOND*/
2599 2599 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2600 2600 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2601 2601 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2602 2602
2603 2603 key->dtak_data = kdata = tomax + offs;
2604 2604 buf->dtb_offset = offs + fsize;
2605 2605
2606 2606 /*
2607 2607 * Now copy the data across.
2608 2608 */
2609 2609 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2610 2610
2611 2611 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2612 2612 kdata[i] = data[i];
2613 2613
2614 2614 /*
2615 2615 * Because strings are not zeroed out by default, we need to iterate
2616 2616 * looking for actions that store strings, and we need to explicitly
2617 2617 * pad these strings out with zeroes.
2618 2618 */
2619 2619 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2620 2620 int nul;
2621 2621
2622 2622 if (!DTRACEACT_ISSTRING(act))
2623 2623 continue;
2624 2624
2625 2625 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2626 2626 limit = i + act->dta_rec.dtrd_size;
2627 2627 ASSERT(limit <= size);
2628 2628
2629 2629 for (nul = 0; i < limit; i++) {
2630 2630 if (nul) {
2631 2631 kdata[i] = '\0';
2632 2632 continue;
2633 2633 }
2634 2634
2635 2635 if (data[i] != '\0')
2636 2636 continue;
2637 2637
2638 2638 nul = 1;
2639 2639 }
2640 2640 }
2641 2641
2642 2642 for (i = size; i < fsize; i++)
2643 2643 kdata[i] = 0;
2644 2644
2645 2645 key->dtak_hashval = hashval;
2646 2646 key->dtak_size = size;
2647 2647 key->dtak_action = action;
2648 2648 key->dtak_next = agb->dtagb_hash[ndx];
2649 2649 agb->dtagb_hash[ndx] = key;
2650 2650
2651 2651 /*
2652 2652 * Finally, apply the aggregator.
2653 2653 */
2654 2654 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2655 2655 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2656 2656 }
2657 2657
2658 2658 /*
2659 2659 * Given consumer state, this routine finds a speculation in the INACTIVE
2660 2660 * state and transitions it into the ACTIVE state. If there is no speculation
2661 2661 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2662 2662 * incremented -- it is up to the caller to take appropriate action.
2663 2663 */
2664 2664 static int
2665 2665 dtrace_speculation(dtrace_state_t *state)
2666 2666 {
2667 2667 int i = 0;
2668 2668 dtrace_speculation_state_t current;
2669 2669 uint32_t *stat = &state->dts_speculations_unavail, count;
2670 2670
2671 2671 while (i < state->dts_nspeculations) {
2672 2672 dtrace_speculation_t *spec = &state->dts_speculations[i];
2673 2673
2674 2674 current = spec->dtsp_state;
2675 2675
2676 2676 if (current != DTRACESPEC_INACTIVE) {
2677 2677 if (current == DTRACESPEC_COMMITTINGMANY ||
2678 2678 current == DTRACESPEC_COMMITTING ||
2679 2679 current == DTRACESPEC_DISCARDING)
2680 2680 stat = &state->dts_speculations_busy;
2681 2681 i++;
2682 2682 continue;
2683 2683 }
2684 2684
2685 2685 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2686 2686 current, DTRACESPEC_ACTIVE) == current)
2687 2687 return (i + 1);
2688 2688 }
2689 2689
2690 2690 /*
2691 2691 * We couldn't find a speculation. If we found as much as a single
2692 2692 * busy speculation buffer, we'll attribute this failure as "busy"
2693 2693 * instead of "unavail".
2694 2694 */
2695 2695 do {
2696 2696 count = *stat;
2697 2697 } while (dtrace_cas32(stat, count, count + 1) != count);
2698 2698
2699 2699 return (0);
2700 2700 }
2701 2701
2702 2702 /*
2703 2703 * This routine commits an active speculation. If the specified speculation
2704 2704 * is not in a valid state to perform a commit(), this routine will silently do
2705 2705 * nothing. The state of the specified speculation is transitioned according
2706 2706 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2707 2707 */
2708 2708 static void
2709 2709 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2710 2710 dtrace_specid_t which)
2711 2711 {
2712 2712 dtrace_speculation_t *spec;
2713 2713 dtrace_buffer_t *src, *dest;
2714 2714 uintptr_t daddr, saddr, dlimit, slimit;
2715 2715 dtrace_speculation_state_t current, new;
2716 2716 intptr_t offs;
2717 2717 uint64_t timestamp;
2718 2718
2719 2719 if (which == 0)
2720 2720 return;
2721 2721
2722 2722 if (which > state->dts_nspeculations) {
2723 2723 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2724 2724 return;
2725 2725 }
2726 2726
2727 2727 spec = &state->dts_speculations[which - 1];
2728 2728 src = &spec->dtsp_buffer[cpu];
2729 2729 dest = &state->dts_buffer[cpu];
2730 2730
2731 2731 do {
2732 2732 current = spec->dtsp_state;
2733 2733
2734 2734 if (current == DTRACESPEC_COMMITTINGMANY)
2735 2735 break;
2736 2736
2737 2737 switch (current) {
2738 2738 case DTRACESPEC_INACTIVE:
2739 2739 case DTRACESPEC_DISCARDING:
2740 2740 return;
2741 2741
2742 2742 case DTRACESPEC_COMMITTING:
2743 2743 /*
2744 2744 * This is only possible if we are (a) commit()'ing
2745 2745 * without having done a prior speculate() on this CPU
2746 2746 * and (b) racing with another commit() on a different
2747 2747 * CPU. There's nothing to do -- we just assert that
2748 2748 * our offset is 0.
2749 2749 */
2750 2750 ASSERT(src->dtb_offset == 0);
2751 2751 return;
2752 2752
2753 2753 case DTRACESPEC_ACTIVE:
2754 2754 new = DTRACESPEC_COMMITTING;
2755 2755 break;
2756 2756
2757 2757 case DTRACESPEC_ACTIVEONE:
2758 2758 /*
2759 2759 * This speculation is active on one CPU. If our
2760 2760 * buffer offset is non-zero, we know that the one CPU
2761 2761 * must be us. Otherwise, we are committing on a
2762 2762 * different CPU from the speculate(), and we must
2763 2763 * rely on being asynchronously cleaned.
2764 2764 */
2765 2765 if (src->dtb_offset != 0) {
2766 2766 new = DTRACESPEC_COMMITTING;
2767 2767 break;
2768 2768 }
2769 2769 /*FALLTHROUGH*/
2770 2770
2771 2771 case DTRACESPEC_ACTIVEMANY:
2772 2772 new = DTRACESPEC_COMMITTINGMANY;
2773 2773 break;
2774 2774
2775 2775 default:
2776 2776 ASSERT(0);
2777 2777 }
2778 2778 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2779 2779 current, new) != current);
2780 2780
2781 2781 /*
2782 2782 * We have set the state to indicate that we are committing this
2783 2783 * speculation. Now reserve the necessary space in the destination
2784 2784 * buffer.
2785 2785 */
2786 2786 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2787 2787 sizeof (uint64_t), state, NULL)) < 0) {
2788 2788 dtrace_buffer_drop(dest);
2789 2789 goto out;
2790 2790 }
2791 2791
2792 2792 /*
2793 2793 * We have sufficient space to copy the speculative buffer into the
2794 2794 * primary buffer. First, modify the speculative buffer, filling
2795 2795 * in the timestamp of all entries with the current time. The data
2796 2796 * must have the commit() time rather than the time it was traced,
2797 2797 * so that all entries in the primary buffer are in timestamp order.
2798 2798 */
2799 2799 timestamp = dtrace_gethrtime();
2800 2800 saddr = (uintptr_t)src->dtb_tomax;
2801 2801 slimit = saddr + src->dtb_offset;
2802 2802 while (saddr < slimit) {
2803 2803 size_t size;
2804 2804 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2805 2805
2806 2806 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2807 2807 saddr += sizeof (dtrace_epid_t);
2808 2808 continue;
2809 2809 }
2810 2810 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2811 2811 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2812 2812
2813 2813 ASSERT3U(saddr + size, <=, slimit);
2814 2814 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2815 2815 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2816 2816
2817 2817 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2818 2818
2819 2819 saddr += size;
2820 2820 }
2821 2821
2822 2822 /*
2823 2823 * Copy the buffer across. (Note that this is a
2824 2824 * highly suboptimal bcopy(); in the unlikely event that this becomes
2825 2825 * a serious performance issue, a high-performance DTrace-specific
2826 2826 * bcopy() should obviously be invented.)
2827 2827 */
2828 2828 daddr = (uintptr_t)dest->dtb_tomax + offs;
2829 2829 dlimit = daddr + src->dtb_offset;
2830 2830 saddr = (uintptr_t)src->dtb_tomax;
2831 2831
2832 2832 /*
2833 2833 * First, the aligned portion.
2834 2834 */
2835 2835 while (dlimit - daddr >= sizeof (uint64_t)) {
2836 2836 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2837 2837
2838 2838 daddr += sizeof (uint64_t);
2839 2839 saddr += sizeof (uint64_t);
2840 2840 }
2841 2841
2842 2842 /*
2843 2843 * Now any left-over bit...
2844 2844 */
2845 2845 while (dlimit - daddr)
2846 2846 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2847 2847
2848 2848 /*
2849 2849 * Finally, commit the reserved space in the destination buffer.
2850 2850 */
2851 2851 dest->dtb_offset = offs + src->dtb_offset;
2852 2852
2853 2853 out:
2854 2854 /*
2855 2855 * If we're lucky enough to be the only active CPU on this speculation
2856 2856 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2857 2857 */
2858 2858 if (current == DTRACESPEC_ACTIVE ||
2859 2859 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2860 2860 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2861 2861 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2862 2862
2863 2863 ASSERT(rval == DTRACESPEC_COMMITTING);
2864 2864 }
2865 2865
2866 2866 src->dtb_offset = 0;
2867 2867 src->dtb_xamot_drops += src->dtb_drops;
2868 2868 src->dtb_drops = 0;
2869 2869 }
2870 2870
2871 2871 /*
2872 2872 * This routine discards an active speculation. If the specified speculation
2873 2873 * is not in a valid state to perform a discard(), this routine will silently
2874 2874 * do nothing. The state of the specified speculation is transitioned
2875 2875 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2876 2876 */
2877 2877 static void
2878 2878 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2879 2879 dtrace_specid_t which)
2880 2880 {
2881 2881 dtrace_speculation_t *spec;
2882 2882 dtrace_speculation_state_t current, new;
2883 2883 dtrace_buffer_t *buf;
2884 2884
2885 2885 if (which == 0)
2886 2886 return;
2887 2887
2888 2888 if (which > state->dts_nspeculations) {
2889 2889 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2890 2890 return;
2891 2891 }
2892 2892
2893 2893 spec = &state->dts_speculations[which - 1];
2894 2894 buf = &spec->dtsp_buffer[cpu];
2895 2895
2896 2896 do {
2897 2897 current = spec->dtsp_state;
2898 2898
2899 2899 switch (current) {
2900 2900 case DTRACESPEC_INACTIVE:
2901 2901 case DTRACESPEC_COMMITTINGMANY:
2902 2902 case DTRACESPEC_COMMITTING:
2903 2903 case DTRACESPEC_DISCARDING:
2904 2904 return;
2905 2905
2906 2906 case DTRACESPEC_ACTIVE:
2907 2907 case DTRACESPEC_ACTIVEMANY:
2908 2908 new = DTRACESPEC_DISCARDING;
2909 2909 break;
2910 2910
2911 2911 case DTRACESPEC_ACTIVEONE:
2912 2912 if (buf->dtb_offset != 0) {
2913 2913 new = DTRACESPEC_INACTIVE;
2914 2914 } else {
2915 2915 new = DTRACESPEC_DISCARDING;
2916 2916 }
2917 2917 break;
2918 2918
2919 2919 default:
2920 2920 ASSERT(0);
2921 2921 }
2922 2922 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2923 2923 current, new) != current);
2924 2924
2925 2925 buf->dtb_offset = 0;
2926 2926 buf->dtb_drops = 0;
2927 2927 }
2928 2928
2929 2929 /*
2930 2930 * Note: not called from probe context. This function is called
2931 2931 * asynchronously from cross call context to clean any speculations that are
2932 2932 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2933 2933 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2934 2934 * speculation.
2935 2935 */
2936 2936 static void
2937 2937 dtrace_speculation_clean_here(dtrace_state_t *state)
2938 2938 {
2939 2939 dtrace_icookie_t cookie;
2940 2940 processorid_t cpu = CPU->cpu_id;
2941 2941 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2942 2942 dtrace_specid_t i;
2943 2943
2944 2944 cookie = dtrace_interrupt_disable();
2945 2945
2946 2946 if (dest->dtb_tomax == NULL) {
2947 2947 dtrace_interrupt_enable(cookie);
2948 2948 return;
2949 2949 }
2950 2950
2951 2951 for (i = 0; i < state->dts_nspeculations; i++) {
2952 2952 dtrace_speculation_t *spec = &state->dts_speculations[i];
2953 2953 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2954 2954
2955 2955 if (src->dtb_tomax == NULL)
2956 2956 continue;
2957 2957
2958 2958 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2959 2959 src->dtb_offset = 0;
2960 2960 continue;
2961 2961 }
2962 2962
2963 2963 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2964 2964 continue;
2965 2965
2966 2966 if (src->dtb_offset == 0)
2967 2967 continue;
2968 2968
2969 2969 dtrace_speculation_commit(state, cpu, i + 1);
2970 2970 }
2971 2971
2972 2972 dtrace_interrupt_enable(cookie);
2973 2973 }
2974 2974
2975 2975 /*
2976 2976 * Note: not called from probe context. This function is called
2977 2977 * asynchronously (and at a regular interval) to clean any speculations that
2978 2978 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2979 2979 * is work to be done, it cross calls all CPUs to perform that work;
2980 2980 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2981 2981 * INACTIVE state until they have been cleaned by all CPUs.
2982 2982 */
2983 2983 static void
2984 2984 dtrace_speculation_clean(dtrace_state_t *state)
2985 2985 {
2986 2986 int work = 0, rv;
2987 2987 dtrace_specid_t i;
2988 2988
2989 2989 for (i = 0; i < state->dts_nspeculations; i++) {
2990 2990 dtrace_speculation_t *spec = &state->dts_speculations[i];
2991 2991
2992 2992 ASSERT(!spec->dtsp_cleaning);
2993 2993
2994 2994 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2995 2995 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2996 2996 continue;
2997 2997
2998 2998 work++;
2999 2999 spec->dtsp_cleaning = 1;
3000 3000 }
3001 3001
3002 3002 if (!work)
3003 3003 return;
3004 3004
3005 3005 dtrace_xcall(DTRACE_CPUALL,
3006 3006 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
3007 3007
3008 3008 /*
3009 3009 * We now know that all CPUs have committed or discarded their
3010 3010 * speculation buffers, as appropriate. We can now set the state
3011 3011 * to inactive.
3012 3012 */
3013 3013 for (i = 0; i < state->dts_nspeculations; i++) {
3014 3014 dtrace_speculation_t *spec = &state->dts_speculations[i];
3015 3015 dtrace_speculation_state_t current, new;
3016 3016
3017 3017 if (!spec->dtsp_cleaning)
3018 3018 continue;
3019 3019
3020 3020 current = spec->dtsp_state;
3021 3021 ASSERT(current == DTRACESPEC_DISCARDING ||
3022 3022 current == DTRACESPEC_COMMITTINGMANY);
3023 3023
3024 3024 new = DTRACESPEC_INACTIVE;
3025 3025
3026 3026 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
3027 3027 ASSERT(rv == current);
3028 3028 spec->dtsp_cleaning = 0;
3029 3029 }
3030 3030 }
3031 3031
3032 3032 /*
3033 3033 * Called as part of a speculate() to get the speculative buffer associated
3034 3034 * with a given speculation. Returns NULL if the specified speculation is not
3035 3035 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
3036 3036 * the active CPU is not the specified CPU -- the speculation will be
3037 3037 * atomically transitioned into the ACTIVEMANY state.
3038 3038 */
3039 3039 static dtrace_buffer_t *
3040 3040 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
3041 3041 dtrace_specid_t which)
3042 3042 {
3043 3043 dtrace_speculation_t *spec;
3044 3044 dtrace_speculation_state_t current, new;
3045 3045 dtrace_buffer_t *buf;
3046 3046
3047 3047 if (which == 0)
3048 3048 return (NULL);
3049 3049
3050 3050 if (which > state->dts_nspeculations) {
3051 3051 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3052 3052 return (NULL);
3053 3053 }
3054 3054
3055 3055 spec = &state->dts_speculations[which - 1];
3056 3056 buf = &spec->dtsp_buffer[cpuid];
3057 3057
3058 3058 do {
3059 3059 current = spec->dtsp_state;
3060 3060
3061 3061 switch (current) {
3062 3062 case DTRACESPEC_INACTIVE:
3063 3063 case DTRACESPEC_COMMITTINGMANY:
3064 3064 case DTRACESPEC_DISCARDING:
3065 3065 return (NULL);
3066 3066
3067 3067 case DTRACESPEC_COMMITTING:
3068 3068 ASSERT(buf->dtb_offset == 0);
3069 3069 return (NULL);
3070 3070
3071 3071 case DTRACESPEC_ACTIVEONE:
3072 3072 /*
3073 3073 * This speculation is currently active on one CPU.
3074 3074 * Check the offset in the buffer; if it's non-zero,
3075 3075 * that CPU must be us (and we leave the state alone).
3076 3076 * If it's zero, assume that we're starting on a new
3077 3077 * CPU -- and change the state to indicate that the
3078 3078 * speculation is active on more than one CPU.
3079 3079 */
3080 3080 if (buf->dtb_offset != 0)
3081 3081 return (buf);
3082 3082
3083 3083 new = DTRACESPEC_ACTIVEMANY;
3084 3084 break;
3085 3085
3086 3086 case DTRACESPEC_ACTIVEMANY:
3087 3087 return (buf);
3088 3088
3089 3089 case DTRACESPEC_ACTIVE:
3090 3090 new = DTRACESPEC_ACTIVEONE;
3091 3091 break;
3092 3092
3093 3093 default:
3094 3094 ASSERT(0);
3095 3095 }
3096 3096 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3097 3097 current, new) != current);
3098 3098
3099 3099 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3100 3100 return (buf);
3101 3101 }
3102 3102
3103 3103 /*
3104 3104 * Return a string. In the event that the user lacks the privilege to access
3105 3105 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3106 3106 * don't fail access checking.
3107 3107 *
3108 3108 * dtrace_dif_variable() uses this routine as a helper for various
3109 3109 * builtin values such as 'execname' and 'probefunc.'
3110 3110 */
3111 3111 uintptr_t
3112 3112 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3113 3113 dtrace_mstate_t *mstate)
3114 3114 {
3115 3115 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3116 3116 uintptr_t ret;
3117 3117 size_t strsz;
3118 3118
3119 3119 /*
3120 3120 * The easy case: this probe is allowed to read all of memory, so
3121 3121 * we can just return this as a vanilla pointer.
3122 3122 */
3123 3123 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3124 3124 return (addr);
3125 3125
3126 3126 /*
3127 3127 * This is the tougher case: we copy the string in question from
3128 3128 * kernel memory into scratch memory and return it that way: this
3129 3129 * ensures that we won't trip up when access checking tests the
3130 3130 * BYREF return value.
3131 3131 */
3132 3132 strsz = dtrace_strlen((char *)addr, size) + 1;
3133 3133
3134 3134 if (mstate->dtms_scratch_ptr + strsz >
3135 3135 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3136 3136 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3137 3137 return (NULL);
3138 3138 }
3139 3139
3140 3140 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3141 3141 strsz);
3142 3142 ret = mstate->dtms_scratch_ptr;
3143 3143 mstate->dtms_scratch_ptr += strsz;
3144 3144 return (ret);
3145 3145 }
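
The scratch discipline used here recurs throughout the DIF subroutines below (copyinstr(), strtok(), substr(), toupper()/tolower(), json()): verify that the request fits in the per-CPU scratch region, copy, and only then advance the scratch pointer so the next consumer starts after this data. A stand-alone sketch of that pattern, with illustrative names rather than the dtrace_mstate_t fields:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct scratch {
	uintptr_t	base;	/* start of the per-CPU scratch region */
	uintptr_t	ptr;	/* first unused byte */
	size_t		size;	/* total size of the region */
};

static void *
scratch_copy(struct scratch *s, const void *src, size_t len)
{
	void *dst;

	if (s->ptr + len > s->base + s->size)
		return (NULL);	/* caller raises a "no scratch" fault */

	dst = (void *)s->ptr;
	(void) memcpy(dst, src, len);
	s->ptr += len;		/* the next consumer starts after our data */
	return (dst);
}
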
3146 3146
3147 3147 /*
3148 3148 * This function implements the DIF emulator's variable lookups. The emulator
3149 3149 * passes a reserved variable identifier and optional built-in array index.
3150 3150 */
3151 3151 static uint64_t
3152 3152 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3153 3153 uint64_t ndx)
3154 3154 {
3155 3155 /*
3156 3156 * If we're accessing one of the uncached arguments, we'll turn this
3157 3157 * into a reference in the args array.
3158 3158 */
3159 3159 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3160 3160 ndx = v - DIF_VAR_ARG0;
3161 3161 v = DIF_VAR_ARGS;
3162 3162 }
3163 3163
3164 3164 switch (v) {
3165 3165 case DIF_VAR_ARGS:
3166 3166 if (!(mstate->dtms_access & DTRACE_ACCESS_ARGS)) {
3167 3167 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |=
3168 3168 CPU_DTRACE_KPRIV;
3169 3169 return (0);
3170 3170 }
3171 3171
3172 3172 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3173 3173 if (ndx >= sizeof (mstate->dtms_arg) /
3174 3174 sizeof (mstate->dtms_arg[0])) {
3175 3175 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3176 3176 dtrace_provider_t *pv;
3177 3177 uint64_t val;
3178 3178
3179 3179 pv = mstate->dtms_probe->dtpr_provider;
3180 3180 if (pv->dtpv_pops.dtps_getargval != NULL)
3181 3181 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3182 3182 mstate->dtms_probe->dtpr_id,
3183 3183 mstate->dtms_probe->dtpr_arg, ndx, aframes);
3184 3184 else
3185 3185 val = dtrace_getarg(ndx, aframes);
3186 3186
3187 3187 /*
3188 3188 * This is regrettably required to keep the compiler
3189 3189 * from tail-optimizing the call to dtrace_getarg().
3190 3190 * The condition always evaluates to true, but the
3191 3191 * compiler has no way of figuring that out a priori.
3192 3192 * (None of this would be necessary if the compiler
3193 3193 * could be relied upon to _always_ tail-optimize
3194 3194 * the call to dtrace_getarg() -- but it can't.)
3195 3195 */
3196 3196 if (mstate->dtms_probe != NULL)
3197 3197 return (val);
3198 3198
3199 3199 ASSERT(0);
3200 3200 }
3201 3201
3202 3202 return (mstate->dtms_arg[ndx]);
3203 3203
3204 3204 case DIF_VAR_UREGS: {
3205 3205 klwp_t *lwp;
3206 3206
3207 3207 if (!dtrace_priv_proc(state, mstate))
3208 3208 return (0);
3209 3209
3210 3210 if ((lwp = curthread->t_lwp) == NULL) {
3211 3211 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3212 3212 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
3213 3213 return (0);
3214 3214 }
3215 3215
3216 3216 return (dtrace_getreg(lwp->lwp_regs, ndx));
3217 3217 }
3218 3218
3219 3219 case DIF_VAR_VMREGS: {
3220 3220 uint64_t rval;
3221 3221
3222 3222 if (!dtrace_priv_kernel(state))
3223 3223 return (0);
3224 3224
3225 3225 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3226 3226
3227 3227 rval = dtrace_getvmreg(ndx,
3228 3228 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags);
3229 3229
3230 3230 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3231 3231
3232 3232 return (rval);
3233 3233 }
3234 3234
3235 3235 case DIF_VAR_CURTHREAD:
3236 3236 if (!dtrace_priv_proc(state, mstate))
3237 3237 return (0);
3238 3238 return ((uint64_t)(uintptr_t)curthread);
3239 3239
3240 3240 case DIF_VAR_TIMESTAMP:
3241 3241 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3242 3242 mstate->dtms_timestamp = dtrace_gethrtime();
3243 3243 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3244 3244 }
3245 3245 return (mstate->dtms_timestamp);
3246 3246
3247 3247 case DIF_VAR_VTIMESTAMP:
3248 3248 ASSERT(dtrace_vtime_references != 0);
3249 3249 return (curthread->t_dtrace_vtime);
3250 3250
3251 3251 case DIF_VAR_WALLTIMESTAMP:
3252 3252 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3253 3253 mstate->dtms_walltimestamp = dtrace_gethrestime();
3254 3254 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3255 3255 }
3256 3256 return (mstate->dtms_walltimestamp);
3257 3257
3258 3258 case DIF_VAR_IPL:
3259 3259 if (!dtrace_priv_kernel(state))
3260 3260 return (0);
3261 3261 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3262 3262 mstate->dtms_ipl = dtrace_getipl();
3263 3263 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3264 3264 }
3265 3265 return (mstate->dtms_ipl);
3266 3266
3267 3267 case DIF_VAR_EPID:
3268 3268 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3269 3269 return (mstate->dtms_epid);
3270 3270
3271 3271 case DIF_VAR_ID:
3272 3272 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3273 3273 return (mstate->dtms_probe->dtpr_id);
3274 3274
3275 3275 case DIF_VAR_STACKDEPTH:
3276 3276 if (!dtrace_priv_kernel(state))
3277 3277 return (0);
3278 3278 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3279 3279 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3280 3280
3281 3281 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3282 3282 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3283 3283 }
3284 3284 return (mstate->dtms_stackdepth);
3285 3285
3286 3286 case DIF_VAR_USTACKDEPTH:
3287 3287 if (!dtrace_priv_proc(state, mstate))
3288 3288 return (0);
3289 3289 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3290 3290 /*
3291 3291 * See comment in DIF_VAR_PID.
3292 3292 */
3293 3293 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3294 3294 CPU_ON_INTR(CPU)) {
3295 3295 mstate->dtms_ustackdepth = 0;
3296 3296 } else {
3297 3297 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3298 3298 mstate->dtms_ustackdepth =
3299 3299 dtrace_getustackdepth();
3300 3300 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3301 3301 }
3302 3302 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3303 3303 }
3304 3304 return (mstate->dtms_ustackdepth);
3305 3305
3306 3306 case DIF_VAR_CALLER:
3307 3307 if (!dtrace_priv_kernel(state))
3308 3308 return (0);
3309 3309 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3310 3310 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3311 3311
3312 3312 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3313 3313 /*
3314 3314 * If this is an unanchored probe, we are
3315 3315 * required to go through the slow path:
3316 3316 * dtrace_caller() only guarantees correct
3317 3317 * results for anchored probes.
3318 3318 */
3319 3319 pc_t caller[2];
3320 3320
3321 3321 dtrace_getpcstack(caller, 2, aframes,
3322 3322 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3323 3323 mstate->dtms_caller = caller[1];
3324 3324 } else if ((mstate->dtms_caller =
3325 3325 dtrace_caller(aframes)) == -1) {
3326 3326 /*
3327 3327 * We have failed to do this the quick way;
3328 3328 * we must resort to the slower approach of
3329 3329 * calling dtrace_getpcstack().
3330 3330 */
3331 3331 pc_t caller;
3332 3332
3333 3333 dtrace_getpcstack(&caller, 1, aframes, NULL);
3334 3334 mstate->dtms_caller = caller;
3335 3335 }
3336 3336
3337 3337 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3338 3338 }
3339 3339 return (mstate->dtms_caller);
3340 3340
3341 3341 case DIF_VAR_UCALLER:
3342 3342 if (!dtrace_priv_proc(state, mstate))
3343 3343 return (0);
3344 3344
3345 3345 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3346 3346 uint64_t ustack[3];
3347 3347
3348 3348 /*
3349 3349 * dtrace_getupcstack() fills in the first uint64_t
3350 3350 * with the current PID. The second uint64_t will
3351 3351 * be the program counter at user-level. The third
3352 3352 * uint64_t will contain the caller, which is what
3353 3353 * we're after.
3354 3354 */
3355 3355 ustack[2] = NULL;
3356 3356 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3357 3357 dtrace_getupcstack(ustack, 3);
3358 3358 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3359 3359 mstate->dtms_ucaller = ustack[2];
3360 3360 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3361 3361 }
3362 3362
3363 3363 return (mstate->dtms_ucaller);
3364 3364
3365 3365 case DIF_VAR_PROBEPROV:
3366 3366 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3367 3367 return (dtrace_dif_varstr(
3368 3368 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3369 3369 state, mstate));
3370 3370
3371 3371 case DIF_VAR_PROBEMOD:
3372 3372 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3373 3373 return (dtrace_dif_varstr(
3374 3374 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3375 3375 state, mstate));
3376 3376
3377 3377 case DIF_VAR_PROBEFUNC:
3378 3378 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3379 3379 return (dtrace_dif_varstr(
3380 3380 (uintptr_t)mstate->dtms_probe->dtpr_func,
3381 3381 state, mstate));
3382 3382
3383 3383 case DIF_VAR_PROBENAME:
3384 3384 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3385 3385 return (dtrace_dif_varstr(
3386 3386 (uintptr_t)mstate->dtms_probe->dtpr_name,
3387 3387 state, mstate));
3388 3388
3389 3389 case DIF_VAR_PID:
3390 3390 if (!dtrace_priv_proc(state, mstate))
3391 3391 return (0);
3392 3392
3393 3393 /*
3394 3394 * Note that we are assuming that an unanchored probe is
3395 3395 * always due to a high-level interrupt. (And we're assuming
3396 3396 * that there is only a single high level interrupt.)
3397 3397 */
3398 3398 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3399 3399 return (pid0.pid_id);
3400 3400
3401 3401 /*
3402 3402 * It is always safe to dereference one's own t_procp pointer:
3403 3403 * it always points to a valid, allocated proc structure.
3404 3404 * Further, it is always safe to dereference the p_pidp member
3405 3405 * of one's own proc structure. (These are truisms because
3406 3406 * threads and processes don't clean up their own state --
3407 3407 * they leave that task to whomever reaps them.)
3408 3408 */
3409 3409 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3410 3410
3411 3411 case DIF_VAR_PPID:
3412 3412 if (!dtrace_priv_proc(state, mstate))
3413 3413 return (0);
3414 3414
3415 3415 /*
3416 3416 * See comment in DIF_VAR_PID.
3417 3417 */
3418 3418 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3419 3419 return (pid0.pid_id);
3420 3420
3421 3421 /*
3422 3422 * It is always safe to dereference one's own t_procp pointer:
3423 3423 * it always points to a valid, allocated proc structure.
3424 3424 * (This is true because threads don't clean up their own
3425 3425 * state -- they leave that task to whomever reaps them.)
3426 3426 */
3427 3427 return ((uint64_t)curthread->t_procp->p_ppid);
3428 3428
3429 3429 case DIF_VAR_TID:
3430 3430 /*
3431 3431 * See comment in DIF_VAR_PID.
3432 3432 */
3433 3433 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3434 3434 return (0);
3435 3435
3436 3436 return ((uint64_t)curthread->t_tid);
3437 3437
3438 3438 case DIF_VAR_EXECNAME:
3439 3439 if (!dtrace_priv_proc(state, mstate))
3440 3440 return (0);
3441 3441
3442 3442 /*
3443 3443 * See comment in DIF_VAR_PID.
3444 3444 */
3445 3445 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3446 3446 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3447 3447
3448 3448 /*
3449 3449 * It is always safe to dereference one's own t_procp pointer:
3450 3450 * it always points to a valid, allocated proc structure.
3451 3451 * (This is true because threads don't clean up their own
3452 3452 * state -- they leave that task to whomever reaps them.)
3453 3453 */
3454 3454 return (dtrace_dif_varstr(
3455 3455 (uintptr_t)curthread->t_procp->p_user.u_comm,
3456 3456 state, mstate));
3457 3457
3458 3458 case DIF_VAR_ZONENAME:
3459 3459 if (!dtrace_priv_proc(state, mstate))
3460 3460 return (0);
3461 3461
3462 3462 /*
3463 3463 * See comment in DIF_VAR_PID.
3464 3464 */
3465 3465 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3466 3466 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3467 3467
3468 3468 /*
3469 3469 * It is always safe to dereference one's own t_procp pointer:
3470 3470 * it always points to a valid, allocated proc structure.
3471 3471 * (This is true because threads don't clean up their own
3472 3472 * state -- they leave that task to whomever reaps them.)
3473 3473 */
3474 3474 return (dtrace_dif_varstr(
3475 3475 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3476 3476 state, mstate));
3477 3477
3478 3478 case DIF_VAR_UID:
3479 3479 if (!dtrace_priv_proc(state, mstate))
3480 3480 return (0);
3481 3481
3482 3482 /*
3483 3483 * See comment in DIF_VAR_PID.
3484 3484 */
3485 3485 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3486 3486 return ((uint64_t)p0.p_cred->cr_uid);
3487 3487
3488 3488 /*
3489 3489 * It is always safe to dereference one's own t_procp pointer:
3490 3490 * it always points to a valid, allocated proc structure.
3491 3491 * (This is true because threads don't clean up their own
3492 3492 * state -- they leave that task to whomever reaps them.)
3493 3493 *
3494 3494 * Additionally, it is safe to dereference one's own process
3495 3495 * credential, since this is never NULL after process birth.
3496 3496 */
3497 3497 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3498 3498
3499 3499 case DIF_VAR_GID:
3500 3500 if (!dtrace_priv_proc(state, mstate))
3501 3501 return (0);
3502 3502
3503 3503 /*
3504 3504 * See comment in DIF_VAR_PID.
3505 3505 */
3506 3506 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3507 3507 return ((uint64_t)p0.p_cred->cr_gid);
3508 3508
3509 3509 /*
3510 3510 * It is always safe to dereference one's own t_procp pointer:
3511 3511 * it always points to a valid, allocated proc structure.
3512 3512 * (This is true because threads don't clean up their own
3513 3513 * state -- they leave that task to whomever reaps them.)
3514 3514 *
3515 3515 * Additionally, it is safe to dereference one's own process
3516 3516 * credential, since this is never NULL after process birth.
3517 3517 */
3518 3518 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3519 3519
3520 3520 case DIF_VAR_ERRNO: {
3521 3521 klwp_t *lwp;
3522 3522 if (!dtrace_priv_proc(state, mstate))
3523 3523 return (0);
3524 3524
3525 3525 /*
3526 3526 * See comment in DIF_VAR_PID.
3527 3527 */
3528 3528 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3529 3529 return (0);
3530 3530
3531 3531 /*
3532 3532 * It is always safe to dereference one's own t_lwp pointer in
3533 3533 * the event that this pointer is non-NULL. (This is true
3534 3534 * because threads and lwps don't clean up their own state --
3535 3535 * they leave that task to whomever reaps them.)
3536 3536 */
3537 3537 if ((lwp = curthread->t_lwp) == NULL)
3538 3538 return (0);
3539 3539
3540 3540 return ((uint64_t)lwp->lwp_errno);
3541 3541 }
3542 3542 default:
3543 3543 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3544 3544 return (0);
3545 3545 }
3546 3546 }
3547 3547
3548 3548
3549 3549 typedef enum dtrace_json_state {
3550 3550 DTRACE_JSON_REST = 1,
3551 3551 DTRACE_JSON_OBJECT,
3552 3552 DTRACE_JSON_STRING,
3553 3553 DTRACE_JSON_STRING_ESCAPE,
3554 3554 DTRACE_JSON_STRING_ESCAPE_UNICODE,
3555 3555 DTRACE_JSON_COLON,
3556 3556 DTRACE_JSON_COMMA,
3557 3557 DTRACE_JSON_VALUE,
3558 3558 DTRACE_JSON_IDENTIFIER,
3559 3559 DTRACE_JSON_NUMBER,
3560 3560 DTRACE_JSON_NUMBER_FRAC,
3561 3561 DTRACE_JSON_NUMBER_EXP,
3562 3562 DTRACE_JSON_COLLECT_OBJECT
3563 3563 } dtrace_json_state_t;
3564 3564
3565 3565 /*
3566 3566 * This function possesses just enough knowledge about JSON to extract a single
3567 3567 * value from a JSON string and store it in the scratch buffer. It is able
3568 3568 * to extract nested object values, and members of arrays by index.
3569 3569 *
3570 3570 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3571 3571 * be looked up as we descend into the object tree. e.g.
3572 3572 *
3573 3573 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3574 3574 * with nelems = 5.
3575 3575 *
3576 3576 * The run time of this function must be bounded above by strsize to limit the
3577 3577 * amount of work done in probe context. As such, it is implemented as a
3578 3578 * simple state machine, reading one character at a time using safe loads
3579 3579 * until we find the requested element, hit a parsing error or run off the
3580 3580 * end of the object or string.
3581 3581 *
3582 3582 * As there is no way for a subroutine to return an error without interrupting
3583 3583 * clause execution, we simply return NULL in the event of a missing key or any
3584 3584 * other error condition. Each NULL return in this function is commented with
3585 3585 * the error condition it represents -- parsing or otherwise.
3586 3586 *
3587 3587 * The set of states for the state machine closely matches the JSON
3588 3588 * specification (http://json.org/). Briefly:
3589 3589 *
3590 3590 * DTRACE_JSON_REST:
3591 3591 * Skip whitespace until we find either a top-level Object, moving
3592 3592 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3593 3593 *
3594 3594 * DTRACE_JSON_OBJECT:
3595 3595 * Locate the next key String in an Object. Sets a flag to denote
3596 3596 * the next String as a key string and moves to DTRACE_JSON_STRING.
3597 3597 *
3598 3598 * DTRACE_JSON_COLON:
3599 3599 * Skip whitespace until we find the colon that separates key Strings
3600 3600 * from their values. Once found, move to DTRACE_JSON_VALUE.
3601 3601 *
3602 3602 * DTRACE_JSON_VALUE:
3603 3603 * Detects the type of the next value (String, Number, Identifier, Object
3604 3604 * or Array) and routes to the states that process that type. Here we also
3605 3605 * deal with the element selector list if we are requested to traverse down
3606 3606 * into the object tree.
3607 3607 *
3608 3608 * DTRACE_JSON_COMMA:
3609 3609 * Skip whitespace until we find the comma that separates key-value pairs
3610 3610 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3611 3611 * (similarly DTRACE_JSON_VALUE). All following literal value processing
3612 3612 * states return to this state at the end of their value, unless otherwise
3613 3613 * noted.
3614 3614 *
3615 3615 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3616 3616 * Processes a Number literal from the JSON, including any exponent
3617 3617 * component that may be present. Numbers are returned as strings, which
3618 3618 * may be passed to strtoll() if an integer is required.
3619 3619 *
3620 3620 * DTRACE_JSON_IDENTIFIER:
3621 3621 * Processes a "true", "false" or "null" literal in the JSON.
3622 3622 *
3623 3623 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3624 3624 * DTRACE_JSON_STRING_ESCAPE_UNICODE:
3625 3625 * Processes a String literal from the JSON, whether the String denotes
3626 3626 * a key, a value or part of a larger Object. Handles all escape sequences
3627 3627 * present in the specification, including four-digit unicode characters,
3628 3628 * but merely includes the escape sequence without converting it to the
3629 3629 * actual escaped character. If the String is flagged as a key, we
3630 3630 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3631 3631 *
3632 3632 * DTRACE_JSON_COLLECT_OBJECT:
3633 3633 * This state collects an entire Object (or Array), correctly handling
3634 3634 * embedded strings. If the full element selector list matches this nested
3635 3635 * object, we return the Object in full as a string. If not, we use this
3636 3636 * state to skip to the next value at this level and continue processing.
3637 3637 *
3638 3638 * NOTE: This function uses various macros from strtolctype.h to manipulate
3639 3639 * digit values, etc -- these have all been checked to ensure they make
3640 3640 * no additional function calls.
3641 3641 */
3642 3642 static char *
3643 3643 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3644 3644 char *dest)
3645 3645 {
3646 3646 dtrace_json_state_t state = DTRACE_JSON_REST;
3647 3647 int64_t array_elem = INT64_MIN;
3648 3648 int64_t array_pos = 0;
3649 3649 uint8_t escape_unicount = 0;
3650 3650 boolean_t string_is_key = B_FALSE;
3651 3651 boolean_t collect_object = B_FALSE;
3652 3652 boolean_t found_key = B_FALSE;
3653 3653 boolean_t in_array = B_FALSE;
3654 3654 uint32_t braces = 0, brackets = 0;
3655 3655 char *elem = elemlist;
3656 3656 char *dd = dest;
3657 3657 uintptr_t cur;
3658 3658
3659 3659 for (cur = json; cur < json + size; cur++) {
3660 3660 char cc = dtrace_load8(cur);
3661 3661 if (cc == '\0')
3662 3662 return (NULL);
3663 3663
3664 3664 switch (state) {
3665 3665 case DTRACE_JSON_REST:
3666 3666 if (isspace(cc))
3667 3667 break;
3668 3668
3669 3669 if (cc == '{') {
3670 3670 state = DTRACE_JSON_OBJECT;
3671 3671 break;
3672 3672 }
3673 3673
3674 3674 if (cc == '[') {
3675 3675 in_array = B_TRUE;
3676 3676 array_pos = 0;
3677 3677 array_elem = dtrace_strtoll(elem, 10, size);
3678 3678 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3679 3679 state = DTRACE_JSON_VALUE;
3680 3680 break;
3681 3681 }
3682 3682
3683 3683 /*
3684 3684 * ERROR: expected to find a top-level object or array.
3685 3685 */
3686 3686 return (NULL);
3687 3687 case DTRACE_JSON_OBJECT:
3688 3688 if (isspace(cc))
3689 3689 break;
3690 3690
3691 3691 if (cc == '"') {
3692 3692 state = DTRACE_JSON_STRING;
3693 3693 string_is_key = B_TRUE;
3694 3694 break;
3695 3695 }
3696 3696
3697 3697 /*
3698 3698 * ERROR: either the object did not start with a key
3699 3699 * string, or we've run off the end of the object
3700 3700 * without finding the requested key.
3701 3701 */
3702 3702 return (NULL);
3703 3703 case DTRACE_JSON_STRING:
3704 3704 if (cc == '\\') {
3705 3705 *dd++ = '\\';
3706 3706 state = DTRACE_JSON_STRING_ESCAPE;
3707 3707 break;
3708 3708 }
3709 3709
3710 3710 if (cc == '"') {
3711 3711 if (collect_object) {
3712 3712 /*
3713 3713 * We don't reset the dest here, as
3714 3714 * the string is part of a larger
3715 3715 * object being collected.
3716 3716 */
3717 3717 *dd++ = cc;
3718 3718 collect_object = B_FALSE;
3719 3719 state = DTRACE_JSON_COLLECT_OBJECT;
3720 3720 break;
3721 3721 }
3722 3722 *dd = '\0';
3723 3723 dd = dest; /* reset string buffer */
3724 3724 if (string_is_key) {
3725 3725 if (dtrace_strncmp(dest, elem,
3726 3726 size) == 0)
3727 3727 found_key = B_TRUE;
3728 3728 } else if (found_key) {
3729 3729 if (nelems > 1) {
3730 3730 /*
3731 3731 * We expected an object, not
3732 3732 * this string.
3733 3733 */
3734 3734 return (NULL);
3735 3735 }
3736 3736 return (dest);
3737 3737 }
3738 3738 state = string_is_key ? DTRACE_JSON_COLON :
3739 3739 DTRACE_JSON_COMMA;
3740 3740 string_is_key = B_FALSE;
3741 3741 break;
3742 3742 }
3743 3743
3744 3744 *dd++ = cc;
3745 3745 break;
3746 3746 case DTRACE_JSON_STRING_ESCAPE:
3747 3747 *dd++ = cc;
3748 3748 if (cc == 'u') {
3749 3749 escape_unicount = 0;
3750 3750 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3751 3751 } else {
3752 3752 state = DTRACE_JSON_STRING;
3753 3753 }
3754 3754 break;
3755 3755 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3756 3756 if (!isxdigit(cc)) {
3757 3757 /*
3758 3758 * ERROR: invalid unicode escape, expected
3759 3759 * four valid hexadecimal digits.
3760 3760 */
3761 3761 return (NULL);
3762 3762 }
3763 3763
3764 3764 *dd++ = cc;
3765 3765 if (++escape_unicount == 4)
3766 3766 state = DTRACE_JSON_STRING;
3767 3767 break;
3768 3768 case DTRACE_JSON_COLON:
3769 3769 if (isspace(cc))
3770 3770 break;
3771 3771
3772 3772 if (cc == ':') {
3773 3773 state = DTRACE_JSON_VALUE;
3774 3774 break;
3775 3775 }
3776 3776
3777 3777 /*
3778 3778 * ERROR: expected a colon.
3779 3779 */
3780 3780 return (NULL);
3781 3781 case DTRACE_JSON_COMMA:
3782 3782 if (isspace(cc))
3783 3783 break;
3784 3784
3785 3785 if (cc == ',') {
3786 3786 if (in_array) {
3787 3787 state = DTRACE_JSON_VALUE;
3788 3788 if (++array_pos == array_elem)
3789 3789 found_key = B_TRUE;
3790 3790 } else {
3791 3791 state = DTRACE_JSON_OBJECT;
3792 3792 }
3793 3793 break;
3794 3794 }
3795 3795
3796 3796 /*
3797 3797 * ERROR: either we hit an unexpected character, or
3798 3798 * we reached the end of the object or array without
3799 3799 * finding the requested key.
3800 3800 */
3801 3801 return (NULL);
3802 3802 case DTRACE_JSON_IDENTIFIER:
3803 3803 if (islower(cc)) {
3804 3804 *dd++ = cc;
3805 3805 break;
3806 3806 }
3807 3807
3808 3808 *dd = '\0';
3809 3809 dd = dest; /* reset string buffer */
3810 3810
3811 3811 if (dtrace_strncmp(dest, "true", 5) == 0 ||
3812 3812 dtrace_strncmp(dest, "false", 6) == 0 ||
3813 3813 dtrace_strncmp(dest, "null", 5) == 0) {
3814 3814 if (found_key) {
3815 3815 if (nelems > 1) {
3816 3816 /*
3817 3817 * ERROR: We expected an object,
3818 3818 * not this identifier.
3819 3819 */
3820 3820 return (NULL);
3821 3821 }
3822 3822 return (dest);
3823 3823 } else {
3824 3824 cur--;
3825 3825 state = DTRACE_JSON_COMMA;
3826 3826 break;
3827 3827 }
3828 3828 }
3829 3829
3830 3830 /*
3831 3831 * ERROR: we did not recognise the identifier as one
3832 3832 * of those in the JSON specification.
3833 3833 */
3834 3834 return (NULL);
3835 3835 case DTRACE_JSON_NUMBER:
3836 3836 if (cc == '.') {
3837 3837 *dd++ = cc;
3838 3838 state = DTRACE_JSON_NUMBER_FRAC;
3839 3839 break;
3840 3840 }
3841 3841
3842 3842 if (cc == 'x' || cc == 'X') {
3843 3843 /*
3844 3844 * ERROR: specification explicitly excludes
3845 3845 * hexadecimal or octal numbers.
3846 3846 */
3847 3847 return (NULL);
3848 3848 }
3849 3849
3850 3850 /* FALLTHRU */
3851 3851 case DTRACE_JSON_NUMBER_FRAC:
3852 3852 if (cc == 'e' || cc == 'E') {
3853 3853 *dd++ = cc;
3854 3854 state = DTRACE_JSON_NUMBER_EXP;
3855 3855 break;
3856 3856 }
3857 3857
3858 3858 if (cc == '+' || cc == '-') {
3859 3859 /*
3860 3860 * ERROR: expect sign as part of exponent only.
3861 3861 */
3862 3862 return (NULL);
3863 3863 }
3864 3864 /* FALLTHRU */
3865 3865 case DTRACE_JSON_NUMBER_EXP:
3866 3866 if (isdigit(cc) || cc == '+' || cc == '-') {
3867 3867 *dd++ = cc;
3868 3868 break;
3869 3869 }
3870 3870
3871 3871 *dd = '\0';
3872 3872 dd = dest; /* reset string buffer */
3873 3873 if (found_key) {
3874 3874 if (nelems > 1) {
3875 3875 /*
3876 3876 * ERROR: We expected an object, not
3877 3877 * this number.
3878 3878 */
3879 3879 return (NULL);
3880 3880 }
3881 3881 return (dest);
3882 3882 }
3883 3883
3884 3884 cur--;
3885 3885 state = DTRACE_JSON_COMMA;
3886 3886 break;
3887 3887 case DTRACE_JSON_VALUE:
3888 3888 if (isspace(cc))
3889 3889 break;
3890 3890
3891 3891 if (cc == '{' || cc == '[') {
3892 3892 if (nelems > 1 && found_key) {
3893 3893 in_array = cc == '[' ? B_TRUE : B_FALSE;
3894 3894 /*
3895 3895 * If our element selector directs us
3896 3896 * to descend into this nested object,
3897 3897 * then move to the next selector
3898 3898 * element in the list and restart the
3899 3899 * state machine.
3900 3900 */
3901 3901 while (*elem != '\0')
3902 3902 elem++;
3903 3903 elem++; /* skip the inter-element NUL */
3904 3904 nelems--;
3905 3905 dd = dest;
3906 3906 if (in_array) {
3907 3907 state = DTRACE_JSON_VALUE;
3908 3908 array_pos = 0;
3909 3909 array_elem = dtrace_strtoll(
3910 3910 elem, 10, size);
3911 3911 found_key = array_elem == 0 ?
3912 3912 B_TRUE : B_FALSE;
3913 3913 } else {
3914 3914 found_key = B_FALSE;
3915 3915 state = DTRACE_JSON_OBJECT;
3916 3916 }
3917 3917 break;
3918 3918 }
3919 3919
3920 3920 /*
3921 3921 * Otherwise, we wish to either skip this
3922 3922 * nested object or return it in full.
3923 3923 */
3924 3924 if (cc == '[')
3925 3925 brackets = 1;
3926 3926 else
3927 3927 braces = 1;
3928 3928 *dd++ = cc;
3929 3929 state = DTRACE_JSON_COLLECT_OBJECT;
3930 3930 break;
3931 3931 }
3932 3932
3933 3933 if (cc == '"') {
3934 3934 state = DTRACE_JSON_STRING;
3935 3935 break;
3936 3936 }
3937 3937
3938 3938 if (islower(cc)) {
3939 3939 /*
3940 3940 * Here we deal with true, false and null.
3941 3941 */
3942 3942 *dd++ = cc;
3943 3943 state = DTRACE_JSON_IDENTIFIER;
3944 3944 break;
3945 3945 }
3946 3946
3947 3947 if (cc == '-' || isdigit(cc)) {
3948 3948 *dd++ = cc;
3949 3949 state = DTRACE_JSON_NUMBER;
3950 3950 break;
3951 3951 }
3952 3952
3953 3953 /*
3954 3954 * ERROR: unexpected character at start of value.
3955 3955 */
3956 3956 return (NULL);
3957 3957 case DTRACE_JSON_COLLECT_OBJECT:
3958 3958 if (cc == '\0')
3959 3959 /*
3960 3960 * ERROR: unexpected end of input.
3961 3961 */
3962 3962 return (NULL);
3963 3963
3964 3964 *dd++ = cc;
3965 3965 if (cc == '"') {
3966 3966 collect_object = B_TRUE;
3967 3967 state = DTRACE_JSON_STRING;
3968 3968 break;
3969 3969 }
3970 3970
3971 3971 if (cc == ']') {
3972 3972 if (brackets-- == 0) {
3973 3973 /*
3974 3974 * ERROR: unbalanced brackets.
3975 3975 */
3976 3976 return (NULL);
3977 3977 }
3978 3978 } else if (cc == '}') {
3979 3979 if (braces-- == 0) {
3980 3980 /*
3981 3981 * ERROR: unbalanced braces.
3982 3982 */
3983 3983 return (NULL);
3984 3984 }
3985 3985 } else if (cc == '{') {
3986 3986 braces++;
3987 3987 } else if (cc == '[') {
3988 3988 brackets++;
3989 3989 }
3990 3990
3991 3991 if (brackets == 0 && braces == 0) {
3992 3992 if (found_key) {
3993 3993 *dd = '\0';
3994 3994 return (dest);
3995 3995 }
3996 3996 dd = dest; /* reset string buffer */
3997 3997 state = DTRACE_JSON_COMMA;
3998 3998 }
3999 3999 break;
4000 4000 }
4001 4001 }
4002 4002 return (NULL);
4003 4003 }
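
As a concrete illustration of the packed element list described above, the selector foo[0].bar.baz[32] could be presented to dtrace_json() as in the sketch below. This is illustrative only: json_input stands in for the address of the JSON text, dtrace_json() is static to this file and reads its input with safe loads, so it is only meaningful in probe context, and DIF_SUBR_JSON below builds the equivalent list in scratch memory at run time.

/* look up foo[0].bar.baz[32]; NULL on a parse error or a missing key */
static char *
json_lookup_sketch(uintptr_t json_input, char *dest, uint64_t destsz)
{
	char elemlist[] = "foo\0" "0\0" "bar\0" "baz\0" "32";	/* five packed keys */

	return (dtrace_json(destsz, json_input, elemlist, 5, dest));
}
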
4004 4004
4005 4005 /*
4006 4006 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4007 4007 * Notice that we don't bother validating the proper number of arguments or
4008 4008 * their types in the tuple stack. This isn't needed because all argument
4009 4009 * interpretation is safe because of our load safety -- the worst that can
4010 4010 * happen is that a bogus program can obtain bogus results.
4011 4011 */
4012 4012 static void
4013 4013 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4014 4014 dtrace_key_t *tupregs, int nargs,
4015 4015 dtrace_mstate_t *mstate, dtrace_state_t *state)
4016 4016 {
4017 4017 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4018 4018 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4019 4019 dtrace_vstate_t *vstate = &state->dts_vstate;
4020 4020
4021 4021 union {
4022 4022 mutex_impl_t mi;
4023 4023 uint64_t mx;
4024 4024 } m;
4025 4025
4026 4026 union {
4027 4027 krwlock_t ri;
4028 4028 uintptr_t rw;
4029 4029 } r;
4030 4030
4031 4031 switch (subr) {
4032 4032 case DIF_SUBR_RAND:
4033 4033 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
4034 4034 break;
4035 4035
4036 4036 case DIF_SUBR_MUTEX_OWNED:
4037 4037 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4038 4038 mstate, vstate)) {
4039 4039 regs[rd] = NULL;
4040 4040 break;
4041 4041 }
4042 4042
4043 4043 m.mx = dtrace_load64(tupregs[0].dttk_value);
4044 4044 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4045 4045 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4046 4046 else
4047 4047 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4048 4048 break;
4049 4049
4050 4050 case DIF_SUBR_MUTEX_OWNER:
4051 4051 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4052 4052 mstate, vstate)) {
4053 4053 regs[rd] = NULL;
4054 4054 break;
4055 4055 }
4056 4056
4057 4057 m.mx = dtrace_load64(tupregs[0].dttk_value);
4058 4058 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4059 4059 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4060 4060 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4061 4061 else
4062 4062 regs[rd] = 0;
4063 4063 break;
4064 4064
4065 4065 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4066 4066 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4067 4067 mstate, vstate)) {
4068 4068 regs[rd] = NULL;
4069 4069 break;
4070 4070 }
4071 4071
4072 4072 m.mx = dtrace_load64(tupregs[0].dttk_value);
4073 4073 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4074 4074 break;
4075 4075
4076 4076 case DIF_SUBR_MUTEX_TYPE_SPIN:
4077 4077 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4078 4078 mstate, vstate)) {
4079 4079 regs[rd] = NULL;
4080 4080 break;
4081 4081 }
4082 4082
4083 4083 m.mx = dtrace_load64(tupregs[0].dttk_value);
4084 4084 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4085 4085 break;
4086 4086
4087 4087 case DIF_SUBR_RW_READ_HELD: {
4088 4088 uintptr_t tmp;
4089 4089
4090 4090 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4091 4091 mstate, vstate)) {
4092 4092 regs[rd] = NULL;
4093 4093 break;
4094 4094 }
4095 4095
4096 4096 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4097 4097 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4098 4098 break;
4099 4099 }
4100 4100
4101 4101 case DIF_SUBR_RW_WRITE_HELD:
4102 4102 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4103 4103 mstate, vstate)) {
4104 4104 regs[rd] = NULL;
4105 4105 break;
4106 4106 }
4107 4107
4108 4108 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4109 4109 regs[rd] = _RW_WRITE_HELD(&r.ri);
4110 4110 break;
4111 4111
4112 4112 case DIF_SUBR_RW_ISWRITER:
4113 4113 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4114 4114 mstate, vstate)) {
4115 4115 regs[rd] = NULL;
4116 4116 break;
4117 4117 }
4118 4118
4119 4119 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4120 4120 regs[rd] = _RW_ISWRITER(&r.ri);
4121 4121 break;
4122 4122
4123 4123 case DIF_SUBR_BCOPY: {
4124 4124 /*
4125 4125 * We need to be sure that the destination is in the scratch
4126 4126 * region -- no other region is allowed.
4127 4127 */
4128 4128 uintptr_t src = tupregs[0].dttk_value;
4129 4129 uintptr_t dest = tupregs[1].dttk_value;
4130 4130 size_t size = tupregs[2].dttk_value;
4131 4131
4132 4132 if (!dtrace_inscratch(dest, size, mstate)) {
4133 4133 *flags |= CPU_DTRACE_BADADDR;
4134 4134 *illval = regs[rd];
4135 4135 break;
4136 4136 }
4137 4137
4138 4138 if (!dtrace_canload(src, size, mstate, vstate)) {
4139 4139 regs[rd] = NULL;
4140 4140 break;
4141 4141 }
4142 4142
4143 4143 dtrace_bcopy((void *)src, (void *)dest, size);
4144 4144 break;
4145 4145 }
4146 4146
4147 4147 case DIF_SUBR_ALLOCA:
4148 4148 case DIF_SUBR_COPYIN: {
4149 4149 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4150 4150 uint64_t size =
4151 4151 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4152 4152 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4153 4153
4154 4154 /*
4155 4155 * This action doesn't require any credential checks since
4156 4156 * probes will not activate in user contexts to which the
4157 4157 * enabling user does not have permissions.
4158 4158 */
4159 4159
4160 4160 /*
4161 4161 * Rounding up the user allocation size could have overflowed
4162 4162 * a large, bogus allocation (like -1ULL) to 0.
4163 4163 */
4164 4164 if (scratch_size < size ||
4165 4165 !DTRACE_INSCRATCH(mstate, scratch_size)) {
4166 4166 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4167 4167 regs[rd] = NULL;
4168 4168 break;
4169 4169 }
4170 4170
4171 4171 if (subr == DIF_SUBR_COPYIN) {
4172 4172 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4173 4173 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4174 4174 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4175 4175 }
4176 4176
4177 4177 mstate->dtms_scratch_ptr += scratch_size;
4178 4178 regs[rd] = dest;
4179 4179 break;
4180 4180 }
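
The wrap-around guard above can be shown in isolation. A user-land sketch follows; P2ROUNDUP is the usual power-of-two round-up macro from <sys/sysmacros.h>, and the function name is illustrative:

#include <stdint.h>
#include <stddef.h>

#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

/*
 * Returns nonzero if a request of 'size' bytes, after aligning the scratch
 * pointer up to 8 bytes, still fits in [scratch_ptr, scratch_end). A total
 * smaller than 'size' means the addition wrapped (e.g. size == (size_t)-1)
 * and the request must be rejected.
 */
static int
scratch_request_ok(uintptr_t scratch_ptr, uintptr_t scratch_end, size_t size)
{
	uintptr_t dest = P2ROUNDUP(scratch_ptr, 8);
	size_t scratch_size = (dest - scratch_ptr) + size;

	if (scratch_size < size)
		return (0);	/* rounding overflowed the request */

	return (scratch_end - scratch_ptr >= scratch_size);
}
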
4181 4181
4182 4182 case DIF_SUBR_COPYINTO: {
4183 4183 uint64_t size = tupregs[1].dttk_value;
4184 4184 uintptr_t dest = tupregs[2].dttk_value;
4185 4185
4186 4186 /*
4187 4187 * This action doesn't require any credential checks since
4188 4188 * probes will not activate in user contexts to which the
4189 4189 * enabling user does not have permissions.
4190 4190 */
4191 4191 if (!dtrace_inscratch(dest, size, mstate)) {
4192 4192 *flags |= CPU_DTRACE_BADADDR;
4193 4193 *illval = regs[rd];
4194 4194 break;
4195 4195 }
4196 4196
4197 4197 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4198 4198 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4199 4199 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4200 4200 break;
4201 4201 }
4202 4202
4203 4203 case DIF_SUBR_COPYINSTR: {
4204 4204 uintptr_t dest = mstate->dtms_scratch_ptr;
4205 4205 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4206 4206
4207 4207 if (nargs > 1 && tupregs[1].dttk_value < size)
4208 4208 size = tupregs[1].dttk_value + 1;
4209 4209
4210 4210 /*
4211 4211 * This action doesn't require any credential checks since
4212 4212 * probes will not activate in user contexts to which the
4213 4213 * enabling user does not have permissions.
4214 4214 */
4215 4215 if (!DTRACE_INSCRATCH(mstate, size)) {
4216 4216 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4217 4217 regs[rd] = NULL;
4218 4218 break;
4219 4219 }
4220 4220
4221 4221 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4222 4222 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4223 4223 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4224 4224
4225 4225 ((char *)dest)[size - 1] = '\0';
4226 4226 mstate->dtms_scratch_ptr += size;
4227 4227 regs[rd] = dest;
4228 4228 break;
4229 4229 }
4230 4230
4231 4231 case DIF_SUBR_MSGSIZE:
4232 4232 case DIF_SUBR_MSGDSIZE: {
4233 4233 uintptr_t baddr = tupregs[0].dttk_value, daddr;
4234 4234 uintptr_t wptr, rptr;
4235 4235 size_t count = 0;
4236 4236 int cont = 0;
4237 4237
4238 4238 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4239 4239
4240 4240 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4241 4241 vstate)) {
4242 4242 regs[rd] = NULL;
4243 4243 break;
4244 4244 }
4245 4245
4246 4246 wptr = dtrace_loadptr(baddr +
4247 4247 offsetof(mblk_t, b_wptr));
4248 4248
4249 4249 rptr = dtrace_loadptr(baddr +
4250 4250 offsetof(mblk_t, b_rptr));
4251 4251
4252 4252 if (wptr < rptr) {
4253 4253 *flags |= CPU_DTRACE_BADADDR;
4254 4254 *illval = tupregs[0].dttk_value;
4255 4255 break;
4256 4256 }
4257 4257
4258 4258 daddr = dtrace_loadptr(baddr +
4259 4259 offsetof(mblk_t, b_datap));
4260 4260
4261 4261 baddr = dtrace_loadptr(baddr +
4262 4262 offsetof(mblk_t, b_cont));
4263 4263
4264 4264 /*
4265 4265 * We want to guard against denial-of-service here,
4266 4266 * so we're only going to search the list for
4267 4267 * dtrace_msgdsize_max mblks.
4268 4268 */
4269 4269 if (cont++ > dtrace_msgdsize_max) {
4270 4270 *flags |= CPU_DTRACE_ILLOP;
4271 4271 break;
4272 4272 }
4273 4273
4274 4274 if (subr == DIF_SUBR_MSGDSIZE) {
4275 4275 if (dtrace_load8(daddr +
4276 4276 offsetof(dblk_t, db_type)) != M_DATA)
4277 4277 continue;
4278 4278 }
4279 4279
4280 4280 count += wptr - rptr;
4281 4281 }
4282 4282
4283 4283 if (!(*flags & CPU_DTRACE_FAULT))
4284 4284 regs[rd] = count;
4285 4285
4286 4286 break;
4287 4287 }
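
The walk above refuses to follow more than dtrace_msgdsize_max links so that a corrupt or maliciously long message chain cannot stall probe context. The same idea as a stand-alone sketch, using an illustrative struct and bound rather than the kernel's mblk_t:

#include <sys/types.h>
#include <stddef.h>

struct blk {
	unsigned char	*rptr;		/* first unread byte */
	unsigned char	*wptr;		/* first unwritten byte */
	struct blk	*next;		/* continuation block */
};

#define	BLK_WALK_MAX	100		/* arbitrary illustrative bound */

static ssize_t
chain_size(const struct blk *bp)
{
	size_t count = 0;
	int links = 0;

	for (; bp != NULL; bp = bp->next) {
		if (bp->wptr < bp->rptr)
			return (-1);	/* inconsistent block: bail out */
		if (links++ > BLK_WALK_MAX)
			return (-1);	/* refuse an unbounded walk */
		count += (size_t)(bp->wptr - bp->rptr);
	}

	return ((ssize_t)count);
}
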
4288 4288
4289 4289 case DIF_SUBR_PROGENYOF: {
4290 4290 pid_t pid = tupregs[0].dttk_value;
4291 4291 proc_t *p;
4292 4292 int rval = 0;
4293 4293
4294 4294 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4295 4295
4296 4296 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4297 4297 if (p->p_pidp->pid_id == pid) {
4298 4298 rval = 1;
4299 4299 break;
4300 4300 }
4301 4301 }
4302 4302
4303 4303 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4304 4304
4305 4305 regs[rd] = rval;
4306 4306 break;
4307 4307 }
4308 4308
4309 4309 case DIF_SUBR_SPECULATION:
4310 4310 regs[rd] = dtrace_speculation(state);
4311 4311 break;
4312 4312
4313 4313 case DIF_SUBR_COPYOUT: {
4314 4314 uintptr_t kaddr = tupregs[0].dttk_value;
4315 4315 uintptr_t uaddr = tupregs[1].dttk_value;
4316 4316 uint64_t size = tupregs[2].dttk_value;
4317 4317
4318 4318 if (!dtrace_destructive_disallow &&
4319 4319 dtrace_priv_proc_control(state, mstate) &&
4320 4320 !dtrace_istoxic(kaddr, size) &&
4321 4321 dtrace_canload(kaddr, size, mstate, vstate)) {
4322 4322 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4323 4323 dtrace_copyout(kaddr, uaddr, size, flags);
4324 4324 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4325 4325 }
4326 4326 break;
4327 4327 }
4328 4328
4329 4329 case DIF_SUBR_COPYOUTSTR: {
4330 4330 uintptr_t kaddr = tupregs[0].dttk_value;
4331 4331 uintptr_t uaddr = tupregs[1].dttk_value;
4332 4332 uint64_t size = tupregs[2].dttk_value;
4333 4333 size_t lim;
4334 4334
4335 4335 if (!dtrace_destructive_disallow &&
4336 4336 dtrace_priv_proc_control(state, mstate) &&
4337 4337 !dtrace_istoxic(kaddr, size) &&
4338 4338 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
4339 4339 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4340 4340 dtrace_copyoutstr(kaddr, uaddr, lim, flags);
4341 4341 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4342 4342 }
4343 4343 break;
4344 4344 }
4345 4345
4346 4346 case DIF_SUBR_STRLEN: {
4347 4347 size_t size = state->dts_options[DTRACEOPT_STRSIZE];
4348 4348 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4349 4349 size_t lim;
4350 4350
4351 4351 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4352 4352 regs[rd] = NULL;
4353 4353 break;
4354 4354 }
4355 4355 regs[rd] = dtrace_strlen((char *)addr, lim);
4356 4356
4357 4357 break;
4358 4358 }
4359 4359
4360 4360 case DIF_SUBR_STRCHR:
4361 4361 case DIF_SUBR_STRRCHR: {
4362 4362 /*
4363 4363 * We're going to iterate over the string looking for the
4364 4364 * specified character. We will iterate until we have reached
4365 4365 * the string length or we have found the character. If this
4366 4366 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4367 4367 * of the specified character instead of the first.
4368 4368 */
4369 4369 uintptr_t addr = tupregs[0].dttk_value;
4370 4370 uintptr_t addr_limit;
4371 4371 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4372 4372 size_t lim;
4373 4373 char c, target = (char)tupregs[1].dttk_value;
4374 4374
4375 4375 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4376 4376 regs[rd] = NULL;
4377 4377 break;
4378 4378 }
4379 4379 addr_limit = addr + lim;
4380 4380
4381 4381 for (regs[rd] = NULL; addr < addr_limit; addr++) {
4382 4382 if ((c = dtrace_load8(addr)) == target) {
4383 4383 regs[rd] = addr;
4384 4384
4385 4385 if (subr == DIF_SUBR_STRCHR)
4386 4386 break;
4387 4387 }
4388 4388 if (c == '\0')
4389 4389 break;
4390 4390 }
4391 4391
4392 4392 break;
4393 4393 }
4394 4394
4395 4395 case DIF_SUBR_STRSTR:
4396 4396 case DIF_SUBR_INDEX:
4397 4397 case DIF_SUBR_RINDEX: {
4398 4398 /*
4399 4399 * We're going to iterate over the string looking for the
4400 4400 * specified string. We will iterate until we have reached
4401 4401 * the string length or we have found the string. (Yes, this
4402 4402 * is done in the most naive way possible -- but considering
4403 4403 * that the string we're searching for is likely to be
4404 4404 * relatively short, the complexity of Rabin-Karp or similar
4405 4405 * hardly seems merited.)
4406 4406 */
4407 4407 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4408 4408 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4409 4409 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4410 4410 size_t len = dtrace_strlen(addr, size);
4411 4411 size_t sublen = dtrace_strlen(substr, size);
4412 4412 char *limit = addr + len, *orig = addr;
4413 4413 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4414 4414 int inc = 1;
4415 4415
4416 4416 regs[rd] = notfound;
4417 4417
4418 4418 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4419 4419 regs[rd] = NULL;
4420 4420 break;
4421 4421 }
4422 4422
4423 4423 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4424 4424 vstate)) {
4425 4425 regs[rd] = NULL;
4426 4426 break;
4427 4427 }
4428 4428
4429 4429 /*
4430 4430 * strstr() and index()/rindex() have similar semantics if
4431 4431 * both strings are the empty string: strstr() returns a
4432 4432 * pointer to the (empty) string, and index() and rindex()
4433 4433 * both return index 0 (regardless of any position argument).
4434 4434 */
4435 4435 if (sublen == 0 && len == 0) {
4436 4436 if (subr == DIF_SUBR_STRSTR)
4437 4437 regs[rd] = (uintptr_t)addr;
4438 4438 else
4439 4439 regs[rd] = 0;
4440 4440 break;
4441 4441 }
4442 4442
4443 4443 if (subr != DIF_SUBR_STRSTR) {
4444 4444 if (subr == DIF_SUBR_RINDEX) {
4445 4445 limit = orig - 1;
4446 4446 addr += len;
4447 4447 inc = -1;
4448 4448 }
4449 4449
4450 4450 /*
4451 4451 * Both index() and rindex() take an optional position
4452 4452 * argument that denotes the starting position.
4453 4453 */
4454 4454 if (nargs == 3) {
4455 4455 int64_t pos = (int64_t)tupregs[2].dttk_value;
4456 4456
4457 4457 /*
4458 4458 * If the position argument to index() is
4459 4459 * negative, Perl implicitly clamps it at
4460 4460 * zero. This semantic is a little surprising
4461 4461 * given the special meaning of negative
4462 4462 * positions to similar Perl functions like
4463 4463 * substr(), but it appears to reflect a
4464 4464 * notion that index() can start from a
4465 4465 * negative index and increment its way up to
4466 4466 * the string. Given this notion, Perl's
4467 4467 * rindex() is at least self-consistent in
4468 4468 * that it implicitly clamps positions greater
4469 4469 * than the string length to be the string
4470 4470 * length. Where Perl completely loses
4471 4471 * coherence, however, is when the specified
4472 4472 * substring is the empty string (""). In
4473 4473 * this case, even if the position is
4474 4474 * negative, rindex() returns 0 -- and even if
4475 4475 * the position is greater than the length,
4476 4476 * index() returns the string length. These
4477 4477 * semantics violate the notion that index()
4478 4478 * should never return a value less than the
4479 4479 * specified position and that rindex() should
4480 4480 * never return a value greater than the
4481 4481 * specified position. (One assumes that
4482 4482 * these semantics are artifacts of Perl's
4483 4483 * implementation and not the results of
4484 4484 * deliberate design -- it beggars belief that
4485 4485 * even Larry Wall could desire such oddness.)
4486 4486 * While in the abstract one would wish for
4487 4487 * consistent position semantics across
4488 4488 * substr(), index() and rindex() -- or at the
4489 4489 * very least self-consistent position
4490 4490 * semantics for index() and rindex() -- we
4491 4491 * instead opt to keep with the extant Perl
4492 4492 * semantics, in all their broken glory. (Do
4493 4493 * we have more desire to maintain Perl's
4494 4494 * semantics than Perl does? Probably.)
4495 4495 */
4496 4496 if (subr == DIF_SUBR_RINDEX) {
4497 4497 if (pos < 0) {
4498 4498 if (sublen == 0)
4499 4499 regs[rd] = 0;
4500 4500 break;
4501 4501 }
4502 4502
4503 4503 if (pos > len)
4504 4504 pos = len;
4505 4505 } else {
4506 4506 if (pos < 0)
4507 4507 pos = 0;
4508 4508
4509 4509 if (pos >= len) {
4510 4510 if (sublen == 0)
4511 4511 regs[rd] = len;
4512 4512 break;
4513 4513 }
4514 4514 }
4515 4515
4516 4516 addr = orig + pos;
4517 4517 }
4518 4518 }
4519 4519
4520 4520 for (regs[rd] = notfound; addr != limit; addr += inc) {
4521 4521 if (dtrace_strncmp(addr, substr, sublen) == 0) {
4522 4522 if (subr != DIF_SUBR_STRSTR) {
4523 4523 /*
4524 4524 * As D index() and rindex() are
4525 4525 * modeled on Perl (and not on awk),
4526 4526 * we return a zero-based (and not a
4527 4527 * one-based) index. (For you Perl
4528 4528 * weenies: no, we're not going to add
4529 4529 * $[ -- and shouldn't you be at a con
4530 4530 * or something?)
4531 4531 */
4532 4532 regs[rd] = (uintptr_t)(addr - orig);
4533 4533 break;
4534 4534 }
4535 4535
4536 4536 ASSERT(subr == DIF_SUBR_STRSTR);
4537 4537 regs[rd] = (uintptr_t)addr;
4538 4538 break;
4539 4539 }
4540 4540 }
4541 4541
4542 4542 break;
4543 4543 }
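
For example, under the clamping above, index("abc", "", 10) evaluates to 3 (the string length), rindex("abc", "", -1) evaluates to 0, and a negative position passed to index() is treated as position 0.
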
4544 4544
4545 4545 case DIF_SUBR_STRTOK: {
4546 4546 uintptr_t addr = tupregs[0].dttk_value;
4547 4547 uintptr_t tokaddr = tupregs[1].dttk_value;
4548 4548 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4549 4549 uintptr_t limit, toklimit;
4550 4550 size_t clim;
4551 4551 uint8_t c, tokmap[32]; /* 256 / 8 */
4552 4552 char *dest = (char *)mstate->dtms_scratch_ptr;
4553 4553 int i;
4554 4554
4555 4555 /*
4556 4556 * Check both the token buffer and (later) the input buffer,
4557 4557 * since both could be non-scratch addresses.
4558 4558 */
4559 4559 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) {
4560 4560 regs[rd] = NULL;
4561 4561 break;
4562 4562 }
4563 4563 toklimit = tokaddr + clim;
4564 4564
4565 4565 if (!DTRACE_INSCRATCH(mstate, size)) {
4566 4566 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4567 4567 regs[rd] = NULL;
4568 4568 break;
4569 4569 }
4570 4570
4571 4571 if (addr == NULL) {
4572 4572 /*
4573 4573 * If the address specified is NULL, we use our saved
4574 4574 * strtok pointer from the mstate. Note that this
4575 4575 * means that the saved strtok pointer is _only_
4576 4576 * valid within multiple enablings of the same probe --
4577 4577 * it behaves like an implicit clause-local variable.
4578 4578 */
4579 4579 addr = mstate->dtms_strtok;
4580 4580 limit = mstate->dtms_strtok_limit;
4581 4581 } else {
4582 4582 /*
4583 4583 * If the user-specified address is non-NULL we must
4584 4584 * access check it. This is the only time we have
4585 4585 * a chance to do so, since this address may reside
4586 4586 * in the string table of this clause -- future calls
4587 4587 * (when we fetch addr from mstate->dtms_strtok)
4588 4588 * would fail this access check.
4589 4589 */
4590 4590 if (!dtrace_strcanload(addr, size, &clim, mstate,
4591 4591 vstate)) {
4592 4592 regs[rd] = NULL;
4593 4593 break;
4594 4594 }
4595 4595 limit = addr + clim;
4596 4596 }
4597 4597
4598 4598 /*
4599 4599 * First, zero the token map, and then process the token
4600 4600 * string -- setting a bit in the map for every character
4601 4601 * found in the token string.
4602 4602 */
4603 4603 for (i = 0; i < sizeof (tokmap); i++)
4604 4604 tokmap[i] = 0;
4605 4605
4606 4606 for (; tokaddr < toklimit; tokaddr++) {
4607 4607 if ((c = dtrace_load8(tokaddr)) == '\0')
4608 4608 break;
4609 4609
4610 4610 ASSERT((c >> 3) < sizeof (tokmap));
4611 4611 tokmap[c >> 3] |= (1 << (c & 0x7));
4612 4612 }
4613 4613
4614 4614 for (; addr < limit; addr++) {
4615 4615 /*
4616 4616 * We're looking for a character that is _not_
4617 4617 * contained in the token string.
4618 4618 */
4619 4619 if ((c = dtrace_load8(addr)) == '\0')
4620 4620 break;
4621 4621
4622 4622 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4623 4623 break;
4624 4624 }
4625 4625
4626 4626 if (c == '\0') {
4627 4627 /*
4628 4628 * We reached the end of the string without finding
4629 4629 * any character that was not in the token string.
4630 4630 * We return NULL in this case, and we set the saved
4631 4631 * address to NULL as well.
4632 4632 */
4633 4633 regs[rd] = NULL;
4634 4634 mstate->dtms_strtok = NULL;
4635 4635 mstate->dtms_strtok_limit = NULL;
4636 4636 break;
4637 4637 }
4638 4638
4639 4639 /*
4640 4640 * From here on, we're copying into the destination string.
4641 4641 */
4642 4642 for (i = 0; addr < limit && i < size - 1; addr++) {
4643 4643 if ((c = dtrace_load8(addr)) == '\0')
4644 4644 break;
4645 4645
4646 4646 if (tokmap[c >> 3] & (1 << (c & 0x7)))
4647 4647 break;
4648 4648
4649 4649 ASSERT(i < size);
4650 4650 dest[i++] = c;
4651 4651 }
4652 4652
4653 4653 ASSERT(i < size);
4654 4654 dest[i] = '\0';
4655 4655 regs[rd] = (uintptr_t)dest;
4656 4656 mstate->dtms_scratch_ptr += size;
4657 4657 mstate->dtms_strtok = addr;
4658 4658 mstate->dtms_strtok_limit = limit;
4659 4659 break;
4660 4660 }
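
The delimiter bitmap above makes each membership test a constant-time shift and mask, independent of how many delimiter characters were supplied. A stand-alone sketch of the same 256-bit map (names are illustrative):

#include <stdint.h>
#include <string.h>

/* set one bit per delimiter byte: 256 possible values / 8 bits = 32 bytes */
static void
tokmap_fill(uint8_t tokmap[32], const char *delims)
{
	uint8_t c;

	(void) memset(tokmap, 0, 32);
	while ((c = (uint8_t)*delims++) != '\0')
		tokmap[c >> 3] |= (1 << (c & 0x7));
}

/* membership test is a shift and a mask, regardless of delimiter count */
static int
tokmap_member(const uint8_t tokmap[32], uint8_t c)
{
	return ((tokmap[c >> 3] & (1 << (c & 0x7))) != 0);
}
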
4661 4661
4662 4662 case DIF_SUBR_SUBSTR: {
4663 4663 uintptr_t s = tupregs[0].dttk_value;
4664 4664 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4665 4665 char *d = (char *)mstate->dtms_scratch_ptr;
4666 4666 int64_t index = (int64_t)tupregs[1].dttk_value;
4667 4667 int64_t remaining = (int64_t)tupregs[2].dttk_value;
4668 4668 size_t len = dtrace_strlen((char *)s, size);
4669 4669 int64_t i;
4670 4670
4671 4671 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4672 4672 regs[rd] = NULL;
4673 4673 break;
4674 4674 }
4675 4675
4676 4676 if (!DTRACE_INSCRATCH(mstate, size)) {
4677 4677 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4678 4678 regs[rd] = NULL;
4679 4679 break;
4680 4680 }
4681 4681
4682 4682 if (nargs <= 2)
4683 4683 remaining = (int64_t)size;
4684 4684
4685 4685 if (index < 0) {
4686 4686 index += len;
4687 4687
4688 4688 if (index < 0 && index + remaining > 0) {
4689 4689 remaining += index;
4690 4690 index = 0;
4691 4691 }
4692 4692 }
4693 4693
4694 4694 if (index >= len || index < 0) {
4695 4695 remaining = 0;
4696 4696 } else if (remaining < 0) {
4697 4697 remaining += len - index;
4698 4698 } else if (index + remaining > size) {
4699 4699 remaining = size - index;
4700 4700 }
4701 4701
4702 4702 for (i = 0; i < remaining; i++) {
4703 4703 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4704 4704 break;
4705 4705 }
4706 4706
4707 4707 d[i] = '\0';
4708 4708
4709 4709 mstate->dtms_scratch_ptr += size;
4710 4710 regs[rd] = (uintptr_t)d;
4711 4711 break;
4712 4712 }
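
The index and length normalization above can be read in isolation; a sketch follows (the defaulting of remaining when no third argument is given is omitted). For a 5-character string such as "hello" with a large strsize, (index = -3, remaining = 2) normalizes to (2, 2), so the copy loop above would yield "ll".

#include <stdint.h>

static void
substr_normalize(int64_t *indexp, int64_t *remainingp, int64_t len, int64_t size)
{
	int64_t index = *indexp, remaining = *remainingp;

	if (index < 0) {
		index += len;		/* negative index counts from the end */

		if (index < 0 && index + remaining > 0) {
			remaining += index;
			index = 0;
		}
	}

	if (index >= len || index < 0)
		remaining = 0;		/* nothing to copy */
	else if (remaining < 0)
		remaining += len - index;	/* shorten from the end */
	else if (index + remaining > size)
		remaining = size - index;	/* never exceed strsize */

	*indexp = index;
	*remainingp = remaining;
}
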
4713 4713
4714 4714 case DIF_SUBR_JSON: {
4715 4715 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4716 4716 uintptr_t json = tupregs[0].dttk_value;
4717 4717 size_t jsonlen = dtrace_strlen((char *)json, size);
4718 4718 uintptr_t elem = tupregs[1].dttk_value;
4719 4719 size_t elemlen = dtrace_strlen((char *)elem, size);
4720 4720
4721 4721 char *dest = (char *)mstate->dtms_scratch_ptr;
4722 4722 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
4723 4723 char *ee = elemlist;
4724 4724 int nelems = 1;
4725 4725 uintptr_t cur;
4726 4726
4727 4727 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
4728 4728 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
4729 4729 regs[rd] = NULL;
4730 4730 break;
4731 4731 }
4732 4732
4733 4733 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
4734 4734 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4735 4735 regs[rd] = NULL;
4736 4736 break;
4737 4737 }
4738 4738
4739 4739 /*
4740 4740 * Read the element selector and split it up into a packed list
4741 4741 * of strings.
4742 4742 */
4743 4743 for (cur = elem; cur < elem + elemlen; cur++) {
4744 4744 char cc = dtrace_load8(cur);
4745 4745
4746 4746 if (cur == elem && cc == '[') {
4747 4747 /*
4748 4748 * If the first element selector key is
4749 4749 * actually an array index then ignore the
4750 4750 * bracket.
4751 4751 */
4752 4752 continue;
4753 4753 }
4754 4754
4755 4755 if (cc == ']')
4756 4756 continue;
4757 4757
4758 4758 if (cc == '.' || cc == '[') {
4759 4759 nelems++;
4760 4760 cc = '\0';
4761 4761 }
4762 4762
4763 4763 *ee++ = cc;
4764 4764 }
4765 4765 *ee++ = '\0';
4766 4766
4767 4767 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
4768 4768 nelems, dest)) != NULL)
4769 4769 mstate->dtms_scratch_ptr += jsonlen + 1;
4770 4770 break;
4771 4771 }
4772 4772
4773 4773 case DIF_SUBR_TOUPPER:
4774 4774 case DIF_SUBR_TOLOWER: {
4775 4775 uintptr_t s = tupregs[0].dttk_value;
4776 4776 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4777 4777 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4778 4778 size_t len = dtrace_strlen((char *)s, size);
4779 4779 char lower, upper, convert;
4780 4780 int64_t i;
4781 4781
4782 4782 if (subr == DIF_SUBR_TOUPPER) {
4783 4783 lower = 'a';
4784 4784 upper = 'z';
4785 4785 convert = 'A';
4786 4786 } else {
4787 4787 lower = 'A';
4788 4788 upper = 'Z';
4789 4789 convert = 'a';
4790 4790 }
4791 4791
4792 4792 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4793 4793 regs[rd] = NULL;
4794 4794 break;
4795 4795 }
4796 4796
4797 4797 if (!DTRACE_INSCRATCH(mstate, size)) {
4798 4798 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4799 4799 regs[rd] = NULL;
4800 4800 break;
4801 4801 }
4802 4802
4803 4803 for (i = 0; i < size - 1; i++) {
4804 4804 if ((c = dtrace_load8(s + i)) == '\0')
4805 4805 break;
4806 4806
4807 4807 if (c >= lower && c <= upper)
4808 4808 c = convert + (c - lower);
4809 4809
4810 4810 dest[i] = c;
4811 4811 }
4812 4812
4813 4813 ASSERT(i < size);
4814 4814 dest[i] = '\0';
4815 4815 regs[rd] = (uintptr_t)dest;
4816 4816 mstate->dtms_scratch_ptr += size;
4817 4817 break;
4818 4818 }
4819 4819
4820 4820 case DIF_SUBR_GETMAJOR:
4821 4821 #ifdef _LP64
4822 4822 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4823 4823 #else
4824 4824 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4825 4825 #endif
4826 4826 break;
4827 4827
4828 4828 case DIF_SUBR_GETMINOR:
4829 4829 #ifdef _LP64
4830 4830 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4831 4831 #else
4832 4832 regs[rd] = tupregs[0].dttk_value & MAXMIN;
4833 4833 #endif
4834 4834 break;
4835 4835
4836 4836 case DIF_SUBR_DDI_PATHNAME: {
4837 4837 /*
4838 4838 * This one is a galactic mess. We are going to roughly
4839 4839 * emulate ddi_pathname(), but it's made more complicated
4840 4840 * by the fact that we (a) want to include the minor name and
4841 4841 * (b) must proceed iteratively instead of recursively.
4842 4842 */
4843 4843 uintptr_t dest = mstate->dtms_scratch_ptr;
4844 4844 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4845 4845 char *start = (char *)dest, *end = start + size - 1;
4846 4846 uintptr_t daddr = tupregs[0].dttk_value;
4847 4847 int64_t minor = (int64_t)tupregs[1].dttk_value;
4848 4848 char *s;
4849 4849 int i, len, depth = 0;
4850 4850
4851 4851 /*
4852 4852 * Due to all the pointer jumping we do and context we must
4853 4853 * rely upon, we just mandate that the user must have kernel
4854 4854 * read privileges to use this routine.
4855 4855 */
4856 4856 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4857 4857 *flags |= CPU_DTRACE_KPRIV;
4858 4858 *illval = daddr;
4859 4859 regs[rd] = NULL;
4860 4860 }
4861 4861
4862 4862 if (!DTRACE_INSCRATCH(mstate, size)) {
4863 4863 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4864 4864 regs[rd] = NULL;
4865 4865 break;
4866 4866 }
4867 4867
4868 4868 *end = '\0';
4869 4869
4870 4870 /*
4871 4871 * We want to have a name for the minor. In order to do this,
4872 4872 * we need to walk the minor list from the devinfo. We want
4873 4873 * to be sure that we don't infinitely walk a circular list,
4874 4874 * so we check for circularity by sending a scout pointer
4875 4875 * ahead two elements for every element that we iterate over;
4876 4876 * if the list is circular, these will ultimately point to the
4877 4877 * same element. You may recognize this little trick as the
4878 4878 * answer to a stupid interview question -- one that always
4879 4879 * seems to be asked by those who had to have it laboriously
4880 4880 * explained to them, and who can't even concisely describe
4881 4881 * the conditions under which one would be forced to resort to
4882 4882 * this technique. Needless to say, those conditions are
4883 4883 * found here -- and probably only here. Is this the only use
4884 4884 * of this infamous trick in shipping, production code? If it
4885 4885 * isn't, it probably should be...
4886 4886 */
4887 4887 if (minor != -1) {
4888 4888 uintptr_t maddr = dtrace_loadptr(daddr +
4889 4889 offsetof(struct dev_info, devi_minor));
4890 4890
4891 4891 uintptr_t next = offsetof(struct ddi_minor_data, next);
4892 4892 uintptr_t name = offsetof(struct ddi_minor_data,
4893 4893 d_minor) + offsetof(struct ddi_minor, name);
4894 4894 uintptr_t dev = offsetof(struct ddi_minor_data,
4895 4895 d_minor) + offsetof(struct ddi_minor, dev);
4896 4896 uintptr_t scout;
4897 4897
4898 4898 if (maddr != NULL)
4899 4899 scout = dtrace_loadptr(maddr + next);
4900 4900
4901 4901 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4902 4902 uint64_t m;
4903 4903 #ifdef _LP64
4904 4904 m = dtrace_load64(maddr + dev) & MAXMIN64;
4905 4905 #else
4906 4906 m = dtrace_load32(maddr + dev) & MAXMIN;
4907 4907 #endif
4908 4908 if (m != minor) {
4909 4909 maddr = dtrace_loadptr(maddr + next);
4910 4910
4911 4911 if (scout == NULL)
4912 4912 continue;
4913 4913
4914 4914 scout = dtrace_loadptr(scout + next);
4915 4915
4916 4916 if (scout == NULL)
4917 4917 continue;
4918 4918
4919 4919 scout = dtrace_loadptr(scout + next);
4920 4920
4921 4921 if (scout == NULL)
4922 4922 continue;
4923 4923
4924 4924 if (scout == maddr) {
4925 4925 *flags |= CPU_DTRACE_ILLOP;
4926 4926 break;
4927 4927 }
4928 4928
4929 4929 continue;
4930 4930 }
4931 4931
4932 4932 /*
4933 4933 * We have the minor data. Now we need to
4934 4934 * copy the minor's name into the end of the
4935 4935 * pathname.
4936 4936 */
4937 4937 s = (char *)dtrace_loadptr(maddr + name);
4938 4938 len = dtrace_strlen(s, size);
4939 4939
4940 4940 if (*flags & CPU_DTRACE_FAULT)
4941 4941 break;
4942 4942
4943 4943 if (len != 0) {
4944 4944 if ((end -= (len + 1)) < start)
4945 4945 break;
4946 4946
4947 4947 *end = ':';
4948 4948 }
4949 4949
4950 4950 for (i = 1; i <= len; i++)
4951 4951 end[i] = dtrace_load8((uintptr_t)s++);
4952 4952 break;
4953 4953 }
4954 4954 }
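The circularity check above is the familiar two-pointer (tortoise and hare) technique; a minimal stand-alone sketch follows, using illustrative node_t and list_is_circular() names rather than the ddi_minor_data walk itself.

#include <stddef.h>

typedef struct node {
	struct node *next;
} node_t;

/*
 * Walk the list with one pointer while a scout runs two nodes ahead per
 * step; if the scout ever catches the walker, the list is circular.
 */
static int
list_is_circular(node_t *head)
{
	node_t *walk = head;
	node_t *scout = (head != NULL) ? head->next : NULL;

	while (walk != NULL) {
		if (scout == NULL || scout->next == NULL)
			return (0);		/* reached the end: no cycle */
		scout = scout->next->next;	/* the scout advances by two */
		if (scout == walk)
			return (1);		/* caught the walker: cycle */
		walk = walk->next;
	}
	return (0);
}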
4955 4955
4956 4956 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4957 4957 ddi_node_state_t devi_state;
4958 4958
4959 4959 devi_state = dtrace_load32(daddr +
4960 4960 offsetof(struct dev_info, devi_node_state));
4961 4961
4962 4962 if (*flags & CPU_DTRACE_FAULT)
4963 4963 break;
4964 4964
4965 4965 if (devi_state >= DS_INITIALIZED) {
4966 4966 s = (char *)dtrace_loadptr(daddr +
4967 4967 offsetof(struct dev_info, devi_addr));
4968 4968 len = dtrace_strlen(s, size);
4969 4969
4970 4970 if (*flags & CPU_DTRACE_FAULT)
4971 4971 break;
4972 4972
4973 4973 if (len != 0) {
4974 4974 if ((end -= (len + 1)) < start)
4975 4975 break;
4976 4976
4977 4977 *end = '@';
4978 4978 }
4979 4979
4980 4980 for (i = 1; i <= len; i++)
4981 4981 end[i] = dtrace_load8((uintptr_t)s++);
4982 4982 }
4983 4983
4984 4984 /*
4985 4985 * Now for the node name...
4986 4986 */
4987 4987 s = (char *)dtrace_loadptr(daddr +
4988 4988 offsetof(struct dev_info, devi_node_name));
4989 4989
4990 4990 daddr = dtrace_loadptr(daddr +
4991 4991 offsetof(struct dev_info, devi_parent));
4992 4992
4993 4993 /*
4994 4994 * If our parent is NULL (that is, if we're the root
4995 4995 * node), we're going to use the special path
4996 4996 * "devices".
4997 4997 */
4998 4998 if (daddr == NULL)
4999 4999 s = "devices";
5000 5000
5001 5001 len = dtrace_strlen(s, size);
5002 5002 if (*flags & CPU_DTRACE_FAULT)
5003 5003 break;
5004 5004
5005 5005 if ((end -= (len + 1)) < start)
5006 5006 break;
5007 5007
5008 5008 for (i = 1; i <= len; i++)
5009 5009 end[i] = dtrace_load8((uintptr_t)s++);
5010 5010 *end = '/';
5011 5011
5012 5012 if (depth++ > dtrace_devdepth_max) {
5013 5013 *flags |= CPU_DTRACE_ILLOP;
5014 5014 break;
5015 5015 }
5016 5016 }
5017 5017
5018 5018 if (end < start)
5019 5019 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5020 5020
5021 5021 if (daddr == NULL) {
5022 5022 regs[rd] = (uintptr_t)end;
5023 5023 mstate->dtms_scratch_ptr += size;
5024 5024 }
5025 5025
5026 5026 break;
5027 5027 }
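The loop above assembles the pathname right to left: each component is prepended by stepping end back len + 1 bytes, writing the separator (':' before the minor name, '@' before the unit address, '/' before node names and the "devices" root), then copying the component. A user-space sketch of that prepend step, with prepend() as a hypothetical helper that is not in dtrace.c:

#include <string.h>

static char *
prepend(char *start, char *end, const char *name, char sep)
{
	size_t len = strlen(name);

	if ((size_t)(end - start) < len + 1)
		return (NULL);			/* out of room */
	end -= len + 1;
	end[0] = sep;				/* separator first */
	memcpy(end + 1, name, len);		/* then the component */
	return (end);
}

Repeated prepends toward start, finishing with the "devices" root, yield a string of the form "/devices/.../<node>@<addr>:<minor>".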
5028 5028
5029 5029 case DIF_SUBR_STRJOIN: {
5030 5030 char *d = (char *)mstate->dtms_scratch_ptr;
5031 5031 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5032 5032 uintptr_t s1 = tupregs[0].dttk_value;
5033 5033 uintptr_t s2 = tupregs[1].dttk_value;
5034 5034 int i = 0, j = 0;
5035 5035 size_t lim1, lim2;
5036 5036 char c;
5037 5037
5038 5038 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) ||
5039 5039 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) {
5040 5040 regs[rd] = NULL;
5041 5041 break;
5042 5042 }
5043 5043
5044 5044 if (!DTRACE_INSCRATCH(mstate, size)) {
5045 5045 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5046 5046 regs[rd] = NULL;
5047 5047 break;
5048 5048 }
5049 5049
5050 5050 for (;;) {
5051 5051 if (i >= size) {
5052 5052 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5053 5053 regs[rd] = NULL;
5054 5054 break;
5055 5055 }
5056 5056 c = (i >= lim1) ? '\0' : dtrace_load8(s1++);
5057 5057 if ((d[i++] = c) == '\0') {
5058 5058 i--;
5059 5059 break;
5060 5060 }
5061 5061 }
5062 5062
5063 5063 for (;;) {
5064 5064 if (i >= size) {
5065 5065 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5066 5066 regs[rd] = NULL;
5067 5067 break;
5068 5068 }
5069 5069
5070 5070 c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++);
5071 5071 if ((d[i++] = c) == '\0')
5072 5072 break;
5073 5073 }
5074 5074
5075 5075 if (i < size) {
5076 5076 mstate->dtms_scratch_ptr += i;
5077 5077 regs[rd] = (uintptr_t)d;
5078 5078 }
5079 5079
5080 5080 break;
5081 5081 }
5082 5082
5083 5083 case DIF_SUBR_STRTOLL: {
5084 5084 uintptr_t s = tupregs[0].dttk_value;
5085 5085 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5086 5086 size_t lim;
5087 5087 int base = 10;
5088 5088
5089 5089 if (nargs > 1) {
5090 5090 if ((base = tupregs[1].dttk_value) <= 1 ||
5091 5091 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5092 5092 *flags |= CPU_DTRACE_ILLOP;
5093 5093 break;
5094 5094 }
5095 5095 }
5096 5096
5097 5097 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
5098 5098 regs[rd] = INT64_MIN;
5099 5099 break;
5100 5100 }
5101 5101
5102 5102 regs[rd] = dtrace_strtoll((char *)s, base, lim);
5103 5103 break;
5104 5104 }
5105 5105
5106 5106 case DIF_SUBR_LLTOSTR: {
5107 5107 int64_t i = (int64_t)tupregs[0].dttk_value;
5108 5108 uint64_t val, digit;
5109 5109 uint64_t size = 65; /* enough room for 2^64 in binary */
5110 5110 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5111 5111 int base = 10;
5112 5112
5113 5113 if (nargs > 1) {
5114 5114 if ((base = tupregs[1].dttk_value) <= 1 ||
5115 5115 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5116 5116 *flags |= CPU_DTRACE_ILLOP;
5117 5117 break;
5118 5118 }
5119 5119 }
5120 5120
5121 5121 val = (base == 10 && i < 0) ? i * -1 : i;
5122 5122
5123 5123 if (!DTRACE_INSCRATCH(mstate, size)) {
5124 5124 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5125 5125 regs[rd] = NULL;
5126 5126 break;
5127 5127 }
5128 5128
5129 5129 for (*end-- = '\0'; val; val /= base) {
5130 5130 if ((digit = val % base) <= '9' - '0') {
5131 5131 *end-- = '0' + digit;
5132 5132 } else {
5133 5133 *end-- = 'a' + (digit - ('9' - '0') - 1);
5134 5134 }
5135 5135 }
5136 5136
5137 5137 if (i == 0 && base == 16)
5138 5138 *end-- = '0';
5139 5139
5140 5140 if (base == 16)
5141 5141 *end-- = 'x';
5142 5142
5143 5143 if (i == 0 || base == 8 || base == 16)
5144 5144 *end-- = '0';
5145 5145
5146 5146 if (i < 0 && base == 10)
5147 5147 *end-- = '-';
5148 5148
5149 5149 regs[rd] = (uintptr_t)end + 1;
5150 5150 mstate->dtms_scratch_ptr += size;
5151 5151 break;
5152 5152 }
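A rough user-space rendering of the digit loop above; lltostr_sketch() is an illustrative name, and the kernel version writes into scratch space rather than a caller-supplied buffer.

#include <stddef.h>
#include <stdint.h>

static const char *
lltostr_sketch(int64_t i, int base, char *buf, size_t size)
{
	char *end = buf + size - 1;
	uint64_t val = (base == 10 && i < 0) ? -(uint64_t)i : (uint64_t)i;
	uint64_t digit;

	*end-- = '\0';
	for (; val; val /= base) {
		if ((digit = val % base) <= 9)
			*end-- = '0' + digit;
		else
			*end-- = 'a' + (digit - 10);
	}
	if (i == 0 && base == 16)
		*end-- = '0';
	if (base == 16)
		*end-- = 'x';			/* "0x" prefix, as above */
	if (i == 0 || base == 8 || base == 16)
		*end-- = '0';
	if (i < 0 && base == 10)
		*end-- = '-';
	return (end + 1);
}

/*
 * lltostr_sketch(255, 16, buf, 32) -> "0xff"
 * lltostr_sketch(-10, 10, buf, 32) -> "-10"
 * lltostr_sketch(9, 8, buf, 32)    -> "011"
 * lltostr_sketch(0, 10, buf, 32)   -> "0"
 */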
5153 5153
5154 5154 case DIF_SUBR_HTONS:
5155 5155 case DIF_SUBR_NTOHS:
5156 5156 #ifdef _BIG_ENDIAN
5157 5157 regs[rd] = (uint16_t)tupregs[0].dttk_value;
5158 5158 #else
5159 5159 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5160 5160 #endif
5161 5161 break;
5162 5162
5163 5163
5164 5164 case DIF_SUBR_HTONL:
5165 5165 case DIF_SUBR_NTOHL:
5166 5166 #ifdef _BIG_ENDIAN
5167 5167 regs[rd] = (uint32_t)tupregs[0].dttk_value;
5168 5168 #else
5169 5169 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5170 5170 #endif
5171 5171 break;
5172 5172
5173 5173
5174 5174 case DIF_SUBR_HTONLL:
5175 5175 case DIF_SUBR_NTOHLL:
5176 5176 #ifdef _BIG_ENDIAN
5177 5177 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5178 5178 #else
5179 5179 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5180 5180 #endif
5181 5181 break;
5182 5182
5183 5183
5184 5184 case DIF_SUBR_DIRNAME:
5185 5185 case DIF_SUBR_BASENAME: {
5186 5186 char *dest = (char *)mstate->dtms_scratch_ptr;
5187 5187 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5188 5188 uintptr_t src = tupregs[0].dttk_value;
5189 5189 int i, j, len = dtrace_strlen((char *)src, size);
5190 5190 int lastbase = -1, firstbase = -1, lastdir = -1;
5191 5191 int start, end;
5192 5192
5193 5193 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5194 5194 regs[rd] = NULL;
5195 5195 break;
5196 5196 }
5197 5197
5198 5198 if (!DTRACE_INSCRATCH(mstate, size)) {
5199 5199 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5200 5200 regs[rd] = NULL;
5201 5201 break;
5202 5202 }
5203 5203
5204 5204 /*
5205 5205		 * The basename and dirname for a zero-length string are
5206 5206		 * defined to be "."
5207 5207 */
5208 5208 if (len == 0) {
5209 5209 len = 1;
5210 5210 src = (uintptr_t)".";
5211 5211 }
5212 5212
5213 5213 /*
5214 5214 * Start from the back of the string, moving back toward the
5215 5215 * front until we see a character that isn't a slash. That
5216 5216 * character is the last character in the basename.
5217 5217 */
5218 5218 for (i = len - 1; i >= 0; i--) {
5219 5219 if (dtrace_load8(src + i) != '/')
5220 5220 break;
5221 5221 }
5222 5222
5223 5223 if (i >= 0)
5224 5224 lastbase = i;
5225 5225
5226 5226 /*
5227 5227 * Starting from the last character in the basename, move
5228 5228 * towards the front until we find a slash. The character
5229 5229 * that we processed immediately before that is the first
5230 5230 * character in the basename.
5231 5231 */
5232 5232 for (; i >= 0; i--) {
5233 5233 if (dtrace_load8(src + i) == '/')
5234 5234 break;
5235 5235 }
5236 5236
5237 5237 if (i >= 0)
5238 5238 firstbase = i + 1;
5239 5239
5240 5240 /*
5241 5241 * Now keep going until we find a non-slash character. That
5242 5242 * character is the last character in the dirname.
5243 5243 */
5244 5244 for (; i >= 0; i--) {
5245 5245 if (dtrace_load8(src + i) != '/')
5246 5246 break;
5247 5247 }
5248 5248
5249 5249 if (i >= 0)
5250 5250 lastdir = i;
5251 5251
5252 5252 ASSERT(!(lastbase == -1 && firstbase != -1));
5253 5253 ASSERT(!(firstbase == -1 && lastdir != -1));
5254 5254
5255 5255 if (lastbase == -1) {
5256 5256 /*
5257 5257 * We didn't find a non-slash character. We know that
5258 5258 * the length is non-zero, so the whole string must be
5259 5259 * slashes. In either the dirname or the basename
5260 5260 * case, we return '/'.
5261 5261 */
5262 5262 ASSERT(firstbase == -1);
5263 5263 firstbase = lastbase = lastdir = 0;
5264 5264 }
5265 5265
5266 5266 if (firstbase == -1) {
5267 5267 /*
5268 5268 * The entire string consists only of a basename
5269 5269 * component. If we're looking for dirname, we need
5270 5270 * to change our string to be just "."; if we're
5271 5271 * looking for a basename, we'll just set the first
5272 5272 * character of the basename to be 0.
5273 5273 */
5274 5274 if (subr == DIF_SUBR_DIRNAME) {
5275 5275 ASSERT(lastdir == -1);
5276 5276 src = (uintptr_t)".";
5277 5277 lastdir = 0;
5278 5278 } else {
5279 5279 firstbase = 0;
5280 5280 }
5281 5281 }
5282 5282
5283 5283 if (subr == DIF_SUBR_DIRNAME) {
5284 5284 if (lastdir == -1) {
5285 5285 /*
5286 5286 * We know that we have a slash in the name --
5287 5287 * or lastdir would be set to 0, above. And
5288 5288 * because lastdir is -1, we know that this
5289 5289 * slash must be the first character. (That
5290 5290 * is, the full string must be of the form
5291 5291 * "/basename".) In this case, the last
5292 5292 * character of the directory name is 0.
5293 5293 */
5294 5294 lastdir = 0;
5295 5295 }
5296 5296
5297 5297 start = 0;
5298 5298 end = lastdir;
5299 5299 } else {
5300 5300 ASSERT(subr == DIF_SUBR_BASENAME);
5301 5301 ASSERT(firstbase != -1 && lastbase != -1);
5302 5302 start = firstbase;
5303 5303 end = lastbase;
5304 5304 }
5305 5305
5306 5306 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5307 5307 dest[j] = dtrace_load8(src + i);
5308 5308
5309 5309 dest[j] = '\0';
5310 5310 regs[rd] = (uintptr_t)dest;
5311 5311 mstate->dtms_scratch_ptr += size;
5312 5312 break;
5313 5313 }
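A worked example of the index bookkeeping above, for src = "/foo/bar//" (len == 10):

	index:      0 1 2 3 4 5 6 7 8 9
	character:  / f o o / b a r / /

	lastbase  = 7	(last non-slash, scanning from the end)
	firstbase = 5	(first character after the preceding slash)
	lastdir   = 3	(last non-slash before that run of slashes)

	basename copies [firstbase .. lastbase] -> "bar"
	dirname  copies [0 .. lastdir]          -> "/foo"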
5314 5314
5315 5315 case DIF_SUBR_GETF: {
5316 5316 uintptr_t fd = tupregs[0].dttk_value;
5317 5317 uf_info_t *finfo = &curthread->t_procp->p_user.u_finfo;
5318 5318 file_t *fp;
5319 5319
5320 5320 if (!dtrace_priv_proc(state, mstate)) {
5321 5321 regs[rd] = NULL;
5322 5322 break;
5323 5323 }
5324 5324
5325 5325 /*
5326 5326 * This is safe because fi_nfiles only increases, and the
5327 5327 * fi_list array is not freed when the array size doubles.
5328 5328 * (See the comment in flist_grow() for details on the
5329 5329 * management of the u_finfo structure.)
5330 5330 */
5331 5331 fp = fd < finfo->fi_nfiles ? finfo->fi_list[fd].uf_file : NULL;
5332 5332
5333 5333 mstate->dtms_getf = fp;
5334 5334 regs[rd] = (uintptr_t)fp;
5335 5335 break;
5336 5336 }
5337 5337
5338 5338 case DIF_SUBR_CLEANPATH: {
5339 5339 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5340 5340 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5341 5341 uintptr_t src = tupregs[0].dttk_value;
5342 5342 size_t lim;
5343 5343 int i = 0, j = 0;
5344 5344 zone_t *z;
5345 5345
5346 5346 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5347 5347 regs[rd] = NULL;
5348 5348 break;
5349 5349 }
5350 5350
5351 5351 if (!DTRACE_INSCRATCH(mstate, size)) {
5352 5352 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5353 5353 regs[rd] = NULL;
5354 5354 break;
5355 5355 }
5356 5356
5357 5357 /*
5358 5358 * Move forward, loading each character.
5359 5359 */
5360 5360 do {
5361 5361 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5362 5362 next:
5363 5363 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
5364 5364 break;
5365 5365
5366 5366 if (c != '/') {
5367 5367 dest[j++] = c;
5368 5368 continue;
5369 5369 }
5370 5370
5371 5371 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5372 5372
5373 5373 if (c == '/') {
5374 5374 /*
5375 5375 * We have two slashes -- we can just advance
5376 5376 * to the next character.
5377 5377 */
5378 5378 goto next;
5379 5379 }
5380 5380
5381 5381 if (c != '.') {
5382 5382 /*
5383 5383 * This is not "." and it's not ".." -- we can
5384 5384 * just store the "/" and this character and
5385 5385 * drive on.
5386 5386 */
5387 5387 dest[j++] = '/';
5388 5388 dest[j++] = c;
5389 5389 continue;
5390 5390 }
5391 5391
5392 5392 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5393 5393
5394 5394 if (c == '/') {
5395 5395 /*
5396 5396 * This is a "/./" component. We're not going
5397 5397 * to store anything in the destination buffer;
5398 5398 * we're just going to go to the next component.
5399 5399 */
5400 5400 goto next;
5401 5401 }
5402 5402
5403 5403 if (c != '.') {
5404 5404 /*
5405 5405 * This is not ".." -- we can just store the
5406 5406 * "/." and this character and continue
5407 5407 * processing.
5408 5408 */
5409 5409 dest[j++] = '/';
5410 5410 dest[j++] = '.';
5411 5411 dest[j++] = c;
5412 5412 continue;
5413 5413 }
5414 5414
5415 5415 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5416 5416
5417 5417 if (c != '/' && c != '\0') {
5418 5418 /*
5419 5419 * This is not ".." -- it's "..[mumble]".
5420 5420 * We'll store the "/.." and this character
5421 5421 * and continue processing.
5422 5422 */
5423 5423 dest[j++] = '/';
5424 5424 dest[j++] = '.';
5425 5425 dest[j++] = '.';
5426 5426 dest[j++] = c;
5427 5427 continue;
5428 5428 }
5429 5429
5430 5430 /*
5431 5431 * This is "/../" or "/..\0". We need to back up
5432 5432 * our destination pointer until we find a "/".
5433 5433 */
5434 5434 i--;
5435 5435 while (j != 0 && dest[--j] != '/')
5436 5436 continue;
5437 5437
5438 5438 if (c == '\0')
5439 5439 dest[++j] = '/';
5440 5440 } while (c != '\0');
5441 5441
5442 5442 dest[j] = '\0';
5443 5443
5444 5444 if (mstate->dtms_getf != NULL &&
5445 5445 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
5446 5446 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
5447 5447 /*
5448 5448 * If we've done a getf() as a part of this ECB and we
5449 5449 * don't have kernel access (and we're not in the global
5450 5450 * zone), check if the path we cleaned up begins with
5451 5451 * the zone's root path, and trim it off if so. Note
5452 5452 * that this is an output cleanliness issue, not a
5453 5453 * security issue: knowing one's zone root path does
5454 5454 * not enable privilege escalation.
5455 5455 */
5456 5456 if (strstr(dest, z->zone_rootpath) == dest)
5457 5457 dest += strlen(z->zone_rootpath) - 1;
5458 5458 }
5459 5459
5460 5460 regs[rd] = (uintptr_t)dest;
5461 5461 mstate->dtms_scratch_ptr += size;
5462 5462 break;
5463 5463 }
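Worked examples of the scrubbing loop above, derived by tracing the state machine (this is the behaviour the D cleanpath() subroutine exposes):

	cleanpath("/foo/../bar")  ->  "/bar"	"/../" pops the preceding component
	cleanpath("//a/./b")      ->  "/a/b"	"//" and "/./" collapse to "/"
	cleanpath("/foo/..")      ->  "/"	a trailing "/.." leaves only the root slash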
5464 5464
5465 5465 case DIF_SUBR_INET_NTOA:
5466 5466 case DIF_SUBR_INET_NTOA6:
5467 5467 case DIF_SUBR_INET_NTOP: {
5468 5468 size_t size;
5469 5469 int af, argi, i;
5470 5470 char *base, *end;
5471 5471
5472 5472 if (subr == DIF_SUBR_INET_NTOP) {
5473 5473 af = (int)tupregs[0].dttk_value;
5474 5474 argi = 1;
5475 5475 } else {
5476 5476 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
5477 5477 argi = 0;
5478 5478 }
5479 5479
5480 5480 if (af == AF_INET) {
5481 5481 ipaddr_t ip4;
5482 5482 uint8_t *ptr8, val;
5483 5483
5484 5484 if (!dtrace_canload(tupregs[argi].dttk_value,
5485 5485 sizeof (ipaddr_t), mstate, vstate)) {
5486 5486 regs[rd] = NULL;
5487 5487 break;
5488 5488 }
5489 5489
5490 5490 /*
5491 5491 * Safely load the IPv4 address.
5492 5492 */
5493 5493 ip4 = dtrace_load32(tupregs[argi].dttk_value);
5494 5494
5495 5495 /*
5496 5496 * Check an IPv4 string will fit in scratch.
5497 5497 */
5498 5498 size = INET_ADDRSTRLEN;
5499 5499 if (!DTRACE_INSCRATCH(mstate, size)) {
5500 5500 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5501 5501 regs[rd] = NULL;
5502 5502 break;
5503 5503 }
5504 5504 base = (char *)mstate->dtms_scratch_ptr;
5505 5505 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5506 5506
5507 5507 /*
5508 5508 * Stringify as a dotted decimal quad.
5509 5509 */
5510 5510 *end-- = '\0';
5511 5511 ptr8 = (uint8_t *)&ip4;
5512 5512 for (i = 3; i >= 0; i--) {
5513 5513 val = ptr8[i];
5514 5514
5515 5515 if (val == 0) {
5516 5516 *end-- = '0';
5517 5517 } else {
5518 5518 for (; val; val /= 10) {
5519 5519 *end-- = '0' + (val % 10);
5520 5520 }
5521 5521 }
5522 5522
5523 5523 if (i > 0)
5524 5524 *end-- = '.';
5525 5525 }
5526 5526 ASSERT(end + 1 >= base);
5527 5527
5528 5528 } else if (af == AF_INET6) {
5529 5529 struct in6_addr ip6;
5530 5530 int firstzero, tryzero, numzero, v6end;
5531 5531 uint16_t val;
5532 5532 const char digits[] = "0123456789abcdef";
5533 5533
5534 5534 /*
5535 5535 * Stringify using RFC 1884 convention 2 - 16 bit
5536 5536 * hexadecimal values with a zero-run compression.
5537 5537 * Lower case hexadecimal digits are used.
5538 5538 * eg, fe80::214:4fff:fe0b:76c8.
5539 5539 * The IPv4 embedded form is returned for inet_ntop,
5540 5540 * just the IPv4 string is returned for inet_ntoa6.
5541 5541 */
5542 5542
5543 5543 if (!dtrace_canload(tupregs[argi].dttk_value,
5544 5544 sizeof (struct in6_addr), mstate, vstate)) {
5545 5545 regs[rd] = NULL;
5546 5546 break;
5547 5547 }
5548 5548
5549 5549 /*
5550 5550 * Safely load the IPv6 address.
5551 5551 */
5552 5552 dtrace_bcopy(
5553 5553 (void *)(uintptr_t)tupregs[argi].dttk_value,
5554 5554 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5555 5555
5556 5556 /*
5557 5557 * Check an IPv6 string will fit in scratch.
5558 5558 */
5559 5559 size = INET6_ADDRSTRLEN;
5560 5560 if (!DTRACE_INSCRATCH(mstate, size)) {
5561 5561 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5562 5562 regs[rd] = NULL;
5563 5563 break;
5564 5564 }
5565 5565 base = (char *)mstate->dtms_scratch_ptr;
5566 5566 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5567 5567 *end-- = '\0';
5568 5568
5569 5569 /*
5570 5570 * Find the longest run of 16 bit zero values
5571 5571 * for the single allowed zero compression - "::".
5572 5572 */
5573 5573 firstzero = -1;
5574 5574 tryzero = -1;
5575 5575 numzero = 1;
5576 5576 for (i = 0; i < sizeof (struct in6_addr); i++) {
5577 5577 if (ip6._S6_un._S6_u8[i] == 0 &&
5578 5578 tryzero == -1 && i % 2 == 0) {
5579 5579 tryzero = i;
5580 5580 continue;
5581 5581 }
5582 5582
5583 5583 if (tryzero != -1 &&
5584 5584 (ip6._S6_un._S6_u8[i] != 0 ||
5585 5585 i == sizeof (struct in6_addr) - 1)) {
5586 5586
5587 5587 if (i - tryzero <= numzero) {
5588 5588 tryzero = -1;
5589 5589 continue;
5590 5590 }
5591 5591
5592 5592 firstzero = tryzero;
5593 5593 numzero = i - i % 2 - tryzero;
5594 5594 tryzero = -1;
5595 5595
5596 5596 if (ip6._S6_un._S6_u8[i] == 0 &&
5597 5597 i == sizeof (struct in6_addr) - 1)
5598 5598 numzero += 2;
5599 5599 }
5600 5600 }
5601 5601 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
5602 5602
5603 5603 /*
5604 5604 * Check for an IPv4 embedded address.
5605 5605 */
5606 5606 v6end = sizeof (struct in6_addr) - 2;
5607 5607 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5608 5608 IN6_IS_ADDR_V4COMPAT(&ip6)) {
5609 5609 for (i = sizeof (struct in6_addr) - 1;
5610 5610 i >= DTRACE_V4MAPPED_OFFSET; i--) {
5611 5611 ASSERT(end >= base);
5612 5612
5613 5613 val = ip6._S6_un._S6_u8[i];
5614 5614
5615 5615 if (val == 0) {
5616 5616 *end-- = '0';
5617 5617 } else {
5618 5618 for (; val; val /= 10) {
5619 5619 *end-- = '0' + val % 10;
5620 5620 }
5621 5621 }
5622 5622
5623 5623 if (i > DTRACE_V4MAPPED_OFFSET)
5624 5624 *end-- = '.';
5625 5625 }
5626 5626
5627 5627 if (subr == DIF_SUBR_INET_NTOA6)
5628 5628 goto inetout;
5629 5629
5630 5630 /*
5631 5631 * Set v6end to skip the IPv4 address that
5632 5632 * we have already stringified.
5633 5633 */
5634 5634 v6end = 10;
5635 5635 }
5636 5636
5637 5637 /*
5638 5638 * Build the IPv6 string by working through the
5639 5639 * address in reverse.
5640 5640 */
5641 5641 for (i = v6end; i >= 0; i -= 2) {
5642 5642 ASSERT(end >= base);
5643 5643
5644 5644 if (i == firstzero + numzero - 2) {
5645 5645 *end-- = ':';
5646 5646 *end-- = ':';
5647 5647 i -= numzero - 2;
5648 5648 continue;
5649 5649 }
5650 5650
5651 5651 if (i < 14 && i != firstzero - 2)
5652 5652 *end-- = ':';
5653 5653
5654 5654 val = (ip6._S6_un._S6_u8[i] << 8) +
5655 5655 ip6._S6_un._S6_u8[i + 1];
5656 5656
5657 5657 if (val == 0) {
5658 5658 *end-- = '0';
5659 5659 } else {
5660 5660 for (; val; val /= 16) {
5661 5661 *end-- = digits[val % 16];
5662 5662 }
5663 5663 }
5664 5664 }
5665 5665 ASSERT(end + 1 >= base);
5666 5666
5667 5667 } else {
5668 5668 /*
5669 5669				 * The user didn't use AF_INET or AF_INET6.
5670 5670 */
5671 5671 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5672 5672 regs[rd] = NULL;
5673 5673 break;
5674 5674 }
5675 5675
5676 5676 inetout: regs[rd] = (uintptr_t)end + 1;
5677 5677 mstate->dtms_scratch_ptr += size;
5678 5678 break;
5679 5679 }
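A minimal user-space sketch of the right-to-left dotted-quad construction used in the AF_INET case above; ntoa_sketch() is an illustrative name, and the address bytes are assumed to be in network byte order, as they are when loaded from an ipaddr_t.

#include <stddef.h>
#include <stdint.h>

static const char *
ntoa_sketch(const uint8_t ip[4], char *buf, size_t size)
{
	char *end = buf + size - 1;
	uint8_t val;
	int i;

	*end-- = '\0';
	for (i = 3; i >= 0; i--) {		/* last octet is written first */
		val = ip[i];
		if (val == 0) {
			*end-- = '0';
		} else {
			for (; val; val /= 10)
				*end-- = '0' + (val % 10);
		}
		if (i > 0)
			*end-- = '.';
	}
	return (end + 1);
}

/* ntoa_sketch((const uint8_t[]){ 192, 168, 0, 1 }, buf, 16) -> "192.168.0.1" */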
5680 5680
5681 5681 }
5682 5682 }
5683 5683
5684 5684 /*
5685 5685 * Emulate the execution of DTrace IR instructions specified by the given
5686 5686 * DIF object. This function is deliberately void of assertions as all of
5687 5687 * the necessary checks are handled by a call to dtrace_difo_validate().
5688 5688 */
5689 5689 static uint64_t
5690 5690 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5691 5691 dtrace_vstate_t *vstate, dtrace_state_t *state)
5692 5692 {
5693 5693 const dif_instr_t *text = difo->dtdo_buf;
5694 5694 const uint_t textlen = difo->dtdo_len;
5695 5695 const char *strtab = difo->dtdo_strtab;
5696 5696 const uint64_t *inttab = difo->dtdo_inttab;
5697 5697
5698 5698 uint64_t rval = 0;
5699 5699 dtrace_statvar_t *svar;
5700 5700 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5701 5701 dtrace_difv_t *v;
5702 5702 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5703 5703 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
5704 5704
5705 5705 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5706 5706 uint64_t regs[DIF_DIR_NREGS];
5707 5707 uint64_t *tmp;
5708 5708
5709 5709 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5710 5710 int64_t cc_r;
5711 5711 uint_t pc = 0, id, opc;
5712 5712 uint8_t ttop = 0;
5713 5713 dif_instr_t instr;
5714 5714 uint_t r1, r2, rd;
5715 5715
5716 5716 /*
5717 5717 * We stash the current DIF object into the machine state: we need it
5718 5718 * for subsequent access checking.
5719 5719 */
5720 5720 mstate->dtms_difo = difo;
5721 5721
5722 5722 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
5723 5723
5724 5724 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5725 5725 opc = pc;
5726 5726
5727 5727 instr = text[pc++];
5728 5728 r1 = DIF_INSTR_R1(instr);
5729 5729 r2 = DIF_INSTR_R2(instr);
5730 5730 rd = DIF_INSTR_RD(instr);
5731 5731
5732 5732 switch (DIF_INSTR_OP(instr)) {
5733 5733 case DIF_OP_OR:
5734 5734 regs[rd] = regs[r1] | regs[r2];
5735 5735 break;
5736 5736 case DIF_OP_XOR:
5737 5737 regs[rd] = regs[r1] ^ regs[r2];
5738 5738 break;
5739 5739 case DIF_OP_AND:
5740 5740 regs[rd] = regs[r1] & regs[r2];
5741 5741 break;
5742 5742 case DIF_OP_SLL:
5743 5743 regs[rd] = regs[r1] << regs[r2];
5744 5744 break;
5745 5745 case DIF_OP_SRL:
5746 5746 regs[rd] = regs[r1] >> regs[r2];
5747 5747 break;
5748 5748 case DIF_OP_SUB:
5749 5749 regs[rd] = regs[r1] - regs[r2];
5750 5750 break;
5751 5751 case DIF_OP_ADD:
5752 5752 regs[rd] = regs[r1] + regs[r2];
5753 5753 break;
5754 5754 case DIF_OP_MUL:
5755 5755 regs[rd] = regs[r1] * regs[r2];
5756 5756 break;
5757 5757 case DIF_OP_SDIV:
5758 5758 if (regs[r2] == 0) {
5759 5759 regs[rd] = 0;
5760 5760 *flags |= CPU_DTRACE_DIVZERO;
5761 5761 } else {
5762 5762 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5763 5763 regs[rd] = (int64_t)regs[r1] /
5764 5764 (int64_t)regs[r2];
5765 5765 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5766 5766 }
5767 5767 break;
5768 5768
5769 5769 case DIF_OP_UDIV:
5770 5770 if (regs[r2] == 0) {
5771 5771 regs[rd] = 0;
5772 5772 *flags |= CPU_DTRACE_DIVZERO;
5773 5773 } else {
5774 5774 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5775 5775 regs[rd] = regs[r1] / regs[r2];
5776 5776 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5777 5777 }
5778 5778 break;
5779 5779
5780 5780 case DIF_OP_SREM:
5781 5781 if (regs[r2] == 0) {
5782 5782 regs[rd] = 0;
5783 5783 *flags |= CPU_DTRACE_DIVZERO;
5784 5784 } else {
5785 5785 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5786 5786 regs[rd] = (int64_t)regs[r1] %
5787 5787 (int64_t)regs[r2];
5788 5788 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5789 5789 }
5790 5790 break;
5791 5791
5792 5792 case DIF_OP_UREM:
5793 5793 if (regs[r2] == 0) {
5794 5794 regs[rd] = 0;
5795 5795 *flags |= CPU_DTRACE_DIVZERO;
5796 5796 } else {
5797 5797 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5798 5798 regs[rd] = regs[r1] % regs[r2];
5799 5799 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5800 5800 }
5801 5801 break;
5802 5802
5803 5803 case DIF_OP_NOT:
5804 5804 regs[rd] = ~regs[r1];
5805 5805 break;
5806 5806 case DIF_OP_MOV:
5807 5807 regs[rd] = regs[r1];
5808 5808 break;
5809 5809 case DIF_OP_CMP:
5810 5810 cc_r = regs[r1] - regs[r2];
5811 5811 cc_n = cc_r < 0;
5812 5812 cc_z = cc_r == 0;
5813 5813 cc_v = 0;
5814 5814 cc_c = regs[r1] < regs[r2];
5815 5815 break;
5816 5816 case DIF_OP_TST:
5817 5817 cc_n = cc_v = cc_c = 0;
5818 5818 cc_z = regs[r1] == 0;
5819 5819 break;
5820 5820 case DIF_OP_BA:
5821 5821 pc = DIF_INSTR_LABEL(instr);
5822 5822 break;
5823 5823 case DIF_OP_BE:
5824 5824 if (cc_z)
5825 5825 pc = DIF_INSTR_LABEL(instr);
5826 5826 break;
5827 5827 case DIF_OP_BNE:
5828 5828 if (cc_z == 0)
5829 5829 pc = DIF_INSTR_LABEL(instr);
5830 5830 break;
5831 5831 case DIF_OP_BG:
5832 5832 if ((cc_z | (cc_n ^ cc_v)) == 0)
5833 5833 pc = DIF_INSTR_LABEL(instr);
5834 5834 break;
5835 5835 case DIF_OP_BGU:
5836 5836 if ((cc_c | cc_z) == 0)
5837 5837 pc = DIF_INSTR_LABEL(instr);
5838 5838 break;
5839 5839 case DIF_OP_BGE:
5840 5840 if ((cc_n ^ cc_v) == 0)
5841 5841 pc = DIF_INSTR_LABEL(instr);
5842 5842 break;
5843 5843 case DIF_OP_BGEU:
5844 5844 if (cc_c == 0)
5845 5845 pc = DIF_INSTR_LABEL(instr);
5846 5846 break;
5847 5847 case DIF_OP_BL:
5848 5848 if (cc_n ^ cc_v)
5849 5849 pc = DIF_INSTR_LABEL(instr);
5850 5850 break;
5851 5851 case DIF_OP_BLU:
5852 5852 if (cc_c)
5853 5853 pc = DIF_INSTR_LABEL(instr);
5854 5854 break;
5855 5855 case DIF_OP_BLE:
5856 5856 if (cc_z | (cc_n ^ cc_v))
5857 5857 pc = DIF_INSTR_LABEL(instr);
5858 5858 break;
5859 5859 case DIF_OP_BLEU:
5860 5860 if (cc_c | cc_z)
5861 5861 pc = DIF_INSTR_LABEL(instr);
5862 5862 break;
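The branch opcodes above behave like classic RISC condition codes: DIF_OP_CMP records negative, zero and carry bits, the signed branches test cc_n ^ cc_v, and the unsigned branches test cc_c. A small stand-alone illustration of how the same operands satisfy an unsigned "greater than" but not a signed one (the flag names are copied from the emulator; the program itself is only a sketch):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t r1 = (uint64_t)-1, r2 = 1;	/* compare -1 with 1 */
	int64_t cc_r = (int64_t)(r1 - r2);
	int cc_n = cc_r < 0;			/* negative */
	int cc_z = cc_r == 0;			/* zero */
	int cc_v = 0;				/* overflow: always 0 in DIF_OP_CMP */
	int cc_c = r1 < r2;			/* unsigned borrow */

	/* DIF_OP_BG (signed >) is taken when (cc_z | (cc_n ^ cc_v)) == 0. */
	printf("signed   -1 > 1: %s\n",
	    (cc_z | (cc_n ^ cc_v)) == 0 ? "taken" : "not taken");

	/* DIF_OP_BGU (unsigned >) is taken when (cc_c | cc_z) == 0. */
	printf("unsigned -1 > 1: %s\n",
	    (cc_c | cc_z) == 0 ? "taken" : "not taken");
	return (0);
}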
5863 5863 case DIF_OP_RLDSB:
5864 5864 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5865 5865 break;
5866 5866 /*FALLTHROUGH*/
5867 5867 case DIF_OP_LDSB:
5868 5868 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5869 5869 break;
5870 5870 case DIF_OP_RLDSH:
5871 5871 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5872 5872 break;
5873 5873 /*FALLTHROUGH*/
5874 5874 case DIF_OP_LDSH:
5875 5875 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5876 5876 break;
5877 5877 case DIF_OP_RLDSW:
5878 5878 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5879 5879 break;
5880 5880 /*FALLTHROUGH*/
5881 5881 case DIF_OP_LDSW:
5882 5882 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5883 5883 break;
5884 5884 case DIF_OP_RLDUB:
5885 5885 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5886 5886 break;
5887 5887 /*FALLTHROUGH*/
5888 5888 case DIF_OP_LDUB:
5889 5889 regs[rd] = dtrace_load8(regs[r1]);
5890 5890 break;
5891 5891 case DIF_OP_RLDUH:
5892 5892 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5893 5893 break;
5894 5894 /*FALLTHROUGH*/
5895 5895 case DIF_OP_LDUH:
5896 5896 regs[rd] = dtrace_load16(regs[r1]);
5897 5897 break;
5898 5898 case DIF_OP_RLDUW:
5899 5899 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5900 5900 break;
5901 5901 /*FALLTHROUGH*/
5902 5902 case DIF_OP_LDUW:
5903 5903 regs[rd] = dtrace_load32(regs[r1]);
5904 5904 break;
5905 5905 case DIF_OP_RLDX:
5906 5906 if (!dtrace_canload(regs[r1], 8, mstate, vstate))
5907 5907 break;
5908 5908 /*FALLTHROUGH*/
5909 5909 case DIF_OP_LDX:
5910 5910 regs[rd] = dtrace_load64(regs[r1]);
5911 5911 break;
5912 5912 case DIF_OP_ULDSB:
5913 5913 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5914 5914 regs[rd] = (int8_t)
5915 5915 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5916 5916 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5917 5917 break;
5918 5918 case DIF_OP_ULDSH:
5919 5919 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5920 5920 regs[rd] = (int16_t)
5921 5921 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5922 5922 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5923 5923 break;
5924 5924 case DIF_OP_ULDSW:
5925 5925 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5926 5926 regs[rd] = (int32_t)
5927 5927 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5928 5928 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5929 5929 break;
5930 5930 case DIF_OP_ULDUB:
5931 5931 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5932 5932 regs[rd] =
5933 5933 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5934 5934 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5935 5935 break;
5936 5936 case DIF_OP_ULDUH:
5937 5937 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5938 5938 regs[rd] =
5939 5939 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5940 5940 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5941 5941 break;
5942 5942 case DIF_OP_ULDUW:
5943 5943 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5944 5944 regs[rd] =
5945 5945 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5946 5946 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5947 5947 break;
5948 5948 case DIF_OP_ULDX:
5949 5949 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5950 5950 regs[rd] =
5951 5951 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5952 5952 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5953 5953 break;
5954 5954 case DIF_OP_RET:
5955 5955 rval = regs[rd];
5956 5956 pc = textlen;
5957 5957 break;
5958 5958 case DIF_OP_NOP:
5959 5959 break;
5960 5960 case DIF_OP_SETX:
5961 5961 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5962 5962 break;
5963 5963 case DIF_OP_SETS:
5964 5964 regs[rd] = (uint64_t)(uintptr_t)
5965 5965 (strtab + DIF_INSTR_STRING(instr));
5966 5966 break;
5967 5967 case DIF_OP_SCMP: {
5968 5968 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5969 5969 uintptr_t s1 = regs[r1];
5970 5970 uintptr_t s2 = regs[r2];
5971 5971 size_t lim1, lim2;
5972 5972
5973 5973 if (s1 != NULL &&
5974 5974 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate))
5975 5975 break;
5976 5976 if (s2 != NULL &&
5977 5977 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate))
5978 5978 break;
5979 5979
5980 5980 cc_r = dtrace_strncmp((char *)s1, (char *)s2,
5981 5981 MIN(lim1, lim2));
5982 5982
5983 5983 cc_n = cc_r < 0;
5984 5984 cc_z = cc_r == 0;
5985 5985 cc_v = cc_c = 0;
5986 5986 break;
5987 5987 }
5988 5988 case DIF_OP_LDGA:
5989 5989 regs[rd] = dtrace_dif_variable(mstate, state,
5990 5990 r1, regs[r2]);
5991 5991 break;
5992 5992 case DIF_OP_LDGS:
5993 5993 id = DIF_INSTR_VAR(instr);
5994 5994
5995 5995 if (id >= DIF_VAR_OTHER_UBASE) {
5996 5996 uintptr_t a;
5997 5997
5998 5998 id -= DIF_VAR_OTHER_UBASE;
5999 5999 svar = vstate->dtvs_globals[id];
6000 6000 ASSERT(svar != NULL);
6001 6001 v = &svar->dtsv_var;
6002 6002
6003 6003 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6004 6004 regs[rd] = svar->dtsv_data;
6005 6005 break;
6006 6006 }
6007 6007
6008 6008 a = (uintptr_t)svar->dtsv_data;
6009 6009
6010 6010 if (*(uint8_t *)a == UINT8_MAX) {
6011 6011 /*
6012 6012 * If the 0th byte is set to UINT8_MAX
6013 6013 * then this is to be treated as a
6014 6014 * reference to a NULL variable.
6015 6015 */
6016 6016 regs[rd] = NULL;
6017 6017 } else {
6018 6018 regs[rd] = a + sizeof (uint64_t);
6019 6019 }
6020 6020
6021 6021 break;
6022 6022 }
6023 6023
6024 6024 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6025 6025 break;
6026 6026
6027 6027 case DIF_OP_STGS:
6028 6028 id = DIF_INSTR_VAR(instr);
6029 6029
6030 6030 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6031 6031 id -= DIF_VAR_OTHER_UBASE;
6032 6032
6033 6033 VERIFY(id < vstate->dtvs_nglobals);
6034 6034 svar = vstate->dtvs_globals[id];
6035 6035 ASSERT(svar != NULL);
6036 6036 v = &svar->dtsv_var;
6037 6037
6038 6038 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6039 6039 uintptr_t a = (uintptr_t)svar->dtsv_data;
6040 6040 size_t lim;
6041 6041
6042 6042 ASSERT(a != NULL);
6043 6043 ASSERT(svar->dtsv_size != 0);
6044 6044
6045 6045 if (regs[rd] == NULL) {
6046 6046 *(uint8_t *)a = UINT8_MAX;
6047 6047 break;
6048 6048 } else {
6049 6049 *(uint8_t *)a = 0;
6050 6050 a += sizeof (uint64_t);
6051 6051 }
6052 6052 if (!dtrace_vcanload(
6053 6053 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6054 6054 &lim, mstate, vstate))
6055 6055 break;
6056 6056
6057 6057 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6058 6058 (void *)a, &v->dtdv_type, lim);
6059 6059 break;
6060 6060 }
6061 6061
6062 6062 svar->dtsv_data = regs[rd];
6063 6063 break;
6064 6064
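For the by-ref loads and stores above, the UINT8_MAX convention is effectively a one-byte "is NULL" flag kept ahead of the payload, with the payload itself starting sizeof (uint64_t) bytes in so that the data stays 8-byte aligned. A minimal sketch of that layout; the struct and function names are illustrative, and the fixed 256-byte payload stands in for dtdv_type.dtdt_size.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* One by-ref static variable: NULL flag, padding, then the payload. */
typedef struct byref_slot {
	uint8_t	brs_isnull;		/* UINT8_MAX => the variable is NULL */
	uint8_t	brs_pad[7];		/* payload begins sizeof (uint64_t) in */
	char	brs_data[256];		/* stands in for dtdv_type.dtdt_size */
} byref_slot_t;

static void
slot_store(byref_slot_t *s, const char *src, size_t len)
{
	if (src == NULL) {
		s->brs_isnull = UINT8_MAX;	/* storing NULL sets the flag */
		return;
	}
	s->brs_isnull = 0;
	memcpy(s->brs_data, src, len);		/* cf. dtrace_vcopy() above */
}

static const char *
slot_load(const byref_slot_t *s)
{
	return (s->brs_isnull == UINT8_MAX ? NULL : s->brs_data);
}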
6065 6065 case DIF_OP_LDTA:
6066 6066 /*
6067 6067 * There are no DTrace built-in thread-local arrays at
6068 6068 * present. This opcode is saved for future work.
6069 6069 */
6070 6070 *flags |= CPU_DTRACE_ILLOP;
6071 6071 regs[rd] = 0;
6072 6072 break;
6073 6073
6074 6074 case DIF_OP_LDLS:
6075 6075 id = DIF_INSTR_VAR(instr);
6076 6076
6077 6077 if (id < DIF_VAR_OTHER_UBASE) {
6078 6078 /*
6079 6079 * For now, this has no meaning.
6080 6080 */
6081 6081 regs[rd] = 0;
6082 6082 break;
6083 6083 }
6084 6084
6085 6085 id -= DIF_VAR_OTHER_UBASE;
6086 6086
6087 6087 ASSERT(id < vstate->dtvs_nlocals);
6088 6088 ASSERT(vstate->dtvs_locals != NULL);
6089 6089
6090 6090 svar = vstate->dtvs_locals[id];
6091 6091 ASSERT(svar != NULL);
6092 6092 v = &svar->dtsv_var;
6093 6093
6094 6094 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6095 6095 uintptr_t a = (uintptr_t)svar->dtsv_data;
6096 6096 size_t sz = v->dtdv_type.dtdt_size;
6097 6097
6098 6098 sz += sizeof (uint64_t);
6099 6099 ASSERT(svar->dtsv_size == NCPU * sz);
6100 6100 a += CPU->cpu_id * sz;
6101 6101
6102 6102 if (*(uint8_t *)a == UINT8_MAX) {
6103 6103 /*
6104 6104 * If the 0th byte is set to UINT8_MAX
6105 6105 * then this is to be treated as a
6106 6106 * reference to a NULL variable.
6107 6107 */
6108 6108 regs[rd] = NULL;
6109 6109 } else {
6110 6110 regs[rd] = a + sizeof (uint64_t);
6111 6111 }
6112 6112
6113 6113 break;
6114 6114 }
6115 6115
6116 6116 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6117 6117 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6118 6118 regs[rd] = tmp[CPU->cpu_id];
6119 6119 break;
6120 6120
6121 6121 case DIF_OP_STLS:
6122 6122 id = DIF_INSTR_VAR(instr);
6123 6123
6124 6124 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6125 6125 id -= DIF_VAR_OTHER_UBASE;
6126 6126 VERIFY(id < vstate->dtvs_nlocals);
6127 6127
6128 6128 ASSERT(vstate->dtvs_locals != NULL);
6129 6129 svar = vstate->dtvs_locals[id];
6130 6130 ASSERT(svar != NULL);
6131 6131 v = &svar->dtsv_var;
6132 6132
6133 6133 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6134 6134 uintptr_t a = (uintptr_t)svar->dtsv_data;
6135 6135 size_t sz = v->dtdv_type.dtdt_size;
6136 6136 size_t lim;
6137 6137
6138 6138 sz += sizeof (uint64_t);
6139 6139 ASSERT(svar->dtsv_size == NCPU * sz);
6140 6140 a += CPU->cpu_id * sz;
6141 6141
6142 6142 if (regs[rd] == NULL) {
6143 6143 *(uint8_t *)a = UINT8_MAX;
6144 6144 break;
6145 6145 } else {
6146 6146 *(uint8_t *)a = 0;
6147 6147 a += sizeof (uint64_t);
6148 6148 }
6149 6149
6150 6150 if (!dtrace_vcanload(
6151 6151 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6152 6152 &lim, mstate, vstate))
6153 6153 break;
6154 6154
6155 6155 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6156 6156 (void *)a, &v->dtdv_type, lim);
6157 6157 break;
6158 6158 }
6159 6159
6160 6160 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6161 6161 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6162 6162 tmp[CPU->cpu_id] = regs[rd];
6163 6163 break;
6164 6164
6165 6165 case DIF_OP_LDTS: {
6166 6166 dtrace_dynvar_t *dvar;
6167 6167 dtrace_key_t *key;
6168 6168
6169 6169 id = DIF_INSTR_VAR(instr);
6170 6170 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6171 6171 id -= DIF_VAR_OTHER_UBASE;
6172 6172 v = &vstate->dtvs_tlocals[id];
6173 6173
6174 6174 key = &tupregs[DIF_DTR_NREGS];
6175 6175 key[0].dttk_value = (uint64_t)id;
6176 6176 key[0].dttk_size = 0;
6177 6177 DTRACE_TLS_THRKEY(key[1].dttk_value);
6178 6178 key[1].dttk_size = 0;
6179 6179
6180 6180 dvar = dtrace_dynvar(dstate, 2, key,
6181 6181 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6182 6182 mstate, vstate);
6183 6183
6184 6184 if (dvar == NULL) {
6185 6185 regs[rd] = 0;
6186 6186 break;
6187 6187 }
6188 6188
6189 6189 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6190 6190 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6191 6191 } else {
6192 6192 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6193 6193 }
6194 6194
6195 6195 break;
6196 6196 }
6197 6197
6198 6198 case DIF_OP_STTS: {
6199 6199 dtrace_dynvar_t *dvar;
6200 6200 dtrace_key_t *key;
6201 6201
6202 6202 id = DIF_INSTR_VAR(instr);
6203 6203 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6204 6204 id -= DIF_VAR_OTHER_UBASE;
6205 6205 VERIFY(id < vstate->dtvs_ntlocals);
6206 6206
6207 6207 key = &tupregs[DIF_DTR_NREGS];
6208 6208 key[0].dttk_value = (uint64_t)id;
6209 6209 key[0].dttk_size = 0;
6210 6210 DTRACE_TLS_THRKEY(key[1].dttk_value);
6211 6211 key[1].dttk_size = 0;
6212 6212 v = &vstate->dtvs_tlocals[id];
6213 6213
6214 6214 dvar = dtrace_dynvar(dstate, 2, key,
6215 6215 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6216 6216 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6217 6217 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6218 6218 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6219 6219
6220 6220 /*
6221 6221 * Given that we're storing to thread-local data,
6222 6222 * we need to flush our predicate cache.
6223 6223 */
6224 6224 curthread->t_predcache = NULL;
6225 6225
6226 6226 if (dvar == NULL)
6227 6227 break;
6228 6228
6229 6229 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6230 6230 size_t lim;
6231 6231
6232 6232 if (!dtrace_vcanload(
6233 6233 (void *)(uintptr_t)regs[rd],
6234 6234 &v->dtdv_type, &lim, mstate, vstate))
6235 6235 break;
6236 6236
6237 6237 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6238 6238 dvar->dtdv_data, &v->dtdv_type, lim);
6239 6239 } else {
6240 6240 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6241 6241 }
6242 6242
6243 6243 break;
6244 6244 }
6245 6245
6246 6246 case DIF_OP_SRA:
6247 6247 regs[rd] = (int64_t)regs[r1] >> regs[r2];
6248 6248 break;
6249 6249
6250 6250 case DIF_OP_CALL:
6251 6251 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6252 6252 regs, tupregs, ttop, mstate, state);
6253 6253 break;
6254 6254
6255 6255 case DIF_OP_PUSHTR:
6256 6256 if (ttop == DIF_DTR_NREGS) {
6257 6257 *flags |= CPU_DTRACE_TUPOFLOW;
6258 6258 break;
6259 6259 }
6260 6260
6261 6261 if (r1 == DIF_TYPE_STRING) {
6262 6262 /*
6263 6263 * If this is a string type and the size is 0,
6264 6264 * we'll use the system-wide default string
6265 6265 * size. Note that we are _not_ looking at
6266 6266 * the value of the DTRACEOPT_STRSIZE option;
6267 6267 * had this been set, we would expect to have
6268 6268 * a non-zero size value in the "pushtr".
6269 6269 */
6270 6270 tupregs[ttop].dttk_size =
6271 6271 dtrace_strlen((char *)(uintptr_t)regs[rd],
6272 6272 regs[r2] ? regs[r2] :
6273 6273 dtrace_strsize_default) + 1;
6274 6274 } else {
6275 6275 if (regs[r2] > LONG_MAX) {
6276 6276 *flags |= CPU_DTRACE_ILLOP;
6277 6277 break;
6278 6278 }
6279 6279
6280 6280 tupregs[ttop].dttk_size = regs[r2];
6281 6281 }
6282 6282
6283 6283 tupregs[ttop++].dttk_value = regs[rd];
6284 6284 break;
6285 6285
6286 6286 case DIF_OP_PUSHTV:
6287 6287 if (ttop == DIF_DTR_NREGS) {
6288 6288 *flags |= CPU_DTRACE_TUPOFLOW;
6289 6289 break;
6290 6290 }
6291 6291
6292 6292 tupregs[ttop].dttk_value = regs[rd];
6293 6293 tupregs[ttop++].dttk_size = 0;
6294 6294 break;
6295 6295
6296 6296 case DIF_OP_POPTS:
6297 6297 if (ttop != 0)
6298 6298 ttop--;
6299 6299 break;
6300 6300
6301 6301 case DIF_OP_FLUSHTS:
6302 6302 ttop = 0;
6303 6303 break;
6304 6304
6305 6305 case DIF_OP_LDGAA:
6306 6306 case DIF_OP_LDTAA: {
6307 6307 dtrace_dynvar_t *dvar;
6308 6308 dtrace_key_t *key = tupregs;
6309 6309 uint_t nkeys = ttop;
6310 6310
6311 6311 id = DIF_INSTR_VAR(instr);
6312 6312 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6313 6313 id -= DIF_VAR_OTHER_UBASE;
6314 6314
6315 6315 key[nkeys].dttk_value = (uint64_t)id;
6316 6316 key[nkeys++].dttk_size = 0;
6317 6317
6318 6318 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6319 6319 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6320 6320 key[nkeys++].dttk_size = 0;
6321 6321 VERIFY(id < vstate->dtvs_ntlocals);
6322 6322 v = &vstate->dtvs_tlocals[id];
6323 6323 } else {
6324 6324 VERIFY(id < vstate->dtvs_nglobals);
6325 6325 v = &vstate->dtvs_globals[id]->dtsv_var;
6326 6326 }
6327 6327
6328 6328 dvar = dtrace_dynvar(dstate, nkeys, key,
6329 6329 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6330 6330 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6331 6331 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6332 6332
6333 6333 if (dvar == NULL) {
6334 6334 regs[rd] = 0;
6335 6335 break;
6336 6336 }
6337 6337
6338 6338 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6339 6339 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6340 6340 } else {
6341 6341 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6342 6342 }
6343 6343
6344 6344 break;
6345 6345 }
6346 6346
6347 6347 case DIF_OP_STGAA:
6348 6348 case DIF_OP_STTAA: {
6349 6349 dtrace_dynvar_t *dvar;
6350 6350 dtrace_key_t *key = tupregs;
6351 6351 uint_t nkeys = ttop;
6352 6352
6353 6353 id = DIF_INSTR_VAR(instr);
6354 6354 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6355 6355 id -= DIF_VAR_OTHER_UBASE;
6356 6356
6357 6357 key[nkeys].dttk_value = (uint64_t)id;
6358 6358 key[nkeys++].dttk_size = 0;
6359 6359
6360 6360 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6361 6361 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6362 6362 key[nkeys++].dttk_size = 0;
6363 6363 VERIFY(id < vstate->dtvs_ntlocals);
6364 6364 v = &vstate->dtvs_tlocals[id];
6365 6365 } else {
6366 6366 VERIFY(id < vstate->dtvs_nglobals);
6367 6367 v = &vstate->dtvs_globals[id]->dtsv_var;
6368 6368 }
6369 6369
6370 6370 dvar = dtrace_dynvar(dstate, nkeys, key,
6371 6371 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6372 6372 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6373 6373 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6374 6374 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6375 6375
6376 6376 if (dvar == NULL)
6377 6377 break;
6378 6378
6379 6379 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6380 6380 size_t lim;
6381 6381
6382 6382 if (!dtrace_vcanload(
6383 6383 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6384 6384 &lim, mstate, vstate))
6385 6385 break;
6386 6386
6387 6387 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6388 6388 dvar->dtdv_data, &v->dtdv_type, lim);
6389 6389 } else {
6390 6390 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6391 6391 }
6392 6392
6393 6393 break;
6394 6394 }
6395 6395
6396 6396 case DIF_OP_ALLOCS: {
6397 6397 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6398 6398 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6399 6399
6400 6400 /*
6401 6401 * Rounding up the user allocation size could have
6402 6402 * overflowed large, bogus allocations (like -1ULL) to
6403 6403 * 0.
6404 6404 */
6405 6405 if (size < regs[r1] ||
6406 6406 !DTRACE_INSCRATCH(mstate, size)) {
6407 6407 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6408 6408 regs[rd] = NULL;
6409 6409 break;
6410 6410 }
6411 6411
6412 6412 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6413 6413 mstate->dtms_scratch_ptr += size;
6414 6414 regs[rd] = ptr;
6415 6415 break;
6416 6416 }
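The size check above guards against the rounding itself wrapping: when the scratch pointer is unaligned, adding an allocation request of -1ULL to the small alignment slop wraps to a tiny value, and the size < regs[r1] comparison catches exactly that case. A small stand-alone illustration (the constants are made up):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t scratch = 0x1003;			/* unaligned scratch pointer */
	uint64_t req = UINT64_MAX;			/* bogus allocation request */
	uint64_t ptr = (scratch + 7) & ~(uint64_t)7;	/* P2ROUNDUP(scratch, 8) */
	uint64_t size = ptr - scratch + req;		/* 5 + (2^64 - 1) wraps to 4 */

	if (size < req)
		printf("wrapped: size %llu < request, so the allocation is rejected\n",
		    (unsigned long long)size);
	return (0);
}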
6417 6417
6418 6418 case DIF_OP_COPYS:
6419 6419 if (!dtrace_canstore(regs[rd], regs[r2],
6420 6420 mstate, vstate)) {
6421 6421 *flags |= CPU_DTRACE_BADADDR;
6422 6422 *illval = regs[rd];
6423 6423 break;
6424 6424 }
6425 6425
6426 6426 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6427 6427 break;
6428 6428
6429 6429 dtrace_bcopy((void *)(uintptr_t)regs[r1],
6430 6430 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6431 6431 break;
6432 6432
6433 6433 case DIF_OP_STB:
6434 6434 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6435 6435 *flags |= CPU_DTRACE_BADADDR;
6436 6436 *illval = regs[rd];
6437 6437 break;
6438 6438 }
6439 6439 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6440 6440 break;
6441 6441
6442 6442 case DIF_OP_STH:
6443 6443 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6444 6444 *flags |= CPU_DTRACE_BADADDR;
6445 6445 *illval = regs[rd];
6446 6446 break;
6447 6447 }
6448 6448 if (regs[rd] & 1) {
6449 6449 *flags |= CPU_DTRACE_BADALIGN;
6450 6450 *illval = regs[rd];
6451 6451 break;
6452 6452 }
6453 6453 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6454 6454 break;
6455 6455
6456 6456 case DIF_OP_STW:
6457 6457 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6458 6458 *flags |= CPU_DTRACE_BADADDR;
6459 6459 *illval = regs[rd];
6460 6460 break;
6461 6461 }
6462 6462 if (regs[rd] & 3) {
6463 6463 *flags |= CPU_DTRACE_BADALIGN;
6464 6464 *illval = regs[rd];
6465 6465 break;
6466 6466 }
6467 6467 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6468 6468 break;
6469 6469
6470 6470 case DIF_OP_STX:
6471 6471 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6472 6472 *flags |= CPU_DTRACE_BADADDR;
6473 6473 *illval = regs[rd];
6474 6474 break;
6475 6475 }
6476 6476 if (regs[rd] & 7) {
6477 6477 *flags |= CPU_DTRACE_BADALIGN;
6478 6478 *illval = regs[rd];
6479 6479 break;
6480 6480 }
6481 6481 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6482 6482 break;
6483 6483 }
6484 6484 }
6485 6485
6486 6486 if (!(*flags & CPU_DTRACE_FAULT))
6487 6487 return (rval);
6488 6488
6489 6489 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6490 6490 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6491 6491
6492 6492 return (0);
6493 6493 }
6494 6494
6495 6495 static void
6496 6496 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6497 6497 {
6498 6498 dtrace_probe_t *probe = ecb->dte_probe;
6499 6499 dtrace_provider_t *prov = probe->dtpr_provider;
6500 6500 char c[DTRACE_FULLNAMELEN + 80], *str;
6501 6501 char *msg = "dtrace: breakpoint action at probe ";
6502 6502 char *ecbmsg = " (ecb ";
6503 6503 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6504 6504 uintptr_t val = (uintptr_t)ecb;
6505 6505 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6506 6506
6507 6507 if (dtrace_destructive_disallow)
6508 6508 return;
6509 6509
6510 6510 /*
6511 6511 * It's impossible to be taking action on the NULL probe.
6512 6512 */
6513 6513 ASSERT(probe != NULL);
6514 6514
6515 6515 /*
6516 6516 * This is a poor man's (destitute man's?) sprintf(): we want to
6517 6517 * print the provider name, module name, function name and name of
6518 6518 * the probe, along with the hex address of the ECB with the breakpoint
6519 6519 * action -- all of which we must place in the character buffer by
6520 6520 * hand.
6521 6521 */
6522 6522 while (*msg != '\0')
6523 6523 c[i++] = *msg++;
6524 6524
6525 6525 for (str = prov->dtpv_name; *str != '\0'; str++)
6526 6526 c[i++] = *str;
6527 6527 c[i++] = ':';
6528 6528
6529 6529 for (str = probe->dtpr_mod; *str != '\0'; str++)
6530 6530 c[i++] = *str;
6531 6531 c[i++] = ':';
6532 6532
6533 6533 for (str = probe->dtpr_func; *str != '\0'; str++)
6534 6534 c[i++] = *str;
6535 6535 c[i++] = ':';
6536 6536
6537 6537 for (str = probe->dtpr_name; *str != '\0'; str++)
6538 6538 c[i++] = *str;
6539 6539
6540 6540 while (*ecbmsg != '\0')
6541 6541 c[i++] = *ecbmsg++;
6542 6542
6543 6543 while (shift >= 0) {
6544 6544 mask = (uintptr_t)0xf << shift;
6545 6545
6546 6546 if (val >= ((uintptr_t)1 << shift))
6547 6547 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6548 6548 shift -= 4;
6549 6549 }
6550 6550
6551 6551 c[i++] = ')';
6552 6552 c[i] = '\0';
6553 6553
6554 6554 debug_enter(c);
6555 6555 }
6556 6556
6557 6557 static void
6558 6558 dtrace_action_panic(dtrace_ecb_t *ecb)
6559 6559 {
6560 6560 dtrace_probe_t *probe = ecb->dte_probe;
6561 6561
6562 6562 /*
6563 6563 * It's impossible to be taking action on the NULL probe.
6564 6564 */
6565 6565 ASSERT(probe != NULL);
6566 6566
6567 6567 if (dtrace_destructive_disallow)
6568 6568 return;
6569 6569
6570 6570 if (dtrace_panicked != NULL)
6571 6571 return;
6572 6572
6573 6573 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
6574 6574 return;
6575 6575
6576 6576 /*
6577 6577 * We won the right to panic. (We want to be sure that only one
6578 6578 * thread calls panic() from dtrace_probe(), and that panic() is
6579 6579 * called exactly once.)
6580 6580 */
6581 6581 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6582 6582 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6583 6583 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6584 6584 }
6585 6585
6586 6586 static void
6587 6587 dtrace_action_raise(uint64_t sig)
6588 6588 {
6589 6589 if (dtrace_destructive_disallow)
6590 6590 return;
6591 6591
6592 6592 if (sig >= NSIG) {
6593 6593 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6594 6594 return;
6595 6595 }
6596 6596
6597 6597 /*
6598 6598 * raise() has a queue depth of 1 -- we ignore all subsequent
6599 6599 * invocations of the raise() action.
6600 6600 */
6601 6601 if (curthread->t_dtrace_sig == 0)
6602 6602 curthread->t_dtrace_sig = (uint8_t)sig;
6603 6603
6604 6604 curthread->t_sig_check = 1;
6605 6605 aston(curthread);
6606 6606 }
6607 6607
6608 6608 static void
6609 6609 dtrace_action_stop(void)
6610 6610 {
6611 6611 if (dtrace_destructive_disallow)
6612 6612 return;
6613 6613
6614 6614 if (!curthread->t_dtrace_stop) {
6615 6615 curthread->t_dtrace_stop = 1;
6616 6616 curthread->t_sig_check = 1;
6617 6617 aston(curthread);
6618 6618 }
6619 6619 }
6620 6620
6621 6621 static void
6622 6622 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6623 6623 {
6624 6624 hrtime_t now;
6625 6625 volatile uint16_t *flags;
6626 6626 cpu_t *cpu = CPU;
6627 6627
6628 6628 if (dtrace_destructive_disallow)
6629 6629 return;
6630 6630
6631 6631 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
6632 6632
6633 6633 now = dtrace_gethrtime();
6634 6634
6635 6635 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6636 6636 /*
6637 6637 * We need to advance the mark to the current time.
6638 6638 */
6639 6639 cpu->cpu_dtrace_chillmark = now;
6640 6640 cpu->cpu_dtrace_chilled = 0;
6641 6641 }
6642 6642
6643 6643 /*
6644 6644 * Now check to see if the requested chill time would take us over
6645 6645 * the maximum amount of time allowed in the chill interval. (Or
6646 6646 * worse, if the calculation itself induces overflow.)
6647 6647 */
6648 6648 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6649 6649 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6650 6650 *flags |= CPU_DTRACE_ILLOP;
6651 6651 return;
6652 6652 }
6653 6653
6654 6654 while (dtrace_gethrtime() - now < val)
6655 6655 continue;
6656 6656
6657 6657 /*
6658 6658 * Normally, we assure that the value of the variable "timestamp" does
6659 6659 * not change within an ECB. The presence of chill() represents an
6660 6660 * exception to this rule, however.
6661 6661 */
6662 6662 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6663 6663 cpu->cpu_dtrace_chilled += val;
6664 6664 }
6665 6665
6666 6666 static void
6667 6667 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6668 6668 uint64_t *buf, uint64_t arg)
6669 6669 {
6670 6670 int nframes = DTRACE_USTACK_NFRAMES(arg);
6671 6671 int strsize = DTRACE_USTACK_STRSIZE(arg);
6672 6672 uint64_t *pcs = &buf[1], *fps;
6673 6673 char *str = (char *)&pcs[nframes];
6674 6674 int size, offs = 0, i, j;
6675 6675 size_t rem;
6676 6676 uintptr_t old = mstate->dtms_scratch_ptr, saved;
6677 6677 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6678 6678 char *sym;
6679 6679
6680 6680 /*
6681 6681 * Should be taking a faster path if string space has not been
6682 6682 * allocated.
6683 6683 */
6684 6684 ASSERT(strsize != 0);
6685 6685
6686 6686 /*
6687 6687 * We will first allocate some temporary space for the frame pointers.
6688 6688 */
6689 6689 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6690 6690 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6691 6691 (nframes * sizeof (uint64_t));
6692 6692
6693 6693 if (!DTRACE_INSCRATCH(mstate, size)) {
6694 6694 /*
6695 6695 * Not enough room for our frame pointers -- need to indicate
6696 6696 * that we ran out of scratch space.
6697 6697 */
6698 6698 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6699 6699 return;
6700 6700 }
6701 6701
6702 6702 mstate->dtms_scratch_ptr += size;
6703 6703 saved = mstate->dtms_scratch_ptr;
6704 6704
6705 6705 /*
6706 6706 * Now get a stack with both program counters and frame pointers.
6707 6707 */
6708 6708 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6709 6709 dtrace_getufpstack(buf, fps, nframes + 1);
6710 6710 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6711 6711
6712 6712 /*
6713 6713 * If that faulted, we're cooked.
6714 6714 */
6715 6715 if (*flags & CPU_DTRACE_FAULT)
6716 6716 goto out;
6717 6717
6718 6718 /*
6719 6719 * Now we want to walk up the stack, calling the USTACK helper. For
6720 6720 * each iteration, we restore the scratch pointer.
6721 6721 */
6722 6722 for (i = 0; i < nframes; i++) {
6723 6723 mstate->dtms_scratch_ptr = saved;
6724 6724
6725 6725 if (offs >= strsize)
6726 6726 break;
6727 6727
6728 6728 sym = (char *)(uintptr_t)dtrace_helper(
6729 6729 DTRACE_HELPER_ACTION_USTACK,
6730 6730 mstate, state, pcs[i], fps[i]);
6731 6731
6732 6732 /*
6733 6733 * If we faulted while running the helper, we're going to
6734 6734 * clear the fault and null out the corresponding string.
6735 6735 */
6736 6736 if (*flags & CPU_DTRACE_FAULT) {
6737 6737 *flags &= ~CPU_DTRACE_FAULT;
6738 6738 str[offs++] = '\0';
6739 6739 continue;
6740 6740 }
6741 6741
6742 6742 if (sym == NULL) {
6743 6743 str[offs++] = '\0';
6744 6744 continue;
6745 6745 }
6746 6746
6747 6747 if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate,
6748 6748 &(state->dts_vstate))) {
6749 6749 str[offs++] = '\0';
6750 6750 continue;
6751 6751 }
6752 6752
6753 6753 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6754 6754
6755 6755 /*
6756 6756 * Now copy in the string that the helper returned to us.
6757 6757 */
6758 6758 for (j = 0; offs + j < strsize && j < rem; j++) {
6759 6759 if ((str[offs + j] = sym[j]) == '\0')
6760 6760 break;
6761 6761 }
6762 6762
6763 6763 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6764 6764
6765 6765 offs += j + 1;
6766 6766 }
6767 6767
6768 6768 if (offs >= strsize) {
6769 6769 /*
6770 6770 * If we didn't have room for all of the strings, we don't
6771 6771 * abort processing -- this needn't be a fatal error -- but we
6772 6772 * still want to increment a counter (dts_stkstroverflows) to
6773 6773 * allow this condition to be warned about. (If this is from
6774 6774 * a jstack() action, it is easily tuned via jstackstrsize.)
6775 6775 */
6776 6776 dtrace_error(&state->dts_stkstroverflows);
6777 6777 }
6778 6778
6779 6779 while (offs < strsize)
6780 6780 str[offs++] = '\0';
6781 6781
6782 6782 out:
6783 6783 mstate->dtms_scratch_ptr = old;
6784 6784 }
6785 6785
6786 6786 static void
6787 6787 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6788 6788 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6789 6789 {
6790 6790 volatile uint16_t *flags;
6791 6791 uint64_t val = *valp;
6792 6792 size_t valoffs = *valoffsp;
6793 6793
6794 6794 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6795 6795 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6796 6796
6797 6797 /*
6798 6798 * If this is a string, we're going to only load until we find the zero
6799 6799 * byte -- after which we'll store zero bytes.
6800 6800 */
6801 6801 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6802 6802 char c = '\0' + 1;
6803 6803 size_t s;
6804 6804
6805 6805 for (s = 0; s < size; s++) {
6806 6806 if (c != '\0' && dtkind == DIF_TF_BYREF) {
6807 6807 c = dtrace_load8(val++);
6808 6808 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6809 6809 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6810 6810 c = dtrace_fuword8((void *)(uintptr_t)val++);
6811 6811 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6812 6812 if (*flags & CPU_DTRACE_FAULT)
6813 6813 break;
6814 6814 }
6815 6815
6816 6816 DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6817 6817
6818 6818 if (c == '\0' && intuple)
6819 6819 break;
6820 6820 }
6821 6821 } else {
6822 6822 uint8_t c;
6823 6823 while (valoffs < end) {
6824 6824 if (dtkind == DIF_TF_BYREF) {
6825 6825 c = dtrace_load8(val++);
6826 6826 } else if (dtkind == DIF_TF_BYUREF) {
6827 6827 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6828 6828 c = dtrace_fuword8((void *)(uintptr_t)val++);
6829 6829 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6830 6830 if (*flags & CPU_DTRACE_FAULT)
6831 6831 break;
6832 6832 }
6833 6833
6834 6834 DTRACE_STORE(uint8_t, tomax,
6835 6835 valoffs++, c);
6836 6836 }
6837 6837 }
6838 6838
6839 6839 *valp = val;
6840 6840 *valoffsp = valoffs;
6841 6841 }
6842 6842
6843 6843 /*
6844 6844 * If you're looking for the epicenter of DTrace, you just found it. This
6845 6845 * is the function called by the provider to fire a probe -- from which all
6846 6846 * subsequent probe-context DTrace activity emanates.
6847 6847 */
6848 6848 void
6849 6849 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
6850 6850 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
6851 6851 {
6852 6852 processorid_t cpuid;
6853 6853 dtrace_icookie_t cookie;
6854 6854 dtrace_probe_t *probe;
6855 6855 dtrace_mstate_t mstate;
6856 6856 dtrace_ecb_t *ecb;
6857 6857 dtrace_action_t *act;
6858 6858 intptr_t offs;
6859 6859 size_t size;
6860 6860 int vtime, onintr;
6861 6861 volatile uint16_t *flags;
6862 6862 hrtime_t now, end;
6863 6863
6864 6864 /*
6865 6865 * Kick out immediately if this CPU is still being born (in which case
6866 6866 * curthread will be set to -1) or the current thread can't allow
6867 6867 * probes in its current context.
6868 6868 */
6869 6869 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
6870 6870 return;
6871 6871
6872 6872 cookie = dtrace_interrupt_disable();
6873 6873 probe = dtrace_probes[id - 1];
6874 6874 cpuid = CPU->cpu_id;
6875 6875 onintr = CPU_ON_INTR(CPU);
6876 6876
6877 6877 CPU->cpu_dtrace_probes++;
6878 6878
6879 6879 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
6880 6880 probe->dtpr_predcache == curthread->t_predcache) {
6881 6881 /*
6882 6882 * We have hit in the predicate cache; we know that
6883 6883 * this predicate would evaluate to be false.
6884 6884 */
6885 6885 dtrace_interrupt_enable(cookie);
6886 6886 return;
6887 6887 }
6888 6888
6889 6889 if (panic_quiesce) {
6890 6890 /*
6891 6891 * We don't trace anything if we're panicking.
6892 6892 */
6893 6893 dtrace_interrupt_enable(cookie);
6894 6894 return;
6895 6895 }
6896 6896
6897 6897 now = mstate.dtms_timestamp = dtrace_gethrtime();
6898 6898 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
6899 6899 vtime = dtrace_vtime_references != 0;
6900 6900
6901 6901 if (vtime && curthread->t_dtrace_start)
6902 6902 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
6903 6903
6904 6904 mstate.dtms_difo = NULL;
6905 6905 mstate.dtms_probe = probe;
6906 6906 mstate.dtms_strtok = NULL;
6907 6907 mstate.dtms_arg[0] = arg0;
6908 6908 mstate.dtms_arg[1] = arg1;
6909 6909 mstate.dtms_arg[2] = arg2;
6910 6910 mstate.dtms_arg[3] = arg3;
6911 6911 mstate.dtms_arg[4] = arg4;
6912 6912
6913 6913 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6914 6914
6915 6915 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6916 6916 dtrace_predicate_t *pred = ecb->dte_predicate;
6917 6917 dtrace_state_t *state = ecb->dte_state;
6918 6918 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6919 6919 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6920 6920 dtrace_vstate_t *vstate = &state->dts_vstate;
6921 6921 dtrace_provider_t *prov = probe->dtpr_provider;
6922 6922 uint64_t tracememsize = 0;
6923 6923 int committed = 0;
6924 6924 caddr_t tomax;
6925 6925
6926 6926 /*
6927 6927 * A little subtlety with the following (seemingly innocuous)
6928 6928 * declaration of the automatic 'val': by looking at the
6929 6929 * code, you might think that it could be declared in the
6930 6930 * action processing loop, below. (That is, it's only used in
6931 6931 * the action processing loop.) However, it must be declared
6932 6932 * out of that scope because in the case of DIF expression
6933 6933 * arguments to aggregating actions, one iteration of the
6934 6934 * action loop will use the last iteration's value.
6935 6935 */
6936 6936 #ifdef lint
6937 6937 uint64_t val = 0;
6938 6938 #else
6939 6939 uint64_t val;
6940 6940 #endif
6941 6941
6942 6942 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6943 6943 mstate.dtms_access = DTRACE_ACCESS_ARGS | DTRACE_ACCESS_PROC;
6944 6944 mstate.dtms_getf = NULL;
6945 6945
6946 6946 *flags &= ~CPU_DTRACE_ERROR;
6947 6947
6948 6948 if (prov == dtrace_provider) {
6949 6949 /*
6950 6950 * If dtrace itself is the provider of this probe,
6951 6951 * we're only going to continue processing the ECB if
6952 6952 * arg0 (the dtrace_state_t) is equal to the ECB's
6953 6953 * creating state. (This prevents disjoint consumers
6954 6954 * from seeing one another's metaprobes.)
6955 6955 */
6956 6956 if (arg0 != (uint64_t)(uintptr_t)state)
6957 6957 continue;
6958 6958 }
6959 6959
6960 6960 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6961 6961 /*
6962 6962 * We're not currently active. If our provider isn't
6963 6963 * the dtrace pseudo provider, we're not interested.
6964 6964 */
6965 6965 if (prov != dtrace_provider)
6966 6966 continue;
6967 6967
6968 6968 /*
6969 6969 * Now we must further check if we are in the BEGIN
6970 6970 * probe. If we are, we will only continue processing
6971 6971 * if we're still in WARMUP -- if one BEGIN enabling
6972 6972 * has invoked the exit() action, we don't want to
6973 6973 * evaluate subsequent BEGIN enablings.
6974 6974 */
6975 6975 if (probe->dtpr_id == dtrace_probeid_begin &&
6976 6976 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
6977 6977 ASSERT(state->dts_activity ==
6978 6978 DTRACE_ACTIVITY_DRAINING);
6979 6979 continue;
6980 6980 }
6981 6981 }
6982 6982
6983 6983 if (ecb->dte_cond && !dtrace_priv_probe(state, &mstate, ecb))
6984 6984 continue;
6985 6985
6986 6986 if (now - state->dts_alive > dtrace_deadman_timeout) {
6987 6987 /*
6988 6988 * We seem to be dead. Unless we (a) have kernel
6989 6989 * destructive permissions, (b) have explicitly enabled
6990 6990 * destructive actions, and (c) destructive actions have
6991 6991 * not been disabled, we're going to transition into
6992 6992 * the KILLED state, from which no further processing
6993 6993 * on this state will be performed.
6994 6994 */
6995 6995 if (!dtrace_priv_kernel_destructive(state) ||
6996 6996 !state->dts_cred.dcr_destructive ||
6997 6997 dtrace_destructive_disallow) {
6998 6998 void *activity = &state->dts_activity;
6999 6999 dtrace_activity_t current;
7000 7000
7001 7001 do {
7002 7002 current = state->dts_activity;
7003 7003 } while (dtrace_cas32(activity, current,
7004 7004 DTRACE_ACTIVITY_KILLED) != current);
7005 7005
7006 7006 continue;
7007 7007 }
7008 7008 }
7009 7009
7010 7010 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7011 7011 ecb->dte_alignment, state, &mstate)) < 0)
7012 7012 continue;
7013 7013
7014 7014 tomax = buf->dtb_tomax;
7015 7015 ASSERT(tomax != NULL);
7016 7016
7017 7017 if (ecb->dte_size != 0) {
7018 7018 dtrace_rechdr_t dtrh;
7019 7019 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7020 7020 mstate.dtms_timestamp = dtrace_gethrtime();
7021 7021 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7022 7022 }
7023 7023 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
7024 7024 dtrh.dtrh_epid = ecb->dte_epid;
7025 7025 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
7026 7026 mstate.dtms_timestamp);
7027 7027 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
7028 7028 }
7029 7029
7030 7030 mstate.dtms_epid = ecb->dte_epid;
7031 7031 mstate.dtms_present |= DTRACE_MSTATE_EPID;
7032 7032
7033 7033 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7034 7034 mstate.dtms_access |= DTRACE_ACCESS_KERNEL;
7035 7035
7036 7036 if (pred != NULL) {
7037 7037 dtrace_difo_t *dp = pred->dtp_difo;
7038 7038 int rval;
7039 7039
7040 7040 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7041 7041
7042 7042 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7043 7043 dtrace_cacheid_t cid = probe->dtpr_predcache;
7044 7044
7045 7045 if (cid != DTRACE_CACHEIDNONE && !onintr) {
7046 7046 /*
7047 7047 * Update the predicate cache...
7048 7048 */
7049 7049 ASSERT(cid == pred->dtp_cacheid);
7050 7050 curthread->t_predcache = cid;
7051 7051 }
7052 7052
7053 7053 continue;
7054 7054 }
7055 7055 }
7056 7056
7057 7057 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7058 7058 act != NULL; act = act->dta_next) {
7059 7059 size_t valoffs;
7060 7060 dtrace_difo_t *dp;
7061 7061 dtrace_recdesc_t *rec = &act->dta_rec;
7062 7062
7063 7063 size = rec->dtrd_size;
7064 7064 valoffs = offs + rec->dtrd_offset;
7065 7065
7066 7066 if (DTRACEACT_ISAGG(act->dta_kind)) {
7067 7067 uint64_t v = 0xbad;
7068 7068 dtrace_aggregation_t *agg;
7069 7069
7070 7070 agg = (dtrace_aggregation_t *)act;
7071 7071
7072 7072 if ((dp = act->dta_difo) != NULL)
7073 7073 v = dtrace_dif_emulate(dp,
7074 7074 &mstate, vstate, state);
7075 7075
7076 7076 if (*flags & CPU_DTRACE_ERROR)
7077 7077 continue;
7078 7078
7079 7079 /*
7080 7080 * Note that we always pass the expression
7081 7081 * value from the previous iteration of the
7082 7082 * action loop. This value will only be used
7083 7083 * if there is an expression argument to the
7084 7084 * aggregating action, denoted by the
7085 7085 * dtag_hasarg field.
7086 7086 */
7087 7087 dtrace_aggregate(agg, buf,
7088 7088 offs, aggbuf, v, val);
7089 7089 continue;
7090 7090 }
7091 7091
7092 7092 switch (act->dta_kind) {
7093 7093 case DTRACEACT_STOP:
7094 7094 if (dtrace_priv_proc_destructive(state,
7095 7095 &mstate))
7096 7096 dtrace_action_stop();
7097 7097 continue;
7098 7098
7099 7099 case DTRACEACT_BREAKPOINT:
7100 7100 if (dtrace_priv_kernel_destructive(state))
7101 7101 dtrace_action_breakpoint(ecb);
7102 7102 continue;
7103 7103
7104 7104 case DTRACEACT_PANIC:
7105 7105 if (dtrace_priv_kernel_destructive(state))
7106 7106 dtrace_action_panic(ecb);
7107 7107 continue;
7108 7108
7109 7109 case DTRACEACT_STACK:
7110 7110 if (!dtrace_priv_kernel(state))
7111 7111 continue;
7112 7112
7113 7113 dtrace_getpcstack((pc_t *)(tomax + valoffs),
7114 7114 size / sizeof (pc_t), probe->dtpr_aframes,
7115 7115 DTRACE_ANCHORED(probe) ? NULL :
7116 7116 (uint32_t *)arg0);
7117 7117
7118 7118 continue;
7119 7119
7120 7120 case DTRACEACT_JSTACK:
7121 7121 case DTRACEACT_USTACK:
7122 7122 if (!dtrace_priv_proc(state, &mstate))
7123 7123 continue;
7124 7124
7125 7125 /*
7126 7126 * See comment in DIF_VAR_PID.
7127 7127 */
7128 7128 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7129 7129 CPU_ON_INTR(CPU)) {
7130 7130 int depth = DTRACE_USTACK_NFRAMES(
7131 7131 rec->dtrd_arg) + 1;
7132 7132
7133 7133 dtrace_bzero((void *)(tomax + valoffs),
7134 7134 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7135 7135 + depth * sizeof (uint64_t));
7136 7136
7137 7137 continue;
7138 7138 }
7139 7139
7140 7140 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7141 7141 curproc->p_dtrace_helpers != NULL) {
7142 7142 /*
7143 7143 * This is the slow path -- we have
7144 7144 * allocated string space, and we're
7145 7145 * getting the stack of a process that
7146 7146 * has helpers. Call into a separate
7147 7147 * routine to perform this processing.
7148 7148 */
7149 7149 dtrace_action_ustack(&mstate, state,
7150 7150 (uint64_t *)(tomax + valoffs),
7151 7151 rec->dtrd_arg);
7152 7152 continue;
7153 7153 }
7154 7154
7155 7155 /*
7156 7156 * Clear the string space, since there's no
7157 7157 * helper to do it for us.
7158 7158 */
7159 7159 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0) {
7160 7160 int depth = DTRACE_USTACK_NFRAMES(
7161 7161 rec->dtrd_arg);
7162 7162 size_t strsize = DTRACE_USTACK_STRSIZE(
7163 7163 rec->dtrd_arg);
7164 7164 uint64_t *buf = (uint64_t *)(tomax +
7165 7165 valoffs);
7166 7166 void *strspace = &buf[depth + 1];
7167 7167
7168 7168 dtrace_bzero(strspace,
7169 7169 MIN(depth, strsize));
7170 7170 }
7171 7171
7172 7172 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7173 7173 dtrace_getupcstack((uint64_t *)
7174 7174 (tomax + valoffs),
7175 7175 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7176 7176 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7177 7177 continue;
7178 7178
7179 7179 default:
7180 7180 break;
7181 7181 }
7182 7182
7183 7183 dp = act->dta_difo;
7184 7184 ASSERT(dp != NULL);
7185 7185
7186 7186 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7187 7187
7188 7188 if (*flags & CPU_DTRACE_ERROR)
7189 7189 continue;
7190 7190
7191 7191 switch (act->dta_kind) {
7192 7192 case DTRACEACT_SPECULATE: {
7193 7193 dtrace_rechdr_t *dtrh;
7194 7194
7195 7195 ASSERT(buf == &state->dts_buffer[cpuid]);
7196 7196 buf = dtrace_speculation_buffer(state,
7197 7197 cpuid, val);
7198 7198
7199 7199 if (buf == NULL) {
7200 7200 *flags |= CPU_DTRACE_DROP;
7201 7201 continue;
7202 7202 }
7203 7203
7204 7204 offs = dtrace_buffer_reserve(buf,
7205 7205 ecb->dte_needed, ecb->dte_alignment,
7206 7206 state, NULL);
7207 7207
7208 7208 if (offs < 0) {
7209 7209 *flags |= CPU_DTRACE_DROP;
7210 7210 continue;
7211 7211 }
7212 7212
7213 7213 tomax = buf->dtb_tomax;
7214 7214 ASSERT(tomax != NULL);
7215 7215
7216 7216 if (ecb->dte_size == 0)
7217 7217 continue;
7218 7218
7219 7219 ASSERT3U(ecb->dte_size, >=,
7220 7220 sizeof (dtrace_rechdr_t));
7221 7221 dtrh = ((void *)(tomax + offs));
7222 7222 dtrh->dtrh_epid = ecb->dte_epid;
7223 7223 /*
7224 7224 * When the speculation is committed, all of
7225 7225 * the records in the speculative buffer will
7226 7226 * have their timestamps set to the commit
7227 7227 * time. Until then, it is set to a sentinel
7228 7228 * value, for debuggability.
7229 7229 */
7230 7230 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7231 7231 continue;
7232 7232 }
7233 7233
7234 7234 case DTRACEACT_CHILL:
7235 7235 if (dtrace_priv_kernel_destructive(state))
7236 7236 dtrace_action_chill(&mstate, val);
7237 7237 continue;
7238 7238
7239 7239 case DTRACEACT_RAISE:
7240 7240 if (dtrace_priv_proc_destructive(state,
7241 7241 &mstate))
7242 7242 dtrace_action_raise(val);
7243 7243 continue;
7244 7244
7245 7245 case DTRACEACT_COMMIT:
7246 7246 ASSERT(!committed);
7247 7247
7248 7248 /*
7249 7249 * We need to commit our buffer state.
7250 7250 */
7251 7251 if (ecb->dte_size)
7252 7252 buf->dtb_offset = offs + ecb->dte_size;
7253 7253 buf = &state->dts_buffer[cpuid];
7254 7254 dtrace_speculation_commit(state, cpuid, val);
7255 7255 committed = 1;
7256 7256 continue;
7257 7257
7258 7258 case DTRACEACT_DISCARD:
7259 7259 dtrace_speculation_discard(state, cpuid, val);
7260 7260 continue;
7261 7261
7262 7262 case DTRACEACT_DIFEXPR:
7263 7263 case DTRACEACT_LIBACT:
7264 7264 case DTRACEACT_PRINTF:
7265 7265 case DTRACEACT_PRINTA:
7266 7266 case DTRACEACT_SYSTEM:
7267 7267 case DTRACEACT_FREOPEN:
7268 7268 case DTRACEACT_TRACEMEM:
7269 7269 break;
7270 7270
7271 7271 case DTRACEACT_TRACEMEM_DYNSIZE:
7272 7272 tracememsize = val;
7273 7273 break;
7274 7274
7275 7275 case DTRACEACT_SYM:
7276 7276 case DTRACEACT_MOD:
7277 7277 if (!dtrace_priv_kernel(state))
7278 7278 continue;
7279 7279 break;
7280 7280
7281 7281 case DTRACEACT_USYM:
7282 7282 case DTRACEACT_UMOD:
7283 7283 case DTRACEACT_UADDR: {
7284 7284 struct pid *pid = curthread->t_procp->p_pidp;
7285 7285
7286 7286 if (!dtrace_priv_proc(state, &mstate))
7287 7287 continue;
7288 7288
7289 7289 DTRACE_STORE(uint64_t, tomax,
7290 7290 valoffs, (uint64_t)pid->pid_id);
7291 7291 DTRACE_STORE(uint64_t, tomax,
7292 7292 valoffs + sizeof (uint64_t), val);
7293 7293
7294 7294 continue;
7295 7295 }
7296 7296
7297 7297 case DTRACEACT_EXIT: {
7298 7298 /*
7299 7299 * For the exit action, we are going to attempt
7300 7300 * to atomically set our activity to be
7301 7301 * draining. If this fails (either because
7302 7302 * another CPU has beat us to the exit action,
7303 7303 * or because our current activity is something
7304 7304 * other than ACTIVE or WARMUP), we will
7305 7305 * continue. This assures that the exit action
7306 7306 * can be successfully recorded at most once
7307 7307 * when we're in the ACTIVE state. If we're
7308 7308 * encountering the exit() action while in
7309 7309 * COOLDOWN, however, we want to honor the new
7310 7310 * status code. (We know that we're the only
7311 7311 * thread in COOLDOWN, so there is no race.)
7312 7312 */
7313 7313 void *activity = &state->dts_activity;
7314 7314 dtrace_activity_t current = state->dts_activity;
7315 7315
7316 7316 if (current == DTRACE_ACTIVITY_COOLDOWN)
7317 7317 break;
7318 7318
7319 7319 if (current != DTRACE_ACTIVITY_WARMUP)
7320 7320 current = DTRACE_ACTIVITY_ACTIVE;
7321 7321
7322 7322 if (dtrace_cas32(activity, current,
7323 7323 DTRACE_ACTIVITY_DRAINING) != current) {
7324 7324 *flags |= CPU_DTRACE_DROP;
7325 7325 continue;
7326 7326 }
7327 7327
7328 7328 break;
7329 7329 }
7330 7330
7331 7331 default:
7332 7332 ASSERT(0);
7333 7333 }
7334 7334
7335 7335 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
7336 7336 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
7337 7337 uintptr_t end = valoffs + size;
7338 7338
7339 7339 if (tracememsize != 0 &&
7340 7340 valoffs + tracememsize < end) {
7341 7341 end = valoffs + tracememsize;
7342 7342 tracememsize = 0;
7343 7343 }
7344 7344
7345 7345 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7346 7346 !dtrace_vcanload((void *)(uintptr_t)val,
7347 7347 &dp->dtdo_rtype, NULL, &mstate, vstate))
7348 7348 continue;
7349 7349
7350 7350 dtrace_store_by_ref(dp, tomax, size, &valoffs,
7351 7351 &val, end, act->dta_intuple,
7352 7352 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7353 7353 DIF_TF_BYREF: DIF_TF_BYUREF);
7354 7354 continue;
7355 7355 }
7356 7356
7357 7357 switch (size) {
7358 7358 case 0:
7359 7359 break;
7360 7360
7361 7361 case sizeof (uint8_t):
7362 7362 DTRACE_STORE(uint8_t, tomax, valoffs, val);
7363 7363 break;
7364 7364 case sizeof (uint16_t):
7365 7365 DTRACE_STORE(uint16_t, tomax, valoffs, val);
7366 7366 break;
7367 7367 case sizeof (uint32_t):
7368 7368 DTRACE_STORE(uint32_t, tomax, valoffs, val);
7369 7369 break;
7370 7370 case sizeof (uint64_t):
7371 7371 DTRACE_STORE(uint64_t, tomax, valoffs, val);
7372 7372 break;
7373 7373 default:
7374 7374 /*
7375 7375 * Any other size should have been returned by
7376 7376 * reference, not by value.
7377 7377 */
7378 7378 ASSERT(0);
7379 7379 break;
7380 7380 }
7381 7381 }
7382 7382
7383 7383 if (*flags & CPU_DTRACE_DROP)
7384 7384 continue;
7385 7385
7386 7386 if (*flags & CPU_DTRACE_FAULT) {
7387 7387 int ndx;
7388 7388 dtrace_action_t *err;
7389 7389
7390 7390 buf->dtb_errors++;
7391 7391
7392 7392 if (probe->dtpr_id == dtrace_probeid_error) {
7393 7393 /*
7394 7394 * There's nothing we can do -- we had an
7395 7395 * error on the error probe. We bump an
7396 7396 * error counter to at least indicate that
7397 7397 * this condition happened.
7398 7398 */
7399 7399 dtrace_error(&state->dts_dblerrors);
7400 7400 continue;
7401 7401 }
7402 7402
7403 7403 if (vtime) {
7404 7404 /*
7405 7405 * Before recursing on dtrace_probe(), we
7406 7406 * need to explicitly clear out our start
7407 7407 * time to prevent it from being accumulated
7408 7408 * into t_dtrace_vtime.
7409 7409 */
7410 7410 curthread->t_dtrace_start = 0;
7411 7411 }
7412 7412
7413 7413 /*
7414 7414 * Iterate over the actions to figure out which action
7415 7415 * we were processing when we experienced the error.
7416 7416 * Note that act points _past_ the faulting action; if
7417 7417 * act is ecb->dte_action, the fault was in the
7418 7418 * predicate, if it's ecb->dte_action->dta_next it's
7419 7419 * in action #1, and so on.
7420 7420 */
7421 7421 for (err = ecb->dte_action, ndx = 0;
7422 7422 err != act; err = err->dta_next, ndx++)
7423 7423 continue;
7424 7424
7425 7425 dtrace_probe_error(state, ecb->dte_epid, ndx,
7426 7426 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7427 7427 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7428 7428 cpu_core[cpuid].cpuc_dtrace_illval);
7429 7429
7430 7430 continue;
7431 7431 }
7432 7432
7433 7433 if (!committed)
7434 7434 buf->dtb_offset = offs + ecb->dte_size;
7435 7435 }
7436 7436
7437 7437 end = dtrace_gethrtime();
7438 7438 if (vtime)
7439 7439 curthread->t_dtrace_start = end;
7440 7440
7441 7441 CPU->cpu_dtrace_nsec += end - now;
7442 7442
7443 7443 dtrace_interrupt_enable(cookie);
7444 7444 }
7445 7445
7446 7446 /*
7447 7447 * DTrace Probe Hashing Functions
7448 7448 *
7449 7449 * The functions in this section (and indeed, the functions in remaining
7450 7450 * sections) are not _called_ from probe context. (Any exceptions to this are
7451 7451 * marked with a "Note:".) Rather, they are called from elsewhere in the
7452 7452 * DTrace framework to look up probes in, add probes to, and remove probes from
7453 7453 * the DTrace probe hashes. (Each probe is hashed by each element of the
7454 7454 * probe tuple -- allowing for fast lookups, regardless of what was
7455 7455 * specified.)
7456 7456 */
7457 7457 static uint_t
7458 7458 dtrace_hash_str(char *p)
7459 7459 {
7460 7460 unsigned int g;
7461 7461 uint_t hval = 0;
7462 7462
7463 7463 while (*p) {
7464 7464 hval = (hval << 4) + *p++;
7465 7465 if ((g = (hval & 0xf0000000)) != 0)
7466 7466 hval ^= g >> 24;
7467 7467 hval &= ~g;
7468 7468 }
7469 7469 return (hval);
7470 7470 }
7471 7471
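This is the classic shift-and-fold string hash; a bucket is then chosen by masking the result with a power-of-two table size (dth_mask). A small user-space sketch reusing the same logic to show how probe-tuple components map to buckets (the table size here is arbitrary):

#include <stdio.h>

typedef unsigned int uint_t;

/* Same shift-and-fold hash as dtrace_hash_str() above. */
static uint_t
hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

int
main(void)
{
	const char *funcs[] = { "kmem_alloc", "kmem_free", "bcopy" };
	uint_t mask = 16 - 1;	/* a power-of-two table, as dth_mask always is */
	int i;

	for (i = 0; i < 3; i++)
		printf("%-12s -> bucket %u\n", funcs[i],
		    hash_str(funcs[i]) & mask);
	return (0);
}
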
7472 7472 static dtrace_hash_t *
7473 7473 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
7474 7474 {
7475 7475 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7476 7476
7477 7477 hash->dth_stroffs = stroffs;
7478 7478 hash->dth_nextoffs = nextoffs;
7479 7479 hash->dth_prevoffs = prevoffs;
7480 7480
7481 7481 hash->dth_size = 1;
7482 7482 hash->dth_mask = hash->dth_size - 1;
7483 7483
7484 7484 hash->dth_tab = kmem_zalloc(hash->dth_size *
7485 7485 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7486 7486
7487 7487 return (hash);
7488 7488 }
7489 7489
7490 7490 static void
7491 7491 dtrace_hash_destroy(dtrace_hash_t *hash)
7492 7492 {
7493 7493 #ifdef DEBUG
7494 7494 int i;
7495 7495
7496 7496 for (i = 0; i < hash->dth_size; i++)
7497 7497 ASSERT(hash->dth_tab[i] == NULL);
7498 7498 #endif
7499 7499
7500 7500 kmem_free(hash->dth_tab,
7501 7501 hash->dth_size * sizeof (dtrace_hashbucket_t *));
7502 7502 kmem_free(hash, sizeof (dtrace_hash_t));
7503 7503 }
7504 7504
7505 7505 static void
7506 7506 dtrace_hash_resize(dtrace_hash_t *hash)
7507 7507 {
7508 7508 int size = hash->dth_size, i, ndx;
7509 7509 int new_size = hash->dth_size << 1;
7510 7510 int new_mask = new_size - 1;
7511 7511 dtrace_hashbucket_t **new_tab, *bucket, *next;
7512 7512
7513 7513 ASSERT((new_size & new_mask) == 0);
7514 7514
7515 7515 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7516 7516
7517 7517 for (i = 0; i < size; i++) {
7518 7518 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
7519 7519 dtrace_probe_t *probe = bucket->dthb_chain;
7520 7520
7521 7521 ASSERT(probe != NULL);
7522 7522 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
7523 7523
7524 7524 next = bucket->dthb_next;
7525 7525 bucket->dthb_next = new_tab[ndx];
7526 7526 new_tab[ndx] = bucket;
7527 7527 }
7528 7528 }
7529 7529
7530 7530 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7531 7531 hash->dth_tab = new_tab;
7532 7532 hash->dth_size = new_size;
7533 7533 hash->dth_mask = new_mask;
7534 7534 }
7535 7535
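Because dth_size is always a power of two, doubling it means each chain lands either in its old slot or exactly old_size slots higher; nothing else moves. A quick standalone check of that property (the sizes below are chosen arbitrarily):

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	unsigned int old_size = 8, new_size = 16;
	unsigned int old_mask = old_size - 1, new_mask = new_size - 1;
	unsigned int hval;

	/*
	 * With a power-of-two table, doubling the size means every entry
	 * either stays in its old slot or moves exactly old_size slots up.
	 */
	for (hval = 0; hval < 1000; hval++) {
		unsigned int oldndx = hval & old_mask;
		unsigned int newndx = hval & new_mask;

		assert(newndx == oldndx || newndx == oldndx + old_size);
	}
	printf("redistribution property holds\n");
	return (0);
}
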
7536 7536 static void
7537 7537 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
7538 7538 {
7539 7539 int hashval = DTRACE_HASHSTR(hash, new);
7540 7540 int ndx = hashval & hash->dth_mask;
7541 7541 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7542 7542 dtrace_probe_t **nextp, **prevp;
7543 7543
7544 7544 for (; bucket != NULL; bucket = bucket->dthb_next) {
7545 7545 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7546 7546 goto add;
7547 7547 }
7548 7548
7549 7549 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7550 7550 dtrace_hash_resize(hash);
7551 7551 dtrace_hash_add(hash, new);
7552 7552 return;
7553 7553 }
7554 7554
7555 7555 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7556 7556 bucket->dthb_next = hash->dth_tab[ndx];
7557 7557 hash->dth_tab[ndx] = bucket;
7558 7558 hash->dth_nbuckets++;
7559 7559
7560 7560 add:
7561 7561 nextp = DTRACE_HASHNEXT(hash, new);
7562 7562 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7563 7563 *nextp = bucket->dthb_chain;
7564 7564
7565 7565 if (bucket->dthb_chain != NULL) {
7566 7566 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7567 7567 ASSERT(*prevp == NULL);
7568 7568 *prevp = new;
7569 7569 }
7570 7570
7571 7571 bucket->dthb_chain = new;
7572 7572 bucket->dthb_len++;
7573 7573 }
7574 7574
7575 7575 static dtrace_probe_t *
7576 7576 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
7577 7577 {
7578 7578 int hashval = DTRACE_HASHSTR(hash, template);
7579 7579 int ndx = hashval & hash->dth_mask;
7580 7580 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7581 7581
7582 7582 for (; bucket != NULL; bucket = bucket->dthb_next) {
7583 7583 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7584 7584 return (bucket->dthb_chain);
7585 7585 }
7586 7586
7587 7587 return (NULL);
7588 7588 }
7589 7589
7590 7590 static int
7591 7591 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
7592 7592 {
7593 7593 int hashval = DTRACE_HASHSTR(hash, template);
7594 7594 int ndx = hashval & hash->dth_mask;
7595 7595 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7596 7596
7597 7597 for (; bucket != NULL; bucket = bucket->dthb_next) {
7598 7598 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7599 7599 return (bucket->dthb_len);
7600 7600 }
7601 7601
7602 7602 return (0);
7603 7603 }
7604 7604
7605 7605 static void
7606 7606 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
7607 7607 {
7608 7608 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
7609 7609 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7610 7610
7611 7611 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
7612 7612 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
7613 7613
7614 7614 /*
7615 7615 * Find the bucket that we're removing this probe from.
7616 7616 */
7617 7617 for (; bucket != NULL; bucket = bucket->dthb_next) {
7618 7618 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
7619 7619 break;
7620 7620 }
7621 7621
7622 7622 ASSERT(bucket != NULL);
7623 7623
7624 7624 if (*prevp == NULL) {
7625 7625 if (*nextp == NULL) {
7626 7626 /*
7627 7627 * The removed probe was the only probe on this
7628 7628 * bucket; we need to remove the bucket.
7629 7629 */
7630 7630 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7631 7631
7632 7632 ASSERT(bucket->dthb_chain == probe);
7633 7633 ASSERT(b != NULL);
7634 7634
7635 7635 if (b == bucket) {
7636 7636 hash->dth_tab[ndx] = bucket->dthb_next;
7637 7637 } else {
7638 7638 while (b->dthb_next != bucket)
7639 7639 b = b->dthb_next;
7640 7640 b->dthb_next = bucket->dthb_next;
7641 7641 }
7642 7642
7643 7643 ASSERT(hash->dth_nbuckets > 0);
7644 7644 hash->dth_nbuckets--;
7645 7645 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
7646 7646 return;
7647 7647 }
7648 7648
7649 7649 bucket->dthb_chain = *nextp;
7650 7650 } else {
7651 7651 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
7652 7652 }
7653 7653
7654 7654 if (*nextp != NULL)
7655 7655 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
7656 7656 }
7657 7657
7658 7658 /*
7659 7659 * DTrace Utility Functions
7660 7660 *
7661 7661 * These are random utility functions that are _not_ called from probe context.
7662 7662 */
7663 7663 static int
7664 7664 dtrace_badattr(const dtrace_attribute_t *a)
7665 7665 {
7666 7666 return (a->dtat_name > DTRACE_STABILITY_MAX ||
7667 7667 a->dtat_data > DTRACE_STABILITY_MAX ||
7668 7668 a->dtat_class > DTRACE_CLASS_MAX);
7669 7669 }
7670 7670
7671 7671 /*
7672 7672 * Return a duplicate copy of a string. If the specified string is NULL,
7673 7673 * this function returns a zero-length string.
7674 7674 */
7675 7675 static char *
7676 7676 dtrace_strdup(const char *str)
7677 7677 {
7678 7678 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
7679 7679
7680 7680 if (str != NULL)
7681 7681 (void) strcpy(new, str);
7682 7682
7683 7683 return (new);
7684 7684 }
7685 7685
7686 7686 #define DTRACE_ISALPHA(c) \
7687 7687 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
7688 7688
7689 7689 static int
7690 7690 dtrace_badname(const char *s)
7691 7691 {
7692 7692 char c;
7693 7693
7694 7694 if (s == NULL || (c = *s++) == '\0')
7695 7695 return (0);
7696 7696
7697 7697 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
7698 7698 return (1);
7699 7699
7700 7700 while ((c = *s++) != '\0') {
7701 7701 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
7702 7702 c != '-' && c != '_' && c != '.' && c != '`')
7703 7703 return (1);
7704 7704 }
7705 7705
7706 7706 return (0);
7707 7707 }
7708 7708
7709 7709 static void
7710 7710 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
7711 7711 {
7712 7712 uint32_t priv;
7713 7713
7714 7714 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
7715 7715 /*
7716 7716 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
7717 7717 */
7718 7718 priv = DTRACE_PRIV_ALL;
7719 7719 } else {
7720 7720 *uidp = crgetuid(cr);
7721 7721 *zoneidp = crgetzoneid(cr);
7722 7722
7723 7723 priv = 0;
7724 7724 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
7725 7725 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
7726 7726 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
7727 7727 priv |= DTRACE_PRIV_USER;
7728 7728 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
7729 7729 priv |= DTRACE_PRIV_PROC;
7730 7730 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
7731 7731 priv |= DTRACE_PRIV_OWNER;
7732 7732 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
7733 7733 priv |= DTRACE_PRIV_ZONEOWNER;
7734 7734 }
7735 7735
7736 7736 *privp = priv;
7737 7737 }
7738 7738
7739 7739 #ifdef DTRACE_ERRDEBUG
7740 7740 static void
7741 7741 dtrace_errdebug(const char *str)
7742 7742 {
7743 7743 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
7744 7744 int occupied = 0;
7745 7745
7746 7746 mutex_enter(&dtrace_errlock);
7747 7747 dtrace_errlast = str;
7748 7748 dtrace_errthread = curthread;
7749 7749
7750 7750 while (occupied++ < DTRACE_ERRHASHSZ) {
7751 7751 if (dtrace_errhash[hval].dter_msg == str) {
7752 7752 dtrace_errhash[hval].dter_count++;
7753 7753 goto out;
7754 7754 }
7755 7755
7756 7756 if (dtrace_errhash[hval].dter_msg != NULL) {
7757 7757 hval = (hval + 1) % DTRACE_ERRHASHSZ;
7758 7758 continue;
7759 7759 }
7760 7760
7761 7761 dtrace_errhash[hval].dter_msg = str;
7762 7762 dtrace_errhash[hval].dter_count = 1;
7763 7763 goto out;
7764 7764 }
7765 7765
7766 7766 panic("dtrace: undersized error hash");
7767 7767 out:
7768 7768 mutex_exit(&dtrace_errlock);
7769 7769 }
7770 7770 #endif
7771 7771
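The error hash above is a small open-addressed table: each distinct message probes linearly from its home slot until it finds itself or an empty slot, and a full table is treated as fatal. A user-space sketch of the same probing scheme; the table size and stand-in hash are made up for illustration, but like the kernel version it keys on the message pointer:

#include <stdio.h>
#include <string.h>

#define	ERRHASHSZ	8	/* hypothetical; the kernel uses DTRACE_ERRHASHSZ */

static struct {
	const char	*msg;
	int		count;
} errhash[ERRHASHSZ];

/* Count an error message, probing linearly from its hash slot. */
static void
err_count(const char *msg)
{
	int hval = (int)(strlen(msg) % ERRHASHSZ);	/* stand-in hash */
	int occupied = 0;

	while (occupied++ < ERRHASHSZ) {
		if (errhash[hval].msg == msg) {		/* pointer identity */
			errhash[hval].count++;
			return;
		}
		if (errhash[hval].msg != NULL) {
			hval = (hval + 1) % ERRHASHSZ;	/* linear probe */
			continue;
		}
		errhash[hval].msg = msg;
		errhash[hval].count = 1;
		return;
	}
	/* A full table would be a panic in the kernel version. */
}

int
main(void)
{
	static const char *e1 = "invalid address";
	static const char *e2 = "divide-by-zero";
	int i;

	err_count(e1);
	err_count(e1);
	err_count(e2);

	for (i = 0; i < ERRHASHSZ; i++) {
		if (errhash[i].msg != NULL)
			printf("%-20s %d\n", errhash[i].msg, errhash[i].count);
	}
	return (0);
}
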
7772 7772 /*
7773 7773 * DTrace Matching Functions
7774 7774 *
7775 7775 * These functions are used to match groups of probes, given some elements of
7776 7776 * a probe tuple, or some globbed expressions for elements of a probe tuple.
7777 7777 */
7778 7778 static int
7779 7779 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7780 7780 zoneid_t zoneid)
7781 7781 {
7782 7782 if (priv != DTRACE_PRIV_ALL) {
7783 7783 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7784 7784 uint32_t match = priv & ppriv;
7785 7785
7786 7786 /*
7787 7787 * No PRIV_DTRACE_* privileges...
7788 7788 */
7789 7789 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7790 7790 DTRACE_PRIV_KERNEL)) == 0)
7791 7791 return (0);
7792 7792
7793 7793 /*
7794 7794 * No matching bits, but there were bits to match...
7795 7795 */
7796 7796 if (match == 0 && ppriv != 0)
7797 7797 return (0);
7798 7798
7799 7799 /*
7800 7800 * Need to have permissions to the process, but don't...
7801 7801 */
7802 7802 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7803 7803 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7804 7804 return (0);
7805 7805 }
7806 7806
7807 7807 /*
7808 7808 * Need to be in the same zone unless we possess the
7809 7809 * privilege to examine all zones.
7810 7810 */
7811 7811 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7812 7812 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7813 7813 return (0);
7814 7814 }
7815 7815 }
7816 7816
7817 7817 return (1);
7818 7818 }
7819 7819
7820 7820 /*
7821 7821 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7822 7822 * consists of input pattern strings and an ops-vector to evaluate them.
7823 7823 * This function returns >0 for match, 0 for no match, and <0 for error.
7824 7824 */
7825 7825 static int
7826 7826 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7827 7827 uint32_t priv, uid_t uid, zoneid_t zoneid)
7828 7828 {
7829 7829 dtrace_provider_t *pvp = prp->dtpr_provider;
7830 7830 int rv;
7831 7831
7832 7832 if (pvp->dtpv_defunct)
7833 7833 return (0);
7834 7834
7835 7835 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7836 7836 return (rv);
7837 7837
7838 7838 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7839 7839 return (rv);
7840 7840
7841 7841 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7842 7842 return (rv);
7843 7843
7844 7844 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7845 7845 return (rv);
7846 7846
7847 7847 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7848 7848 return (0);
7849 7849
7850 7850 return (rv);
7851 7851 }
7852 7852
7853 7853 /*
7854 7854 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7855 7855 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7856 7856 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7857 7857 * In addition, all of the recursion cases except for '*' matching have been
7858 7858 * unwound. For '*', we still implement recursive evaluation, but a depth
7859 7859 * counter is maintained and matching is aborted if we recurse too deep.
7860 7860 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7861 7861 */
7862 7862 static int
7863 7863 dtrace_match_glob(const char *s, const char *p, int depth)
7864 7864 {
7865 7865 const char *olds;
7866 7866 char s1, c;
7867 7867 int gs;
7868 7868
7869 7869 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7870 7870 return (-1);
7871 7871
7872 7872 if (s == NULL)
7873 7873 s = ""; /* treat NULL as empty string */
7874 7874
7875 7875 top:
7876 7876 olds = s;
7877 7877 s1 = *s++;
7878 7878
7879 7879 if (p == NULL)
7880 7880 return (0);
7881 7881
7882 7882 if ((c = *p++) == '\0')
7883 7883 return (s1 == '\0');
7884 7884
7885 7885 switch (c) {
7886 7886 case '[': {
7887 7887 int ok = 0, notflag = 0;
7888 7888 char lc = '\0';
7889 7889
7890 7890 if (s1 == '\0')
7891 7891 return (0);
7892 7892
7893 7893 if (*p == '!') {
7894 7894 notflag = 1;
7895 7895 p++;
7896 7896 }
7897 7897
7898 7898 if ((c = *p++) == '\0')
7899 7899 return (0);
7900 7900
7901 7901 do {
7902 7902 if (c == '-' && lc != '\0' && *p != ']') {
7903 7903 if ((c = *p++) == '\0')
7904 7904 return (0);
7905 7905 if (c == '\\' && (c = *p++) == '\0')
7906 7906 return (0);
7907 7907
7908 7908 if (notflag) {
7909 7909 if (s1 < lc || s1 > c)
7910 7910 ok++;
7911 7911 else
7912 7912 return (0);
7913 7913 } else if (lc <= s1 && s1 <= c)
7914 7914 ok++;
7915 7915
7916 7916 } else if (c == '\\' && (c = *p++) == '\0')
7917 7917 return (0);
7918 7918
7919 7919 lc = c; /* save left-hand 'c' for next iteration */
7920 7920
7921 7921 if (notflag) {
7922 7922 if (s1 != c)
7923 7923 ok++;
7924 7924 else
7925 7925 return (0);
7926 7926 } else if (s1 == c)
7927 7927 ok++;
7928 7928
7929 7929 if ((c = *p++) == '\0')
7930 7930 return (0);
7931 7931
7932 7932 } while (c != ']');
7933 7933
7934 7934 if (ok)
7935 7935 goto top;
7936 7936
7937 7937 return (0);
7938 7938 }
7939 7939
7940 7940 case '\\':
7941 7941 if ((c = *p++) == '\0')
7942 7942 return (0);
7943 7943 /*FALLTHRU*/
7944 7944
7945 7945 default:
7946 7946 if (c != s1)
7947 7947 return (0);
7948 7948 /*FALLTHRU*/
7949 7949
7950 7950 case '?':
7951 7951 if (s1 != '\0')
7952 7952 goto top;
7953 7953 return (0);
7954 7954
7955 7955 case '*':
7956 7956 while (*p == '*')
7957 7957 p++; /* consecutive *'s are identical to a single one */
7958 7958
7959 7959 if (*p == '\0')
7960 7960 return (1);
7961 7961
7962 7962 for (s = olds; *s != '\0'; s++) {
7963 7963 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7964 7964 return (gs);
7965 7965 }
7966 7966
7967 7967 return (0);
7968 7968 }
7969 7969 }
7970 7970
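The matcher above accepts the familiar sh(1) glob syntax -- '*', '?', bracketed character classes, and backslash escapes -- over 8-bit ASCII, returning >0 on a match or <0 if '*' recursion exceeds DTRACE_PROBEKEY_MAXDEPTH. For a feel of the pattern language, user-level fnmatch(3C) accepts essentially the same syntax, though its return sense is inverted (0 means match); a quick sketch with probe-component-style strings:

#include <fnmatch.h>
#include <stdio.h>

int
main(void)
{
	/* pattern, string pairs in the style of probe-tuple components */
	const char *cases[][2] = {
		{ "kmem_*",		"kmem_alloc" },
		{ "tcp_[rw]put",	"tcp_rput" },
		{ "read?",		"readv" },
		{ "kmem_*",		"bcopy" },
	};
	int i;

	for (i = 0; i < 4; i++) {
		printf("%-12s vs %-12s -> %s\n", cases[i][0], cases[i][1],
		    fnmatch(cases[i][0], cases[i][1], 0) == 0 ?
		    "match" : "no match");
	}
	return (0);
}
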
7971 7971 /*ARGSUSED*/
7972 7972 static int
7973 7973 dtrace_match_string(const char *s, const char *p, int depth)
7974 7974 {
7975 7975 return (s != NULL && strcmp(s, p) == 0);
7976 7976 }
7977 7977
7978 7978 /*ARGSUSED*/
7979 7979 static int
7980 7980 dtrace_match_nul(const char *s, const char *p, int depth)
7981 7981 {
7982 7982 return (1); /* always match the empty pattern */
7983 7983 }
7984 7984
7985 7985 /*ARGSUSED*/
7986 7986 static int
7987 7987 dtrace_match_nonzero(const char *s, const char *p, int depth)
7988 7988 {
7989 7989 return (s != NULL && s[0] != '\0');
7990 7990 }
7991 7991
7992 7992 static int
7993 7993 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7994 7994 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7995 7995 {
7996 7996 dtrace_probe_t template, *probe;
7997 7997 dtrace_hash_t *hash = NULL;
7998 7998 int len, rc, best = INT_MAX, nmatched = 0;
7999 7999 dtrace_id_t i;
8000 8000
8001 8001 ASSERT(MUTEX_HELD(&dtrace_lock));
8002 8002
8003 8003 /*
8004 8004 * If the probe ID is specified in the key, just lookup by ID and
8005 8005 * invoke the match callback once if a matching probe is found.
8006 8006 */
8007 8007 if (pkp->dtpk_id != DTRACE_IDNONE) {
8008 8008 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8009 8009 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
8010 8010 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
8011 8011 return (DTRACE_MATCH_FAIL);
8012 8012 nmatched++;
8013 8013 }
8014 8014 return (nmatched);
8015 8015 }
8016 8016
8017 8017 template.dtpr_mod = (char *)pkp->dtpk_mod;
8018 8018 template.dtpr_func = (char *)pkp->dtpk_func;
8019 8019 template.dtpr_name = (char *)pkp->dtpk_name;
8020 8020
8021 8021 /*
8022 8022 * We want to find the most distinct of the module name, function
8023 8023 * name, and name. So for each one that is not a glob pattern or
8024 8024 * empty string, we perform a lookup in the corresponding hash and
8025 8025 * use the hash table with the fewest collisions to do our search.
8026 8026 */
8027 8027 if (pkp->dtpk_mmatch == &dtrace_match_string &&
8028 8028 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8029 8029 best = len;
8030 8030 hash = dtrace_bymod;
8031 8031 }
8032 8032
8033 8033 if (pkp->dtpk_fmatch == &dtrace_match_string &&
8034 8034 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8035 8035 best = len;
8036 8036 hash = dtrace_byfunc;
8037 8037 }
8038 8038
8039 8039 if (pkp->dtpk_nmatch == &dtrace_match_string &&
8040 8040 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8041 8041 best = len;
8042 8042 hash = dtrace_byname;
8043 8043 }
8044 8044
8045 8045 /*
8046 8046 * If we did not select a hash table, iterate over every probe and
8047 8047 * invoke our callback for each one that matches our input probe key.
8048 8048 */
8049 8049 if (hash == NULL) {
8050 8050 for (i = 0; i < dtrace_nprobes; i++) {
8051 8051 if ((probe = dtrace_probes[i]) == NULL ||
8052 8052 dtrace_match_probe(probe, pkp, priv, uid,
8053 8053 zoneid) <= 0)
8054 8054 continue;
8055 8055
8056 8056 nmatched++;
8057 8057
8058 8058 if ((rc = (*matched)(probe, arg)) !=
8059 8059 DTRACE_MATCH_NEXT) {
8060 8060 if (rc == DTRACE_MATCH_FAIL)
8061 8061 return (DTRACE_MATCH_FAIL);
8062 8062 break;
8063 8063 }
8064 8064 }
8065 8065
8066 8066 return (nmatched);
8067 8067 }
8068 8068
8069 8069 /*
8070 8070 * If we selected a hash table, iterate over each probe of the same key
8071 8071 * name and invoke the callback for every probe that matches the other
8072 8072 * attributes of our input probe key.
8073 8073 */
8074 8074 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8075 8075 probe = *(DTRACE_HASHNEXT(hash, probe))) {
8076 8076
8077 8077 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8078 8078 continue;
8079 8079
8080 8080 nmatched++;
8081 8081
8082 8082 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
8083 8083 if (rc == DTRACE_MATCH_FAIL)
8084 8084 return (DTRACE_MATCH_FAIL);
8085 8085 break;
8086 8086 }
8087 8087 }
8088 8088
8089 8089 return (nmatched);
8090 8090 }
8091 8091
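The hash selection in dtrace_match() above amounts to "walk the shortest candidate chain": of the module, function, and name hashes whose key component is an exact (non-glob, non-empty) string, the one with the fewest collisions wins. A toy illustration with made-up collision counts:

#include <limits.h>
#include <stdio.h>

int
main(void)
{
	/* hypothetical collision counts for one probe description */
	struct {
		const char	*hash;
		int		collisions;
	} candidates[] = {
		{ "dtrace_bymod",	412 },	/* e.g. "genunix" */
		{ "dtrace_byfunc",	3 },	/* e.g. "kmem_alloc" */
		{ "dtrace_byname",	57 },	/* e.g. "entry" */
	};
	int i, best = INT_MAX;
	const char *hash = NULL;

	for (i = 0; i < 3; i++) {
		if (candidates[i].collisions < best) {
			best = candidates[i].collisions;
			hash = candidates[i].hash;
		}
	}
	printf("search %s (%d probes to scan)\n", hash, best);
	return (0);
}
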
8092 8092 /*
8093 8093 * Return the function pointer dtrace_match_probe() should use to compare the
8094 8094 * specified pattern with a string. For NULL or empty patterns, we select
8095 8095 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
8096 8096 * For non-empty non-glob strings, we use dtrace_match_string().
8097 8097 */
8098 8098 static dtrace_probekey_f *
8099 8099 dtrace_probekey_func(const char *p)
8100 8100 {
8101 8101 char c;
8102 8102
8103 8103 if (p == NULL || *p == '\0')
8104 8104 return (&dtrace_match_nul);
8105 8105
8106 8106 while ((c = *p++) != '\0') {
8107 8107 if (c == '[' || c == '?' || c == '*' || c == '\\')
8108 8108 return (&dtrace_match_glob);
8109 8109 }
8110 8110
8111 8111 return (&dtrace_match_string);
8112 8112 }
8113 8113
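Each component of a probe description is thus classified once, up front: an empty field matches anything, a field containing '[', '?', '*', or a backslash is treated as a glob, and anything else is compared byte-for-byte. A standalone restatement of that selection (the labels returned below are purely illustrative):

#include <stdio.h>

/* Mirror of the selection logic in dtrace_probekey_func() above. */
static const char *
probekey_kind(const char *p)
{
	char c;

	if (p == NULL || *p == '\0')
		return ("nul (matches anything)");

	while ((c = *p++) != '\0') {
		if (c == '[' || c == '?' || c == '*' || c == '\\')
			return ("glob");
	}
	return ("exact string");
}

int
main(void)
{
	const char *fields[] = { "", "syscall", "kmem_*", "entry" };
	int i;

	for (i = 0; i < 4; i++)
		printf("\"%s\" -> %s\n", fields[i], probekey_kind(fields[i]));
	return (0);
}
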
8114 8114 /*
8115 8115 * Build a probe comparison key for use with dtrace_match_probe() from the
8116 8116 * given probe description. By convention, a null key only matches anchored
8117 8117 * probes: if each field is the empty string, reset dtpk_fmatch to
8118 8118 * dtrace_match_nonzero().
8119 8119 */
8120 8120 static void
8121 8121 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8122 8122 {
8123 8123 pkp->dtpk_prov = pdp->dtpd_provider;
8124 8124 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8125 8125
8126 8126 pkp->dtpk_mod = pdp->dtpd_mod;
8127 8127 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
8128 8128
8129 8129 pkp->dtpk_func = pdp->dtpd_func;
8130 8130 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8131 8131
8132 8132 pkp->dtpk_name = pdp->dtpd_name;
8133 8133 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8134 8134
8135 8135 pkp->dtpk_id = pdp->dtpd_id;
8136 8136
8137 8137 if (pkp->dtpk_id == DTRACE_IDNONE &&
8138 8138 pkp->dtpk_pmatch == &dtrace_match_nul &&
8139 8139 pkp->dtpk_mmatch == &dtrace_match_nul &&
8140 8140 pkp->dtpk_fmatch == &dtrace_match_nul &&
8141 8141 pkp->dtpk_nmatch == &dtrace_match_nul)
8142 8142 pkp->dtpk_fmatch = &dtrace_match_nonzero;
8143 8143 }
8144 8144
8145 8145 /*
8146 8146 * DTrace Provider-to-Framework API Functions
8147 8147 *
8148 8148 * These functions implement much of the Provider-to-Framework API, as
8149 8149 * described in <sys/dtrace.h>. The parts of the API not in this section are
8150 8150 * the functions in the API for probe management (found below), and
8151 8151 * dtrace_probe() itself (found above).
8152 8152 */
8153 8153
8154 8154 /*
8155 8155 * Register the calling provider with the DTrace framework. This should
8156 8156 * generally be called by DTrace providers in their attach(9E) entry point.
8157 8157 */
8158 8158 int
8159 8159 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8160 8160 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8161 8161 {
8162 8162 dtrace_provider_t *provider;
8163 8163
8164 8164 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8165 8165 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8166 8166 "arguments", name ? name : "<NULL>");
8167 8167 return (EINVAL);
8168 8168 }
8169 8169
8170 8170 if (name[0] == '\0' || dtrace_badname(name)) {
8171 8171 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8172 8172 "provider name", name);
8173 8173 return (EINVAL);
8174 8174 }
8175 8175
8176 8176 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8177 8177 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8178 8178 pops->dtps_destroy == NULL ||
8179 8179 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8180 8180 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8181 8181 "provider ops", name);
8182 8182 return (EINVAL);
8183 8183 }
8184 8184
8185 8185 if (dtrace_badattr(&pap->dtpa_provider) ||
8186 8186 dtrace_badattr(&pap->dtpa_mod) ||
8187 8187 dtrace_badattr(&pap->dtpa_func) ||
8188 8188 dtrace_badattr(&pap->dtpa_name) ||
8189 8189 dtrace_badattr(&pap->dtpa_args)) {
8190 8190 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8191 8191 "provider attributes", name);
8192 8192 return (EINVAL);
8193 8193 }
8194 8194
8195 8195 if (priv & ~DTRACE_PRIV_ALL) {
8196 8196 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8197 8197 "privilege attributes", name);
8198 8198 return (EINVAL);
8199 8199 }
8200 8200
8201 8201 if ((priv & DTRACE_PRIV_KERNEL) &&
8202 8202 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8203 8203 pops->dtps_mode == NULL) {
8204 8204 cmn_err(CE_WARN, "failed to register provider '%s': need "
8205 8205 "dtps_mode() op for given privilege attributes", name);
8206 8206 return (EINVAL);
8207 8207 }
8208 8208
8209 8209 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
8210 8210 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8211 8211 (void) strcpy(provider->dtpv_name, name);
8212 8212
8213 8213 provider->dtpv_attr = *pap;
8214 8214 provider->dtpv_priv.dtpp_flags = priv;
8215 8215 if (cr != NULL) {
8216 8216 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8217 8217 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8218 8218 }
8219 8219 provider->dtpv_pops = *pops;
8220 8220
8221 8221 if (pops->dtps_provide == NULL) {
8222 8222 ASSERT(pops->dtps_provide_module != NULL);
8223 8223 provider->dtpv_pops.dtps_provide =
8224 8224 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
8225 8225 }
8226 8226
8227 8227 if (pops->dtps_provide_module == NULL) {
8228 8228 ASSERT(pops->dtps_provide != NULL);
8229 8229 provider->dtpv_pops.dtps_provide_module =
8230 8230 (void (*)(void *, struct modctl *))dtrace_nullop;
8231 8231 }
8232 8232
8233 8233 if (pops->dtps_suspend == NULL) {
8234 8234 ASSERT(pops->dtps_resume == NULL);
8235 8235 provider->dtpv_pops.dtps_suspend =
8236 8236 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8237 8237 provider->dtpv_pops.dtps_resume =
8238 8238 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8239 8239 }
8240 8240
8241 8241 provider->dtpv_arg = arg;
8242 8242 *idp = (dtrace_provider_id_t)provider;
8243 8243
8244 8244 if (pops == &dtrace_provider_ops) {
8245 8245 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8246 8246 ASSERT(MUTEX_HELD(&dtrace_lock));
8247 8247 ASSERT(dtrace_anon.dta_enabling == NULL);
8248 8248
8249 8249 /*
8250 8250 * We make sure that the DTrace provider is at the head of
8251 8251 * the provider chain.
8252 8252 */
8253 8253 provider->dtpv_next = dtrace_provider;
8254 8254 dtrace_provider = provider;
8255 8255 return (0);
8256 8256 }
8257 8257
8258 8258 mutex_enter(&dtrace_provider_lock);
8259 8259 mutex_enter(&dtrace_lock);
8260 8260
8261 8261 /*
8262 8262 * If there is at least one provider registered, we'll add this
8263 8263 * provider after the first provider.
8264 8264 */
8265 8265 if (dtrace_provider != NULL) {
8266 8266 provider->dtpv_next = dtrace_provider->dtpv_next;
8267 8267 dtrace_provider->dtpv_next = provider;
8268 8268 } else {
8269 8269 dtrace_provider = provider;
8270 8270 }
8271 8271
8272 8272 if (dtrace_retained != NULL) {
8273 8273 dtrace_enabling_provide(provider);
8274 8274
8275 8275 /*
8276 8276 * Now we need to call dtrace_enabling_matchall() -- which
8277 8277 * will acquire cpu_lock and dtrace_lock. We therefore need
8278 8278 * to drop all of our locks before calling into it...
8279 8279 */
8280 8280 mutex_exit(&dtrace_lock);
8281 8281 mutex_exit(&dtrace_provider_lock);
8282 8282 dtrace_enabling_matchall();
8283 8283
8284 8284 return (0);
8285 8285 }
8286 8286
8287 8287 mutex_exit(&dtrace_lock);
8288 8288 mutex_exit(&dtrace_provider_lock);
8289 8289
8290 8290 return (0);
8291 8291 }
8292 8292
8293 8293 /*
8294 8294 * Unregister the specified provider from the DTrace framework. This should
8295 8295 * generally be called by DTrace providers in their detach(9E) entry point.
8296 8296 */
8297 8297 int
8298 8298 dtrace_unregister(dtrace_provider_id_t id)
8299 8299 {
8300 8300 dtrace_provider_t *old = (dtrace_provider_t *)id;
8301 8301 dtrace_provider_t *prev = NULL;
8302 8302 int i, self = 0, noreap = 0;
8303 8303 dtrace_probe_t *probe, *first = NULL;
8304 8304
8305 8305 if (old->dtpv_pops.dtps_enable ==
8306 8306 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
8307 8307 /*
8308 8308 * If DTrace itself is the provider, we're called with locks
8309 8309 * already held.
8310 8310 */
8311 8311 ASSERT(old == dtrace_provider);
8312 8312 ASSERT(dtrace_devi != NULL);
8313 8313 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8314 8314 ASSERT(MUTEX_HELD(&dtrace_lock));
8315 8315 self = 1;
8316 8316
8317 8317 if (dtrace_provider->dtpv_next != NULL) {
8318 8318 /*
8319 8319 * There's another provider here; return failure.
8320 8320 */
8321 8321 return (EBUSY);
8322 8322 }
8323 8323 } else {
8324 8324 mutex_enter(&dtrace_provider_lock);
8325 8325 mutex_enter(&mod_lock);
8326 8326 mutex_enter(&dtrace_lock);
8327 8327 }
8328 8328
8329 8329 /*
8330 8330 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8331 8331 * probes, we refuse to let providers slither away, unless this
8332 8332 * provider has already been explicitly invalidated.
8333 8333 */
8334 8334 if (!old->dtpv_defunct &&
8335 8335 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8336 8336 dtrace_anon.dta_state->dts_necbs > 0))) {
8337 8337 if (!self) {
8338 8338 mutex_exit(&dtrace_lock);
8339 8339 mutex_exit(&mod_lock);
8340 8340 mutex_exit(&dtrace_provider_lock);
8341 8341 }
8342 8342 return (EBUSY);
8343 8343 }
8344 8344
8345 8345 /*
8346 8346 * Attempt to destroy the probes associated with this provider.
8347 8347 */
8348 8348 for (i = 0; i < dtrace_nprobes; i++) {
8349 8349 if ((probe = dtrace_probes[i]) == NULL)
8350 8350 continue;
8351 8351
8352 8352 if (probe->dtpr_provider != old)
8353 8353 continue;
8354 8354
8355 8355 if (probe->dtpr_ecb == NULL)
8356 8356 continue;
8357 8357
8358 8358 /*
8359 8359 * If we are trying to unregister a defunct provider, and the
8360 8360 * provider was made defunct within the interval dictated by
8361 8361 * dtrace_unregister_defunct_reap, we'll (asynchronously)
8362 8362 * attempt to reap our enablings. To denote that the provider
8363 8363 * should reattempt to unregister itself at some point in the
8364 8364 * future, we will return a differentiable error code (EAGAIN
8365 8365 * instead of EBUSY) in this case.
8366 8366 */
8367 8367 if (dtrace_gethrtime() - old->dtpv_defunct >
8368 8368 dtrace_unregister_defunct_reap)
8369 8369 noreap = 1;
8370 8370
8371 8371 if (!self) {
8372 8372 mutex_exit(&dtrace_lock);
8373 8373 mutex_exit(&mod_lock);
8374 8374 mutex_exit(&dtrace_provider_lock);
8375 8375 }
8376 8376
8377 8377 if (noreap)
8378 8378 return (EBUSY);
8379 8379
8380 8380 (void) taskq_dispatch(dtrace_taskq,
8381 8381 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
8382 8382
8383 8383 return (EAGAIN);
8384 8384 }
8385 8385
8386 8386 /*
8387 8387 * All of the probes for this provider are disabled; we can safely
8388 8388 * remove all of them from their hash chains and from the probe array.
8389 8389 */
8390 8390 for (i = 0; i < dtrace_nprobes; i++) {
8391 8391 if ((probe = dtrace_probes[i]) == NULL)
8392 8392 continue;
8393 8393
8394 8394 if (probe->dtpr_provider != old)
8395 8395 continue;
8396 8396
8397 8397 dtrace_probes[i] = NULL;
8398 8398
8399 8399 dtrace_hash_remove(dtrace_bymod, probe);
8400 8400 dtrace_hash_remove(dtrace_byfunc, probe);
8401 8401 dtrace_hash_remove(dtrace_byname, probe);
8402 8402
8403 8403 if (first == NULL) {
8404 8404 first = probe;
8405 8405 probe->dtpr_nextmod = NULL;
8406 8406 } else {
8407 8407 probe->dtpr_nextmod = first;
8408 8408 first = probe;
8409 8409 }
8410 8410 }
8411 8411
8412 8412 /*
8413 8413 * The provider's probes have been removed from the hash chains and
8414 8414 * from the probe array. Now issue a dtrace_sync() to be sure that
8415 8415 * everyone has cleared out from any probe array processing.
8416 8416 */
8417 8417 dtrace_sync();
8418 8418
8419 8419 for (probe = first; probe != NULL; probe = first) {
8420 8420 first = probe->dtpr_nextmod;
8421 8421
8422 8422 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8423 8423 probe->dtpr_arg);
8424 8424 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8425 8425 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8426 8426 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8427 8427 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8428 8428 kmem_free(probe, sizeof (dtrace_probe_t));
8429 8429 }
8430 8430
8431 8431 if ((prev = dtrace_provider) == old) {
8432 8432 ASSERT(self || dtrace_devi == NULL);
8433 8433 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8434 8434 dtrace_provider = old->dtpv_next;
8435 8435 } else {
8436 8436 while (prev != NULL && prev->dtpv_next != old)
8437 8437 prev = prev->dtpv_next;
8438 8438
8439 8439 if (prev == NULL) {
8440 8440 panic("attempt to unregister non-existent "
8441 8441 "dtrace provider %p\n", (void *)id);
8442 8442 }
8443 8443
8444 8444 prev->dtpv_next = old->dtpv_next;
8445 8445 }
8446 8446
8447 8447 if (!self) {
8448 8448 mutex_exit(&dtrace_lock);
8449 8449 mutex_exit(&mod_lock);
8450 8450 mutex_exit(&dtrace_provider_lock);
8451 8451 }
8452 8452
8453 8453 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
8454 8454 kmem_free(old, sizeof (dtrace_provider_t));
8455 8455
8456 8456 return (0);
8457 8457 }
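/*
 * Illustrative sketch (editor's addition, not part of this change): a
 * hypothetical provider tearing itself down can distinguish the two "busy"
 * returns documented above -- EBUSY means enabled probes or consumers still
 * exist and nothing more will happen, while EAGAIN means a reap has been
 * dispatched asynchronously and the unregister should simply be retried
 * later.  The function and provider id names are assumptions.
 */
#if 0
static int
xxx_detach(void)
{
	switch (dtrace_unregister(xxx_provider_id)) {
	case 0:
		return (DDI_SUCCESS);
	case EAGAIN:
		/* Enablings are being reaped; fail the detach and retry. */
		return (DDI_FAILURE);
	default:
		/* EBUSY: consumers or enabled probes still exist. */
		return (DDI_FAILURE);
	}
}
#endif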
8458 8458
8459 8459 /*
8460 8460 * Invalidate the specified provider. All subsequent probe lookups for the
8461 8461 * specified provider will fail, but its probes will not be removed.
8462 8462 */
8463 8463 void
8464 8464 dtrace_invalidate(dtrace_provider_id_t id)
8465 8465 {
8466 8466 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8467 8467
8468 8468 ASSERT(pvp->dtpv_pops.dtps_enable !=
8469 8469 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8470 8470
8471 8471 mutex_enter(&dtrace_provider_lock);
8472 8472 mutex_enter(&dtrace_lock);
8473 8473
8474 8474 pvp->dtpv_defunct = dtrace_gethrtime();
8475 8475
8476 8476 mutex_exit(&dtrace_lock);
8477 8477 mutex_exit(&dtrace_provider_lock);
8478 8478 }
8479 8479
8480 8480 /*
8481 8481 * Indicate whether or not DTrace has attached.
8482 8482 */
8483 8483 int
8484 8484 dtrace_attached(void)
8485 8485 {
8486 8486 /*
8487 8487 * dtrace_provider will be non-NULL iff the DTrace driver has
8488 8488 * attached. (It's non-NULL because DTrace is always itself a
8489 8489 * provider.)
8490 8490 */
8491 8491 return (dtrace_provider != NULL);
8492 8492 }
8493 8493
8494 8494 /*
8495 8495 * Remove all the unenabled probes for the given provider. This function is
8496 8496 * not unlike dtrace_unregister(), except that it doesn't remove the provider
8497 8497 * -- just as many of its associated probes as it can.
8498 8498 */
8499 8499 int
8500 8500 dtrace_condense(dtrace_provider_id_t id)
8501 8501 {
8502 8502 dtrace_provider_t *prov = (dtrace_provider_t *)id;
8503 8503 int i;
8504 8504 dtrace_probe_t *probe;
8505 8505
8506 8506 /*
8507 8507 * Make sure this isn't the dtrace provider itself.
8508 8508 */
8509 8509 ASSERT(prov->dtpv_pops.dtps_enable !=
8510 8510 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8511 8511
8512 8512 mutex_enter(&dtrace_provider_lock);
8513 8513 mutex_enter(&dtrace_lock);
8514 8514
8515 8515 /*
8516 8516 * Attempt to destroy the probes associated with this provider.
8517 8517 */
8518 8518 for (i = 0; i < dtrace_nprobes; i++) {
8519 8519 if ((probe = dtrace_probes[i]) == NULL)
8520 8520 continue;
8521 8521
8522 8522 if (probe->dtpr_provider != prov)
8523 8523 continue;
8524 8524
8525 8525 if (probe->dtpr_ecb != NULL)
8526 8526 continue;
8527 8527
8528 8528 dtrace_probes[i] = NULL;
8529 8529
8530 8530 dtrace_hash_remove(dtrace_bymod, probe);
8531 8531 dtrace_hash_remove(dtrace_byfunc, probe);
8532 8532 dtrace_hash_remove(dtrace_byname, probe);
8533 8533
8534 8534 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
8535 8535 probe->dtpr_arg);
8536 8536 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8537 8537 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8538 8538 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8539 8539 kmem_free(probe, sizeof (dtrace_probe_t));
8540 8540 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
8541 8541 }
8542 8542
8543 8543 mutex_exit(&dtrace_lock);
8544 8544 mutex_exit(&dtrace_provider_lock);
8545 8545
8546 8546 return (0);
8547 8547 }
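/*
 * Illustrative sketch (editor's addition): a provider that autocreates many
 * probes may call dtrace_condense() from its own housekeeping path to discard
 * the probes that were never enabled, without unregistering itself.  The
 * provider id below is an assumption.
 */
#if 0
	(void) dtrace_condense(xxx_provider_id);
#endif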
8548 8548
8549 8549 /*
8550 8550 * DTrace Probe Management Functions
8551 8551 *
8552 8552 * The functions in this section perform the DTrace probe management,
8553 8553 * including functions to create probes, look-up probes, and call into the
8554 8554 * providers to request that probes be provided. Some of these functions are
8555 8555 * in the Provider-to-Framework API; these functions can be identified by the
8556 8556 * fact that they are not declared "static".
8557 8557 */
8558 8558
8559 8559 /*
8560 8560 * Create a probe with the specified module name, function name, and name.
8561 8561 */
8562 8562 dtrace_id_t
8563 8563 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
8564 8564 const char *func, const char *name, int aframes, void *arg)
8565 8565 {
8566 8566 dtrace_probe_t *probe, **probes;
8567 8567 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
8568 8568 dtrace_id_t id;
8569 8569
8570 8570 if (provider == dtrace_provider) {
8571 8571 ASSERT(MUTEX_HELD(&dtrace_lock));
8572 8572 } else {
8573 8573 mutex_enter(&dtrace_lock);
8574 8574 }
8575 8575
8576 8576 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
8577 8577 VM_BESTFIT | VM_SLEEP);
8578 8578 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
8579 8579
8580 8580 probe->dtpr_id = id;
8581 8581 probe->dtpr_gen = dtrace_probegen++;
8582 8582 probe->dtpr_mod = dtrace_strdup(mod);
8583 8583 probe->dtpr_func = dtrace_strdup(func);
8584 8584 probe->dtpr_name = dtrace_strdup(name);
8585 8585 probe->dtpr_arg = arg;
8586 8586 probe->dtpr_aframes = aframes;
8587 8587 probe->dtpr_provider = provider;
8588 8588
8589 8589 dtrace_hash_add(dtrace_bymod, probe);
8590 8590 dtrace_hash_add(dtrace_byfunc, probe);
8591 8591 dtrace_hash_add(dtrace_byname, probe);
8592 8592
8593 8593 if (id - 1 >= dtrace_nprobes) {
8594 8594 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
8595 8595 size_t nsize = osize << 1;
8596 8596
8597 8597 if (nsize == 0) {
8598 8598 ASSERT(osize == 0);
8599 8599 ASSERT(dtrace_probes == NULL);
8600 8600 nsize = sizeof (dtrace_probe_t *);
8601 8601 }
8602 8602
8603 8603 probes = kmem_zalloc(nsize, KM_SLEEP);
8604 8604
8605 8605 if (dtrace_probes == NULL) {
8606 8606 ASSERT(osize == 0);
8607 8607 dtrace_probes = probes;
8608 8608 dtrace_nprobes = 1;
8609 8609 } else {
8610 8610 dtrace_probe_t **oprobes = dtrace_probes;
8611 8611
8612 8612 bcopy(oprobes, probes, osize);
8613 8613 dtrace_membar_producer();
8614 8614 dtrace_probes = probes;
8615 8615
8616 8616 dtrace_sync();
8617 8617
8618 8618 /*
8619 8619 * All CPUs are now seeing the new probes array; we can
8620 8620 * safely free the old array.
8621 8621 */
8622 8622 kmem_free(oprobes, osize);
8623 8623 dtrace_nprobes <<= 1;
8624 8624 }
8625 8625
8626 8626 ASSERT(id - 1 < dtrace_nprobes);
8627 8627 }
8628 8628
8629 8629 ASSERT(dtrace_probes[id - 1] == NULL);
8630 8630 dtrace_probes[id - 1] = probe;
8631 8631
8632 8632 if (provider != dtrace_provider)
8633 8633 mutex_exit(&dtrace_lock);
8634 8634
8635 8635 return (id);
8636 8636 }
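/*
 * Illustrative sketch (editor's addition): the array doubling above follows
 * the usual lock-free publication sequence, restated here with hypothetical
 * names -- probe context never takes dtrace_lock, so the old array can only
 * be freed once dtrace_sync() guarantees no CPU is still traversing it.
 */
#if 0
	bcopy(oldp, newp, osize);	/* 1. copy the existing entries */
	dtrace_membar_producer();	/* 2. order the copy before publication */
	dtrace_probes = newp;		/* 3. publish the new array pointer */
	dtrace_sync();			/* 4. wait out all probe context */
	kmem_free(oldp, osize);		/* 5. old array is now unreachable */
#endif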
8637 8637
8638 8638 static dtrace_probe_t *
8639 8639 dtrace_probe_lookup_id(dtrace_id_t id)
8640 8640 {
8641 8641 ASSERT(MUTEX_HELD(&dtrace_lock));
8642 8642
8643 8643 if (id == 0 || id > dtrace_nprobes)
8644 8644 return (NULL);
8645 8645
8646 8646 return (dtrace_probes[id - 1]);
8647 8647 }
8648 8648
8649 8649 static int
8650 8650 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
8651 8651 {
8652 8652 *((dtrace_id_t *)arg) = probe->dtpr_id;
8653 8653
8654 8654 return (DTRACE_MATCH_DONE);
8655 8655 }
8656 8656
8657 8657 /*
8658 8658 * Look up a probe based on provider and one or more of module name, function
8659 8659 * name and probe name.
8660 8660 */
8661 8661 dtrace_id_t
8662 8662 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
8663 8663 const char *func, const char *name)
8664 8664 {
8665 8665 dtrace_probekey_t pkey;
8666 8666 dtrace_id_t id;
8667 8667 int match;
8668 8668
8669 8669 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
8670 8670 pkey.dtpk_pmatch = &dtrace_match_string;
8671 8671 pkey.dtpk_mod = mod;
8672 8672 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
8673 8673 pkey.dtpk_func = func;
8674 8674 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
8675 8675 pkey.dtpk_name = name;
8676 8676 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
8677 8677 pkey.dtpk_id = DTRACE_IDNONE;
8678 8678
8679 8679 mutex_enter(&dtrace_lock);
8680 8680 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
8681 8681 dtrace_probe_lookup_match, &id);
8682 8682 mutex_exit(&dtrace_lock);
8683 8683
8684 8684 ASSERT(match == 1 || match == 0);
8685 8685 return (match ? id : 0);
8686 8686 }
8687 8687
8688 8688 /*
8689 8689 * Returns the probe argument associated with the specified probe.
8690 8690 */
8691 8691 void *
8692 8692 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8693 8693 {
8694 8694 dtrace_probe_t *probe;
8695 8695 void *rval = NULL;
8696 8696
8697 8697 mutex_enter(&dtrace_lock);
8698 8698
8699 8699 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8700 8700 probe->dtpr_provider == (dtrace_provider_t *)id)
8701 8701 rval = probe->dtpr_arg;
8702 8702
8703 8703 mutex_exit(&dtrace_lock);
8704 8704
8705 8705 return (rval);
8706 8706 }
8707 8707
8708 8708 /*
8709 8709 * Copy a probe into a probe description.
8710 8710 */
8711 8711 static void
8712 8712 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8713 8713 {
8714 8714 bzero(pdp, sizeof (dtrace_probedesc_t));
8715 8715 pdp->dtpd_id = prp->dtpr_id;
8716 8716
8717 8717 (void) strncpy(pdp->dtpd_provider,
8718 8718 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8719 8719
8720 8720 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8721 8721 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8722 8722 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8723 8723 }
8724 8724
8725 8725 /*
8726 8726 * Called to indicate that a probe -- or probes -- should be provided by a
8727 8727  * specified provider. If the specified description is NULL, the provider will
8728 8728 * be told to provide all of its probes. (This is done whenever a new
8729 8729 * consumer comes along, or whenever a retained enabling is to be matched.) If
8730 8730 * the specified description is non-NULL, the provider is given the
8731 8731 * opportunity to dynamically provide the specified probe, allowing providers
8732 8732 * to support the creation of probes on-the-fly. (So-called _autocreated_
8733 8733 * probes.) If the provider is NULL, the operations will be applied to all
8734 8734 * providers; if the provider is non-NULL the operations will only be applied
8735 8735 * to the specified provider. The dtrace_provider_lock must be held, and the
8736 8736 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8737 8737 * will need to grab the dtrace_lock when it reenters the framework through
8738 8738 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8739 8739 */
8740 8740 static void
8741 8741 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8742 8742 {
8743 8743 struct modctl *ctl;
8744 8744 int all = 0;
8745 8745
8746 8746 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8747 8747
8748 8748 if (prv == NULL) {
8749 8749 all = 1;
8750 8750 prv = dtrace_provider;
8751 8751 }
8752 8752
8753 8753 do {
8754 8754 /*
8755 8755 * First, call the blanket provide operation.
8756 8756 */
8757 8757 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8758 8758
8759 8759 /*
8760 8760 * Now call the per-module provide operation. We will grab
8761 8761 * mod_lock to prevent the list from being modified. Note
8762 8762 * that this also prevents the mod_busy bits from changing.
8763 8763 * (mod_busy can only be changed with mod_lock held.)
8764 8764 */
8765 8765 mutex_enter(&mod_lock);
8766 8766
8767 8767 ctl = &modules;
8768 8768 do {
8769 8769 if (ctl->mod_busy || ctl->mod_mp == NULL)
8770 8770 continue;
8771 8771
8772 8772 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8773 8773
8774 8774 } while ((ctl = ctl->mod_next) != &modules);
8775 8775
8776 8776 mutex_exit(&mod_lock);
8777 8777 } while (all && (prv = prv->dtpv_next) != NULL);
8778 8778 }
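/*
 * Illustrative sketch (editor's addition): a hypothetical provider's
 * dtps_provide() entry point as it would be called from the loop above.  Per
 * the block comment before dtrace_probe_provide(), the framework holds
 * dtrace_provider_lock but not dtrace_lock here, so the provider is free to
 * re-enter via dtrace_probe_lookup() and dtrace_probe_create().  The provider
 * id and the module/function/probe names are assumptions.
 */
#if 0
static void
xxx_provide(void *arg, const dtrace_probedesc_t *desc)
{
	if (desc != NULL)
		return;		/* this sketch only handles blanket provides */

	/*
	 * Providers are typically asked to provide more than once; look the
	 * probe up first so it isn't created twice.
	 */
	if (dtrace_probe_lookup(xxx_provider_id, "genunix",
	    "xxx_func", "entry") != 0)
		return;

	(void) dtrace_probe_create(xxx_provider_id, "genunix",
	    "xxx_func", "entry", 0, NULL);
}
#endif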
8779 8779
8780 8780 /*
8781 8781 * Iterate over each probe, and call the Framework-to-Provider API function
8782 8782 * denoted by offs.
8783 8783 */
8784 8784 static void
8785 8785 dtrace_probe_foreach(uintptr_t offs)
8786 8786 {
8787 8787 dtrace_provider_t *prov;
8788 8788 void (*func)(void *, dtrace_id_t, void *);
8789 8789 dtrace_probe_t *probe;
8790 8790 dtrace_icookie_t cookie;
8791 8791 int i;
8792 8792
8793 8793 /*
8794 8794 * We disable interrupts to walk through the probe array. This is
8795 8795 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8796 8796 * won't see stale data.
8797 8797 */
8798 8798 cookie = dtrace_interrupt_disable();
8799 8799
8800 8800 for (i = 0; i < dtrace_nprobes; i++) {
8801 8801 if ((probe = dtrace_probes[i]) == NULL)
8802 8802 continue;
8803 8803
8804 8804 if (probe->dtpr_ecb == NULL) {
8805 8805 /*
8806 8806 * This probe isn't enabled -- don't call the function.
8807 8807 */
8808 8808 continue;
8809 8809 }
8810 8810
8811 8811 prov = probe->dtpr_provider;
8812 8812 func = *((void(**)(void *, dtrace_id_t, void *))
8813 8813 ((uintptr_t)&prov->dtpv_pops + offs));
8814 8814
8815 8815 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8816 8816 }
8817 8817
8818 8818 dtrace_interrupt_enable(cookie);
8819 8819 }
8820 8820
8821 8821 static int
8822 8822 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8823 8823 {
8824 8824 dtrace_probekey_t pkey;
8825 8825 uint32_t priv;
8826 8826 uid_t uid;
8827 8827 zoneid_t zoneid;
8828 8828
8829 8829 ASSERT(MUTEX_HELD(&dtrace_lock));
8830 8830 dtrace_ecb_create_cache = NULL;
8831 8831
8832 8832 if (desc == NULL) {
8833 8833 /*
8834 8834 * If we're passed a NULL description, we're being asked to
8835 8835 * create an ECB with a NULL probe.
8836 8836 */
8837 8837 (void) dtrace_ecb_create_enable(NULL, enab);
8838 8838 return (0);
8839 8839 }
8840 8840
8841 8841 dtrace_probekey(desc, &pkey);
8842 8842 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8843 8843 &priv, &uid, &zoneid);
8844 8844
8845 8845 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8846 8846 enab));
8847 8847 }
8848 8848
8849 8849 /*
8850 8850 * DTrace Helper Provider Functions
8851 8851 */
8852 8852 static void
8853 8853 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8854 8854 {
8855 8855 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8856 8856 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8857 8857 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8858 8858 }
8859 8859
8860 8860 static void
8861 8861 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8862 8862 const dof_provider_t *dofprov, char *strtab)
8863 8863 {
8864 8864 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8865 8865 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8866 8866 dofprov->dofpv_provattr);
8867 8867 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8868 8868 dofprov->dofpv_modattr);
8869 8869 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8870 8870 dofprov->dofpv_funcattr);
8871 8871 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8872 8872 dofprov->dofpv_nameattr);
8873 8873 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8874 8874 dofprov->dofpv_argsattr);
8875 8875 }
8876 8876
8877 8877 static void
8878 8878 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8879 8879 {
8880 8880 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8881 8881 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8882 8882 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8883 8883 dof_provider_t *provider;
8884 8884 dof_probe_t *probe;
8885 8885 uint32_t *off, *enoff;
8886 8886 uint8_t *arg;
8887 8887 char *strtab;
8888 8888 uint_t i, nprobes;
8889 8889 dtrace_helper_provdesc_t dhpv;
8890 8890 dtrace_helper_probedesc_t dhpb;
8891 8891 dtrace_meta_t *meta = dtrace_meta_pid;
8892 8892 dtrace_mops_t *mops = &meta->dtm_mops;
8893 8893 void *parg;
8894 8894
8895 8895 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8896 8896 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8897 8897 provider->dofpv_strtab * dof->dofh_secsize);
8898 8898 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8899 8899 provider->dofpv_probes * dof->dofh_secsize);
8900 8900 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8901 8901 provider->dofpv_prargs * dof->dofh_secsize);
8902 8902 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8903 8903 provider->dofpv_proffs * dof->dofh_secsize);
8904 8904
8905 8905 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8906 8906 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8907 8907 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8908 8908 enoff = NULL;
8909 8909
8910 8910 /*
8911 8911 * See dtrace_helper_provider_validate().
8912 8912 */
8913 8913 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8914 8914 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8915 8915 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8916 8916 provider->dofpv_prenoffs * dof->dofh_secsize);
8917 8917 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8918 8918 }
8919 8919
8920 8920 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8921 8921
8922 8922 /*
8923 8923 * Create the provider.
8924 8924 */
8925 8925 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8926 8926
8927 8927 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8928 8928 return;
8929 8929
8930 8930 meta->dtm_count++;
8931 8931
8932 8932 /*
8933 8933 * Create the probes.
8934 8934 */
8935 8935 for (i = 0; i < nprobes; i++) {
8936 8936 probe = (dof_probe_t *)(uintptr_t)(daddr +
8937 8937 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8938 8938
8939 8939 dhpb.dthpb_mod = dhp->dofhp_mod;
8940 8940 dhpb.dthpb_func = strtab + probe->dofpr_func;
8941 8941 dhpb.dthpb_name = strtab + probe->dofpr_name;
8942 8942 dhpb.dthpb_base = probe->dofpr_addr;
8943 8943 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8944 8944 dhpb.dthpb_noffs = probe->dofpr_noffs;
8945 8945 if (enoff != NULL) {
8946 8946 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8947 8947 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8948 8948 } else {
8949 8949 dhpb.dthpb_enoffs = NULL;
8950 8950 dhpb.dthpb_nenoffs = 0;
8951 8951 }
8952 8952 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8953 8953 dhpb.dthpb_nargc = probe->dofpr_nargc;
8954 8954 dhpb.dthpb_xargc = probe->dofpr_xargc;
8955 8955 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8956 8956 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8957 8957
8958 8958 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8959 8959 }
8960 8960 }
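/*
 * Illustrative sketch (editor's addition): the DOF section arithmetic used
 * above and in dtrace_helper_provide() below -- the header of section N is
 * found at a fixed stride from the start of the DOF image.  "N" and "secN"
 * are hypothetical names.
 */
#if 0
	dof_sec_t *secN = (dof_sec_t *)(uintptr_t)(daddr +
	    dof->dofh_secoff + N * dof->dofh_secsize);
#endif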
8961 8961
8962 8962 static void
8963 8963 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8964 8964 {
8965 8965 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8966 8966 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8967 8967 int i;
8968 8968
8969 8969 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8970 8970
8971 8971 for (i = 0; i < dof->dofh_secnum; i++) {
8972 8972 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8973 8973 dof->dofh_secoff + i * dof->dofh_secsize);
8974 8974
8975 8975 if (sec->dofs_type != DOF_SECT_PROVIDER)
8976 8976 continue;
8977 8977
8978 8978 dtrace_helper_provide_one(dhp, sec, pid);
8979 8979 }
8980 8980
8981 8981 /*
8982 8982 * We may have just created probes, so we must now rematch against
8983 8983 * any retained enablings. Note that this call will acquire both
8984 8984 * cpu_lock and dtrace_lock; the fact that we are holding
8985 8985 * dtrace_meta_lock now is what defines the ordering with respect to
8986 8986 * these three locks.
8987 8987 */
8988 8988 dtrace_enabling_matchall();
8989 8989 }
8990 8990
8991 8991 static void
8992 8992 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8993 8993 {
8994 8994 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8995 8995 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8996 8996 dof_sec_t *str_sec;
8997 8997 dof_provider_t *provider;
8998 8998 char *strtab;
8999 8999 dtrace_helper_provdesc_t dhpv;
9000 9000 dtrace_meta_t *meta = dtrace_meta_pid;
9001 9001 dtrace_mops_t *mops = &meta->dtm_mops;
9002 9002
9003 9003 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9004 9004 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9005 9005 provider->dofpv_strtab * dof->dofh_secsize);
9006 9006
9007 9007 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9008 9008
9009 9009 /*
9010 9010 * Create the provider.
9011 9011 */
9012 9012 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9013 9013
9014 9014 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
9015 9015
9016 9016 meta->dtm_count--;
9017 9017 }
9018 9018
9019 9019 static void
9020 9020 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
9021 9021 {
9022 9022 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9023 9023 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9024 9024 int i;
9025 9025
9026 9026 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9027 9027
9028 9028 for (i = 0; i < dof->dofh_secnum; i++) {
9029 9029 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9030 9030 dof->dofh_secoff + i * dof->dofh_secsize);
9031 9031
9032 9032 if (sec->dofs_type != DOF_SECT_PROVIDER)
9033 9033 continue;
9034 9034
9035 9035 dtrace_helper_provider_remove_one(dhp, sec, pid);
9036 9036 }
9037 9037 }
9038 9038
9039 9039 /*
9040 9040 * DTrace Meta Provider-to-Framework API Functions
9041 9041 *
9042 9042 * These functions implement the Meta Provider-to-Framework API, as described
9043 9043 * in <sys/dtrace.h>.
9044 9044 */
9045 9045 int
9046 9046 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9047 9047 dtrace_meta_provider_id_t *idp)
9048 9048 {
9049 9049 dtrace_meta_t *meta;
9050 9050 dtrace_helpers_t *help, *next;
9051 9051 int i;
9052 9052
9053 9053 *idp = DTRACE_METAPROVNONE;
9054 9054
9055 9055 /*
9056 9056 * We strictly don't need the name, but we hold onto it for
9057 9057 * debuggability. All hail error queues!
9058 9058 */
9059 9059 if (name == NULL) {
9060 9060 cmn_err(CE_WARN, "failed to register meta-provider: "
9061 9061 "invalid name");
9062 9062 return (EINVAL);
9063 9063 }
9064 9064
9065 9065 if (mops == NULL ||
9066 9066 mops->dtms_create_probe == NULL ||
9067 9067 mops->dtms_provide_pid == NULL ||
9068 9068 mops->dtms_remove_pid == NULL) {
9069 9069 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9070 9070 "invalid ops", name);
9071 9071 return (EINVAL);
9072 9072 }
9073 9073
9074 9074 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9075 9075 meta->dtm_mops = *mops;
9076 9076 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
9077 9077 (void) strcpy(meta->dtm_name, name);
9078 9078 meta->dtm_arg = arg;
9079 9079
9080 9080 mutex_enter(&dtrace_meta_lock);
9081 9081 mutex_enter(&dtrace_lock);
9082 9082
9083 9083 if (dtrace_meta_pid != NULL) {
9084 9084 mutex_exit(&dtrace_lock);
9085 9085 mutex_exit(&dtrace_meta_lock);
9086 9086 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9087 9087 "user-land meta-provider exists", name);
9088 9088 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
9089 9089 kmem_free(meta, sizeof (dtrace_meta_t));
9090 9090 return (EINVAL);
9091 9091 }
9092 9092
9093 9093 dtrace_meta_pid = meta;
9094 9094 *idp = (dtrace_meta_provider_id_t)meta;
9095 9095
9096 9096 /*
9097 9097 * If there are providers and probes ready to go, pass them
9098 9098 * off to the new meta provider now.
9099 9099 */
9100 9100
9101 9101 help = dtrace_deferred_pid;
9102 9102 dtrace_deferred_pid = NULL;
9103 9103
9104 9104 mutex_exit(&dtrace_lock);
9105 9105
9106 9106 while (help != NULL) {
9107 9107 for (i = 0; i < help->dthps_nprovs; i++) {
9108 9108 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
9109 9109 help->dthps_pid);
9110 9110 }
9111 9111
9112 9112 next = help->dthps_next;
9113 9113 help->dthps_next = NULL;
9114 9114 help->dthps_prev = NULL;
9115 9115 help->dthps_deferred = 0;
9116 9116 help = next;
9117 9117 }
9118 9118
9119 9119 mutex_exit(&dtrace_meta_lock);
9120 9120
9121 9121 return (0);
9122 9122 }
9123 9123
9124 9124 int
9125 9125 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
9126 9126 {
9127 9127 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9128 9128
9129 9129 mutex_enter(&dtrace_meta_lock);
9130 9130 mutex_enter(&dtrace_lock);
9131 9131
9132 9132 if (old == dtrace_meta_pid) {
9133 9133 pp = &dtrace_meta_pid;
9134 9134 } else {
9135 9135 panic("attempt to unregister non-existent "
9136 9136 "dtrace meta-provider %p\n", (void *)old);
9137 9137 }
9138 9138
9139 9139 if (old->dtm_count != 0) {
9140 9140 mutex_exit(&dtrace_lock);
9141 9141 mutex_exit(&dtrace_meta_lock);
9142 9142 return (EBUSY);
9143 9143 }
9144 9144
9145 9145 *pp = NULL;
9146 9146
9147 9147 mutex_exit(&dtrace_lock);
9148 9148 mutex_exit(&dtrace_meta_lock);
9149 9149
9150 9150 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
9151 9151 kmem_free(old, sizeof (dtrace_meta_t));
9152 9152
9153 9153 return (0);
9154 9154 }
9155 9155
9156 9156
9157 9157 /*
9158 9158 * DTrace DIF Object Functions
9159 9159 */
9160 9160 static int
9161 9161 dtrace_difo_err(uint_t pc, const char *format, ...)
9162 9162 {
9163 9163 if (dtrace_err_verbose) {
9164 9164 va_list alist;
9165 9165
9166 9166 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
9167 9167 va_start(alist, format);
9168 9168 (void) vuprintf(format, alist);
9169 9169 va_end(alist);
9170 9170 }
9171 9171
9172 9172 #ifdef DTRACE_ERRDEBUG
9173 9173 dtrace_errdebug(format);
9174 9174 #endif
9175 9175 return (1);
9176 9176 }
9177 9177
9178 9178 /*
9179 9179 * Validate a DTrace DIF object by checking the IR instructions. The following
9180 9180 * rules are currently enforced by dtrace_difo_validate():
9181 9181 *
9182 9182 * 1. Each instruction must have a valid opcode
9183 9183 * 2. Each register, string, variable, or subroutine reference must be valid
9184 9184 * 3. No instruction can modify register %r0 (must be zero)
9185 9185 * 4. All instruction reserved bits must be set to zero
9186 9186 * 5. The last instruction must be a "ret" instruction
9187 9187 * 6. All branch targets must reference a valid instruction _after_ the branch
9188 9188 */
9189 9189 static int
9190 9190 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
9191 9191 cred_t *cr)
9192 9192 {
9193 9193 int err = 0, i;
9194 9194 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9195 9195 int kcheckload;
9196 9196 uint_t pc;
9197 9197 int maxglobal = -1, maxlocal = -1, maxtlocal = -1;
9198 9198
9199 9199 kcheckload = cr == NULL ||
9200 9200 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
9201 9201
9202 9202 dp->dtdo_destructive = 0;
9203 9203
9204 9204 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9205 9205 dif_instr_t instr = dp->dtdo_buf[pc];
9206 9206
9207 9207 uint_t r1 = DIF_INSTR_R1(instr);
9208 9208 uint_t r2 = DIF_INSTR_R2(instr);
9209 9209 uint_t rd = DIF_INSTR_RD(instr);
9210 9210 uint_t rs = DIF_INSTR_RS(instr);
9211 9211 uint_t label = DIF_INSTR_LABEL(instr);
9212 9212 uint_t v = DIF_INSTR_VAR(instr);
9213 9213 uint_t subr = DIF_INSTR_SUBR(instr);
9214 9214 uint_t type = DIF_INSTR_TYPE(instr);
9215 9215 uint_t op = DIF_INSTR_OP(instr);
9216 9216
9217 9217 switch (op) {
9218 9218 case DIF_OP_OR:
9219 9219 case DIF_OP_XOR:
9220 9220 case DIF_OP_AND:
9221 9221 case DIF_OP_SLL:
9222 9222 case DIF_OP_SRL:
9223 9223 case DIF_OP_SRA:
9224 9224 case DIF_OP_SUB:
9225 9225 case DIF_OP_ADD:
9226 9226 case DIF_OP_MUL:
9227 9227 case DIF_OP_SDIV:
9228 9228 case DIF_OP_UDIV:
9229 9229 case DIF_OP_SREM:
9230 9230 case DIF_OP_UREM:
9231 9231 case DIF_OP_COPYS:
9232 9232 if (r1 >= nregs)
9233 9233 err += efunc(pc, "invalid register %u\n", r1);
9234 9234 if (r2 >= nregs)
9235 9235 err += efunc(pc, "invalid register %u\n", r2);
9236 9236 if (rd >= nregs)
9237 9237 err += efunc(pc, "invalid register %u\n", rd);
9238 9238 if (rd == 0)
9239 9239 err += efunc(pc, "cannot write to %r0\n");
9240 9240 break;
9241 9241 case DIF_OP_NOT:
9242 9242 case DIF_OP_MOV:
9243 9243 case DIF_OP_ALLOCS:
9244 9244 if (r1 >= nregs)
9245 9245 err += efunc(pc, "invalid register %u\n", r1);
9246 9246 if (r2 != 0)
9247 9247 err += efunc(pc, "non-zero reserved bits\n");
9248 9248 if (rd >= nregs)
9249 9249 err += efunc(pc, "invalid register %u\n", rd);
9250 9250 if (rd == 0)
9251 9251 err += efunc(pc, "cannot write to %r0\n");
9252 9252 break;
9253 9253 case DIF_OP_LDSB:
9254 9254 case DIF_OP_LDSH:
9255 9255 case DIF_OP_LDSW:
9256 9256 case DIF_OP_LDUB:
9257 9257 case DIF_OP_LDUH:
9258 9258 case DIF_OP_LDUW:
9259 9259 case DIF_OP_LDX:
9260 9260 if (r1 >= nregs)
9261 9261 err += efunc(pc, "invalid register %u\n", r1);
9262 9262 if (r2 != 0)
9263 9263 err += efunc(pc, "non-zero reserved bits\n");
9264 9264 if (rd >= nregs)
9265 9265 err += efunc(pc, "invalid register %u\n", rd);
9266 9266 if (rd == 0)
9267 9267 err += efunc(pc, "cannot write to %r0\n");
9268 9268 if (kcheckload)
9269 9269 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9270 9270 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9271 9271 break;
9272 9272 case DIF_OP_RLDSB:
9273 9273 case DIF_OP_RLDSH:
9274 9274 case DIF_OP_RLDSW:
9275 9275 case DIF_OP_RLDUB:
9276 9276 case DIF_OP_RLDUH:
9277 9277 case DIF_OP_RLDUW:
9278 9278 case DIF_OP_RLDX:
9279 9279 if (r1 >= nregs)
9280 9280 err += efunc(pc, "invalid register %u\n", r1);
9281 9281 if (r2 != 0)
9282 9282 err += efunc(pc, "non-zero reserved bits\n");
9283 9283 if (rd >= nregs)
9284 9284 err += efunc(pc, "invalid register %u\n", rd);
9285 9285 if (rd == 0)
9286 9286 err += efunc(pc, "cannot write to %r0\n");
9287 9287 break;
9288 9288 case DIF_OP_ULDSB:
9289 9289 case DIF_OP_ULDSH:
9290 9290 case DIF_OP_ULDSW:
9291 9291 case DIF_OP_ULDUB:
9292 9292 case DIF_OP_ULDUH:
9293 9293 case DIF_OP_ULDUW:
9294 9294 case DIF_OP_ULDX:
9295 9295 if (r1 >= nregs)
9296 9296 err += efunc(pc, "invalid register %u\n", r1);
9297 9297 if (r2 != 0)
9298 9298 err += efunc(pc, "non-zero reserved bits\n");
9299 9299 if (rd >= nregs)
9300 9300 err += efunc(pc, "invalid register %u\n", rd);
9301 9301 if (rd == 0)
9302 9302 err += efunc(pc, "cannot write to %r0\n");
9303 9303 break;
9304 9304 case DIF_OP_STB:
9305 9305 case DIF_OP_STH:
9306 9306 case DIF_OP_STW:
9307 9307 case DIF_OP_STX:
9308 9308 if (r1 >= nregs)
9309 9309 err += efunc(pc, "invalid register %u\n", r1);
9310 9310 if (r2 != 0)
9311 9311 err += efunc(pc, "non-zero reserved bits\n");
9312 9312 if (rd >= nregs)
9313 9313 err += efunc(pc, "invalid register %u\n", rd);
9314 9314 if (rd == 0)
9315 9315 err += efunc(pc, "cannot write to 0 address\n");
9316 9316 break;
9317 9317 case DIF_OP_CMP:
9318 9318 case DIF_OP_SCMP:
9319 9319 if (r1 >= nregs)
9320 9320 err += efunc(pc, "invalid register %u\n", r1);
9321 9321 if (r2 >= nregs)
9322 9322 err += efunc(pc, "invalid register %u\n", r2);
9323 9323 if (rd != 0)
9324 9324 err += efunc(pc, "non-zero reserved bits\n");
9325 9325 break;
9326 9326 case DIF_OP_TST:
9327 9327 if (r1 >= nregs)
9328 9328 err += efunc(pc, "invalid register %u\n", r1);
9329 9329 if (r2 != 0 || rd != 0)
9330 9330 err += efunc(pc, "non-zero reserved bits\n");
9331 9331 break;
9332 9332 case DIF_OP_BA:
9333 9333 case DIF_OP_BE:
9334 9334 case DIF_OP_BNE:
9335 9335 case DIF_OP_BG:
9336 9336 case DIF_OP_BGU:
9337 9337 case DIF_OP_BGE:
9338 9338 case DIF_OP_BGEU:
9339 9339 case DIF_OP_BL:
9340 9340 case DIF_OP_BLU:
9341 9341 case DIF_OP_BLE:
9342 9342 case DIF_OP_BLEU:
9343 9343 if (label >= dp->dtdo_len) {
9344 9344 err += efunc(pc, "invalid branch target %u\n",
9345 9345 label);
9346 9346 }
9347 9347 if (label <= pc) {
9348 9348 err += efunc(pc, "backward branch to %u\n",
9349 9349 label);
9350 9350 }
9351 9351 break;
9352 9352 case DIF_OP_RET:
9353 9353 if (r1 != 0 || r2 != 0)
9354 9354 err += efunc(pc, "non-zero reserved bits\n");
9355 9355 if (rd >= nregs)
9356 9356 err += efunc(pc, "invalid register %u\n", rd);
9357 9357 break;
9358 9358 case DIF_OP_NOP:
9359 9359 case DIF_OP_POPTS:
9360 9360 case DIF_OP_FLUSHTS:
9361 9361 if (r1 != 0 || r2 != 0 || rd != 0)
9362 9362 err += efunc(pc, "non-zero reserved bits\n");
9363 9363 break;
9364 9364 case DIF_OP_SETX:
9365 9365 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9366 9366 err += efunc(pc, "invalid integer ref %u\n",
9367 9367 DIF_INSTR_INTEGER(instr));
9368 9368 }
9369 9369 if (rd >= nregs)
9370 9370 err += efunc(pc, "invalid register %u\n", rd);
9371 9371 if (rd == 0)
9372 9372 err += efunc(pc, "cannot write to %r0\n");
9373 9373 break;
9374 9374 case DIF_OP_SETS:
9375 9375 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9376 9376 err += efunc(pc, "invalid string ref %u\n",
9377 9377 DIF_INSTR_STRING(instr));
9378 9378 }
9379 9379 if (rd >= nregs)
9380 9380 err += efunc(pc, "invalid register %u\n", rd);
9381 9381 if (rd == 0)
9382 9382 err += efunc(pc, "cannot write to %r0\n");
9383 9383 break;
9384 9384 case DIF_OP_LDGA:
9385 9385 case DIF_OP_LDTA:
9386 9386 if (r1 > DIF_VAR_ARRAY_MAX)
9387 9387 err += efunc(pc, "invalid array %u\n", r1);
9388 9388 if (r2 >= nregs)
9389 9389 err += efunc(pc, "invalid register %u\n", r2);
9390 9390 if (rd >= nregs)
9391 9391 err += efunc(pc, "invalid register %u\n", rd);
9392 9392 if (rd == 0)
9393 9393 err += efunc(pc, "cannot write to %r0\n");
9394 9394 break;
9395 9395 case DIF_OP_LDGS:
9396 9396 case DIF_OP_LDTS:
9397 9397 case DIF_OP_LDLS:
9398 9398 case DIF_OP_LDGAA:
9399 9399 case DIF_OP_LDTAA:
9400 9400 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9401 9401 err += efunc(pc, "invalid variable %u\n", v);
9402 9402 if (rd >= nregs)
9403 9403 err += efunc(pc, "invalid register %u\n", rd);
9404 9404 if (rd == 0)
9405 9405 err += efunc(pc, "cannot write to %r0\n");
9406 9406 break;
9407 9407 case DIF_OP_STGS:
9408 9408 case DIF_OP_STTS:
9409 9409 case DIF_OP_STLS:
9410 9410 case DIF_OP_STGAA:
9411 9411 case DIF_OP_STTAA:
9412 9412 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9413 9413 err += efunc(pc, "invalid variable %u\n", v);
9414 9414 if (rs >= nregs)
9415 9415 				err += efunc(pc, "invalid register %u\n", rs);
9416 9416 break;
9417 9417 case DIF_OP_CALL:
9418 9418 if (subr > DIF_SUBR_MAX)
9419 9419 err += efunc(pc, "invalid subr %u\n", subr);
9420 9420 if (rd >= nregs)
9421 9421 err += efunc(pc, "invalid register %u\n", rd);
9422 9422 if (rd == 0)
9423 9423 err += efunc(pc, "cannot write to %r0\n");
9424 9424
9425 9425 if (subr == DIF_SUBR_COPYOUT ||
9426 9426 subr == DIF_SUBR_COPYOUTSTR) {
9427 9427 dp->dtdo_destructive = 1;
9428 9428 }
9429 9429
9430 9430 if (subr == DIF_SUBR_GETF) {
9431 9431 /*
9432 9432 * If we have a getf() we need to record that
9433 9433 * in our state. Note that our state can be
9434 9434 * NULL if this is a helper -- but in that
9435 9435 * case, the call to getf() is itself illegal,
9436 9436 * and will be caught (slightly later) when
9437 9437 * the helper is validated.
9438 9438 */
9439 9439 if (vstate->dtvs_state != NULL)
9440 9440 vstate->dtvs_state->dts_getf++;
9441 9441 }
9442 9442
9443 9443 break;
9444 9444 case DIF_OP_PUSHTR:
9445 9445 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9446 9446 err += efunc(pc, "invalid ref type %u\n", type);
9447 9447 if (r2 >= nregs)
9448 9448 err += efunc(pc, "invalid register %u\n", r2);
9449 9449 if (rs >= nregs)
9450 9450 err += efunc(pc, "invalid register %u\n", rs);
9451 9451 break;
9452 9452 case DIF_OP_PUSHTV:
9453 9453 if (type != DIF_TYPE_CTF)
9454 9454 err += efunc(pc, "invalid val type %u\n", type);
9455 9455 if (r2 >= nregs)
9456 9456 err += efunc(pc, "invalid register %u\n", r2);
9457 9457 if (rs >= nregs)
9458 9458 err += efunc(pc, "invalid register %u\n", rs);
9459 9459 break;
9460 9460 default:
9461 9461 err += efunc(pc, "invalid opcode %u\n",
9462 9462 DIF_INSTR_OP(instr));
9463 9463 }
9464 9464 }
9465 9465
9466 9466 if (dp->dtdo_len != 0 &&
9467 9467 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9468 9468 err += efunc(dp->dtdo_len - 1,
9469 9469 "expected 'ret' as last DIF instruction\n");
9470 9470 }
9471 9471
9472 9472 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
9473 9473 /*
9474 9474 * If we're not returning by reference, the size must be either
9475 9475 * 0 or the size of one of the base types.
9476 9476 */
9477 9477 switch (dp->dtdo_rtype.dtdt_size) {
9478 9478 case 0:
9479 9479 case sizeof (uint8_t):
9480 9480 case sizeof (uint16_t):
9481 9481 case sizeof (uint32_t):
9482 9482 case sizeof (uint64_t):
9483 9483 break;
9484 9484
9485 9485 default:
9486 9486 err += efunc(dp->dtdo_len - 1, "bad return size\n");
9487 9487 }
9488 9488 }
9489 9489
9490 9490 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9491 9491 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9492 9492 dtrace_diftype_t *vt, *et;
9493 9493 uint_t id, ndx;
9494 9494
9495 9495 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
9496 9496 v->dtdv_scope != DIFV_SCOPE_THREAD &&
9497 9497 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
9498 9498 err += efunc(i, "unrecognized variable scope %d\n",
9499 9499 v->dtdv_scope);
9500 9500 break;
9501 9501 }
9502 9502
9503 9503 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
9504 9504 v->dtdv_kind != DIFV_KIND_SCALAR) {
9505 9505 err += efunc(i, "unrecognized variable type %d\n",
9506 9506 v->dtdv_kind);
9507 9507 break;
9508 9508 }
9509 9509
9510 9510 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
9511 9511 err += efunc(i, "%d exceeds variable id limit\n", id);
9512 9512 break;
9513 9513 }
9514 9514
9515 9515 if (id < DIF_VAR_OTHER_UBASE)
9516 9516 continue;
9517 9517
9518 9518 /*
9519 9519 * For user-defined variables, we need to check that this
9520 9520 * definition is identical to any previous definition that we
9521 9521 * encountered.
9522 9522 */
9523 9523 ndx = id - DIF_VAR_OTHER_UBASE;
9524 9524
9525 9525 switch (v->dtdv_scope) {
9526 9526 case DIFV_SCOPE_GLOBAL:
9527 9527 if (maxglobal == -1 || ndx > maxglobal)
9528 9528 maxglobal = ndx;
9529 9529
9530 9530 if (ndx < vstate->dtvs_nglobals) {
9531 9531 dtrace_statvar_t *svar;
9532 9532
9533 9533 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
9534 9534 existing = &svar->dtsv_var;
9535 9535 }
9536 9536
9537 9537 break;
9538 9538
9539 9539 case DIFV_SCOPE_THREAD:
9540 9540 if (maxtlocal == -1 || ndx > maxtlocal)
9541 9541 maxtlocal = ndx;
9542 9542
9543 9543 if (ndx < vstate->dtvs_ntlocals)
9544 9544 existing = &vstate->dtvs_tlocals[ndx];
9545 9545 break;
9546 9546
9547 9547 case DIFV_SCOPE_LOCAL:
9548 9548 if (maxlocal == -1 || ndx > maxlocal)
9549 9549 maxlocal = ndx;
9550 9550
9551 9551 if (ndx < vstate->dtvs_nlocals) {
9552 9552 dtrace_statvar_t *svar;
9553 9553
9554 9554 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
9555 9555 existing = &svar->dtsv_var;
9556 9556 }
9557 9557
9558 9558 break;
9559 9559 }
9560 9560
9561 9561 vt = &v->dtdv_type;
9562 9562
9563 9563 if (vt->dtdt_flags & DIF_TF_BYREF) {
9564 9564 if (vt->dtdt_size == 0) {
9565 9565 err += efunc(i, "zero-sized variable\n");
9566 9566 break;
9567 9567 }
9568 9568
9569 9569 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
9570 9570 v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
9571 9571 vt->dtdt_size > dtrace_statvar_maxsize) {
9572 9572 err += efunc(i, "oversized by-ref static\n");
9573 9573 break;
9574 9574 }
9575 9575 }
9576 9576
9577 9577 if (existing == NULL || existing->dtdv_id == 0)
9578 9578 continue;
9579 9579
9580 9580 ASSERT(existing->dtdv_id == v->dtdv_id);
9581 9581 ASSERT(existing->dtdv_scope == v->dtdv_scope);
9582 9582
9583 9583 if (existing->dtdv_kind != v->dtdv_kind)
9584 9584 err += efunc(i, "%d changed variable kind\n", id);
9585 9585
9586 9586 et = &existing->dtdv_type;
9587 9587
9588 9588 if (vt->dtdt_flags != et->dtdt_flags) {
9589 9589 err += efunc(i, "%d changed variable type flags\n", id);
9590 9590 break;
9591 9591 }
9592 9592
9593 9593 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
9594 9594 err += efunc(i, "%d changed variable type size\n", id);
9595 9595 break;
9596 9596 }
9597 9597 }
9598 9598
9599 9599 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9600 9600 dif_instr_t instr = dp->dtdo_buf[pc];
9601 9601
9602 9602 uint_t v = DIF_INSTR_VAR(instr);
9603 9603 uint_t op = DIF_INSTR_OP(instr);
9604 9604
9605 9605 switch (op) {
9606 9606 case DIF_OP_LDGS:
9607 9607 case DIF_OP_LDGAA:
9608 9608 case DIF_OP_STGS:
9609 9609 case DIF_OP_STGAA:
9610 9610 if (v > DIF_VAR_OTHER_UBASE + maxglobal)
9611 9611 err += efunc(pc, "invalid variable %u\n", v);
9612 9612 break;
9613 9613 case DIF_OP_LDTS:
9614 9614 case DIF_OP_LDTAA:
9615 9615 case DIF_OP_STTS:
9616 9616 case DIF_OP_STTAA:
9617 9617 if (v > DIF_VAR_OTHER_UBASE + maxtlocal)
9618 9618 err += efunc(pc, "invalid variable %u\n", v);
9619 9619 break;
9620 9620 case DIF_OP_LDLS:
9621 9621 case DIF_OP_STLS:
9622 9622 if (v > DIF_VAR_OTHER_UBASE + maxlocal)
9623 9623 err += efunc(pc, "invalid variable %u\n", v);
9624 9624 break;
9625 9625 default:
9626 9626 break;
9627 9627 }
9628 9628 }
9629 9629
9630 9630 return (err);
9631 9631 }
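/*
 * Illustrative sketch (editor's addition): a minimal instruction sequence
 * that satisfies rules 1-6 above -- load integer table entry 0 into %r1 and
 * return %r1.  DIF_INSTR_SETX() and DIF_INSTR_RET() are the instruction-
 * building macros from <sys/dtrace.h>; the enclosing DIFO would also need
 * dtdo_inttab/dtdo_intlen set so that the integer reference is in range.
 */
#if 0
	dif_instr_t buf[] = {
		DIF_INSTR_SETX(0, 1),	/* setx DIF_INTEGER[0], %r1 */
		DIF_INSTR_RET(1),	/* ret %r1 */
	};
#endif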
9632 9632
9633 9633 /*
9634 9634  * Validate a DTrace DIF object that is to be used as a helper. Helpers
9635 9635 * are much more constrained than normal DIFOs. Specifically, they may
9636 9636 * not:
9637 9637 *
9638 9638 * 1. Make calls to subroutines other than copyin(), copyinstr() or
9639 9639 * miscellaneous string routines
9640 9640 * 2. Access DTrace variables other than the args[] array, and the
9641 9641 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
9642 9642 * 3. Have thread-local variables.
9643 9643 * 4. Have dynamic variables.
9644 9644 */
9645 9645 static int
9646 9646 dtrace_difo_validate_helper(dtrace_difo_t *dp)
9647 9647 {
9648 9648 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9649 9649 int err = 0;
9650 9650 uint_t pc;
9651 9651
9652 9652 for (pc = 0; pc < dp->dtdo_len; pc++) {
9653 9653 dif_instr_t instr = dp->dtdo_buf[pc];
9654 9654
9655 9655 uint_t v = DIF_INSTR_VAR(instr);
9656 9656 uint_t subr = DIF_INSTR_SUBR(instr);
9657 9657 uint_t op = DIF_INSTR_OP(instr);
9658 9658
9659 9659 switch (op) {
9660 9660 case DIF_OP_OR:
9661 9661 case DIF_OP_XOR:
9662 9662 case DIF_OP_AND:
9663 9663 case DIF_OP_SLL:
9664 9664 case DIF_OP_SRL:
9665 9665 case DIF_OP_SRA:
9666 9666 case DIF_OP_SUB:
9667 9667 case DIF_OP_ADD:
9668 9668 case DIF_OP_MUL:
9669 9669 case DIF_OP_SDIV:
9670 9670 case DIF_OP_UDIV:
9671 9671 case DIF_OP_SREM:
9672 9672 case DIF_OP_UREM:
9673 9673 case DIF_OP_COPYS:
9674 9674 case DIF_OP_NOT:
9675 9675 case DIF_OP_MOV:
9676 9676 case DIF_OP_RLDSB:
9677 9677 case DIF_OP_RLDSH:
9678 9678 case DIF_OP_RLDSW:
9679 9679 case DIF_OP_RLDUB:
9680 9680 case DIF_OP_RLDUH:
9681 9681 case DIF_OP_RLDUW:
9682 9682 case DIF_OP_RLDX:
9683 9683 case DIF_OP_ULDSB:
9684 9684 case DIF_OP_ULDSH:
9685 9685 case DIF_OP_ULDSW:
9686 9686 case DIF_OP_ULDUB:
9687 9687 case DIF_OP_ULDUH:
9688 9688 case DIF_OP_ULDUW:
9689 9689 case DIF_OP_ULDX:
9690 9690 case DIF_OP_STB:
9691 9691 case DIF_OP_STH:
9692 9692 case DIF_OP_STW:
9693 9693 case DIF_OP_STX:
9694 9694 case DIF_OP_ALLOCS:
9695 9695 case DIF_OP_CMP:
9696 9696 case DIF_OP_SCMP:
9697 9697 case DIF_OP_TST:
9698 9698 case DIF_OP_BA:
9699 9699 case DIF_OP_BE:
9700 9700 case DIF_OP_BNE:
9701 9701 case DIF_OP_BG:
9702 9702 case DIF_OP_BGU:
9703 9703 case DIF_OP_BGE:
9704 9704 case DIF_OP_BGEU:
9705 9705 case DIF_OP_BL:
9706 9706 case DIF_OP_BLU:
9707 9707 case DIF_OP_BLE:
9708 9708 case DIF_OP_BLEU:
9709 9709 case DIF_OP_RET:
9710 9710 case DIF_OP_NOP:
9711 9711 case DIF_OP_POPTS:
9712 9712 case DIF_OP_FLUSHTS:
9713 9713 case DIF_OP_SETX:
9714 9714 case DIF_OP_SETS:
9715 9715 case DIF_OP_LDGA:
9716 9716 case DIF_OP_LDLS:
9717 9717 case DIF_OP_STGS:
9718 9718 case DIF_OP_STLS:
9719 9719 case DIF_OP_PUSHTR:
9720 9720 case DIF_OP_PUSHTV:
9721 9721 break;
9722 9722
9723 9723 case DIF_OP_LDGS:
9724 9724 if (v >= DIF_VAR_OTHER_UBASE)
9725 9725 break;
9726 9726
9727 9727 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
9728 9728 break;
9729 9729
9730 9730 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
9731 9731 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
9732 9732 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
9733 9733 v == DIF_VAR_UID || v == DIF_VAR_GID)
9734 9734 break;
9735 9735
9736 9736 err += efunc(pc, "illegal variable %u\n", v);
9737 9737 break;
9738 9738
9739 9739 case DIF_OP_LDTA:
9740 9740 case DIF_OP_LDTS:
9741 9741 case DIF_OP_LDGAA:
9742 9742 case DIF_OP_LDTAA:
9743 9743 err += efunc(pc, "illegal dynamic variable load\n");
9744 9744 break;
9745 9745
9746 9746 case DIF_OP_STTS:
9747 9747 case DIF_OP_STGAA:
9748 9748 case DIF_OP_STTAA:
9749 9749 err += efunc(pc, "illegal dynamic variable store\n");
9750 9750 break;
9751 9751
9752 9752 case DIF_OP_CALL:
9753 9753 if (subr == DIF_SUBR_ALLOCA ||
9754 9754 subr == DIF_SUBR_BCOPY ||
9755 9755 subr == DIF_SUBR_COPYIN ||
9756 9756 subr == DIF_SUBR_COPYINTO ||
9757 9757 subr == DIF_SUBR_COPYINSTR ||
9758 9758 subr == DIF_SUBR_INDEX ||
9759 9759 subr == DIF_SUBR_INET_NTOA ||
9760 9760 subr == DIF_SUBR_INET_NTOA6 ||
9761 9761 subr == DIF_SUBR_INET_NTOP ||
9762 9762 subr == DIF_SUBR_JSON ||
9763 9763 subr == DIF_SUBR_LLTOSTR ||
9764 9764 subr == DIF_SUBR_STRTOLL ||
9765 9765 subr == DIF_SUBR_RINDEX ||
9766 9766 subr == DIF_SUBR_STRCHR ||
9767 9767 subr == DIF_SUBR_STRJOIN ||
9768 9768 subr == DIF_SUBR_STRRCHR ||
9769 9769 subr == DIF_SUBR_STRSTR ||
9770 9770 subr == DIF_SUBR_HTONS ||
9771 9771 subr == DIF_SUBR_HTONL ||
9772 9772 subr == DIF_SUBR_HTONLL ||
9773 9773 subr == DIF_SUBR_NTOHS ||
9774 9774 subr == DIF_SUBR_NTOHL ||
9775 9775 subr == DIF_SUBR_NTOHLL)
9776 9776 break;
9777 9777
9778 9778 err += efunc(pc, "invalid subr %u\n", subr);
9779 9779 break;
9780 9780
9781 9781 default:
9782 9782 err += efunc(pc, "invalid opcode %u\n",
9783 9783 DIF_INSTR_OP(instr));
9784 9784 }
9785 9785 }
9786 9786
9787 9787 return (err);
9788 9788 }
9789 9789
9790 9790 /*
9791 9791 * Returns 1 if the expression in the DIF object can be cached on a per-thread
9792 9792 * basis; 0 if not.
9793 9793 */
9794 9794 static int
9795 9795 dtrace_difo_cacheable(dtrace_difo_t *dp)
9796 9796 {
9797 9797 int i;
9798 9798
9799 9799 if (dp == NULL)
9800 9800 return (0);
9801 9801
9802 9802 for (i = 0; i < dp->dtdo_varlen; i++) {
9803 9803 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9804 9804
9805 9805 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9806 9806 continue;
9807 9807
9808 9808 switch (v->dtdv_id) {
9809 9809 case DIF_VAR_CURTHREAD:
9810 9810 case DIF_VAR_PID:
9811 9811 case DIF_VAR_TID:
9812 9812 case DIF_VAR_EXECNAME:
9813 9813 case DIF_VAR_ZONENAME:
9814 9814 break;
9815 9815
9816 9816 default:
9817 9817 return (0);
9818 9818 }
9819 9819 }
9820 9820
9821 9821 /*
9822 9822 * This DIF object may be cacheable. Now we need to look for any
9823 9823 * array loading instructions, any memory loading instructions, or
9824 9824 * any stores to thread-local variables.
9825 9825 */
9826 9826 for (i = 0; i < dp->dtdo_len; i++) {
9827 9827 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9828 9828
9829 9829 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9830 9830 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9831 9831 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9832 9832 op == DIF_OP_LDGA || op == DIF_OP_STTS)
9833 9833 return (0);
9834 9834 }
9835 9835
9836 9836 return (1);
9837 9837 }
9838 9838
9839 9839 static void
9840 9840 dtrace_difo_hold(dtrace_difo_t *dp)
9841 9841 {
9842 9842 int i;
9843 9843
9844 9844 ASSERT(MUTEX_HELD(&dtrace_lock));
9845 9845
9846 9846 dp->dtdo_refcnt++;
9847 9847 ASSERT(dp->dtdo_refcnt != 0);
9848 9848
9849 9849 /*
9850 9850 * We need to check this DIF object for references to the variable
9851 9851 * DIF_VAR_VTIMESTAMP.
9852 9852 */
9853 9853 for (i = 0; i < dp->dtdo_varlen; i++) {
9854 9854 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9855 9855
9856 9856 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9857 9857 continue;
9858 9858
9859 9859 if (dtrace_vtime_references++ == 0)
9860 9860 dtrace_vtime_enable();
9861 9861 }
9862 9862 }
9863 9863
9864 9864 /*
9865 9865 * This routine calculates the dynamic variable chunksize for a given DIF
9866 9866 * object. The calculation is not fool-proof, and can probably be tricked by
9867 9867 * malicious DIF -- but it works for all compiler-generated DIF. Because this
9868 9868 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9869 9869 * if a dynamic variable size exceeds the chunksize.
9870 9870 */
9871 9871 static void
9872 9872 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9873 9873 {
9874 9874 uint64_t sval;
9875 9875 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9876 9876 const dif_instr_t *text = dp->dtdo_buf;
9877 9877 uint_t pc, srd = 0;
9878 9878 uint_t ttop = 0;
9879 9879 size_t size, ksize;
9880 9880 uint_t id, i;
9881 9881
9882 9882 for (pc = 0; pc < dp->dtdo_len; pc++) {
9883 9883 dif_instr_t instr = text[pc];
9884 9884 uint_t op = DIF_INSTR_OP(instr);
9885 9885 uint_t rd = DIF_INSTR_RD(instr);
9886 9886 uint_t r1 = DIF_INSTR_R1(instr);
9887 9887 uint_t nkeys = 0;
9888 9888 uchar_t scope;
9889 9889
9890 9890 dtrace_key_t *key = tupregs;
9891 9891
9892 9892 switch (op) {
9893 9893 case DIF_OP_SETX:
9894 9894 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9895 9895 srd = rd;
9896 9896 continue;
9897 9897
9898 9898 case DIF_OP_STTS:
9899 9899 key = &tupregs[DIF_DTR_NREGS];
9900 9900 key[0].dttk_size = 0;
9901 9901 key[1].dttk_size = 0;
9902 9902 nkeys = 2;
9903 9903 scope = DIFV_SCOPE_THREAD;
9904 9904 break;
9905 9905
9906 9906 case DIF_OP_STGAA:
9907 9907 case DIF_OP_STTAA:
9908 9908 nkeys = ttop;
9909 9909
9910 9910 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9911 9911 key[nkeys++].dttk_size = 0;
9912 9912
9913 9913 key[nkeys++].dttk_size = 0;
9914 9914
9915 9915 if (op == DIF_OP_STTAA) {
9916 9916 scope = DIFV_SCOPE_THREAD;
9917 9917 } else {
9918 9918 scope = DIFV_SCOPE_GLOBAL;
9919 9919 }
9920 9920
9921 9921 break;
9922 9922
9923 9923 case DIF_OP_PUSHTR:
9924 9924 if (ttop == DIF_DTR_NREGS)
9925 9925 return;
9926 9926
9927 9927 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9928 9928 /*
9929 9929 * If the register for the size of the "pushtr"
9930 9930 * is %r0 (or the value is 0) and the type is
9931 9931 * a string, we'll use the system-wide default
9932 9932 * string size.
9933 9933 */
9934 9934 tupregs[ttop++].dttk_size =
9935 9935 dtrace_strsize_default;
9936 9936 } else {
9937 9937 if (srd == 0)
9938 9938 return;
9939 9939
9940 9940 if (sval > LONG_MAX)
9941 9941 return;
9942 9942
9943 9943 tupregs[ttop++].dttk_size = sval;
9944 9944 }
9945 9945
9946 9946 break;
9947 9947
9948 9948 case DIF_OP_PUSHTV:
9949 9949 if (ttop == DIF_DTR_NREGS)
9950 9950 return;
9951 9951
9952 9952 tupregs[ttop++].dttk_size = 0;
9953 9953 break;
9954 9954
9955 9955 case DIF_OP_FLUSHTS:
9956 9956 ttop = 0;
9957 9957 break;
9958 9958
9959 9959 case DIF_OP_POPTS:
9960 9960 if (ttop != 0)
9961 9961 ttop--;
9962 9962 break;
9963 9963 }
9964 9964
9965 9965 sval = 0;
9966 9966 srd = 0;
9967 9967
9968 9968 if (nkeys == 0)
9969 9969 continue;
9970 9970
9971 9971 /*
9972 9972 * We have a dynamic variable allocation; calculate its size.
9973 9973 */
9974 9974 for (ksize = 0, i = 0; i < nkeys; i++)
9975 9975 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9976 9976
9977 9977 size = sizeof (dtrace_dynvar_t);
9978 9978 size += sizeof (dtrace_key_t) * (nkeys - 1);
9979 9979 size += ksize;
9980 9980
9981 9981 /*
9982 9982 * Now we need to determine the size of the stored data.
9983 9983 */
9984 9984 id = DIF_INSTR_VAR(instr);
9985 9985
9986 9986 for (i = 0; i < dp->dtdo_varlen; i++) {
9987 9987 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9988 9988
9989 9989 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9990 9990 size += v->dtdv_type.dtdt_size;
9991 9991 break;
9992 9992 }
9993 9993 }
9994 9994
9995 9995 if (i == dp->dtdo_varlen)
9996 9996 return;
9997 9997
9998 9998 /*
9999 9999 * We have the size. If this is larger than the chunk size
10000 10000 * for our dynamic variable state, reset the chunk size.
10001 10001 */
10002 10002 size = P2ROUNDUP(size, sizeof (uint64_t));
10003 10003
10004 10004 /*
10005 10005 * Before setting the chunk size, check that we're not going
10006 10006 * to set it to a negative value...
10007 10007 */
10008 10008 if (size > LONG_MAX)
10009 10009 return;
10010 10010
10011 10011 /*
10012 10012 * ...and make certain that we didn't badly overflow.
10013 10013 */
10014 10014 if (size < ksize || size < sizeof (dtrace_dynvar_t))
10015 10015 return;
10016 10016
10017 10017 if (size > vstate->dtvs_dynvars.dtds_chunksize)
10018 10018 vstate->dtvs_dynvars.dtds_chunksize = size;
10019 10019 }
10020 10020 }
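/*
 * Worked example (editor's addition, assumed figures): for a store to a plain
 * thread-local variable (DIF_OP_STTS) the code above uses two zero-sized
 * keys, so ksize = 0 and
 *
 *	size = sizeof (dtrace_dynvar_t)
 *	     + sizeof (dtrace_key_t) * (2 - 1)
 *	     + ksize
 *	     + the variable's dtdt_size (say, 64 bytes),
 *
 * rounded up to a multiple of sizeof (uint64_t).  If that exceeds the current
 * dtds_chunksize, the chunk size is raised to match.
 */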
10021 10021
10022 10022 static void
10023 10023 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10024 10024 {
10025 10025 int i, oldsvars, osz, nsz, otlocals, ntlocals;
10026 10026 uint_t id;
10027 10027
10028 10028 ASSERT(MUTEX_HELD(&dtrace_lock));
10029 10029 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10030 10030
10031 10031 for (i = 0; i < dp->dtdo_varlen; i++) {
10032 10032 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10033 10033 dtrace_statvar_t *svar, ***svarp;
10034 10034 size_t dsize = 0;
10035 10035 uint8_t scope = v->dtdv_scope;
10036 10036 int *np;
10037 10037
10038 10038 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10039 10039 continue;
10040 10040
10041 10041 id -= DIF_VAR_OTHER_UBASE;
10042 10042
10043 10043 switch (scope) {
10044 10044 case DIFV_SCOPE_THREAD:
10045 10045 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
10046 10046 dtrace_difv_t *tlocals;
10047 10047
10048 10048 if ((ntlocals = (otlocals << 1)) == 0)
10049 10049 ntlocals = 1;
10050 10050
10051 10051 osz = otlocals * sizeof (dtrace_difv_t);
10052 10052 nsz = ntlocals * sizeof (dtrace_difv_t);
10053 10053
10054 10054 tlocals = kmem_zalloc(nsz, KM_SLEEP);
10055 10055
10056 10056 if (osz != 0) {
10057 10057 bcopy(vstate->dtvs_tlocals,
10058 10058 tlocals, osz);
10059 10059 kmem_free(vstate->dtvs_tlocals, osz);
10060 10060 }
10061 10061
10062 10062 vstate->dtvs_tlocals = tlocals;
10063 10063 vstate->dtvs_ntlocals = ntlocals;
10064 10064 }
10065 10065
10066 10066 vstate->dtvs_tlocals[id] = *v;
10067 10067 continue;
10068 10068
10069 10069 case DIFV_SCOPE_LOCAL:
10070 10070 np = &vstate->dtvs_nlocals;
10071 10071 svarp = &vstate->dtvs_locals;
10072 10072
10073 10073 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10074 10074 dsize = NCPU * (v->dtdv_type.dtdt_size +
10075 10075 sizeof (uint64_t));
10076 10076 else
10077 10077 dsize = NCPU * sizeof (uint64_t);
10078 10078
10079 10079 break;
10080 10080
10081 10081 case DIFV_SCOPE_GLOBAL:
10082 10082 np = &vstate->dtvs_nglobals;
10083 10083 svarp = &vstate->dtvs_globals;
10084 10084
10085 10085 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10086 10086 dsize = v->dtdv_type.dtdt_size +
10087 10087 sizeof (uint64_t);
10088 10088
10089 10089 break;
10090 10090
10091 10091 default:
10092 10092 ASSERT(0);
10093 10093 }
10094 10094
10095 10095 while (id >= (oldsvars = *np)) {
10096 10096 dtrace_statvar_t **statics;
10097 10097 int newsvars, oldsize, newsize;
10098 10098
10099 10099 if ((newsvars = (oldsvars << 1)) == 0)
10100 10100 newsvars = 1;
10101 10101
10102 10102 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10103 10103 newsize = newsvars * sizeof (dtrace_statvar_t *);
10104 10104
10105 10105 statics = kmem_zalloc(newsize, KM_SLEEP);
10106 10106
10107 10107 if (oldsize != 0) {
10108 10108 bcopy(*svarp, statics, oldsize);
10109 10109 kmem_free(*svarp, oldsize);
10110 10110 }
10111 10111
10112 10112 *svarp = statics;
10113 10113 *np = newsvars;
10114 10114 }
10115 10115
10116 10116 if ((svar = (*svarp)[id]) == NULL) {
10117 10117 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10118 10118 svar->dtsv_var = *v;
10119 10119
10120 10120 if ((svar->dtsv_size = dsize) != 0) {
10121 10121 svar->dtsv_data = (uint64_t)(uintptr_t)
10122 10122 kmem_zalloc(dsize, KM_SLEEP);
10123 10123 }
10124 10124
10125 10125 (*svarp)[id] = svar;
10126 10126 }
10127 10127
10128 10128 svar->dtsv_refcnt++;
10129 10129 }
10130 10130
10131 10131 dtrace_difo_chunksize(dp, vstate);
10132 10132 dtrace_difo_hold(dp);
10133 10133 }
10134 10134
10135 10135 static dtrace_difo_t *
10136 10136 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10137 10137 {
10138 10138 dtrace_difo_t *new;
10139 10139 size_t sz;
10140 10140
10141 10141 ASSERT(dp->dtdo_buf != NULL);
10142 10142 ASSERT(dp->dtdo_refcnt != 0);
10143 10143
10144 10144 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10145 10145
10146 10146 ASSERT(dp->dtdo_buf != NULL);
10147 10147 sz = dp->dtdo_len * sizeof (dif_instr_t);
10148 10148 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10149 10149 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10150 10150 new->dtdo_len = dp->dtdo_len;
10151 10151
10152 10152 if (dp->dtdo_strtab != NULL) {
10153 10153 ASSERT(dp->dtdo_strlen != 0);
10154 10154 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10155 10155 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10156 10156 new->dtdo_strlen = dp->dtdo_strlen;
10157 10157 }
10158 10158
10159 10159 if (dp->dtdo_inttab != NULL) {
10160 10160 ASSERT(dp->dtdo_intlen != 0);
10161 10161 sz = dp->dtdo_intlen * sizeof (uint64_t);
10162 10162 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
10163 10163 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
10164 10164 new->dtdo_intlen = dp->dtdo_intlen;
10165 10165 }
10166 10166
10167 10167 if (dp->dtdo_vartab != NULL) {
10168 10168 ASSERT(dp->dtdo_varlen != 0);
10169 10169 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
10170 10170 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
10171 10171 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
10172 10172 new->dtdo_varlen = dp->dtdo_varlen;
10173 10173 }
10174 10174
10175 10175 dtrace_difo_init(new, vstate);
10176 10176 return (new);
10177 10177 }
10178 10178
10179 10179 static void
10180 10180 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10181 10181 {
10182 10182 int i;
10183 10183
10184 10184 ASSERT(dp->dtdo_refcnt == 0);
10185 10185
10186 10186 for (i = 0; i < dp->dtdo_varlen; i++) {
10187 10187 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10188 10188 dtrace_statvar_t *svar, **svarp;
10189 10189 uint_t id;
10190 10190 uint8_t scope = v->dtdv_scope;
10191 10191 int *np;
10192 10192
10193 10193 switch (scope) {
10194 10194 case DIFV_SCOPE_THREAD:
10195 10195 continue;
10196 10196
10197 10197 case DIFV_SCOPE_LOCAL:
10198 10198 np = &vstate->dtvs_nlocals;
10199 10199 svarp = vstate->dtvs_locals;
10200 10200 break;
10201 10201
10202 10202 case DIFV_SCOPE_GLOBAL:
10203 10203 np = &vstate->dtvs_nglobals;
10204 10204 svarp = vstate->dtvs_globals;
10205 10205 break;
10206 10206
10207 10207 default:
10208 10208 ASSERT(0);
10209 10209 }
10210 10210
10211 10211 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10212 10212 continue;
10213 10213
10214 10214 id -= DIF_VAR_OTHER_UBASE;
10215 10215 ASSERT(id < *np);
10216 10216
10217 10217 svar = svarp[id];
10218 10218 ASSERT(svar != NULL);
10219 10219 ASSERT(svar->dtsv_refcnt > 0);
10220 10220
10221 10221 if (--svar->dtsv_refcnt > 0)
10222 10222 continue;
10223 10223
10224 10224 if (svar->dtsv_size != 0) {
10225 10225 ASSERT(svar->dtsv_data != NULL);
10226 10226 kmem_free((void *)(uintptr_t)svar->dtsv_data,
10227 10227 svar->dtsv_size);
10228 10228 }
10229 10229
10230 10230 kmem_free(svar, sizeof (dtrace_statvar_t));
10231 10231 svarp[id] = NULL;
10232 10232 }
10233 10233
10234 10234 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10235 10235 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10236 10236 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10237 10237 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10238 10238
10239 10239 kmem_free(dp, sizeof (dtrace_difo_t));
10240 10240 }
10241 10241
10242 10242 static void
10243 10243 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10244 10244 {
10245 10245 int i;
10246 10246
10247 10247 ASSERT(MUTEX_HELD(&dtrace_lock));
10248 10248 ASSERT(dp->dtdo_refcnt != 0);
10249 10249
10250 10250 for (i = 0; i < dp->dtdo_varlen; i++) {
10251 10251 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10252 10252
10253 10253 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10254 10254 continue;
10255 10255
10256 10256 ASSERT(dtrace_vtime_references > 0);
10257 10257 if (--dtrace_vtime_references == 0)
10258 10258 dtrace_vtime_disable();
10259 10259 }
10260 10260
10261 10261 if (--dp->dtdo_refcnt == 0)
10262 10262 dtrace_difo_destroy(dp, vstate);
10263 10263 }
10264 10264
10265 10265 /*
10266 10266 * DTrace Format Functions
10267 10267 */
10268 10268 static uint16_t
10269 10269 dtrace_format_add(dtrace_state_t *state, char *str)
10270 10270 {
10271 10271 char *fmt, **new;
10272 10272 uint16_t ndx, len = strlen(str) + 1;
10273 10273
10274 10274 fmt = kmem_zalloc(len, KM_SLEEP);
10275 10275 bcopy(str, fmt, len);
10276 10276
10277 10277 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10278 10278 if (state->dts_formats[ndx] == NULL) {
10279 10279 state->dts_formats[ndx] = fmt;
10280 10280 return (ndx + 1);
10281 10281 }
10282 10282 }
10283 10283
10284 10284 if (state->dts_nformats == USHRT_MAX) {
10285 10285 /*
10286 10286 * This is only likely if a denial-of-service attack is being
10287 10287 * attempted. As such, it's okay to fail silently here.
10288 10288 */
10289 10289 kmem_free(fmt, len);
10290 10290 return (0);
10291 10291 }
10292 10292
10293 10293 /*
10294 10294 * For simplicity, we always resize the formats array to be exactly the
10295 10295 * number of formats.
10296 10296 */
10297 10297 ndx = state->dts_nformats++;
10298 10298 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
10299 10299
10300 10300 if (state->dts_formats != NULL) {
10301 10301 ASSERT(ndx != 0);
10302 10302 bcopy(state->dts_formats, new, ndx * sizeof (char *));
10303 10303 kmem_free(state->dts_formats, ndx * sizeof (char *));
10304 10304 }
10305 10305
10306 10306 state->dts_formats = new;
10307 10307 state->dts_formats[ndx] = fmt;
10308 10308
10309 10309 return (ndx + 1);
10310 10310 }
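dtrace_format_add() returns a 1-based handle (0 is reserved for failure), reuses any freed slot before growing, and otherwise grows dts_formats by exactly one entry. A hedged caller's-eye sketch of the handle convention (the caller and its error path are hypothetical; only dtrace_format_add()/dtrace_format_remove() above are real):

	uint16_t fmt;

	/* A return of 0 means the format could not be registered. */
	if ((fmt = dtrace_format_add(state, "value = %d\n")) == 0)
		return (ENOSPC);		/* hypothetical error path */

	/* ... stash fmt in a record's dtrd_format ... */

	dtrace_format_remove(state, fmt);	/* frees dts_formats[fmt - 1] */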
10311 10311
10312 10312 static void
10313 10313 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
10314 10314 {
10315 10315 char *fmt;
10316 10316
10317 10317 ASSERT(state->dts_formats != NULL);
10318 10318 ASSERT(format <= state->dts_nformats);
10319 10319 ASSERT(state->dts_formats[format - 1] != NULL);
10320 10320
10321 10321 fmt = state->dts_formats[format - 1];
10322 10322 kmem_free(fmt, strlen(fmt) + 1);
10323 10323 state->dts_formats[format - 1] = NULL;
10324 10324 }
10325 10325
10326 10326 static void
10327 10327 dtrace_format_destroy(dtrace_state_t *state)
10328 10328 {
10329 10329 int i;
10330 10330
10331 10331 if (state->dts_nformats == 0) {
10332 10332 ASSERT(state->dts_formats == NULL);
10333 10333 return;
10334 10334 }
10335 10335
10336 10336 ASSERT(state->dts_formats != NULL);
10337 10337
10338 10338 for (i = 0; i < state->dts_nformats; i++) {
10339 10339 char *fmt = state->dts_formats[i];
10340 10340
10341 10341 if (fmt == NULL)
10342 10342 continue;
10343 10343
10344 10344 kmem_free(fmt, strlen(fmt) + 1);
10345 10345 }
10346 10346
10347 10347 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
10348 10348 state->dts_nformats = 0;
10349 10349 state->dts_formats = NULL;
10350 10350 }
10351 10351
10352 10352 /*
10353 10353 * DTrace Predicate Functions
10354 10354 */
10355 10355 static dtrace_predicate_t *
10356 10356 dtrace_predicate_create(dtrace_difo_t *dp)
10357 10357 {
10358 10358 dtrace_predicate_t *pred;
10359 10359
10360 10360 ASSERT(MUTEX_HELD(&dtrace_lock));
10361 10361 ASSERT(dp->dtdo_refcnt != 0);
10362 10362
10363 10363 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10364 10364 pred->dtp_difo = dp;
10365 10365 pred->dtp_refcnt = 1;
10366 10366
10367 10367 if (!dtrace_difo_cacheable(dp))
10368 10368 return (pred);
10369 10369
10370 10370 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10371 10371 /*
10372 10372 * This is only theoretically possible -- we have had 2^32
10373 10373 * cacheable predicates on this machine. We cannot allow any
10374 10374 * more predicates to become cacheable: as unlikely as it is,
10375 10375 * there may be a thread caching a (now stale) predicate cache
10376 10376 * ID. (N.B.: the temptation is being successfully resisted to
10377 10377 * have this cmn_err() "Holy shit -- we executed this code!")
10378 10378 */
10379 10379 return (pred);
10380 10380 }
10381 10381
10382 10382 pred->dtp_cacheid = dtrace_predcache_id++;
10383 10383
10384 10384 return (pred);
10385 10385 }
10386 10386
10387 10387 static void
10388 10388 dtrace_predicate_hold(dtrace_predicate_t *pred)
10389 10389 {
10390 10390 ASSERT(MUTEX_HELD(&dtrace_lock));
10391 10391 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10392 10392 ASSERT(pred->dtp_refcnt > 0);
10393 10393
10394 10394 pred->dtp_refcnt++;
10395 10395 }
10396 10396
10397 10397 static void
10398 10398 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10399 10399 {
10400 10400 dtrace_difo_t *dp = pred->dtp_difo;
10401 10401
10402 10402 ASSERT(MUTEX_HELD(&dtrace_lock));
10403 10403 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10404 10404 ASSERT(pred->dtp_refcnt > 0);
10405 10405
10406 10406 if (--pred->dtp_refcnt == 0) {
10407 10407 dtrace_difo_release(pred->dtp_difo, vstate);
10408 10408 kmem_free(pred, sizeof (dtrace_predicate_t));
10409 10409 }
10410 10410 }
10411 10411
10412 10412 /*
10413 10413 * DTrace Action Description Functions
10414 10414 */
10415 10415 static dtrace_actdesc_t *
10416 10416 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10417 10417 uint64_t uarg, uint64_t arg)
10418 10418 {
10419 10419 dtrace_actdesc_t *act;
10420 10420
10421 10421 	ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != 0 &&
10422 10422 	    arg >= KERNELBASE) || (arg == 0 && kind == DTRACEACT_PRINTA));
10423 10423
10424 10424 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10425 10425 act->dtad_kind = kind;
10426 10426 act->dtad_ntuple = ntuple;
10427 10427 act->dtad_uarg = uarg;
10428 10428 act->dtad_arg = arg;
10429 10429 act->dtad_refcnt = 1;
10430 10430
10431 10431 return (act);
10432 10432 }
10433 10433
10434 10434 static void
10435 10435 dtrace_actdesc_hold(dtrace_actdesc_t *act)
10436 10436 {
10437 10437 ASSERT(act->dtad_refcnt >= 1);
10438 10438 act->dtad_refcnt++;
10439 10439 }
10440 10440
10441 10441 static void
10442 10442 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10443 10443 {
10444 10444 dtrace_actkind_t kind = act->dtad_kind;
10445 10445 dtrace_difo_t *dp;
10446 10446
10447 10447 ASSERT(act->dtad_refcnt >= 1);
10448 10448
10449 10449 if (--act->dtad_refcnt != 0)
10450 10450 return;
10451 10451
10452 10452 if ((dp = act->dtad_difo) != NULL)
10453 10453 dtrace_difo_release(dp, vstate);
10454 10454
10455 10455 if (DTRACEACT_ISPRINTFLIKE(kind)) {
10456 10456 char *str = (char *)(uintptr_t)act->dtad_arg;
10457 10457
10458 10458 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10459 10459 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
10460 10460
10461 10461 if (str != NULL)
10462 10462 kmem_free(str, strlen(str) + 1);
10463 10463 }
10464 10464
10465 10465 kmem_free(act, sizeof (dtrace_actdesc_t));
10466 10466 }
10467 10467
10468 10468 /*
10469 10469 * DTrace ECB Functions
10470 10470 */
10471 10471 static dtrace_ecb_t *
10472 10472 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
10473 10473 {
10474 10474 dtrace_ecb_t *ecb;
10475 10475 dtrace_epid_t epid;
10476 10476
10477 10477 ASSERT(MUTEX_HELD(&dtrace_lock));
10478 10478
10479 10479 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
10480 10480 ecb->dte_predicate = NULL;
10481 10481 ecb->dte_probe = probe;
10482 10482
10483 10483 /*
10484 10484 * The default size is the size of the default action: recording
10485 10485 * the header.
10486 10486 */
10487 10487 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
10488 10488 ecb->dte_alignment = sizeof (dtrace_epid_t);
10489 10489
10490 10490 epid = state->dts_epid++;
10491 10491
10492 10492 if (epid - 1 >= state->dts_necbs) {
10493 10493 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
10494 10494 int necbs = state->dts_necbs << 1;
10495 10495
10496 10496 ASSERT(epid == state->dts_necbs + 1);
10497 10497
10498 10498 if (necbs == 0) {
10499 10499 ASSERT(oecbs == NULL);
10500 10500 necbs = 1;
10501 10501 }
10502 10502
10503 10503 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
10504 10504
10505 10505 if (oecbs != NULL)
10506 10506 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
10507 10507
10508 10508 dtrace_membar_producer();
10509 10509 state->dts_ecbs = ecbs;
10510 10510
10511 10511 if (oecbs != NULL) {
10512 10512 /*
10513 10513 * If this state is active, we must dtrace_sync()
10514 10514 * before we can free the old dts_ecbs array: we're
10515 10515 * coming in hot, and there may be active ring
10516 10516 * buffer processing (which indexes into the dts_ecbs
10517 10517 * array) on another CPU.
10518 10518 */
10519 10519 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
10520 10520 dtrace_sync();
10521 10521
10522 10522 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
10523 10523 }
10524 10524
10525 10525 dtrace_membar_producer();
10526 10526 state->dts_necbs = necbs;
10527 10527 }
10528 10528
10529 10529 ecb->dte_state = state;
10530 10530
10531 10531 ASSERT(state->dts_ecbs[epid - 1] == NULL);
10532 10532 dtrace_membar_producer();
10533 10533 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
10534 10534
10535 10535 return (ecb);
10536 10536 }
10537 10537
10538 10538 static int
10539 10539 dtrace_ecb_enable(dtrace_ecb_t *ecb)
10540 10540 {
10541 10541 dtrace_probe_t *probe = ecb->dte_probe;
10542 10542
10543 10543 ASSERT(MUTEX_HELD(&cpu_lock));
10544 10544 ASSERT(MUTEX_HELD(&dtrace_lock));
10545 10545 ASSERT(ecb->dte_next == NULL);
10546 10546
10547 10547 if (probe == NULL) {
10548 10548 /*
10549 10549 * This is the NULL probe -- there's nothing to do.
10550 10550 */
10551 10551 return (0);
10552 10552 }
10553 10553
10554 10554 if (probe->dtpr_ecb == NULL) {
10555 10555 dtrace_provider_t *prov = probe->dtpr_provider;
10556 10556
10557 10557 /*
10558 10558 * We're the first ECB on this probe.
10559 10559 */
10560 10560 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
10561 10561
10562 10562 if (ecb->dte_predicate != NULL)
10563 10563 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
10564 10564
10565 10565 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
10566 10566 probe->dtpr_id, probe->dtpr_arg));
10567 10567 } else {
10568 10568 /*
10569 10569 * This probe is already active. Swing the last pointer to
10570 10570 * point to the new ECB, and issue a dtrace_sync() to assure
10571 10571 * that all CPUs have seen the change.
10572 10572 */
10573 10573 ASSERT(probe->dtpr_ecb_last != NULL);
10574 10574 probe->dtpr_ecb_last->dte_next = ecb;
10575 10575 probe->dtpr_ecb_last = ecb;
10576 10576 probe->dtpr_predcache = 0;
10577 10577
10578 10578 dtrace_sync();
10579 10579 return (0);
10580 10580 }
10581 10581 }
10582 10582
10583 10583 static int
10584 10584 dtrace_ecb_resize(dtrace_ecb_t *ecb)
10585 10585 {
10586 10586 dtrace_action_t *act;
10587 10587 uint32_t curneeded = UINT32_MAX;
10588 10588 uint32_t aggbase = UINT32_MAX;
10589 10589
10590 10590 /*
10591 10591 * If we record anything, we always record the dtrace_rechdr_t. (And
10592 10592 * we always record it first.)
10593 10593 */
10594 10594 ecb->dte_size = sizeof (dtrace_rechdr_t);
10595 10595 ecb->dte_alignment = sizeof (dtrace_epid_t);
10596 10596
10597 10597 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10598 10598 dtrace_recdesc_t *rec = &act->dta_rec;
10599 10599 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
10600 10600
10601 10601 ecb->dte_alignment = MAX(ecb->dte_alignment,
10602 10602 rec->dtrd_alignment);
10603 10603
10604 10604 if (DTRACEACT_ISAGG(act->dta_kind)) {
10605 10605 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10606 10606
10607 10607 ASSERT(rec->dtrd_size != 0);
10608 10608 ASSERT(agg->dtag_first != NULL);
10609 10609 ASSERT(act->dta_prev->dta_intuple);
10610 10610 ASSERT(aggbase != UINT32_MAX);
10611 10611 ASSERT(curneeded != UINT32_MAX);
10612 10612
10613 10613 agg->dtag_base = aggbase;
10614 10614
10615 10615 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10616 10616 rec->dtrd_offset = curneeded;
10617 10617 if (curneeded + rec->dtrd_size < curneeded)
10618 10618 return (EINVAL);
10619 10619 curneeded += rec->dtrd_size;
10620 10620 ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
10621 10621
10622 10622 aggbase = UINT32_MAX;
10623 10623 curneeded = UINT32_MAX;
10624 10624 } else if (act->dta_intuple) {
10625 10625 if (curneeded == UINT32_MAX) {
10626 10626 /*
10627 10627 * This is the first record in a tuple. Align
10628 10628 * curneeded to be at offset 4 in an 8-byte
10629 10629 * aligned block.
10630 10630 */
10631 10631 ASSERT(act->dta_prev == NULL ||
10632 10632 !act->dta_prev->dta_intuple);
10633 10633 ASSERT3U(aggbase, ==, UINT32_MAX);
10634 10634 curneeded = P2PHASEUP(ecb->dte_size,
10635 10635 sizeof (uint64_t), sizeof (dtrace_aggid_t));
10636 10636
10637 10637 aggbase = curneeded - sizeof (dtrace_aggid_t);
10638 10638 ASSERT(IS_P2ALIGNED(aggbase,
10639 10639 sizeof (uint64_t)));
10640 10640 }
10641 10641 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10642 10642 rec->dtrd_offset = curneeded;
10643 10643 if (curneeded + rec->dtrd_size < curneeded)
10644 10644 return (EINVAL);
10645 10645 curneeded += rec->dtrd_size;
10646 10646 } else {
10647 10647 /* tuples must be followed by an aggregation */
10648 10648 ASSERT(act->dta_prev == NULL ||
10649 10649 !act->dta_prev->dta_intuple);
10650 10650
10651 10651 ecb->dte_size = P2ROUNDUP(ecb->dte_size,
10652 10652 rec->dtrd_alignment);
10653 10653 rec->dtrd_offset = ecb->dte_size;
10654 10654 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size)
10655 10655 return (EINVAL);
10656 10656 ecb->dte_size += rec->dtrd_size;
10657 10657 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
10658 10658 }
10659 10659 }
10660 10660
10661 10661 if ((act = ecb->dte_action) != NULL &&
10662 10662 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
10663 10663 ecb->dte_size == sizeof (dtrace_rechdr_t)) {
10664 10664 /*
10665 10665 * If the size is still sizeof (dtrace_rechdr_t), then all
10666 10666 * actions store no data; set the size to 0.
10667 10667 */
10668 10668 ecb->dte_size = 0;
10669 10669 }
10670 10670
10671 10671 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
10672 10672 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
10673 10673 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
10674 10674 ecb->dte_needed);
10675 10675 return (0);
10676 10676 }
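Two idioms recur in dtrace_ecb_resize(): P2ROUNDUP() rounds an offset up to the next multiple of a power-of-two alignment, and the "a + b < a" test catches unsigned wrap-around before the sum is used. A small self-contained sketch of both (ROUNDUP_P2 is a conventional power-of-two round-up equivalent in effect to P2ROUNDUP(); illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of align; align must be a power of two. */
#define	ROUNDUP_P2(x, align)	(((x) + (align) - 1) & ~((uint64_t)(align) - 1))

int
main(void)
{
	uint32_t offs = 13, size = 24;

	/* 13 rounded up to an 8-byte boundary is 16. */
	printf("%llu\n", (unsigned long long)ROUNDUP_P2(offs, 8));

	/* Detect unsigned wrap-around before trusting offs + size. */
	offs = UINT32_MAX - 4;
	if (offs + size < offs)
		printf("record would wrap the 32-bit offset\n");

	return (0);
}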
10677 10677
10678 10678 static dtrace_action_t *
10679 10679 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10680 10680 {
10681 10681 dtrace_aggregation_t *agg;
10682 10682 size_t size = sizeof (uint64_t);
10683 10683 int ntuple = desc->dtad_ntuple;
10684 10684 dtrace_action_t *act;
10685 10685 dtrace_recdesc_t *frec;
10686 10686 dtrace_aggid_t aggid;
10687 10687 dtrace_state_t *state = ecb->dte_state;
10688 10688
10689 10689 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
10690 10690 agg->dtag_ecb = ecb;
10691 10691
10692 10692 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
10693 10693
10694 10694 switch (desc->dtad_kind) {
10695 10695 case DTRACEAGG_MIN:
10696 10696 agg->dtag_initial = INT64_MAX;
10697 10697 agg->dtag_aggregate = dtrace_aggregate_min;
10698 10698 break;
10699 10699
10700 10700 case DTRACEAGG_MAX:
10701 10701 agg->dtag_initial = INT64_MIN;
10702 10702 agg->dtag_aggregate = dtrace_aggregate_max;
10703 10703 break;
10704 10704
10705 10705 case DTRACEAGG_COUNT:
10706 10706 agg->dtag_aggregate = dtrace_aggregate_count;
10707 10707 break;
10708 10708
10709 10709 case DTRACEAGG_QUANTIZE:
10710 10710 agg->dtag_aggregate = dtrace_aggregate_quantize;
10711 10711 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
10712 10712 sizeof (uint64_t);
10713 10713 break;
10714 10714
10715 10715 case DTRACEAGG_LQUANTIZE: {
10716 10716 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
10717 10717 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
10718 10718
10719 10719 agg->dtag_initial = desc->dtad_arg;
10720 10720 agg->dtag_aggregate = dtrace_aggregate_lquantize;
10721 10721
10722 10722 if (step == 0 || levels == 0)
10723 10723 goto err;
10724 10724
10725 10725 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
10726 10726 break;
10727 10727 }
10728 10728
10729 10729 case DTRACEAGG_LLQUANTIZE: {
10730 10730 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
10731 10731 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
10732 10732 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
10733 10733 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
10734 10734 int64_t v;
10735 10735
10736 10736 agg->dtag_initial = desc->dtad_arg;
10737 10737 agg->dtag_aggregate = dtrace_aggregate_llquantize;
10738 10738
10739 10739 if (factor < 2 || low >= high || nsteps < factor)
10740 10740 goto err;
10741 10741
10742 10742 /*
10743 10743 * Now check that the number of steps evenly divides a power
10744 10744 * of the factor. (This assures both integer bucket size and
10745 10745 * linearity within each magnitude.)
10746 10746 */
10747 10747 for (v = factor; v < nsteps; v *= factor)
10748 10748 continue;
10749 10749
10750 10750 if ((v % nsteps) || (nsteps % factor))
10751 10751 goto err;
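		/*
		 * For example, with factor = 10 and nsteps = 20, the loop
		 * above stops at v = 100; 100 % 20 == 0 and 20 % 10 == 0,
		 * so the parameters are accepted.  With nsteps = 25,
		 * 100 % 25 == 0 but 25 % 10 != 0, and the enabling is
		 * rejected.
		 */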
10752 10752
10753 10753 size = (dtrace_aggregate_llquantize_bucket(factor,
10754 10754 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
10755 10755 break;
10756 10756 }
10757 10757
10758 10758 case DTRACEAGG_AVG:
10759 10759 agg->dtag_aggregate = dtrace_aggregate_avg;
10760 10760 size = sizeof (uint64_t) * 2;
10761 10761 break;
10762 10762
10763 10763 case DTRACEAGG_STDDEV:
10764 10764 agg->dtag_aggregate = dtrace_aggregate_stddev;
10765 10765 size = sizeof (uint64_t) * 4;
10766 10766 break;
10767 10767
10768 10768 case DTRACEAGG_SUM:
10769 10769 agg->dtag_aggregate = dtrace_aggregate_sum;
10770 10770 break;
10771 10771
10772 10772 default:
10773 10773 goto err;
10774 10774 }
10775 10775
10776 10776 agg->dtag_action.dta_rec.dtrd_size = size;
10777 10777
10778 10778 if (ntuple == 0)
10779 10779 goto err;
10780 10780
10781 10781 /*
10782 10782 * We must make sure that we have enough actions for the n-tuple.
10783 10783 */
10784 10784 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
10785 10785 if (DTRACEACT_ISAGG(act->dta_kind))
10786 10786 break;
10787 10787
10788 10788 if (--ntuple == 0) {
10789 10789 /*
10790 10790 * This is the action with which our n-tuple begins.
10791 10791 */
10792 10792 agg->dtag_first = act;
10793 10793 goto success;
10794 10794 }
10795 10795 }
10796 10796
10797 10797 /*
10798 10798 * This n-tuple is short by ntuple elements. Return failure.
10799 10799 */
10800 10800 ASSERT(ntuple != 0);
10801 10801 err:
10802 10802 kmem_free(agg, sizeof (dtrace_aggregation_t));
10803 10803 return (NULL);
10804 10804
10805 10805 success:
10806 10806 /*
10807 10807 * If the last action in the tuple has a size of zero, it's actually
10808 10808 * an expression argument for the aggregating action.
10809 10809 */
10810 10810 ASSERT(ecb->dte_action_last != NULL);
10811 10811 act = ecb->dte_action_last;
10812 10812
10813 10813 if (act->dta_kind == DTRACEACT_DIFEXPR) {
10814 10814 ASSERT(act->dta_difo != NULL);
10815 10815
10816 10816 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10817 10817 agg->dtag_hasarg = 1;
10818 10818 }
10819 10819
10820 10820 /*
10821 10821 * We need to allocate an id for this aggregation.
10822 10822 */
10823 10823 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
10824 10824 VM_BESTFIT | VM_SLEEP);
10825 10825
10826 10826 if (aggid - 1 >= state->dts_naggregations) {
10827 10827 dtrace_aggregation_t **oaggs = state->dts_aggregations;
10828 10828 dtrace_aggregation_t **aggs;
10829 10829 int naggs = state->dts_naggregations << 1;
10830 10830 int onaggs = state->dts_naggregations;
10831 10831
10832 10832 ASSERT(aggid == state->dts_naggregations + 1);
10833 10833
10834 10834 if (naggs == 0) {
10835 10835 ASSERT(oaggs == NULL);
10836 10836 naggs = 1;
10837 10837 }
10838 10838
10839 10839 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10840 10840
10841 10841 if (oaggs != NULL) {
10842 10842 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10843 10843 kmem_free(oaggs, onaggs * sizeof (*aggs));
10844 10844 }
10845 10845
10846 10846 state->dts_aggregations = aggs;
10847 10847 state->dts_naggregations = naggs;
10848 10848 }
10849 10849
10850 10850 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10851 10851 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10852 10852
10853 10853 frec = &agg->dtag_first->dta_rec;
10854 10854 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10855 10855 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10856 10856
10857 10857 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10858 10858 ASSERT(!act->dta_intuple);
10859 10859 act->dta_intuple = 1;
10860 10860 }
10861 10861
10862 10862 return (&agg->dtag_action);
10863 10863 }
10864 10864
10865 10865 static void
10866 10866 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10867 10867 {
10868 10868 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10869 10869 dtrace_state_t *state = ecb->dte_state;
10870 10870 dtrace_aggid_t aggid = agg->dtag_id;
10871 10871
10872 10872 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10873 10873 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10874 10874
10875 10875 ASSERT(state->dts_aggregations[aggid - 1] == agg);
10876 10876 state->dts_aggregations[aggid - 1] = NULL;
10877 10877
10878 10878 kmem_free(agg, sizeof (dtrace_aggregation_t));
10879 10879 }
10880 10880
10881 10881 static int
10882 10882 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10883 10883 {
10884 10884 dtrace_action_t *action, *last;
10885 10885 dtrace_difo_t *dp = desc->dtad_difo;
10886 10886 uint32_t size = 0, align = sizeof (uint8_t), mask;
10887 10887 uint16_t format = 0;
10888 10888 dtrace_recdesc_t *rec;
10889 10889 dtrace_state_t *state = ecb->dte_state;
10890 10890 dtrace_optval_t *opt = state->dts_options, nframes, strsize;
10891 10891 uint64_t arg = desc->dtad_arg;
10892 10892
10893 10893 ASSERT(MUTEX_HELD(&dtrace_lock));
10894 10894 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10895 10895
10896 10896 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10897 10897 /*
10898 10898 * If this is an aggregating action, there must be neither
10899 10899 * a speculate nor a commit on the action chain.
10900 10900 */
10901 10901 dtrace_action_t *act;
10902 10902
10903 10903 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10904 10904 if (act->dta_kind == DTRACEACT_COMMIT)
10905 10905 return (EINVAL);
10906 10906
10907 10907 if (act->dta_kind == DTRACEACT_SPECULATE)
10908 10908 return (EINVAL);
10909 10909 }
10910 10910
10911 10911 action = dtrace_ecb_aggregation_create(ecb, desc);
10912 10912
10913 10913 if (action == NULL)
10914 10914 return (EINVAL);
10915 10915 } else {
10916 10916 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10917 10917 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10918 10918 dp != NULL && dp->dtdo_destructive)) {
10919 10919 state->dts_destructive = 1;
10920 10920 }
10921 10921
10922 10922 switch (desc->dtad_kind) {
10923 10923 case DTRACEACT_PRINTF:
10924 10924 case DTRACEACT_PRINTA:
10925 10925 case DTRACEACT_SYSTEM:
10926 10926 case DTRACEACT_FREOPEN:
10927 10927 case DTRACEACT_DIFEXPR:
10928 10928 /*
10929 10929 * We know that our arg is a string -- turn it into a
10930 10930 * format.
10931 10931 */
10932 10932 			if (arg == 0) {
10933 10933 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
10934 10934 desc->dtad_kind == DTRACEACT_DIFEXPR);
10935 10935 format = 0;
10936 10936 } else {
10937 10937 				ASSERT(arg != 0);
10938 10938 ASSERT(arg > KERNELBASE);
10939 10939 format = dtrace_format_add(state,
10940 10940 (char *)(uintptr_t)arg);
10941 10941 }
10942 10942
10943 10943 /*FALLTHROUGH*/
10944 10944 case DTRACEACT_LIBACT:
10945 10945 case DTRACEACT_TRACEMEM:
10946 10946 case DTRACEACT_TRACEMEM_DYNSIZE:
10947 10947 if (dp == NULL)
10948 10948 return (EINVAL);
10949 10949
10950 10950 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10951 10951 break;
10952 10952
10953 10953 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10954 10954 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10955 10955 return (EINVAL);
10956 10956
10957 10957 size = opt[DTRACEOPT_STRSIZE];
10958 10958 }
10959 10959
10960 10960 break;
10961 10961
10962 10962 case DTRACEACT_STACK:
10963 10963 if ((nframes = arg) == 0) {
10964 10964 nframes = opt[DTRACEOPT_STACKFRAMES];
10965 10965 ASSERT(nframes > 0);
10966 10966 arg = nframes;
10967 10967 }
10968 10968
10969 10969 size = nframes * sizeof (pc_t);
10970 10970 break;
10971 10971
10972 10972 case DTRACEACT_JSTACK:
10973 10973 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10974 10974 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10975 10975
10976 10976 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10977 10977 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10978 10978
10979 10979 arg = DTRACE_USTACK_ARG(nframes, strsize);
10980 10980
10981 10981 /*FALLTHROUGH*/
10982 10982 case DTRACEACT_USTACK:
10983 10983 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10984 10984 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10985 10985 strsize = DTRACE_USTACK_STRSIZE(arg);
10986 10986 nframes = opt[DTRACEOPT_USTACKFRAMES];
10987 10987 ASSERT(nframes > 0);
10988 10988 arg = DTRACE_USTACK_ARG(nframes, strsize);
10989 10989 }
10990 10990
10991 10991 /*
10992 10992 * Save a slot for the pid.
10993 10993 */
10994 10994 size = (nframes + 1) * sizeof (uint64_t);
10995 10995 size += DTRACE_USTACK_STRSIZE(arg);
10996 10996 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10997 10997
10998 10998 break;
10999 10999
11000 11000 case DTRACEACT_SYM:
11001 11001 case DTRACEACT_MOD:
11002 11002 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11003 11003 sizeof (uint64_t)) ||
11004 11004 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11005 11005 return (EINVAL);
11006 11006 break;
11007 11007
11008 11008 case DTRACEACT_USYM:
11009 11009 case DTRACEACT_UMOD:
11010 11010 case DTRACEACT_UADDR:
11011 11011 if (dp == NULL ||
11012 11012 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11013 11013 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11014 11014 return (EINVAL);
11015 11015
11016 11016 /*
11017 11017 * We have a slot for the pid, plus a slot for the
11018 11018 * argument. To keep things simple (aligned with
11019 11019 * bitness-neutral sizing), we store each as a 64-bit
11020 11020 * quantity.
11021 11021 */
11022 11022 size = 2 * sizeof (uint64_t);
11023 11023 break;
11024 11024
11025 11025 case DTRACEACT_STOP:
11026 11026 case DTRACEACT_BREAKPOINT:
11027 11027 case DTRACEACT_PANIC:
11028 11028 break;
11029 11029
11030 11030 case DTRACEACT_CHILL:
11031 11031 case DTRACEACT_DISCARD:
11032 11032 case DTRACEACT_RAISE:
11033 11033 if (dp == NULL)
11034 11034 return (EINVAL);
11035 11035 break;
11036 11036
11037 11037 case DTRACEACT_EXIT:
11038 11038 if (dp == NULL ||
11039 11039 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11040 11040 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11041 11041 return (EINVAL);
11042 11042 break;
11043 11043
11044 11044 case DTRACEACT_SPECULATE:
11045 11045 if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11046 11046 return (EINVAL);
11047 11047
11048 11048 if (dp == NULL)
11049 11049 return (EINVAL);
11050 11050
11051 11051 state->dts_speculates = 1;
11052 11052 break;
11053 11053
11054 11054 case DTRACEACT_COMMIT: {
11055 11055 dtrace_action_t *act = ecb->dte_action;
11056 11056
11057 11057 for (; act != NULL; act = act->dta_next) {
11058 11058 if (act->dta_kind == DTRACEACT_COMMIT)
11059 11059 return (EINVAL);
11060 11060 }
11061 11061
11062 11062 if (dp == NULL)
11063 11063 return (EINVAL);
11064 11064 break;
11065 11065 }
11066 11066
11067 11067 default:
11068 11068 return (EINVAL);
11069 11069 }
11070 11070
11071 11071 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11072 11072 /*
11073 11073 * If this is a data-storing action or a speculate,
11074 11074 * we must be sure that there isn't a commit on the
11075 11075 * action chain.
11076 11076 */
11077 11077 dtrace_action_t *act = ecb->dte_action;
11078 11078
11079 11079 for (; act != NULL; act = act->dta_next) {
11080 11080 if (act->dta_kind == DTRACEACT_COMMIT)
11081 11081 return (EINVAL);
11082 11082 }
11083 11083 }
11084 11084
11085 11085 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11086 11086 action->dta_rec.dtrd_size = size;
11087 11087 }
11088 11088
11089 11089 action->dta_refcnt = 1;
11090 11090 rec = &action->dta_rec;
11091 11091 size = rec->dtrd_size;
11092 11092
11093 11093 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11094 11094 if (!(size & mask)) {
11095 11095 align = mask + 1;
11096 11096 break;
11097 11097 }
11098 11098 }
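	/*
	 * The loop above derives a record's alignment from its size:  a
	 * 16-byte record gets 8-byte alignment, a 12-byte record 4-byte
	 * alignment, a 6-byte record 2-byte alignment, and an odd (or zero)
	 * size keeps the default byte alignment.
	 */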
11099 11099
11100 11100 action->dta_kind = desc->dtad_kind;
11101 11101
11102 11102 if ((action->dta_difo = dp) != NULL)
11103 11103 dtrace_difo_hold(dp);
11104 11104
11105 11105 rec->dtrd_action = action->dta_kind;
11106 11106 rec->dtrd_arg = arg;
11107 11107 rec->dtrd_uarg = desc->dtad_uarg;
11108 11108 rec->dtrd_alignment = (uint16_t)align;
11109 11109 rec->dtrd_format = format;
11110 11110
11111 11111 if ((last = ecb->dte_action_last) != NULL) {
11112 11112 ASSERT(ecb->dte_action != NULL);
11113 11113 action->dta_prev = last;
11114 11114 last->dta_next = action;
11115 11115 } else {
11116 11116 ASSERT(ecb->dte_action == NULL);
11117 11117 ecb->dte_action = action;
11118 11118 }
11119 11119
11120 11120 ecb->dte_action_last = action;
11121 11121
11122 11122 return (0);
11123 11123 }
11124 11124
11125 11125 static void
11126 11126 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
11127 11127 {
11128 11128 dtrace_action_t *act = ecb->dte_action, *next;
11129 11129 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
11130 11130 dtrace_difo_t *dp;
11131 11131 uint16_t format;
11132 11132
11133 11133 if (act != NULL && act->dta_refcnt > 1) {
11134 11134 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
11135 11135 act->dta_refcnt--;
11136 11136 } else {
11137 11137 for (; act != NULL; act = next) {
11138 11138 next = act->dta_next;
11139 11139 ASSERT(next != NULL || act == ecb->dte_action_last);
11140 11140 ASSERT(act->dta_refcnt == 1);
11141 11141
11142 11142 if ((format = act->dta_rec.dtrd_format) != 0)
11143 11143 dtrace_format_remove(ecb->dte_state, format);
11144 11144
11145 11145 if ((dp = act->dta_difo) != NULL)
11146 11146 dtrace_difo_release(dp, vstate);
11147 11147
11148 11148 if (DTRACEACT_ISAGG(act->dta_kind)) {
11149 11149 dtrace_ecb_aggregation_destroy(ecb, act);
11150 11150 } else {
11151 11151 kmem_free(act, sizeof (dtrace_action_t));
11152 11152 }
11153 11153 }
11154 11154 }
11155 11155
11156 11156 ecb->dte_action = NULL;
11157 11157 ecb->dte_action_last = NULL;
11158 11158 ecb->dte_size = 0;
11159 11159 }
11160 11160
11161 11161 static void
11162 11162 dtrace_ecb_disable(dtrace_ecb_t *ecb)
11163 11163 {
11164 11164 /*
11165 11165 * We disable the ECB by removing it from its probe.
11166 11166 */
11167 11167 dtrace_ecb_t *pecb, *prev = NULL;
11168 11168 dtrace_probe_t *probe = ecb->dte_probe;
11169 11169
11170 11170 ASSERT(MUTEX_HELD(&dtrace_lock));
11171 11171
11172 11172 if (probe == NULL) {
11173 11173 /*
11174 11174 * This is the NULL probe; there is nothing to disable.
11175 11175 */
11176 11176 return;
11177 11177 }
11178 11178
11179 11179 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
11180 11180 if (pecb == ecb)
11181 11181 break;
11182 11182 prev = pecb;
11183 11183 }
11184 11184
11185 11185 ASSERT(pecb != NULL);
11186 11186
11187 11187 if (prev == NULL) {
11188 11188 probe->dtpr_ecb = ecb->dte_next;
11189 11189 } else {
11190 11190 prev->dte_next = ecb->dte_next;
11191 11191 }
11192 11192
11193 11193 if (ecb == probe->dtpr_ecb_last) {
11194 11194 ASSERT(ecb->dte_next == NULL);
11195 11195 probe->dtpr_ecb_last = prev;
11196 11196 }
11197 11197
11198 11198 /*
11199 11199 * The ECB has been disconnected from the probe; now sync to assure
11200 11200 * that all CPUs have seen the change before returning.
11201 11201 */
11202 11202 dtrace_sync();
11203 11203
11204 11204 if (probe->dtpr_ecb == NULL) {
11205 11205 /*
11206 11206 * That was the last ECB on the probe; clear the predicate
11207 11207 * cache ID for the probe, disable it and sync one more time
11208 11208 * to assure that we'll never hit it again.
11209 11209 */
11210 11210 dtrace_provider_t *prov = probe->dtpr_provider;
11211 11211
11212 11212 ASSERT(ecb->dte_next == NULL);
11213 11213 ASSERT(probe->dtpr_ecb_last == NULL);
11214 11214 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
11215 11215 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
11216 11216 probe->dtpr_id, probe->dtpr_arg);
11217 11217 dtrace_sync();
11218 11218 } else {
11219 11219 /*
11220 11220 * There is at least one ECB remaining on the probe. If there
11221 11221 * is _exactly_ one, set the probe's predicate cache ID to be
11222 11222 * the predicate cache ID of the remaining ECB.
11223 11223 */
11224 11224 ASSERT(probe->dtpr_ecb_last != NULL);
11225 11225 			ASSERT(svar->dtsv_data != 0);
11226 11226
11227 11227 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
11228 11228 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
11229 11229
11230 11230 ASSERT(probe->dtpr_ecb->dte_next == NULL);
11231 11231
11232 11232 if (p != NULL)
11233 11233 probe->dtpr_predcache = p->dtp_cacheid;
11234 11234 }
11235 11235
11236 11236 ecb->dte_next = NULL;
11237 11237 }
11238 11238 }
11239 11239
11240 11240 static void
11241 11241 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
11242 11242 {
11243 11243 dtrace_state_t *state = ecb->dte_state;
11244 11244 dtrace_vstate_t *vstate = &state->dts_vstate;
11245 11245 dtrace_predicate_t *pred;
11246 11246 dtrace_epid_t epid = ecb->dte_epid;
11247 11247
11248 11248 ASSERT(MUTEX_HELD(&dtrace_lock));
11249 11249 ASSERT(ecb->dte_next == NULL);
11250 11250 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
11251 11251
11252 11252 if ((pred = ecb->dte_predicate) != NULL)
11253 11253 dtrace_predicate_release(pred, vstate);
11254 11254
11255 11255 dtrace_ecb_action_remove(ecb);
11256 11256
11257 11257 ASSERT(state->dts_ecbs[epid - 1] == ecb);
11258 11258 state->dts_ecbs[epid - 1] = NULL;
11259 11259
11260 11260 kmem_free(ecb, sizeof (dtrace_ecb_t));
11261 11261 }
11262 11262
11263 11263 static dtrace_ecb_t *
11264 11264 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
11265 11265 dtrace_enabling_t *enab)
11266 11266 {
11267 11267 dtrace_ecb_t *ecb;
11268 11268 dtrace_predicate_t *pred;
11269 11269 dtrace_actdesc_t *act;
11270 11270 dtrace_provider_t *prov;
11271 11271 dtrace_ecbdesc_t *desc = enab->dten_current;
11272 11272
11273 11273 ASSERT(MUTEX_HELD(&dtrace_lock));
11274 11274 ASSERT(state != NULL);
11275 11275
11276 11276 ecb = dtrace_ecb_add(state, probe);
11277 11277 ecb->dte_uarg = desc->dted_uarg;
11278 11278
11279 11279 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11280 11280 dtrace_predicate_hold(pred);
11281 11281 ecb->dte_predicate = pred;
11282 11282 }
11283 11283
11284 11284 if (probe != NULL) {
11285 11285 /*
11286 11286 * If the provider shows more leg than the consumer is old
11287 11287 * enough to see, we need to enable the appropriate implicit
11288 11288 * predicate bits to prevent the ecb from activating at
11289 11289 * revealing times.
11290 11290 *
11291 11291 * Providers specifying DTRACE_PRIV_USER at register time
11292 11292 * are stating that they need the /proc-style privilege
11293 11293 * model to be enforced, and this is what DTRACE_COND_OWNER
11294 11294 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11295 11295 */
11296 11296 prov = probe->dtpr_provider;
11297 11297 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11298 11298 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11299 11299 ecb->dte_cond |= DTRACE_COND_OWNER;
11300 11300
11301 11301 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11302 11302 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11303 11303 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11304 11304
11305 11305 /*
11306 11306 * If the provider shows us kernel innards and the user
11307 11307 * is lacking sufficient privilege, enable the
11308 11308 * DTRACE_COND_USERMODE implicit predicate.
11309 11309 */
11310 11310 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11311 11311 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11312 11312 ecb->dte_cond |= DTRACE_COND_USERMODE;
11313 11313 }
11314 11314
11315 11315 if (dtrace_ecb_create_cache != NULL) {
11316 11316 /*
11317 11317 * If we have a cached ecb, we'll use its action list instead
11318 11318 * of creating our own (saving both time and space).
11319 11319 */
11320 11320 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11321 11321 dtrace_action_t *act = cached->dte_action;
11322 11322
11323 11323 if (act != NULL) {
11324 11324 ASSERT(act->dta_refcnt > 0);
11325 11325 act->dta_refcnt++;
11326 11326 ecb->dte_action = act;
11327 11327 ecb->dte_action_last = cached->dte_action_last;
11328 11328 ecb->dte_needed = cached->dte_needed;
11329 11329 ecb->dte_size = cached->dte_size;
11330 11330 ecb->dte_alignment = cached->dte_alignment;
11331 11331 }
11332 11332
11333 11333 return (ecb);
11334 11334 }
11335 11335
11336 11336 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11337 11337 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11338 11338 dtrace_ecb_destroy(ecb);
11339 11339 return (NULL);
11340 11340 }
11341 11341 }
11342 11342
11343 11343 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) {
11344 11344 dtrace_ecb_destroy(ecb);
11345 11345 return (NULL);
11346 11346 }
11347 11347
11348 11348 return (dtrace_ecb_create_cache = ecb);
11349 11349 }
11350 11350
11351 11351 static int
11352 11352 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
11353 11353 {
11354 11354 dtrace_ecb_t *ecb;
11355 11355 dtrace_enabling_t *enab = arg;
11356 11356 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11357 11357
11358 11358 ASSERT(state != NULL);
11359 11359
11360 11360 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
11361 11361 /*
11362 11362 * This probe was created in a generation for which this
11363 11363 * enabling has previously created ECBs; we don't want to
11364 11364 * enable it again, so just kick out.
11365 11365 */
11366 11366 return (DTRACE_MATCH_NEXT);
11367 11367 }
11368 11368
11369 11369 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11370 11370 return (DTRACE_MATCH_DONE);
11371 11371
11372 11372 if (dtrace_ecb_enable(ecb) < 0)
11373 11373 return (DTRACE_MATCH_FAIL);
11374 11374
11375 11375 return (DTRACE_MATCH_NEXT);
11376 11376 }
11377 11377
11378 11378 static dtrace_ecb_t *
11379 11379 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11380 11380 {
11381 11381 dtrace_ecb_t *ecb;
11382 11382
11383 11383 ASSERT(MUTEX_HELD(&dtrace_lock));
11384 11384
11385 11385 if (id == 0 || id > state->dts_necbs)
11386 11386 return (NULL);
11387 11387
11388 11388 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11389 11389 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11390 11390
11391 11391 return (state->dts_ecbs[id - 1]);
11392 11392 }
11393 11393
11394 11394 static dtrace_aggregation_t *
11395 11395 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11396 11396 {
11397 11397 dtrace_aggregation_t *agg;
11398 11398
11399 11399 ASSERT(MUTEX_HELD(&dtrace_lock));
11400 11400
11401 11401 if (id == 0 || id > state->dts_naggregations)
11402 11402 return (NULL);
11403 11403
11404 11404 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11405 11405 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11406 11406 agg->dtag_id == id);
11407 11407
11408 11408 return (state->dts_aggregations[id - 1]);
11409 11409 }
11410 11410
11411 11411 /*
11412 11412 * DTrace Buffer Functions
11413 11413 *
11414 11414 * The following functions manipulate DTrace buffers. Most of these functions
11415 11415 * are called in the context of establishing or processing consumer state;
11416 11416 * exceptions are explicitly noted.
11417 11417 */
11418 11418
11419 11419 /*
11420 11420 * Note: called from cross call context. This function switches the two
11421 11421 * buffers on a given CPU. The atomicity of this operation is assured by
11422 11422 * disabling interrupts while the actual switch takes place; the disabling of
11423 11423 * interrupts serializes the execution with any execution of dtrace_probe() on
11424 11424 * the same CPU.
11425 11425 */
11426 11426 static void
11427 11427 dtrace_buffer_switch(dtrace_buffer_t *buf)
11428 11428 {
11429 11429 caddr_t tomax = buf->dtb_tomax;
11430 11430 caddr_t xamot = buf->dtb_xamot;
11431 11431 dtrace_icookie_t cookie;
11432 11432 hrtime_t now;
11433 11433
11434 11434 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11435 11435 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11436 11436
11437 11437 cookie = dtrace_interrupt_disable();
11438 11438 now = dtrace_gethrtime();
11439 11439 buf->dtb_tomax = xamot;
11440 11440 buf->dtb_xamot = tomax;
11441 11441 buf->dtb_xamot_drops = buf->dtb_drops;
11442 11442 buf->dtb_xamot_offset = buf->dtb_offset;
11443 11443 buf->dtb_xamot_errors = buf->dtb_errors;
11444 11444 buf->dtb_xamot_flags = buf->dtb_flags;
11445 11445 buf->dtb_offset = 0;
11446 11446 buf->dtb_drops = 0;
11447 11447 buf->dtb_errors = 0;
11448 11448 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11449 11449 buf->dtb_interval = now - buf->dtb_switched;
11450 11450 buf->dtb_switched = now;
11451 11451 dtrace_interrupt_enable(cookie);
11452 11452 }
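dtrace_buffer_switch() exchanges the active (dtb_tomax) and inactive (dtb_xamot) buffers and latches the active buffer's offset, drop, error, and flag state into the xamot snapshot fields, with interrupts disabled so that dtrace_probe() on the same CPU can never observe a half-switched buffer. A minimal user-level analogue of the swap, using a mutex where the kernel relies on disabling interrupts (an illustrative sketch, not this driver's API):

#include <pthread.h>
#include <stddef.h>

struct dbuf {
	pthread_mutex_t	db_lock;	/* stands in for interrupt disabling */
	char		*db_active;	/* buffer currently being written */
	char		*db_snapshot;	/* buffer handed to the consumer */
	size_t		db_offset;	/* write offset into db_active */
	size_t		db_snapshot_offset;
};

/* Swap the active and snapshot buffers and latch the current offset. */
void
dbuf_switch(struct dbuf *db)
{
	pthread_mutex_lock(&db->db_lock);

	char *tmp = db->db_active;
	db->db_active = db->db_snapshot;
	db->db_snapshot = tmp;
	db->db_snapshot_offset = db->db_offset;
	db->db_offset = 0;

	pthread_mutex_unlock(&db->db_lock);
}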
11453 11453
11454 11454 /*
11455 11455 * Note: called from cross call context. This function activates a buffer
11456 11456 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
11457 11457 * is guaranteed by the disabling of interrupts.
11458 11458 */
11459 11459 static void
11460 11460 dtrace_buffer_activate(dtrace_state_t *state)
11461 11461 {
11462 11462 dtrace_buffer_t *buf;
11463 11463 dtrace_icookie_t cookie = dtrace_interrupt_disable();
11464 11464
11465 11465 buf = &state->dts_buffer[CPU->cpu_id];
11466 11466
11467 11467 if (buf->dtb_tomax != NULL) {
11468 11468 /*
11469 11469 * We might like to assert that the buffer is marked inactive,
11470 11470 * but this isn't necessarily true: the buffer for the CPU
11471 11471 * that processes the BEGIN probe has its buffer activated
11472 11472 * manually. In this case, we take the (harmless) action
11473 11473 		 * of re-clearing the INACTIVE bit.
11474 11474 */
11475 11475 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
11476 11476 }
11477 11477
11478 11478 dtrace_interrupt_enable(cookie);
11479 11479 }
11480 11480
11481 11481 static int
11482 11482 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
11483 11483 processorid_t cpu, int *factor)
11484 11484 {
11485 11485 cpu_t *cp;
11486 11486 dtrace_buffer_t *buf;
11487 11487 int allocated = 0, desired = 0;
11488 11488
11489 11489 ASSERT(MUTEX_HELD(&cpu_lock));
11490 11490 ASSERT(MUTEX_HELD(&dtrace_lock));
11491 11491
11492 11492 *factor = 1;
11493 11493
11494 11494 if (size > dtrace_nonroot_maxsize &&
11495 11495 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
11496 11496 return (EFBIG);
11497 11497
11498 11498 cp = cpu_list;
11499 11499
11500 11500 do {
11501 11501 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11502 11502 continue;
11503 11503
11504 11504 buf = &bufs[cp->cpu_id];
11505 11505
11506 11506 /*
11507 11507 * If there is already a buffer allocated for this CPU, it
11508 11508 * is only possible that this is a DR event. In this case,
11509 11509 * the buffer size must match our specified size.
11510 11510 */
11511 11511 if (buf->dtb_tomax != NULL) {
11512 11512 ASSERT(buf->dtb_size == size);
11513 11513 continue;
11514 11514 }
11515 11515
11516 11516 ASSERT(buf->dtb_xamot == NULL);
11517 11517
11518 11518 if ((buf->dtb_tomax = kmem_zalloc(size,
11519 11519 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11520 11520 goto err;
11521 11521
11522 11522 buf->dtb_size = size;
11523 11523 buf->dtb_flags = flags;
11524 11524 buf->dtb_offset = 0;
11525 11525 buf->dtb_drops = 0;
11526 11526
11527 11527 if (flags & DTRACEBUF_NOSWITCH)
11528 11528 continue;
11529 11529
11530 11530 if ((buf->dtb_xamot = kmem_zalloc(size,
11531 11531 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11532 11532 goto err;
11533 11533 } while ((cp = cp->cpu_next) != cpu_list);
11534 11534
11535 11535 return (0);
11536 11536
11537 11537 err:
11538 11538 cp = cpu_list;
11539 11539
11540 11540 do {
11541 11541 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11542 11542 continue;
11543 11543
11544 11544 buf = &bufs[cp->cpu_id];
11545 11545 desired += 2;
11546 11546
11547 11547 if (buf->dtb_xamot != NULL) {
11548 11548 ASSERT(buf->dtb_tomax != NULL);
11549 11549 ASSERT(buf->dtb_size == size);
11550 11550 kmem_free(buf->dtb_xamot, size);
11551 11551 allocated++;
11552 11552 }
11553 11553
11554 11554 if (buf->dtb_tomax != NULL) {
11555 11555 ASSERT(buf->dtb_size == size);
11556 11556 kmem_free(buf->dtb_tomax, size);
11557 11557 allocated++;
11558 11558 }
11559 11559
11560 11560 buf->dtb_tomax = NULL;
11561 11561 buf->dtb_xamot = NULL;
11562 11562 buf->dtb_size = 0;
11563 11563 } while ((cp = cp->cpu_next) != cpu_list);
11564 11564
11565 11565 *factor = desired / (allocated > 0 ? allocated : 1);
11566 11566
11567 11567 return (ENOMEM);
11568 11568 }
11569 11569
11570 11570 /*
11571 11571 * Note: called from probe context. This function just increments the drop
11572 11572 * count on a buffer. It has been made a function to allow for the
11573 11573 * possibility of understanding the source of mysterious drop counts. (A
11574 11574 * problem for which one may be particularly disappointed that DTrace cannot
11575 11575 * be used to understand DTrace.)
11576 11576 */
11577 11577 static void
11578 11578 dtrace_buffer_drop(dtrace_buffer_t *buf)
11579 11579 {
11580 11580 buf->dtb_drops++;
11581 11581 }
11582 11582
11583 11583 /*
11584 11584 * Note: called from probe context. This function is called to reserve space
11585 11585 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
11586 11586 * mstate. Returns the new offset in the buffer, or a negative value if an
11587 11587 * error has occurred.
11588 11588 */
11589 11589 static intptr_t
11590 11590 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
11591 11591 dtrace_state_t *state, dtrace_mstate_t *mstate)
11592 11592 {
11593 11593 intptr_t offs = buf->dtb_offset, soffs;
11594 11594 intptr_t woffs;
11595 11595 caddr_t tomax;
11596 11596 size_t total;
11597 11597
11598 11598 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
11599 11599 return (-1);
11600 11600
11601 11601 if ((tomax = buf->dtb_tomax) == NULL) {
11602 11602 dtrace_buffer_drop(buf);
11603 11603 return (-1);
11604 11604 }
11605 11605
11606 11606 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
11607 11607 while (offs & (align - 1)) {
11608 11608 /*
11609 11609 * Assert that our alignment is off by a number which
11610 11610 * is itself sizeof (uint32_t) aligned.
11611 11611 */
11612 11612 ASSERT(!((align - (offs & (align - 1))) &
11613 11613 (sizeof (uint32_t) - 1)));
11614 11614 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11615 11615 offs += sizeof (uint32_t);
11616 11616 }
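		/*
		 * For example, with 8-byte alignment and offs == 12, a
		 * single DTRACE_EPIDNONE word is stored at offset 12 and
		 * offs advances to 16; the assertion above guarantees that
		 * the gap to close is always a whole number of 4-byte words.
		 */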
11617 11617
11618 11618 if ((soffs = offs + needed) > buf->dtb_size) {
11619 11619 dtrace_buffer_drop(buf);
11620 11620 return (-1);
11621 11621 }
11622 11622
11623 11623 if (mstate == NULL)
11624 11624 return (offs);
11625 11625
11626 11626 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
11627 11627 mstate->dtms_scratch_size = buf->dtb_size - soffs;
11628 11628 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11629 11629
11630 11630 return (offs);
11631 11631 }
11632 11632
11633 11633 if (buf->dtb_flags & DTRACEBUF_FILL) {
11634 11634 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
11635 11635 (buf->dtb_flags & DTRACEBUF_FULL))
11636 11636 return (-1);
11637 11637 goto out;
11638 11638 }
11639 11639
11640 11640 total = needed + (offs & (align - 1));
11641 11641
11642 11642 /*
11643 11643 * For a ring buffer, life is quite a bit more complicated. Before
11644 11644 * we can store any padding, we need to adjust our wrapping offset.
11645 11645 * (If we've never before wrapped or we're not about to, no adjustment
11646 11646 * is required.)
11647 11647 */
11648 11648 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
11649 11649 offs + total > buf->dtb_size) {
11650 11650 woffs = buf->dtb_xamot_offset;
11651 11651
11652 11652 if (offs + total > buf->dtb_size) {
11653 11653 /*
11654 11654 * We can't fit in the end of the buffer. First, a
11655 11655 * sanity check that we can fit in the buffer at all.
11656 11656 */
11657 11657 if (total > buf->dtb_size) {
11658 11658 dtrace_buffer_drop(buf);
11659 11659 return (-1);
11660 11660 }
11661 11661
11662 11662 /*
11663 11663 * We're going to be storing at the top of the buffer,
11664 11664 * so now we need to deal with the wrapped offset. We
11665 11665 * only reset our wrapped offset to 0 if it is
11666 11666 * currently greater than the current offset. If it
11667 11667 * is less than the current offset, it is because a
11668 11668 * previous allocation induced a wrap -- but the
11669 11669 * allocation didn't subsequently take the space due
11670 11670 * to an error or false predicate evaluation. In this
11671 11671 * case, we'll just leave the wrapped offset alone: if
11672 11672 * the wrapped offset hasn't been advanced far enough
11673 11673 * for this allocation, it will be adjusted in the
11674 11674 * lower loop.
11675 11675 */
11676 11676 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
11677 11677 if (woffs >= offs)
11678 11678 woffs = 0;
11679 11679 } else {
11680 11680 woffs = 0;
11681 11681 }
11682 11682
11683 11683 /*
11684 11684 * Now we know that we're going to be storing to the
11685 11685 * top of the buffer and that there is room for us
11686 11686 * there. We need to clear the buffer from the current
11687 11687 * offset to the end (there may be old gunk there).
11688 11688 */
11689 11689 while (offs < buf->dtb_size)
11690 11690 tomax[offs++] = 0;
11691 11691
11692 11692 /*
11693 11693 * We need to set our offset to zero. And because we
11694 11694 * are wrapping, we need to set the bit indicating as
11695 11695 * much. We can also adjust our needed space back
11696 11696 * down to the space required by the ECB -- we know
11697 11697 * that the top of the buffer is aligned.
11698 11698 */
11699 11699 offs = 0;
11700 11700 total = needed;
11701 11701 buf->dtb_flags |= DTRACEBUF_WRAPPED;
11702 11702 } else {
11703 11703 /*
11704 11704 * There is room for us in the buffer, so we simply
11705 11705 * need to check the wrapped offset.
11706 11706 */
11707 11707 if (woffs < offs) {
11708 11708 /*
11709 11709 * The wrapped offset is less than the offset.
11710 11710 * This can happen if we allocated buffer space
11711 11711 * that induced a wrap, but then we didn't
11712 11712 * subsequently take the space due to an error
11713 11713 * or false predicate evaluation. This is
11714 11714 * okay; we know that _this_ allocation isn't
11715 11715 * going to induce a wrap. We still can't
11716 11716 * reset the wrapped offset to be zero,
11717 11717 * however: the space may have been trashed in
11718 11718 * the previous failed probe attempt. But at
11719 11719 * least the wrapped offset doesn't need to
11720 11720 * be adjusted at all...
11721 11721 */
11722 11722 goto out;
11723 11723 }
11724 11724 }
11725 11725
11726 11726 while (offs + total > woffs) {
11727 11727 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
11728 11728 size_t size;
11729 11729
11730 11730 if (epid == DTRACE_EPIDNONE) {
11731 11731 size = sizeof (uint32_t);
11732 11732 } else {
11733 11733 ASSERT3U(epid, <=, state->dts_necbs);
11734 11734 ASSERT(state->dts_ecbs[epid - 1] != NULL);
11735 11735
11736 11736 size = state->dts_ecbs[epid - 1]->dte_size;
11737 11737 }
11738 11738
11739 11739 ASSERT(woffs + size <= buf->dtb_size);
11740 11740 ASSERT(size != 0);
11741 11741
11742 11742 if (woffs + size == buf->dtb_size) {
11743 11743 /*
11744 11744 * We've reached the end of the buffer; we want
11745 11745 * to set the wrapped offset to 0 and break
11746 11746 * out. However, if the offs is 0, then we're
11747 11747 * in a strange edge-condition: the amount of
11748 11748 * space that we want to reserve plus the size
11749 11749 * of the record that we're overwriting is
11750 11750 * greater than the size of the buffer. This
11751 11751 * is problematic because if we reserve the
11752 11752 * space but subsequently don't consume it (due
11753 11753 * to a failed predicate or error) the wrapped
11754 11754 * offset will be 0 -- yet the EPID at offset 0
11755 11755 * will not be committed. This situation is
11756 11756 * relatively easy to deal with: if we're in
11757 11757 * this case, the buffer is indistinguishable
11758 11758 * from one that hasn't wrapped; we need only
11759 11759 * finish the job by clearing the wrapped bit,
11760 11760 * explicitly setting the offset to be 0, and
11761 11761 * zero'ing out the old data in the buffer.
11762 11762 */
11763 11763 if (offs == 0) {
11764 11764 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
11765 11765 buf->dtb_offset = 0;
11766 11766 woffs = total;
11767 11767
11768 11768 while (woffs < buf->dtb_size)
11769 11769 tomax[woffs++] = 0;
11770 11770 }
11771 11771
11772 11772 woffs = 0;
11773 11773 break;
11774 11774 }
11775 11775
11776 11776 woffs += size;
11777 11777 }
11778 11778
11779 11779 /*
11780 11780 * We have a wrapped offset. It may be that the wrapped offset
11781 11781 * has become zero -- that's okay.
11782 11782 */
11783 11783 buf->dtb_xamot_offset = woffs;
11784 11784 }
11785 11785
11786 11786 out:
11787 11787 /*
11788 11788 * Now we can plow the buffer with any necessary padding.
11789 11789 */
11790 11790 while (offs & (align - 1)) {
11791 11791 /*
11792 11792 * Assert that our alignment is off by a number which
11793 11793 * is itself sizeof (uint32_t) aligned.
11794 11794 */
11795 11795 ASSERT(!((align - (offs & (align - 1))) &
11796 11796 (sizeof (uint32_t) - 1)));
11797 11797 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11798 11798 offs += sizeof (uint32_t);
11799 11799 }
11800 11800
11801 11801 if (buf->dtb_flags & DTRACEBUF_FILL) {
11802 11802 if (offs + needed > buf->dtb_size - state->dts_reserve) {
11803 11803 buf->dtb_flags |= DTRACEBUF_FULL;
11804 11804 return (-1);
11805 11805 }
11806 11806 }
11807 11807
11808 11808 if (mstate == NULL)
11809 11809 return (offs);
11810 11810
11811 11811 /*
11812 11812 * For ring buffers and fill buffers, the scratch space is always
11813 11813 * the inactive buffer.
11814 11814 */
11815 11815 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
11816 11816 mstate->dtms_scratch_size = buf->dtb_size;
11817 11817 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11818 11818
11819 11819 return (offs);
11820 11820 }
11821 11821
11822 11822 static void
11823 11823 dtrace_buffer_polish(dtrace_buffer_t *buf)
11824 11824 {
11825 11825 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
11826 11826 ASSERT(MUTEX_HELD(&dtrace_lock));
11827 11827
11828 11828 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11829 11829 return;
11830 11830
11831 11831 /*
11832 11832 * We need to polish the ring buffer. There are three cases:
11833 11833 *
11834 11834 * - The first (and presumably most common) is that there is no gap
11835 11835 * between the buffer offset and the wrapped offset. In this case,
11836 11836 * there is nothing in the buffer that isn't valid data; we can
11837 11837 * mark the buffer as polished and return.
11838 11838 *
11839 11839 * - The second (less common than the first but still more common
11840 11840 * than the third) is that there is a gap between the buffer offset
11841 11841 * and the wrapped offset, and the wrapped offset is larger than the
11842 11842 * buffer offset. This can happen because of an alignment issue, or
11843 11843 * can happen because of a call to dtrace_buffer_reserve() that
11844 11844 * didn't subsequently consume the buffer space. In this case,
11845 11845 * we need to zero the data from the buffer offset to the wrapped
11846 11846 * offset.
11847 11847 *
11848 11848 * - The third (and least common) is that there is a gap between the
11849 11849 * buffer offset and the wrapped offset, but the wrapped offset is
11850 11850 * _less_ than the buffer offset. This can only happen because a
11851 11851 * call to dtrace_buffer_reserve() induced a wrap, but the space
11852 11852 * was not subsequently consumed. In this case, we need to zero the
11853 11853 * space from the offset to the end of the buffer _and_ from the
11854 11854 * top of the buffer to the wrapped offset.
11855 11855 */
11856 11856 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11857 11857 bzero(buf->dtb_tomax + buf->dtb_offset,
11858 11858 buf->dtb_xamot_offset - buf->dtb_offset);
11859 11859 }
11860 11860
11861 11861 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11862 11862 bzero(buf->dtb_tomax + buf->dtb_offset,
11863 11863 buf->dtb_size - buf->dtb_offset);
11864 11864 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11865 11865 }
11866 11866 }
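/*
 * A worked example of the third polish case, with hypothetical sizes: given
 * a 64K ring buffer (dtb_size == 65536), a buffer offset of 61440 and a
 * wrapped offset (dtb_xamot_offset) of 1024, the second pair of bzero()
 * calls in dtrace_buffer_polish() above zeroes the 4096 bytes in
 * [61440, 65536) and then the 1024 bytes in [0, 1024) -- exactly the space
 * that was reserved across the wrap but never subsequently consumed.
 */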
11867 11867
11868 11868 /*
11869 11869 * This routine determines if data generated at the specified time has likely
11870 11870 * been entirely consumed at user-level. This routine is called to determine
11871 11871 * if an ECB on a defunct probe (but for an active enabling) can be safely
11872 11872 * disabled and destroyed.
11873 11873 */
11874 11874 static int
11875 11875 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
11876 11876 {
11877 11877 int i;
11878 11878
11879 11879 for (i = 0; i < NCPU; i++) {
11880 11880 dtrace_buffer_t *buf = &bufs[i];
11881 11881
11882 11882 if (buf->dtb_size == 0)
11883 11883 continue;
11884 11884
11885 11885 if (buf->dtb_flags & DTRACEBUF_RING)
11886 11886 return (0);
11887 11887
11888 11888 if (!buf->dtb_switched && buf->dtb_offset != 0)
11889 11889 return (0);
11890 11890
11891 11891 if (buf->dtb_switched - buf->dtb_interval < when)
11892 11892 return (0);
11893 11893 }
11894 11894
11895 11895 return (1);
11896 11896 }
11897 11897
11898 11898 static void
11899 11899 dtrace_buffer_free(dtrace_buffer_t *bufs)
11900 11900 {
11901 11901 int i;
11902 11902
11903 11903 for (i = 0; i < NCPU; i++) {
11904 11904 dtrace_buffer_t *buf = &bufs[i];
11905 11905
11906 11906 if (buf->dtb_tomax == NULL) {
11907 11907 ASSERT(buf->dtb_xamot == NULL);
11908 11908 ASSERT(buf->dtb_size == 0);
11909 11909 continue;
11910 11910 }
11911 11911
11912 11912 if (buf->dtb_xamot != NULL) {
11913 11913 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11914 11914 kmem_free(buf->dtb_xamot, buf->dtb_size);
11915 11915 }
11916 11916
11917 11917 kmem_free(buf->dtb_tomax, buf->dtb_size);
11918 11918 buf->dtb_size = 0;
11919 11919 buf->dtb_tomax = NULL;
11920 11920 buf->dtb_xamot = NULL;
11921 11921 }
11922 11922 }
11923 11923
11924 11924 /*
11925 11925 * DTrace Enabling Functions
11926 11926 */
11927 11927 static dtrace_enabling_t *
11928 11928 dtrace_enabling_create(dtrace_vstate_t *vstate)
11929 11929 {
11930 11930 dtrace_enabling_t *enab;
11931 11931
11932 11932 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11933 11933 enab->dten_vstate = vstate;
11934 11934
11935 11935 return (enab);
11936 11936 }
11937 11937
11938 11938 static void
11939 11939 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11940 11940 {
11941 11941 dtrace_ecbdesc_t **ndesc;
11942 11942 size_t osize, nsize;
11943 11943
11944 11944 /*
11945 11945 * We can't add to enablings after we've enabled them, or after we've
11946 11946 * retained them.
11947 11947 */
11948 11948 ASSERT(enab->dten_probegen == 0);
11949 11949 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11950 11950
11951 11951 if (enab->dten_ndesc < enab->dten_maxdesc) {
11952 11952 enab->dten_desc[enab->dten_ndesc++] = ecb;
11953 11953 return;
11954 11954 }
11955 11955
11956 11956 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11957 11957
11958 11958 if (enab->dten_maxdesc == 0) {
11959 11959 enab->dten_maxdesc = 1;
11960 11960 } else {
11961 11961 enab->dten_maxdesc <<= 1;
11962 11962 }
11963 11963
11964 11964 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11965 11965
11966 11966 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11967 11967 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11968 11968 bcopy(enab->dten_desc, ndesc, osize);
11969 11969 kmem_free(enab->dten_desc, osize);
11970 11970
11971 11971 enab->dten_desc = ndesc;
11972 11972 enab->dten_desc[enab->dten_ndesc++] = ecb;
11973 11973 }
11974 11974
11975 11975 static void
11976 11976 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11977 11977 dtrace_probedesc_t *pd)
11978 11978 {
11979 11979 dtrace_ecbdesc_t *new;
11980 11980 dtrace_predicate_t *pred;
11981 11981 dtrace_actdesc_t *act;
11982 11982
11983 11983 /*
11984 11984 * We're going to create a new ECB description that matches the
11985 11985 * specified ECB in every way, but has the specified probe description.
11986 11986 */
11987 11987 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11988 11988
11989 11989 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11990 11990 dtrace_predicate_hold(pred);
11991 11991
11992 11992 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11993 11993 dtrace_actdesc_hold(act);
11994 11994
11995 11995 new->dted_action = ecb->dted_action;
11996 11996 new->dted_pred = ecb->dted_pred;
11997 11997 new->dted_probe = *pd;
11998 11998 new->dted_uarg = ecb->dted_uarg;
11999 11999
12000 12000 dtrace_enabling_add(enab, new);
12001 12001 }
12002 12002
12003 12003 static void
12004 12004 dtrace_enabling_dump(dtrace_enabling_t *enab)
12005 12005 {
12006 12006 int i;
12007 12007
12008 12008 for (i = 0; i < enab->dten_ndesc; i++) {
12009 12009 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12010 12010
12011 12011 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12012 12012 desc->dtpd_provider, desc->dtpd_mod,
12013 12013 desc->dtpd_func, desc->dtpd_name);
12014 12014 }
12015 12015 }
12016 12016
12017 12017 static void
12018 12018 dtrace_enabling_destroy(dtrace_enabling_t *enab)
12019 12019 {
12020 12020 int i;
12021 12021 dtrace_ecbdesc_t *ep;
12022 12022 dtrace_vstate_t *vstate = enab->dten_vstate;
12023 12023
12024 12024 ASSERT(MUTEX_HELD(&dtrace_lock));
12025 12025
12026 12026 for (i = 0; i < enab->dten_ndesc; i++) {
12027 12027 dtrace_actdesc_t *act, *next;
12028 12028 dtrace_predicate_t *pred;
12029 12029
12030 12030 ep = enab->dten_desc[i];
12031 12031
12032 12032 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12033 12033 dtrace_predicate_release(pred, vstate);
12034 12034
12035 12035 for (act = ep->dted_action; act != NULL; act = next) {
12036 12036 next = act->dtad_next;
12037 12037 dtrace_actdesc_release(act, vstate);
12038 12038 }
12039 12039
12040 12040 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12041 12041 }
12042 12042
12043 12043 kmem_free(enab->dten_desc,
12044 12044 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
12045 12045
12046 12046 /*
12047 12047 * If this was a retained enabling, decrement the dts_nretained count
12048 12048 * and take it off of the dtrace_retained list.
12049 12049 */
12050 12050 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
12051 12051 dtrace_retained == enab) {
12052 12052 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12053 12053 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
12054 12054 enab->dten_vstate->dtvs_state->dts_nretained--;
12055 12055 dtrace_retained_gen++;
12056 12056 }
12057 12057
12058 12058 if (enab->dten_prev == NULL) {
12059 12059 if (dtrace_retained == enab) {
12060 12060 dtrace_retained = enab->dten_next;
12061 12061
12062 12062 if (dtrace_retained != NULL)
12063 12063 dtrace_retained->dten_prev = NULL;
12064 12064 }
12065 12065 } else {
12066 12066 ASSERT(enab != dtrace_retained);
12067 12067 ASSERT(dtrace_retained != NULL);
12068 12068 enab->dten_prev->dten_next = enab->dten_next;
12069 12069 }
12070 12070
12071 12071 if (enab->dten_next != NULL) {
12072 12072 ASSERT(dtrace_retained != NULL);
12073 12073 enab->dten_next->dten_prev = enab->dten_prev;
12074 12074 }
12075 12075
12076 12076 kmem_free(enab, sizeof (dtrace_enabling_t));
12077 12077 }
12078 12078
12079 12079 static int
12080 12080 dtrace_enabling_retain(dtrace_enabling_t *enab)
12081 12081 {
12082 12082 dtrace_state_t *state;
12083 12083
12084 12084 ASSERT(MUTEX_HELD(&dtrace_lock));
12085 12085 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12086 12086 ASSERT(enab->dten_vstate != NULL);
12087 12087
12088 12088 state = enab->dten_vstate->dtvs_state;
12089 12089 ASSERT(state != NULL);
12090 12090
12091 12091 /*
12092 12092 * We only allow each state to retain dtrace_retain_max enablings.
12093 12093 */
12094 12094 if (state->dts_nretained >= dtrace_retain_max)
12095 12095 return (ENOSPC);
12096 12096
12097 12097 state->dts_nretained++;
12098 12098 dtrace_retained_gen++;
12099 12099
12100 12100 if (dtrace_retained == NULL) {
12101 12101 dtrace_retained = enab;
12102 12102 return (0);
12103 12103 }
12104 12104
12105 12105 enab->dten_next = dtrace_retained;
12106 12106 dtrace_retained->dten_prev = enab;
12107 12107 dtrace_retained = enab;
12108 12108
12109 12109 return (0);
12110 12110 }
12111 12111
12112 12112 static int
12113 12113 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
12114 12114 dtrace_probedesc_t *create)
12115 12115 {
12116 12116 dtrace_enabling_t *new, *enab;
12117 12117 int found = 0, err = ENOENT;
12118 12118
12119 12119 ASSERT(MUTEX_HELD(&dtrace_lock));
12120 12120 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
12121 12121 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
12122 12122 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
12123 12123 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
12124 12124
12125 12125 new = dtrace_enabling_create(&state->dts_vstate);
12126 12126
12127 12127 /*
12128 12128 * Iterate over all retained enablings, looking for enablings that
12129 12129 * match the specified state.
12130 12130 */
12131 12131 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12132 12132 int i;
12133 12133
12134 12134 /*
12135 12135 * dtvs_state can only be NULL for helper enablings -- and
12136 12136 * helper enablings can't be retained.
12137 12137 */
12138 12138 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12139 12139
12140 12140 if (enab->dten_vstate->dtvs_state != state)
12141 12141 continue;
12142 12142
12143 12143 /*
12144 12144 * Now iterate over each probe description; we're looking for
12145 12145 * an exact match to the specified probe description.
12146 12146 */
12147 12147 for (i = 0; i < enab->dten_ndesc; i++) {
12148 12148 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12149 12149 dtrace_probedesc_t *pd = &ep->dted_probe;
12150 12150
12151 12151 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
12152 12152 continue;
12153 12153
12154 12154 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
12155 12155 continue;
12156 12156
12157 12157 if (strcmp(pd->dtpd_func, match->dtpd_func))
12158 12158 continue;
12159 12159
12160 12160 if (strcmp(pd->dtpd_name, match->dtpd_name))
12161 12161 continue;
12162 12162
12163 12163 /*
12164 12164 * We have a winning probe! Add it to our growing
12165 12165 * enabling.
12166 12166 */
12167 12167 found = 1;
12168 12168 dtrace_enabling_addlike(new, ep, create);
12169 12169 }
12170 12170 }
12171 12171
12172 12172 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12173 12173 dtrace_enabling_destroy(new);
12174 12174 return (err);
12175 12175 }
12176 12176
12177 12177 return (0);
12178 12178 }
12179 12179
12180 12180 static void
12181 12181 dtrace_enabling_retract(dtrace_state_t *state)
12182 12182 {
12183 12183 dtrace_enabling_t *enab, *next;
12184 12184
12185 12185 ASSERT(MUTEX_HELD(&dtrace_lock));
12186 12186
12187 12187 /*
12188 12188 	 * Iterate over all retained enablings, destroying those retained
12189 12189 	 * for the specified state.
12190 12190 */
12191 12191 for (enab = dtrace_retained; enab != NULL; enab = next) {
12192 12192 next = enab->dten_next;
12193 12193
12194 12194 /*
12195 12195 * dtvs_state can only be NULL for helper enablings -- and
12196 12196 * helper enablings can't be retained.
12197 12197 */
12198 12198 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12199 12199
12200 12200 if (enab->dten_vstate->dtvs_state == state) {
12201 12201 ASSERT(state->dts_nretained > 0);
12202 12202 dtrace_enabling_destroy(enab);
12203 12203 }
12204 12204 }
12205 12205
12206 12206 ASSERT(state->dts_nretained == 0);
12207 12207 }
12208 12208
12209 12209 static int
12210 12210 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
12211 12211 {
12212 12212 int i = 0;
12213 12213 int total_matched = 0, matched = 0;
12214 12214
12215 12215 ASSERT(MUTEX_HELD(&cpu_lock));
12216 12216 ASSERT(MUTEX_HELD(&dtrace_lock));
12217 12217
12218 12218 for (i = 0; i < enab->dten_ndesc; i++) {
12219 12219 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12220 12220
12221 12221 enab->dten_current = ep;
12222 12222 enab->dten_error = 0;
12223 12223
12224 12224 /*
12225 12225 * If a provider failed to enable a probe then get out and
12226 12226 * let the consumer know we failed.
12227 12227 */
12228 12228 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
12229 12229 return (EBUSY);
12230 12230
12231 12231 total_matched += matched;
12232 12232
12233 12233 if (enab->dten_error != 0) {
12234 12234 /*
12235 12235 * If we get an error half-way through enabling the
12236 12236 * probes, we kick out -- perhaps with some number of
12237 12237 * them enabled. Leaving enabled probes enabled may
12238 12238 * be slightly confusing for user-level, but we expect
12239 12239 * that no one will attempt to actually drive on in
12240 12240 * the face of such errors. If this is an anonymous
12241 12241 * enabling (indicated with a NULL nmatched pointer),
12242 12242 * we cmn_err() a message. We aren't expecting to
12243 12243 				 * get such an error -- insofar as it can exist at
12244 12244 				 * all, it would be the result of corrupted DOF in
12245 12245 				 * the driver properties.
12246 12246 */
12247 12247 if (nmatched == NULL) {
12248 12248 cmn_err(CE_WARN, "dtrace_enabling_match() "
12249 12249 "error on %p: %d", (void *)ep,
12250 12250 enab->dten_error);
12251 12251 }
12252 12252
12253 12253 return (enab->dten_error);
12254 12254 }
12255 12255 }
12256 12256
12257 12257 enab->dten_probegen = dtrace_probegen;
12258 12258 if (nmatched != NULL)
12259 12259 *nmatched = total_matched;
12260 12260
12261 12261 return (0);
12262 12262 }
12263 12263
12264 12264 static void
12265 12265 dtrace_enabling_matchall(void)
12266 12266 {
12267 12267 dtrace_enabling_t *enab;
12268 12268
12269 12269 mutex_enter(&cpu_lock);
12270 12270 mutex_enter(&dtrace_lock);
12271 12271
12272 12272 /*
12273 12273 * Iterate over all retained enablings to see if any probes match
12274 12274 * against them. We only perform this operation on enablings for which
12275 12275 * we have sufficient permissions by virtue of being in the global zone
12276 12276 * or in the same zone as the DTrace client. Because we can be called
12277 12277 * after dtrace_detach() has been called, we cannot assert that there
12278 12278 * are retained enablings. We can safely load from dtrace_retained,
12279 12279 * however: the taskq_destroy() at the end of dtrace_detach() will
12280 12280 * block pending our completion.
12281 12281 */
12282 12282 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12283 12283 dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred;
12284 12284 cred_t *cr = dcr->dcr_cred;
12285 12285 zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0;
12286 12286
12287 12287 if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL &&
12288 12288 (zone == GLOBAL_ZONEID || getzoneid() == zone)))
12289 12289 (void) dtrace_enabling_match(enab, NULL);
12290 12290 }
12291 12291
12292 12292 mutex_exit(&dtrace_lock);
12293 12293 mutex_exit(&cpu_lock);
12294 12294 }
12295 12295
12296 12296 /*
12297 12297 * If an enabling is to be enabled without having matched probes (that is, if
12298 12298 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12299 12299 * enabling must be _primed_ by creating an ECB for every ECB description.
12300 12300  * This must be done to ensure that we know the number of speculations, the
12301 12301  * number of aggregations, the minimum buffer size needed, etc. before we
12302 12302  * transition out of DTRACE_ACTIVITY_INACTIVE.  To do this without actually
12303 12303  * enabling any probes, we create ECBs for every ECB description, but with a
12304 12304 * NULL probe -- which is exactly what this function does.
12305 12305 */
12306 12306 static void
12307 12307 dtrace_enabling_prime(dtrace_state_t *state)
12308 12308 {
12309 12309 dtrace_enabling_t *enab;
12310 12310 int i;
12311 12311
12312 12312 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12313 12313 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12314 12314
12315 12315 if (enab->dten_vstate->dtvs_state != state)
12316 12316 continue;
12317 12317
12318 12318 /*
12319 12319 * We don't want to prime an enabling more than once, lest
12320 12320 * we allow a malicious user to induce resource exhaustion.
12321 12321 * (The ECBs that result from priming an enabling aren't
12322 12322 * leaked -- but they also aren't deallocated until the
12323 12323 * consumer state is destroyed.)
12324 12324 */
12325 12325 if (enab->dten_primed)
12326 12326 continue;
12327 12327
12328 12328 for (i = 0; i < enab->dten_ndesc; i++) {
12329 12329 enab->dten_current = enab->dten_desc[i];
12330 12330 (void) dtrace_probe_enable(NULL, enab);
12331 12331 }
12332 12332
12333 12333 enab->dten_primed = 1;
12334 12334 }
12335 12335 }
12336 12336
12337 12337 /*
12338 12338 * Called to indicate that probes should be provided due to retained
12339 12339 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
12340 12340  * must take an initial lap through the enabling, calling the dtps_provide()
12341 12341 * entry point explicitly to allow for autocreated probes.
12342 12342 */
12343 12343 static void
12344 12344 dtrace_enabling_provide(dtrace_provider_t *prv)
12345 12345 {
12346 12346 int i, all = 0;
12347 12347 dtrace_probedesc_t desc;
12348 12348 dtrace_genid_t gen;
12349 12349
12350 12350 ASSERT(MUTEX_HELD(&dtrace_lock));
12351 12351 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
12352 12352
12353 12353 if (prv == NULL) {
12354 12354 all = 1;
12355 12355 prv = dtrace_provider;
12356 12356 }
12357 12357
12358 12358 do {
12359 12359 dtrace_enabling_t *enab;
12360 12360 void *parg = prv->dtpv_arg;
12361 12361
12362 12362 retry:
12363 12363 gen = dtrace_retained_gen;
12364 12364 for (enab = dtrace_retained; enab != NULL;
12365 12365 enab = enab->dten_next) {
12366 12366 for (i = 0; i < enab->dten_ndesc; i++) {
12367 12367 desc = enab->dten_desc[i]->dted_probe;
12368 12368 mutex_exit(&dtrace_lock);
12369 12369 prv->dtpv_pops.dtps_provide(parg, &desc);
12370 12370 mutex_enter(&dtrace_lock);
12371 12371 /*
12372 12372 * Process the retained enablings again if
12373 12373 * they have changed while we weren't holding
12374 12374 * dtrace_lock.
12375 12375 */
12376 12376 if (gen != dtrace_retained_gen)
12377 12377 goto retry;
12378 12378 }
12379 12379 }
12380 12380 } while (all && (prv = prv->dtpv_next) != NULL);
12381 12381
12382 12382 mutex_exit(&dtrace_lock);
12383 12383 dtrace_probe_provide(NULL, all ? NULL : prv);
12384 12384 mutex_enter(&dtrace_lock);
12385 12385 }
12386 12386
12387 12387 /*
12388 12388 * Called to reap ECBs that are attached to probes from defunct providers.
12389 12389 */
12390 12390 static void
12391 12391 dtrace_enabling_reap(void)
12392 12392 {
12393 12393 dtrace_provider_t *prov;
12394 12394 dtrace_probe_t *probe;
12395 12395 dtrace_ecb_t *ecb;
12396 12396 hrtime_t when;
12397 12397 int i;
12398 12398
12399 12399 mutex_enter(&cpu_lock);
12400 12400 mutex_enter(&dtrace_lock);
12401 12401
12402 12402 for (i = 0; i < dtrace_nprobes; i++) {
12403 12403 if ((probe = dtrace_probes[i]) == NULL)
12404 12404 continue;
12405 12405
12406 12406 if (probe->dtpr_ecb == NULL)
12407 12407 continue;
12408 12408
12409 12409 prov = probe->dtpr_provider;
12410 12410
12411 12411 if ((when = prov->dtpv_defunct) == 0)
12412 12412 continue;
12413 12413
12414 12414 /*
12415 12415 * We have ECBs on a defunct provider: we want to reap these
12416 12416 * ECBs to allow the provider to unregister. The destruction
12417 12417 * of these ECBs must be done carefully: if we destroy the ECB
12418 12418 * and the consumer later wishes to consume an EPID that
12419 12419 * corresponds to the destroyed ECB (and if the EPID metadata
12420 12420 * has not been previously consumed), the consumer will abort
12421 12421 * processing on the unknown EPID. To reduce (but not, sadly,
12422 12422 * eliminate) the possibility of this, we will only destroy an
12423 12423 * ECB for a defunct provider if, for the state that
12424 12424 * corresponds to the ECB:
12425 12425 *
12426 12426 * (a) There is no speculative tracing (which can effectively
12427 12427 * cache an EPID for an arbitrary amount of time).
12428 12428 *
12429 12429 * (b) The principal buffers have been switched twice since the
12430 12430 * provider became defunct.
12431 12431 *
12432 12432 * (c) The aggregation buffers are of zero size or have been
12433 12433 * switched twice since the provider became defunct.
12434 12434 *
12435 12435 * We use dts_speculates to determine (a) and call a function
12436 12436 * (dtrace_buffer_consumed()) to determine (b) and (c). Note
12437 12437 * that as soon as we've been unable to destroy one of the ECBs
12438 12438 * associated with the probe, we quit trying -- reaping is only
12439 12439 		 * fruitful inasmuch as we can destroy all ECBs associated
12440 12440 * with the defunct provider's probes.
12441 12441 */
12442 12442 while ((ecb = probe->dtpr_ecb) != NULL) {
12443 12443 dtrace_state_t *state = ecb->dte_state;
12444 12444 dtrace_buffer_t *buf = state->dts_buffer;
12445 12445 dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
12446 12446
12447 12447 if (state->dts_speculates)
12448 12448 break;
12449 12449
12450 12450 if (!dtrace_buffer_consumed(buf, when))
12451 12451 break;
12452 12452
12453 12453 if (!dtrace_buffer_consumed(aggbuf, when))
12454 12454 break;
12455 12455
12456 12456 dtrace_ecb_disable(ecb);
12457 12457 ASSERT(probe->dtpr_ecb != ecb);
12458 12458 dtrace_ecb_destroy(ecb);
12459 12459 }
12460 12460 }
12461 12461
12462 12462 mutex_exit(&dtrace_lock);
12463 12463 mutex_exit(&cpu_lock);
12464 12464 }
12465 12465
12466 12466 /*
12467 12467 * DTrace DOF Functions
12468 12468 */
12469 12469 /*ARGSUSED*/
12470 12470 static void
12471 12471 dtrace_dof_error(dof_hdr_t *dof, const char *str)
12472 12472 {
12473 12473 if (dtrace_err_verbose)
12474 12474 cmn_err(CE_WARN, "failed to process DOF: %s", str);
12475 12475
12476 12476 #ifdef DTRACE_ERRDEBUG
12477 12477 dtrace_errdebug(str);
12478 12478 #endif
12479 12479 }
12480 12480
12481 12481 /*
12482 12482 * Create DOF out of a currently enabled state. Right now, we only create
12483 12483 * DOF containing the run-time options -- but this could be expanded to create
12484 12484 * complete DOF representing the enabled state.
12485 12485 */
12486 12486 static dof_hdr_t *
12487 12487 dtrace_dof_create(dtrace_state_t *state)
12488 12488 {
12489 12489 dof_hdr_t *dof;
12490 12490 dof_sec_t *sec;
12491 12491 dof_optdesc_t *opt;
12492 12492 int i, len = sizeof (dof_hdr_t) +
12493 12493 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12494 12494 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12495 12495
12496 12496 ASSERT(MUTEX_HELD(&dtrace_lock));
12497 12497
12498 12498 dof = kmem_zalloc(len, KM_SLEEP);
12499 12499 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12500 12500 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12501 12501 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12502 12502 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12503 12503
12504 12504 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12505 12505 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12506 12506 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
12507 12507 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
12508 12508 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
12509 12509 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
12510 12510
12511 12511 dof->dofh_flags = 0;
12512 12512 dof->dofh_hdrsize = sizeof (dof_hdr_t);
12513 12513 dof->dofh_secsize = sizeof (dof_sec_t);
12514 12514 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
12515 12515 dof->dofh_secoff = sizeof (dof_hdr_t);
12516 12516 dof->dofh_loadsz = len;
12517 12517 dof->dofh_filesz = len;
12518 12518 dof->dofh_pad = 0;
12519 12519
12520 12520 /*
12521 12521 * Fill in the option section header...
12522 12522 */
12523 12523 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
12524 12524 sec->dofs_type = DOF_SECT_OPTDESC;
12525 12525 sec->dofs_align = sizeof (uint64_t);
12526 12526 sec->dofs_flags = DOF_SECF_LOAD;
12527 12527 sec->dofs_entsize = sizeof (dof_optdesc_t);
12528 12528
12529 12529 opt = (dof_optdesc_t *)((uintptr_t)sec +
12530 12530 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
12531 12531
12532 12532 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
12533 12533 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12534 12534
12535 12535 for (i = 0; i < DTRACEOPT_MAX; i++) {
12536 12536 opt[i].dofo_option = i;
12537 12537 opt[i].dofo_strtab = DOF_SECIDX_NONE;
12538 12538 opt[i].dofo_value = state->dts_options[i];
12539 12539 }
12540 12540
12541 12541 return (dof);
12542 12542 }
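/*
 * A minimal consumer-side sketch showing how the single DOF_SECT_OPTDESC
 * section built by dtrace_dof_create() could be walked to read back one
 * option value.  This is illustrative only: dtrace_dof_getopt() is a
 * hypothetical helper, not a routine in this file, and it assumes the
 * section's entry size is sizeof (dof_optdesc_t), as dtrace_dof_create()
 * sets it.
 */
static int
dtrace_dof_getopt(dof_hdr_t *dof, uint32_t option, uint64_t *valp)
{
	uintptr_t daddr = (uintptr_t)dof;
	uint_t i, j, n;

	for (i = 0; i < dof->dofh_secnum; i++) {
		dof_sec_t *sec = (dof_sec_t *)(daddr +
		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
		dof_optdesc_t *opt;

		if (sec->dofs_type != DOF_SECT_OPTDESC ||
		    sec->dofs_entsize != sizeof (dof_optdesc_t))
			continue;

		opt = (dof_optdesc_t *)(daddr + (uintptr_t)sec->dofs_offset);
		n = sec->dofs_size / sec->dofs_entsize;

		for (j = 0; j < n; j++) {
			if (opt[j].dofo_option == option) {
				*valp = opt[j].dofo_value;
				return (0);
			}
		}
	}

	return (-1);
}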
12543 12543
12544 12544 static dof_hdr_t *
12545 12545 dtrace_dof_copyin(uintptr_t uarg, int *errp)
12546 12546 {
12547 12547 dof_hdr_t hdr, *dof;
12548 12548
12549 12549 ASSERT(!MUTEX_HELD(&dtrace_lock));
12550 12550
12551 12551 /*
12552 12552 * First, we're going to copyin() the sizeof (dof_hdr_t).
12553 12553 */
12554 12554 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
12555 12555 dtrace_dof_error(NULL, "failed to copyin DOF header");
12556 12556 *errp = EFAULT;
12557 12557 return (NULL);
12558 12558 }
12559 12559
12560 12560 /*
12561 12561 * Now we'll allocate the entire DOF and copy it in -- provided
12562 12562 * that the length isn't outrageous.
12563 12563 */
12564 12564 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
12565 12565 dtrace_dof_error(&hdr, "load size exceeds maximum");
12566 12566 *errp = E2BIG;
12567 12567 return (NULL);
12568 12568 }
12569 12569
12570 12570 if (hdr.dofh_loadsz < sizeof (hdr)) {
12571 12571 dtrace_dof_error(&hdr, "invalid load size");
12572 12572 *errp = EINVAL;
12573 12573 return (NULL);
12574 12574 }
12575 12575
12576 12576 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
12577 12577
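	/*
	 * Copy in the entire DOF and verify that the load size in the
	 * copied-in header still matches the one we validated above; if the
	 * copyin fails or the size has changed underneath us, discard the
	 * buffer and report EFAULT.
	 */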
12578 12578 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
12579 12579 dof->dofh_loadsz != hdr.dofh_loadsz) {
12580 12580 kmem_free(dof, hdr.dofh_loadsz);
12581 12581 *errp = EFAULT;
12582 12582 return (NULL);
12583 12583 }
12584 12584
12585 12585 return (dof);
12586 12586 }
12587 12587
12588 12588 static dof_hdr_t *
12589 12589 dtrace_dof_property(const char *name)
12590 12590 {
12591 12591 uchar_t *buf;
12592 12592 uint64_t loadsz;
12593 12593 unsigned int len, i;
12594 12594 dof_hdr_t *dof;
12595 12595
12596 12596 /*
12597 12597 	 * Unfortunately, arrays of values in .conf files are always (and
12598 12598 	 * only) interpreted as integer arrays.  We must read our DOF
12599 12599 	 * as an integer array, and then squeeze it into a byte array.
12600 12600 */
12601 12601 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
12602 12602 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
12603 12603 return (NULL);
12604 12604
12605 12605 for (i = 0; i < len; i++)
12606 12606 buf[i] = (uchar_t)(((int *)buf)[i]);
12607 12607
12608 12608 if (len < sizeof (dof_hdr_t)) {
12609 12609 ddi_prop_free(buf);
12610 12610 dtrace_dof_error(NULL, "truncated header");
12611 12611 return (NULL);
12612 12612 }
12613 12613
12614 12614 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
12615 12615 ddi_prop_free(buf);
12616 12616 dtrace_dof_error(NULL, "truncated DOF");
12617 12617 return (NULL);
12618 12618 }
12619 12619
12620 12620 if (loadsz >= dtrace_dof_maxsize) {
12621 12621 ddi_prop_free(buf);
12622 12622 dtrace_dof_error(NULL, "oversized DOF");
12623 12623 return (NULL);
12624 12624 }
12625 12625
12626 12626 dof = kmem_alloc(loadsz, KM_SLEEP);
12627 12627 bcopy(buf, dof, loadsz);
12628 12628 ddi_prop_free(buf);
12629 12629
12630 12630 return (dof);
12631 12631 }
12632 12632
12633 12633 static void
12634 12634 dtrace_dof_destroy(dof_hdr_t *dof)
12635 12635 {
12636 12636 kmem_free(dof, dof->dofh_loadsz);
12637 12637 }
12638 12638
12639 12639 /*
12640 12640 * Return the dof_sec_t pointer corresponding to a given section index. If the
12641 12641 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
12642 12642 * a type other than DOF_SECT_NONE is specified, the header is checked against
12643 12643 * this type and NULL is returned if the types do not match.
12644 12644 */
12645 12645 static dof_sec_t *
12646 12646 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
12647 12647 {
12648 12648 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
12649 12649 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
12650 12650
12651 12651 if (i >= dof->dofh_secnum) {
12652 12652 dtrace_dof_error(dof, "referenced section index is invalid");
12653 12653 return (NULL);
12654 12654 }
12655 12655
12656 12656 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
12657 12657 dtrace_dof_error(dof, "referenced section is not loadable");
12658 12658 return (NULL);
12659 12659 }
12660 12660
12661 12661 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
12662 12662 dtrace_dof_error(dof, "referenced section is the wrong type");
12663 12663 return (NULL);
12664 12664 }
12665 12665
12666 12666 return (sec);
12667 12667 }
12668 12668
12669 12669 static dtrace_probedesc_t *
12670 12670 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
12671 12671 {
12672 12672 dof_probedesc_t *probe;
12673 12673 dof_sec_t *strtab;
12674 12674 uintptr_t daddr = (uintptr_t)dof;
12675 12675 uintptr_t str;
12676 12676 size_t size;
12677 12677
12678 12678 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
12679 12679 dtrace_dof_error(dof, "invalid probe section");
12680 12680 return (NULL);
12681 12681 }
12682 12682
12683 12683 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12684 12684 dtrace_dof_error(dof, "bad alignment in probe description");
12685 12685 return (NULL);
12686 12686 }
12687 12687
12688 12688 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
12689 12689 dtrace_dof_error(dof, "truncated probe description");
12690 12690 return (NULL);
12691 12691 }
12692 12692
12693 12693 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
12694 12694 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
12695 12695
12696 12696 if (strtab == NULL)
12697 12697 return (NULL);
12698 12698
12699 12699 str = daddr + strtab->dofs_offset;
12700 12700 size = strtab->dofs_size;
12701 12701
12702 12702 if (probe->dofp_provider >= strtab->dofs_size) {
12703 12703 dtrace_dof_error(dof, "corrupt probe provider");
12704 12704 return (NULL);
12705 12705 }
12706 12706
12707 12707 (void) strncpy(desc->dtpd_provider,
12708 12708 (char *)(str + probe->dofp_provider),
12709 12709 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
12710 12710
12711 12711 if (probe->dofp_mod >= strtab->dofs_size) {
12712 12712 dtrace_dof_error(dof, "corrupt probe module");
12713 12713 return (NULL);
12714 12714 }
12715 12715
12716 12716 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
12717 12717 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
12718 12718
12719 12719 if (probe->dofp_func >= strtab->dofs_size) {
12720 12720 dtrace_dof_error(dof, "corrupt probe function");
12721 12721 return (NULL);
12722 12722 }
12723 12723
12724 12724 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
12725 12725 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
12726 12726
12727 12727 if (probe->dofp_name >= strtab->dofs_size) {
12728 12728 dtrace_dof_error(dof, "corrupt probe name");
12729 12729 return (NULL);
12730 12730 }
12731 12731
12732 12732 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
12733 12733 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
12734 12734
12735 12735 return (desc);
12736 12736 }
12737 12737
12738 12738 static dtrace_difo_t *
12739 12739 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12740 12740 cred_t *cr)
12741 12741 {
12742 12742 dtrace_difo_t *dp;
12743 12743 size_t ttl = 0;
12744 12744 dof_difohdr_t *dofd;
12745 12745 uintptr_t daddr = (uintptr_t)dof;
12746 12746 size_t max = dtrace_difo_maxsize;
12747 12747 int i, l, n;
12748 12748
12749 12749 static const struct {
12750 12750 int section;
12751 12751 int bufoffs;
12752 12752 int lenoffs;
12753 12753 int entsize;
12754 12754 int align;
12755 12755 const char *msg;
12756 12756 } difo[] = {
12757 12757 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12758 12758 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12759 12759 sizeof (dif_instr_t), "multiple DIF sections" },
12760 12760
12761 12761 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12762 12762 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12763 12763 sizeof (uint64_t), "multiple integer tables" },
12764 12764
12765 12765 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12766 12766 offsetof(dtrace_difo_t, dtdo_strlen), 0,
12767 12767 sizeof (char), "multiple string tables" },
12768 12768
12769 12769 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12770 12770 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12771 12771 sizeof (uint_t), "multiple variable tables" },
12772 12772
12773 12773 { DOF_SECT_NONE, 0, 0, 0, NULL }
12774 12774 };
12775 12775
12776 12776 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12777 12777 dtrace_dof_error(dof, "invalid DIFO header section");
12778 12778 return (NULL);
12779 12779 }
12780 12780
12781 12781 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12782 12782 dtrace_dof_error(dof, "bad alignment in DIFO header");
12783 12783 return (NULL);
12784 12784 }
12785 12785
12786 12786 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12787 12787 sec->dofs_size % sizeof (dof_secidx_t)) {
12788 12788 dtrace_dof_error(dof, "bad size in DIFO header");
12789 12789 return (NULL);
12790 12790 }
12791 12791
12792 12792 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12793 12793 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
12794 12794
12795 12795 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12796 12796 dp->dtdo_rtype = dofd->dofd_rtype;
12797 12797
12798 12798 for (l = 0; l < n; l++) {
12799 12799 dof_sec_t *subsec;
12800 12800 void **bufp;
12801 12801 uint32_t *lenp;
12802 12802
12803 12803 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12804 12804 dofd->dofd_links[l])) == NULL)
12805 12805 goto err; /* invalid section link */
12806 12806
12807 12807 if (ttl + subsec->dofs_size > max) {
12808 12808 dtrace_dof_error(dof, "exceeds maximum size");
12809 12809 goto err;
12810 12810 }
12811 12811
12812 12812 ttl += subsec->dofs_size;
12813 12813
12814 12814 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12815 12815 if (subsec->dofs_type != difo[i].section)
12816 12816 continue;
12817 12817
12818 12818 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12819 12819 dtrace_dof_error(dof, "section not loaded");
12820 12820 goto err;
12821 12821 }
12822 12822
12823 12823 if (subsec->dofs_align != difo[i].align) {
12824 12824 dtrace_dof_error(dof, "bad alignment");
12825 12825 goto err;
12826 12826 }
12827 12827
12828 12828 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12829 12829 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12830 12830
12831 12831 if (*bufp != NULL) {
12832 12832 dtrace_dof_error(dof, difo[i].msg);
12833 12833 goto err;
12834 12834 }
12835 12835
12836 12836 if (difo[i].entsize != subsec->dofs_entsize) {
12837 12837 dtrace_dof_error(dof, "entry size mismatch");
12838 12838 goto err;
12839 12839 }
12840 12840
12841 12841 if (subsec->dofs_entsize != 0 &&
12842 12842 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12843 12843 dtrace_dof_error(dof, "corrupt entry size");
12844 12844 goto err;
12845 12845 }
12846 12846
12847 12847 *lenp = subsec->dofs_size;
12848 12848 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12849 12849 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12850 12850 *bufp, subsec->dofs_size);
12851 12851
12852 12852 if (subsec->dofs_entsize != 0)
12853 12853 *lenp /= subsec->dofs_entsize;
12854 12854
12855 12855 break;
12856 12856 }
12857 12857
12858 12858 /*
12859 12859 * If we encounter a loadable DIFO sub-section that is not
12860 12860 * known to us, assume this is a broken program and fail.
12861 12861 */
12862 12862 if (difo[i].section == DOF_SECT_NONE &&
12863 12863 (subsec->dofs_flags & DOF_SECF_LOAD)) {
12864 12864 dtrace_dof_error(dof, "unrecognized DIFO subsection");
12865 12865 goto err;
12866 12866 }
12867 12867 }
12868 12868
12869 12869 if (dp->dtdo_buf == NULL) {
12870 12870 /*
12871 12871 * We can't have a DIF object without DIF text.
12872 12872 */
12873 12873 dtrace_dof_error(dof, "missing DIF text");
12874 12874 goto err;
12875 12875 }
12876 12876
12877 12877 /*
12878 12878 * Before we validate the DIF object, run through the variable table
12879 12879 	 * looking for strings -- if any of their sizes are zero, we'll set
12880 12880 * their size to be the system-wide default string size. Note that
12881 12881 * this should _not_ happen if the "strsize" option has been set --
12882 12882 * in this case, the compiler should have set the size to reflect the
12883 12883 * setting of the option.
12884 12884 */
12885 12885 for (i = 0; i < dp->dtdo_varlen; i++) {
12886 12886 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12887 12887 dtrace_diftype_t *t = &v->dtdv_type;
12888 12888
12889 12889 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12890 12890 continue;
12891 12891
12892 12892 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12893 12893 t->dtdt_size = dtrace_strsize_default;
12894 12894 }
12895 12895
12896 12896 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12897 12897 goto err;
12898 12898
12899 12899 dtrace_difo_init(dp, vstate);
12900 12900 return (dp);
12901 12901
12902 12902 err:
12903 12903 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12904 12904 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12905 12905 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12906 12906 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12907 12907
12908 12908 kmem_free(dp, sizeof (dtrace_difo_t));
12909 12909 return (NULL);
12910 12910 }
12911 12911
12912 12912 static dtrace_predicate_t *
12913 12913 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12914 12914 cred_t *cr)
12915 12915 {
12916 12916 dtrace_difo_t *dp;
12917 12917
12918 12918 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12919 12919 return (NULL);
12920 12920
12921 12921 return (dtrace_predicate_create(dp));
12922 12922 }
12923 12923
12924 12924 static dtrace_actdesc_t *
12925 12925 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12926 12926 cred_t *cr)
12927 12927 {
12928 12928 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12929 12929 dof_actdesc_t *desc;
12930 12930 dof_sec_t *difosec;
12931 12931 size_t offs;
12932 12932 uintptr_t daddr = (uintptr_t)dof;
12933 12933 uint64_t arg;
12934 12934 dtrace_actkind_t kind;
12935 12935
12936 12936 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12937 12937 dtrace_dof_error(dof, "invalid action section");
12938 12938 return (NULL);
12939 12939 }
12940 12940
12941 12941 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12942 12942 dtrace_dof_error(dof, "truncated action description");
12943 12943 return (NULL);
12944 12944 }
12945 12945
12946 12946 if (sec->dofs_align != sizeof (uint64_t)) {
12947 12947 dtrace_dof_error(dof, "bad alignment in action description");
12948 12948 return (NULL);
12949 12949 }
12950 12950
12951 12951 if (sec->dofs_size < sec->dofs_entsize) {
12952 12952 dtrace_dof_error(dof, "section entry size exceeds total size");
12953 12953 return (NULL);
12954 12954 }
12955 12955
12956 12956 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12957 12957 dtrace_dof_error(dof, "bad entry size in action description");
12958 12958 return (NULL);
12959 12959 }
12960 12960
12961 12961 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12962 12962 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12963 12963 return (NULL);
12964 12964 }
12965 12965
12966 12966 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12967 12967 desc = (dof_actdesc_t *)(daddr +
12968 12968 (uintptr_t)sec->dofs_offset + offs);
12969 12969 kind = (dtrace_actkind_t)desc->dofa_kind;
12970 12970
12971 12971 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
12972 12972 (kind != DTRACEACT_PRINTA ||
12973 12973 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
12974 12974 (kind == DTRACEACT_DIFEXPR &&
12975 12975 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12976 12976 dof_sec_t *strtab;
12977 12977 char *str, *fmt;
12978 12978 uint64_t i;
12979 12979
12980 12980 /*
12981 12981 * The argument to these actions is an index into the
12982 12982 * DOF string table. For printf()-like actions, this
12983 12983 * is the format string. For print(), this is the
12984 12984 * CTF type of the expression result.
12985 12985 */
12986 12986 if ((strtab = dtrace_dof_sect(dof,
12987 12987 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12988 12988 goto err;
12989 12989
12990 12990 str = (char *)((uintptr_t)dof +
12991 12991 (uintptr_t)strtab->dofs_offset);
12992 12992
12993 12993 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12994 12994 if (str[i] == '\0')
12995 12995 break;
12996 12996 }
12997 12997
12998 12998 if (i >= strtab->dofs_size) {
12999 12999 dtrace_dof_error(dof, "bogus format string");
13000 13000 goto err;
13001 13001 }
13002 13002
13003 13003 if (i == desc->dofa_arg) {
13004 13004 dtrace_dof_error(dof, "empty format string");
13005 13005 goto err;
13006 13006 }
13007 13007
13008 13008 i -= desc->dofa_arg;
13009 13009 fmt = kmem_alloc(i + 1, KM_SLEEP);
13010 13010 bcopy(&str[desc->dofa_arg], fmt, i + 1);
13011 13011 arg = (uint64_t)(uintptr_t)fmt;
13012 13012 } else {
13013 13013 if (kind == DTRACEACT_PRINTA) {
13014 13014 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
13015 13015 arg = 0;
13016 13016 } else {
13017 13017 arg = desc->dofa_arg;
13018 13018 }
13019 13019 }
13020 13020
13021 13021 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
13022 13022 desc->dofa_uarg, arg);
13023 13023
13024 13024 if (last != NULL) {
13025 13025 last->dtad_next = act;
13026 13026 } else {
13027 13027 first = act;
13028 13028 }
13029 13029
13030 13030 last = act;
13031 13031
13032 13032 if (desc->dofa_difo == DOF_SECIDX_NONE)
13033 13033 continue;
13034 13034
13035 13035 if ((difosec = dtrace_dof_sect(dof,
13036 13036 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
13037 13037 goto err;
13038 13038
13039 13039 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
13040 13040
13041 13041 if (act->dtad_difo == NULL)
13042 13042 goto err;
13043 13043 }
13044 13044
13045 13045 ASSERT(first != NULL);
13046 13046 return (first);
13047 13047
13048 13048 err:
13049 13049 for (act = first; act != NULL; act = next) {
13050 13050 next = act->dtad_next;
13051 13051 dtrace_actdesc_release(act, vstate);
13052 13052 }
13053 13053
13054 13054 return (NULL);
13055 13055 }
13056 13056
13057 13057 static dtrace_ecbdesc_t *
13058 13058 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13059 13059 cred_t *cr)
13060 13060 {
13061 13061 dtrace_ecbdesc_t *ep;
13062 13062 dof_ecbdesc_t *ecb;
13063 13063 dtrace_probedesc_t *desc;
13064 13064 dtrace_predicate_t *pred = NULL;
13065 13065
13066 13066 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
13067 13067 dtrace_dof_error(dof, "truncated ECB description");
13068 13068 return (NULL);
13069 13069 }
13070 13070
13071 13071 if (sec->dofs_align != sizeof (uint64_t)) {
13072 13072 dtrace_dof_error(dof, "bad alignment in ECB description");
13073 13073 return (NULL);
13074 13074 }
13075 13075
13076 13076 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
13077 13077 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
13078 13078
13079 13079 if (sec == NULL)
13080 13080 return (NULL);
13081 13081
13082 13082 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
13083 13083 ep->dted_uarg = ecb->dofe_uarg;
13084 13084 desc = &ep->dted_probe;
13085 13085
13086 13086 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
13087 13087 goto err;
13088 13088
13089 13089 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
13090 13090 if ((sec = dtrace_dof_sect(dof,
13091 13091 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
13092 13092 goto err;
13093 13093
13094 13094 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
13095 13095 goto err;
13096 13096
13097 13097 ep->dted_pred.dtpdd_predicate = pred;
13098 13098 }
13099 13099
13100 13100 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
13101 13101 if ((sec = dtrace_dof_sect(dof,
13102 13102 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
13103 13103 goto err;
13104 13104
13105 13105 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
13106 13106
13107 13107 if (ep->dted_action == NULL)
13108 13108 goto err;
13109 13109 }
13110 13110
13111 13111 return (ep);
13112 13112
13113 13113 err:
13114 13114 if (pred != NULL)
13115 13115 dtrace_predicate_release(pred, vstate);
13116 13116 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
13117 13117 return (NULL);
13118 13118 }
13119 13119
13120 13120 /*
13121 13121 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
13122 13122 * specified DOF. At present, this amounts to simply adding 'ubase' to the
13123 13123 * site of any user SETX relocations to account for load object base address.
13124 13124 * In the future, if we need other relocations, this function can be extended.
13125 13125 */
13126 13126 static int
13127 13127 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
13128 13128 {
13129 13129 uintptr_t daddr = (uintptr_t)dof;
13130 13130 dof_relohdr_t *dofr =
13131 13131 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
13132 13132 dof_sec_t *ss, *rs, *ts;
13133 13133 dof_relodesc_t *r;
13134 13134 uint_t i, n;
13135 13135
13136 13136 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
13137 13137 sec->dofs_align != sizeof (dof_secidx_t)) {
13138 13138 dtrace_dof_error(dof, "invalid relocation header");
13139 13139 return (-1);
13140 13140 }
13141 13141
13142 13142 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
13143 13143 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
13144 13144 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
13145 13145
13146 13146 if (ss == NULL || rs == NULL || ts == NULL)
13147 13147 return (-1); /* dtrace_dof_error() has been called already */
13148 13148
13149 13149 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
13150 13150 rs->dofs_align != sizeof (uint64_t)) {
13151 13151 dtrace_dof_error(dof, "invalid relocation section");
13152 13152 return (-1);
13153 13153 }
13154 13154
13155 13155 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
13156 13156 n = rs->dofs_size / rs->dofs_entsize;
13157 13157
13158 13158 for (i = 0; i < n; i++) {
13159 13159 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
13160 13160
13161 13161 switch (r->dofr_type) {
13162 13162 case DOF_RELO_NONE:
13163 13163 break;
13164 13164 case DOF_RELO_SETX:
13165 13165 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
13166 13166 sizeof (uint64_t) > ts->dofs_size) {
13167 13167 dtrace_dof_error(dof, "bad relocation offset");
13168 13168 return (-1);
13169 13169 }
13170 13170
13171 13171 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
13172 13172 dtrace_dof_error(dof, "misaligned setx relo");
13173 13173 return (-1);
13174 13174 }
13175 13175
13176 13176 *(uint64_t *)taddr += ubase;
13177 13177 break;
13178 13178 default:
13179 13179 dtrace_dof_error(dof, "invalid relocation type");
13180 13180 return (-1);
13181 13181 }
13182 13182
13183 13183 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
13184 13184 }
13185 13185
13186 13186 return (0);
13187 13187 }
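/*
 * An illustrative example with hypothetical values: for a user SETX
 * relocation whose dofr_offset is 0x20 against target section ts, with the
 * containing load object mapped at ubase 0x7f0000000000, the loop above
 * simply performs
 *
 *	*(uint64_t *)(daddr + ts->dofs_offset + 0x20) += 0x7f0000000000;
 *
 * after verifying that the 8-byte site lies within the section and is
 * 64-bit aligned.
 */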
13188 13188
13189 13189 /*
13190 13190 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
13191 13191 * header: it should be at the front of a memory region that is at least
13192 13192 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
13193 13193 * size. It need not be validated in any other way.
13194 13194 */
13195 13195 static int
13196 13196 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
13197 13197 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
13198 13198 {
13199 13199 uint64_t len = dof->dofh_loadsz, seclen;
13200 13200 uintptr_t daddr = (uintptr_t)dof;
13201 13201 dtrace_ecbdesc_t *ep;
13202 13202 dtrace_enabling_t *enab;
13203 13203 uint_t i;
13204 13204
13205 13205 ASSERT(MUTEX_HELD(&dtrace_lock));
13206 13206 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
13207 13207
13208 13208 /*
13209 13209 * Check the DOF header identification bytes. In addition to checking
13210 13210 * valid settings, we also verify that unused bits/bytes are zeroed so
13211 13211 * we can use them later without fear of regressing existing binaries.
13212 13212 */
13213 13213 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
13214 13214 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
13215 13215 dtrace_dof_error(dof, "DOF magic string mismatch");
13216 13216 return (-1);
13217 13217 }
13218 13218
13219 13219 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
13220 13220 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
13221 13221 dtrace_dof_error(dof, "DOF has invalid data model");
13222 13222 return (-1);
13223 13223 }
13224 13224
13225 13225 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
13226 13226 dtrace_dof_error(dof, "DOF encoding mismatch");
13227 13227 return (-1);
13228 13228 }
13229 13229
13230 13230 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
13231 13231 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
13232 13232 dtrace_dof_error(dof, "DOF version mismatch");
13233 13233 return (-1);
13234 13234 }
13235 13235
13236 13236 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
13237 13237 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
13238 13238 return (-1);
13239 13239 }
13240 13240
13241 13241 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
13242 13242 dtrace_dof_error(dof, "DOF uses too many integer registers");
13243 13243 return (-1);
13244 13244 }
13245 13245
13246 13246 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
13247 13247 dtrace_dof_error(dof, "DOF uses too many tuple registers");
13248 13248 return (-1);
13249 13249 }
13250 13250
13251 13251 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
13252 13252 if (dof->dofh_ident[i] != 0) {
13253 13253 dtrace_dof_error(dof, "DOF has invalid ident byte set");
13254 13254 return (-1);
13255 13255 }
13256 13256 }
13257 13257
13258 13258 if (dof->dofh_flags & ~DOF_FL_VALID) {
13259 13259 dtrace_dof_error(dof, "DOF has invalid flag bits set");
13260 13260 return (-1);
13261 13261 }
13262 13262
13263 13263 if (dof->dofh_secsize == 0) {
13264 13264 dtrace_dof_error(dof, "zero section header size");
13265 13265 return (-1);
13266 13266 }
13267 13267
13268 13268 /*
13269 13269 * Check that the section headers don't exceed the amount of DOF
13270 13270 * data. Note that we cast the section size and number of sections
13271 13271 * to uint64_t's to prevent possible overflow in the multiplication.
13272 13272 */
13273 13273 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
13274 13274
13275 13275 if (dof->dofh_secoff > len || seclen > len ||
13276 13276 dof->dofh_secoff + seclen > len) {
13277 13277 dtrace_dof_error(dof, "truncated section headers");
13278 13278 return (-1);
13279 13279 }
13280 13280
13281 13281 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
13282 13282 dtrace_dof_error(dof, "misaligned section headers");
13283 13283 return (-1);
13284 13284 }
13285 13285
13286 13286 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
13287 13287 dtrace_dof_error(dof, "misaligned section size");
13288 13288 return (-1);
13289 13289 }
13290 13290
13291 13291 /*
13292 13292 * Take an initial pass through the section headers to be sure that
13293 13293 * the headers don't have stray offsets. If the 'noprobes' flag is
13294 13294 * set, do not permit sections relating to providers, probes, or args.
13295 13295 */
13296 13296 for (i = 0; i < dof->dofh_secnum; i++) {
13297 13297 dof_sec_t *sec = (dof_sec_t *)(daddr +
13298 13298 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13299 13299
13300 13300 if (noprobes) {
13301 13301 switch (sec->dofs_type) {
13302 13302 case DOF_SECT_PROVIDER:
13303 13303 case DOF_SECT_PROBES:
13304 13304 case DOF_SECT_PRARGS:
13305 13305 case DOF_SECT_PROFFS:
13306 13306 dtrace_dof_error(dof, "illegal sections "
13307 13307 "for enabling");
13308 13308 return (-1);
13309 13309 }
13310 13310 }
13311 13311
13312 13312 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
13313 13313 !(sec->dofs_flags & DOF_SECF_LOAD)) {
13314 13314 dtrace_dof_error(dof, "loadable section with load "
13315 13315 "flag unset");
13316 13316 return (-1);
13317 13317 }
13318 13318
13319 13319 if (!(sec->dofs_flags & DOF_SECF_LOAD))
13320 13320 continue; /* just ignore non-loadable sections */
13321 13321
13322 13322 if (!ISP2(sec->dofs_align)) {
13323 13323 dtrace_dof_error(dof, "bad section alignment");
13324 13324 return (-1);
13325 13325 }
13326 13326
13327 13327 if (sec->dofs_offset & (sec->dofs_align - 1)) {
13328 13328 dtrace_dof_error(dof, "misaligned section");
13329 13329 return (-1);
13330 13330 }
13331 13331
13332 13332 if (sec->dofs_offset > len || sec->dofs_size > len ||
13333 13333 sec->dofs_offset + sec->dofs_size > len) {
13334 13334 dtrace_dof_error(dof, "corrupt section header");
13335 13335 return (-1);
13336 13336 }
13337 13337
13338 13338 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
13339 13339 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
13340 13340 dtrace_dof_error(dof, "non-terminating string table");
13341 13341 return (-1);
13342 13342 }
13343 13343 }
13344 13344
13345 13345 /*
13346 13346 * Take a second pass through the sections and locate and perform any
13347 13347 * relocations that are present. We do this after the first pass to
13348 13348 * be sure that all sections have had their headers validated.
13349 13349 */
13350 13350 for (i = 0; i < dof->dofh_secnum; i++) {
13351 13351 dof_sec_t *sec = (dof_sec_t *)(daddr +
13352 13352 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13353 13353
13354 13354 if (!(sec->dofs_flags & DOF_SECF_LOAD))
13355 13355 continue; /* skip sections that are not loadable */
13356 13356
13357 13357 switch (sec->dofs_type) {
13358 13358 case DOF_SECT_URELHDR:
13359 13359 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
13360 13360 return (-1);
13361 13361 break;
13362 13362 }
13363 13363 }
13364 13364
13365 13365 if ((enab = *enabp) == NULL)
13366 13366 enab = *enabp = dtrace_enabling_create(vstate);
13367 13367
13368 13368 for (i = 0; i < dof->dofh_secnum; i++) {
13369 13369 dof_sec_t *sec = (dof_sec_t *)(daddr +
13370 13370 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13371 13371
13372 13372 if (sec->dofs_type != DOF_SECT_ECBDESC)
13373 13373 continue;
13374 13374
13375 13375 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
13376 13376 dtrace_enabling_destroy(enab);
13377 13377 *enabp = NULL;
13378 13378 return (-1);
13379 13379 }
13380 13380
13381 13381 dtrace_enabling_add(enab, ep);
13382 13382 }
13383 13383
13384 13384 return (0);
13385 13385 }
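
/*
 * A minimal stand-alone sketch of the overflow-safe bounds check that
 * dtrace_dof_slurp() applies to the section headers above (illustrative
 * only; validate_extent() is not a function in this file).  Widening the
 * 32-bit count and entry size to 64 bits before multiplying, and checking
 * each operand against the length as well as their sum, prevents a wrapped
 * product or sum from slipping past the check.
 */
static int
validate_extent(uint32_t off, uint32_t nelem, uint32_t entsize, uint64_t len)
{
	uint64_t total = (uint64_t)nelem * (uint64_t)entsize;

	if (off > len || total > len || (uint64_t)off + total > len)
		return (-1);	/* extent would run past the end of the DOF */

	return (0);
}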
13386 13386
13387 13387 /*
13388 13388 * Process DOF for any options. This routine assumes that the DOF has been
13389 13389 * at least processed by dtrace_dof_slurp().
13390 13390 */
13391 13391 static int
13392 13392 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
13393 13393 {
13394 13394 int i, rval;
13395 13395 uint32_t entsize;
13396 13396 size_t offs;
13397 13397 dof_optdesc_t *desc;
13398 13398
13399 13399 for (i = 0; i < dof->dofh_secnum; i++) {
13400 13400 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
13401 13401 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13402 13402
13403 13403 if (sec->dofs_type != DOF_SECT_OPTDESC)
13404 13404 continue;
13405 13405
13406 13406 if (sec->dofs_align != sizeof (uint64_t)) {
13407 13407 dtrace_dof_error(dof, "bad alignment in "
13408 13408 "option description");
13409 13409 return (EINVAL);
13410 13410 }
13411 13411
13412 13412 if ((entsize = sec->dofs_entsize) == 0) {
13413 13413 dtrace_dof_error(dof, "zeroed option entry size");
13414 13414 return (EINVAL);
13415 13415 }
13416 13416
13417 13417 if (entsize < sizeof (dof_optdesc_t)) {
13418 13418 dtrace_dof_error(dof, "bad option entry size");
13419 13419 return (EINVAL);
13420 13420 }
13421 13421
13422 13422 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
13423 13423 desc = (dof_optdesc_t *)((uintptr_t)dof +
13424 13424 (uintptr_t)sec->dofs_offset + offs);
13425 13425
13426 13426 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
13427 13427 dtrace_dof_error(dof, "non-zero option string");
13428 13428 return (EINVAL);
13429 13429 }
13430 13430
13431 13431 if (desc->dofo_value == DTRACEOPT_UNSET) {
13432 13432 dtrace_dof_error(dof, "unset option");
13433 13433 return (EINVAL);
13434 13434 }
13435 13435
13436 13436 if ((rval = dtrace_state_option(state,
13437 13437 desc->dofo_option, desc->dofo_value)) != 0) {
13438 13438 dtrace_dof_error(dof, "rejected option");
13439 13439 return (rval);
13440 13440 }
13441 13441 }
13442 13442 }
13443 13443
13444 13444 return (0);
13445 13445 }
13446 13446
13447 13447 /*
13448 13448 * DTrace Consumer State Functions
13449 13449 */
13450 13450 int
13451 13451 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
13452 13452 {
13453 13453 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13454 13454 void *base;
13455 13455 uintptr_t limit;
13456 13456 dtrace_dynvar_t *dvar, *next, *start;
13457 13457 int i;
13458 13458
13459 13459 ASSERT(MUTEX_HELD(&dtrace_lock));
13460 13460 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13461 13461
13462 13462 bzero(dstate, sizeof (dtrace_dstate_t));
13463 13463
13464 13464 if ((dstate->dtds_chunksize = chunksize) == 0)
13465 13465 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13466 13466
13467 13467 VERIFY(dstate->dtds_chunksize < LONG_MAX);
13468 13468
13469 13469 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13470 13470 size = min;
13471 13471
13472 13472 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
13473 13473 return (ENOMEM);
13474 13474
13475 13475 dstate->dtds_size = size;
13476 13476 dstate->dtds_base = base;
13477 13477 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13478 13478 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13479 13479
13480 13480 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13481 13481
13482 13482 if (hashsize != 1 && (hashsize & 1))
13483 13483 hashsize--;
13484 13484
13485 13485 dstate->dtds_hashsize = hashsize;
13486 13486 dstate->dtds_hash = dstate->dtds_base;
13487 13487
13488 13488 /*
13489 13489 * Set all of our hash buckets to point to the single sink, and (if
13490 13490 * it hasn't already been set), set the sink's hash value to be the
13491 13491 * sink sentinel value. The sink is needed for dynamic variable
13492 13492 * lookups to know that they have iterated over an entire, valid hash
13493 13493 * chain.
13494 13494 */
13495 13495 for (i = 0; i < hashsize; i++)
13496 13496 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
13497 13497
13498 13498 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
13499 13499 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
13500 13500
13501 13501 /*
13502 13502 * Determine number of active CPUs. Divide free list evenly among
13503 13503 * active CPUs.
13504 13504 */
13505 13505 start = (dtrace_dynvar_t *)
13506 13506 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
13507 13507 limit = (uintptr_t)base + size;
13508 13508
13509 13509 VERIFY((uintptr_t)start < limit);
13510 13510 VERIFY((uintptr_t)start >= (uintptr_t)base);
13511 13511
13512 13512 maxper = (limit - (uintptr_t)start) / NCPU;
13513 13513 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
13514 13514
13515 13515 for (i = 0; i < NCPU; i++) {
13516 13516 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
13517 13517
13518 13518 /*
13519 13519 * If we don't even have enough chunks to make it once through
13520 13520 * NCPUs, we're just going to allocate everything to the first
13521 13521 * CPU. And if we're on the last CPU, we're going to allocate
13522 13522 * whatever is left over. In either case, we set the limit to
13523 13523 * be the limit of the dynamic variable space.
13524 13524 */
13525 13525 if (maxper == 0 || i == NCPU - 1) {
13526 13526 limit = (uintptr_t)base + size;
13527 13527 start = NULL;
13528 13528 } else {
13529 13529 limit = (uintptr_t)start + maxper;
13530 13530 start = (dtrace_dynvar_t *)limit;
13531 13531 }
13532 13532
13533 13533 VERIFY(limit <= (uintptr_t)base + size);
13534 13534
13535 13535 for (;;) {
13536 13536 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
13537 13537 dstate->dtds_chunksize);
13538 13538
13539 13539 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
13540 13540 break;
13541 13541
13542 13542 VERIFY((uintptr_t)dvar >= (uintptr_t)base &&
13543 13543 (uintptr_t)dvar <= (uintptr_t)base + size);
13544 13544 dvar->dtdv_next = next;
13545 13545 dvar = next;
13546 13546 }
13547 13547
13548 13548 if (maxper == 0)
13549 13549 break;
13550 13550 }
13551 13551
13552 13552 return (0);
13553 13553 }
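
/*
 * A minimal sketch of the space-carving arithmetic that dtrace_dstate_init()
 * performs above (illustrative only; carve_percpu() is not a function in
 * this file and its parameters are stand-ins).  The buffer is split into a
 * hash table followed by fixed-size chunks, and the chunk area is then dealt
 * out across CPUs in chunk-aligned slices.
 */
static size_t
carve_percpu(size_t size, size_t chunksize, size_t hashent, size_t ncpu)
{
	size_t hashsize = size / (chunksize + hashent);
	size_t maxper;

	if (hashsize != 1 && (hashsize & 1))
		hashsize--;		/* avoid an odd bucket count */

	maxper = (size - hashsize * hashent) / ncpu;
	return ((maxper / chunksize) * chunksize);	/* per-CPU slice */
}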
13554 13554
13555 13555 void
13556 13556 dtrace_dstate_fini(dtrace_dstate_t *dstate)
13557 13557 {
13558 13558 ASSERT(MUTEX_HELD(&cpu_lock));
13559 13559
13560 13560 if (dstate->dtds_base == NULL)
13561 13561 return;
13562 13562
13563 13563 kmem_free(dstate->dtds_base, dstate->dtds_size);
13564 13564 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
13565 13565 }
13566 13566
13567 13567 static void
13568 13568 dtrace_vstate_fini(dtrace_vstate_t *vstate)
13569 13569 {
13570 13570 /*
13571 13571 * Logical XOR, where are you?
13572 13572 */
13573 13573 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
13574 13574
13575 13575 if (vstate->dtvs_nglobals > 0) {
13576 13576 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
13577 13577 sizeof (dtrace_statvar_t *));
13578 13578 }
13579 13579
13580 13580 if (vstate->dtvs_ntlocals > 0) {
13581 13581 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
13582 13582 sizeof (dtrace_difv_t));
13583 13583 }
13584 13584
13585 13585 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
13586 13586
13587 13587 if (vstate->dtvs_nlocals > 0) {
13588 13588 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
13589 13589 sizeof (dtrace_statvar_t *));
13590 13590 }
13591 13591 }
13592 13592
13593 13593 static void
13594 13594 dtrace_state_clean(dtrace_state_t *state)
13595 13595 {
13596 13596 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
13597 13597 return;
13598 13598
13599 13599 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
13600 13600 dtrace_speculation_clean(state);
13601 13601 }
13602 13602
13603 13603 static void
13604 13604 dtrace_state_deadman(dtrace_state_t *state)
13605 13605 {
13606 13606 hrtime_t now;
13607 13607
13608 13608 dtrace_sync();
13609 13609
13610 13610 now = dtrace_gethrtime();
13611 13611
13612 13612 if (state != dtrace_anon.dta_state &&
13613 13613 now - state->dts_laststatus >= dtrace_deadman_user)
13614 13614 return;
13615 13615
13616 13616 /*
13617 13617 * We must be sure that dts_alive never appears to be less than the
13618 13618 * value upon entry to dtrace_state_deadman(), and because we lack a
13619 13619 * dtrace_cas64(), we cannot store to it atomically. We thus instead
13620 13620 * store INT64_MAX to it, followed by a memory barrier, followed by
13621 13621 * the new value. This assures that dts_alive never appears to be
13622 13622 * less than its true value, regardless of the order in which the
13623 13623 * stores to the underlying storage are issued.
13624 13624 */
13625 13625 state->dts_alive = INT64_MAX;
13626 13626 dtrace_membar_producer();
13627 13627 state->dts_alive = now;
13628 13628 }
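
/*
 * A minimal sketch of the store-barrier-store idiom used by
 * dtrace_state_deadman() above (illustrative only; publish_alive() is not a
 * function in this file).  Writing a sentinel that can never look stale,
 * issuing a producer barrier, and only then storing the real timestamp means
 * an observer never sees a value smaller than the one present on entry, even
 * without a 64-bit compare-and-swap.
 */
static void
publish_alive(volatile hrtime_t *alive, hrtime_t now)
{
	*alive = INT64_MAX;		/* sentinel: never appears too old */
	dtrace_membar_producer();	/* order the sentinel before the value */
	*alive = now;
}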
13629 13629
13630 13630 dtrace_state_t *
13631 13631 dtrace_state_create(dev_t *devp, cred_t *cr)
13632 13632 {
13633 13633 minor_t minor;
13634 13634 major_t major;
13635 13635 char c[30];
13636 13636 dtrace_state_t *state;
13637 13637 dtrace_optval_t *opt;
13638 13638 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
13639 13639
13640 13640 ASSERT(MUTEX_HELD(&dtrace_lock));
13641 13641 ASSERT(MUTEX_HELD(&cpu_lock));
13642 13642
13643 13643 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
13644 13644 VM_BESTFIT | VM_SLEEP);
13645 13645
13646 13646 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
13647 13647 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13648 13648 return (NULL);
13649 13649 }
13650 13650
13651 13651 state = ddi_get_soft_state(dtrace_softstate, minor);
13652 13652 state->dts_epid = DTRACE_EPIDNONE + 1;
13653 13653
13654 13654 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
13655 13655 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
13656 13656 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
13657 13657
13658 13658 if (devp != NULL) {
13659 13659 major = getemajor(*devp);
13660 13660 } else {
13661 13661 major = ddi_driver_major(dtrace_devi);
13662 13662 }
13663 13663
13664 13664 state->dts_dev = makedevice(major, minor);
13665 13665
13666 13666 if (devp != NULL)
13667 13667 *devp = state->dts_dev;
13668 13668
13669 13669 /*
13670 13670 * We allocate NCPU buffers. On the one hand, this can be quite
13671 13671 * a bit of memory per instance (nearly 36K on a Starcat). On the
13672 13672 * other hand, it saves an additional memory reference in the probe
13673 13673 * path.
13674 13674 */
13675 13675 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
13676 13676 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
13677 13677 state->dts_cleaner = CYCLIC_NONE;
13678 13678 state->dts_deadman = CYCLIC_NONE;
13679 13679 state->dts_vstate.dtvs_state = state;
13680 13680
13681 13681 for (i = 0; i < DTRACEOPT_MAX; i++)
13682 13682 state->dts_options[i] = DTRACEOPT_UNSET;
13683 13683
13684 13684 /*
13685 13685 * Set the default options.
13686 13686 */
13687 13687 opt = state->dts_options;
13688 13688 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
13689 13689 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
13690 13690 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
13691 13691 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
13692 13692 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
13693 13693 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
13694 13694 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
13695 13695 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
13696 13696 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
13697 13697 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
13698 13698 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
13699 13699 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
13700 13700 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
13701 13701 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
13702 13702
13703 13703 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
13704 13704
13705 13705 /*
13706 13706 * Depending on the user credentials, we set flag bits which alter probe
13707 13707 * visibility or the amount of destructiveness allowed. In the case of
13708 13708 * actual anonymous tracing, or the possession of all privileges, all of
13709 13709 * the normal checks are bypassed.
13710 13710 */
13711 13711 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
13712 13712 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
13713 13713 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
13714 13714 } else {
13715 13715 /*
13716 13716 * Set up the credentials for this instantiation. We take a
13717 13717 * hold on the credential to prevent it from disappearing on
13718 13718 * us; this in turn prevents the zone_t referenced by this
13719 13719 * credential from disappearing. This means that we can
13720 13720 * examine the credential and the zone from probe context.
13721 13721 */
13722 13722 crhold(cr);
13723 13723 state->dts_cred.dcr_cred = cr;
13724 13724
13725 13725 /*
13726 13726 * CRA_PROC means "we have *some* privilege for dtrace" and
13727 13727 * unlocks the use of variables like pid, zonename, etc.
13728 13728 */
13729 13729 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13730 13730 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13731 13731 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13732 13732 }
13733 13733
13734 13734 /*
13735 13735 * dtrace_user allows use of syscall and profile providers.
13736 13736 * If the user also has proc_owner and/or proc_zone, we
13737 13737 * extend the scope to include additional visibility and
13738 13738 * destructive power.
13739 13739 */
13740 13740 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13741 13741 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13742 13742 state->dts_cred.dcr_visible |=
13743 13743 DTRACE_CRV_ALLPROC;
13744 13744
13745 13745 state->dts_cred.dcr_action |=
13746 13746 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13747 13747 }
13748 13748
13749 13749 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13750 13750 state->dts_cred.dcr_visible |=
13751 13751 DTRACE_CRV_ALLZONE;
13752 13752
13753 13753 state->dts_cred.dcr_action |=
13754 13754 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13755 13755 }
13756 13756
13757 13757 /*
13758 13758 * If we have all privs in whatever zone this is,
13759 13759 * we can do destructive things to processes which
13760 13760 * have altered credentials.
13761 13761 */
13762 13762 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13763 13763 cr->cr_zone->zone_privset)) {
13764 13764 state->dts_cred.dcr_action |=
13765 13765 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13766 13766 }
13767 13767 }
13768 13768
13769 13769 /*
13770 13770 * Holding the dtrace_kernel privilege also implies that
13771 13771 * the user has the dtrace_user privilege from a visibility
13772 13772 * perspective. But without further privileges, some
13773 13773 * destructive actions are not available.
13774 13774 */
13775 13775 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13776 13776 /*
13777 13777 * Make all probes in all zones visible. However,
13778 13778 * this doesn't mean that all actions become available
13779 13779 * to all zones.
13780 13780 */
13781 13781 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13782 13782 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13783 13783
13784 13784 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13785 13785 DTRACE_CRA_PROC;
13786 13786 /*
13787 13787 * Holding proc_owner means that destructive actions
13788 13788 * for *this* zone are allowed.
13789 13789 */
13790 13790 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13791 13791 state->dts_cred.dcr_action |=
13792 13792 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13793 13793
13794 13794 /*
13795 13795 * Holding proc_zone means that destructive actions
13796 13796 * for this user/group ID in all zones are allowed.
13797 13797 */
13798 13798 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13799 13799 state->dts_cred.dcr_action |=
13800 13800 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13801 13801
13802 13802 /*
13803 13803 * If we have all privs in whatever zone this is,
13804 13804 * we can do destructive things to processes which
13805 13805 * have altered credentials.
13806 13806 */
13807 13807 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13808 13808 cr->cr_zone->zone_privset)) {
13809 13809 state->dts_cred.dcr_action |=
13810 13810 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13811 13811 }
13812 13812 }
13813 13813
13814 13814 /*
13815 13815 * Holding the dtrace_proc privilege gives control over fasttrap
13816 13816 * and pid providers. We need to grant wider destructive
13817 13817 * privileges in the event that the user has proc_owner and/or
13818 13818 * proc_zone.
13819 13819 */
13820 13820 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13821 13821 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13822 13822 state->dts_cred.dcr_action |=
13823 13823 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13824 13824
13825 13825 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13826 13826 state->dts_cred.dcr_action |=
13827 13827 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13828 13828 }
13829 13829 }
13830 13830
13831 13831 return (state);
13832 13832 }
13833 13833
13834 13834 static int
13835 13835 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13836 13836 {
13837 13837 dtrace_optval_t *opt = state->dts_options, size;
13838 13838 processorid_t cpu;
13839 13839 int flags = 0, rval, factor, divisor = 1;
13840 13840
13841 13841 ASSERT(MUTEX_HELD(&dtrace_lock));
13842 13842 ASSERT(MUTEX_HELD(&cpu_lock));
13843 13843 ASSERT(which < DTRACEOPT_MAX);
13844 13844 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13845 13845 (state == dtrace_anon.dta_state &&
13846 13846 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13847 13847
13848 13848 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13849 13849 return (0);
13850 13850
13851 13851 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13852 13852 cpu = opt[DTRACEOPT_CPU];
13853 13853
13854 13854 if (which == DTRACEOPT_SPECSIZE)
13855 13855 flags |= DTRACEBUF_NOSWITCH;
13856 13856
13857 13857 if (which == DTRACEOPT_BUFSIZE) {
13858 13858 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13859 13859 flags |= DTRACEBUF_RING;
13860 13860
13861 13861 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13862 13862 flags |= DTRACEBUF_FILL;
13863 13863
13864 13864 if (state != dtrace_anon.dta_state ||
13865 13865 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13866 13866 flags |= DTRACEBUF_INACTIVE;
13867 13867 }
13868 13868
13869 13869 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
13870 13870 /*
13871 13871 * The size must be 8-byte aligned. If the size is not 8-byte
13872 13872 * aligned, drop it down by the difference.
13873 13873 */
13874 13874 if (size & (sizeof (uint64_t) - 1))
13875 13875 size -= size & (sizeof (uint64_t) - 1);
13876 13876
13877 13877 if (size < state->dts_reserve) {
13878 13878 /*
13879 13879 * Buffers must always be large enough to accommodate
13880 13880 * their prereserved space. We return E2BIG instead
13881 13881 * of ENOMEM in this case to allow user-level
13882 13882 * software to differentiate the cases.
13883 13883 */
13884 13884 return (E2BIG);
13885 13885 }
13886 13886
13887 13887 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
13888 13888
13889 13889 if (rval != ENOMEM) {
13890 13890 opt[which] = size;
13891 13891 return (rval);
13892 13892 }
13893 13893
13894 13894 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13895 13895 return (rval);
13896 13896
13897 13897 for (divisor = 2; divisor < factor; divisor <<= 1)
13898 13898 continue;
13899 13899 }
13900 13900
13901 13901 return (ENOMEM);
13902 13902 }
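
/*
 * A minimal sketch of the shrink-and-retry policy implemented by
 * dtrace_state_buffer() above (illustrative only; try_alloc() stands in for
 * dtrace_buffer_alloc() and is not a function in this file).  Each attempt is
 * rounded down to an 8-byte multiple and checked against the reserve; on
 * ENOMEM the divisor grows to the next power of two at or above the factor
 * hinted back by the allocator, shrinking the next request accordingly.
 */
static int
shrink_and_retry(size_t size, size_t reserve,
    int (*try_alloc)(size_t, int *), size_t *chosen)
{
	int rval, factor, divisor = 1;

	for (; size >= sizeof (uint64_t); size /= divisor) {
		size -= size & (sizeof (uint64_t) - 1);	/* 8-byte align */

		if (size < reserve)
			return (E2BIG);	/* cannot cover prereserved space */

		if ((rval = try_alloc(size, &factor)) != ENOMEM) {
			*chosen = size;
			return (rval);
		}

		for (divisor = 2; divisor < factor; divisor <<= 1)
			continue;
	}

	return (ENOMEM);
}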
13903 13903
13904 13904 static int
13905 13905 dtrace_state_buffers(dtrace_state_t *state)
13906 13906 {
13907 13907 dtrace_speculation_t *spec = state->dts_speculations;
13908 13908 int rval, i;
13909 13909
13910 13910 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13911 13911 DTRACEOPT_BUFSIZE)) != 0)
13912 13912 return (rval);
13913 13913
13914 13914 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13915 13915 DTRACEOPT_AGGSIZE)) != 0)
13916 13916 return (rval);
13917 13917
13918 13918 for (i = 0; i < state->dts_nspeculations; i++) {
13919 13919 if ((rval = dtrace_state_buffer(state,
13920 13920 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13921 13921 return (rval);
13922 13922 }
13923 13923
13924 13924 return (0);
13925 13925 }
13926 13926
13927 13927 static void
13928 13928 dtrace_state_prereserve(dtrace_state_t *state)
13929 13929 {
13930 13930 dtrace_ecb_t *ecb;
13931 13931 dtrace_probe_t *probe;
13932 13932
13933 13933 state->dts_reserve = 0;
13934 13934
13935 13935 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13936 13936 return;
13937 13937
13938 13938 /*
13939 13939 * If our buffer policy is a "fill" buffer policy, we need to set the
13940 13940 * prereserved space to be the space required by the END probes.
13941 13941 */
13942 13942 probe = dtrace_probes[dtrace_probeid_end - 1];
13943 13943 ASSERT(probe != NULL);
13944 13944
13945 13945 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13946 13946 if (ecb->dte_state != state)
13947 13947 continue;
13948 13948
13949 13949 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13950 13950 }
13951 13951 }
13952 13952
13953 13953 static int
13954 13954 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13955 13955 {
13956 13956 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13957 13957 dtrace_speculation_t *spec;
13958 13958 dtrace_buffer_t *buf;
13959 13959 cyc_handler_t hdlr;
13960 13960 cyc_time_t when;
13961 13961 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13962 13962 dtrace_icookie_t cookie;
13963 13963
13964 13964 mutex_enter(&cpu_lock);
13965 13965 mutex_enter(&dtrace_lock);
13966 13966
13967 13967 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13968 13968 rval = EBUSY;
13969 13969 goto out;
13970 13970 }
13971 13971
13972 13972 /*
13973 13973 * Before we can perform any checks, we must prime all of the
13974 13974 * retained enablings that correspond to this state.
13975 13975 */
13976 13976 dtrace_enabling_prime(state);
13977 13977
13978 13978 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13979 13979 rval = EACCES;
13980 13980 goto out;
13981 13981 }
13982 13982
13983 13983 dtrace_state_prereserve(state);
13984 13984
13985 13985 /*
13986 13986 * What we want to do now is try to allocate our speculations.
13987 13987 * We do not automatically resize the number of speculations; if
13988 13988 * this fails, we will fail the operation.
13989 13989 */
13990 13990 nspec = opt[DTRACEOPT_NSPEC];
13991 13991 ASSERT(nspec != DTRACEOPT_UNSET);
13992 13992
13993 13993 if (nspec > INT_MAX) {
13994 13994 rval = ENOMEM;
13995 13995 goto out;
13996 13996 }
13997 13997
13998 13998 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
13999 13999 KM_NOSLEEP | KM_NORMALPRI);
14000 14000
14001 14001 if (spec == NULL) {
14002 14002 rval = ENOMEM;
14003 14003 goto out;
14004 14004 }
14005 14005
14006 14006 state->dts_speculations = spec;
14007 14007 state->dts_nspeculations = (int)nspec;
14008 14008
14009 14009 for (i = 0; i < nspec; i++) {
14010 14010 if ((buf = kmem_zalloc(bufsize,
14011 14011 KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
14012 14012 rval = ENOMEM;
14013 14013 goto err;
14014 14014 }
14015 14015
14016 14016 spec[i].dtsp_buffer = buf;
14017 14017 }
14018 14018
14019 14019 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
14020 14020 if (dtrace_anon.dta_state == NULL) {
14021 14021 rval = ENOENT;
14022 14022 goto out;
14023 14023 }
14024 14024
14025 14025 if (state->dts_necbs != 0) {
14026 14026 rval = EALREADY;
14027 14027 goto out;
14028 14028 }
14029 14029
14030 14030 state->dts_anon = dtrace_anon_grab();
14031 14031 ASSERT(state->dts_anon != NULL);
14032 14032 state = state->dts_anon;
14033 14033
14034 14034 /*
14035 14035 * We want "grabanon" to be set in the grabbed state, so we'll
14036 14036 * copy that option value from the grabbing state into the
14037 14037 * grabbed state.
14038 14038 */
14039 14039 state->dts_options[DTRACEOPT_GRABANON] =
14040 14040 opt[DTRACEOPT_GRABANON];
14041 14041
14042 14042 *cpu = dtrace_anon.dta_beganon;
14043 14043
14044 14044 /*
14045 14045 * If the anonymous state is active (as it almost certainly
14046 14046 * is if the anonymous enabling ultimately matched anything),
14047 14047 * we don't allow any further option processing -- but we
14048 14048 * don't return failure.
14049 14049 */
14050 14050 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14051 14051 goto out;
14052 14052 }
14053 14053
14054 14054 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
14055 14055 opt[DTRACEOPT_AGGSIZE] != 0) {
14056 14056 if (state->dts_aggregations == NULL) {
14057 14057 /*
14058 14058 * We're not going to create an aggregation buffer
14059 14059 * because we don't have any ECBs that contain
14060 14060 * aggregations -- set this option to 0.
14061 14061 */
14062 14062 opt[DTRACEOPT_AGGSIZE] = 0;
14063 14063 } else {
14064 14064 /*
14065 14065 * If we have an aggregation buffer, we must also have
14066 14066 * a buffer to use as scratch.
14067 14067 */
14068 14068 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
14069 14069 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
14070 14070 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
14071 14071 }
14072 14072 }
14073 14073 }
14074 14074
14075 14075 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
14076 14076 opt[DTRACEOPT_SPECSIZE] != 0) {
14077 14077 if (!state->dts_speculates) {
14078 14078 /*
14079 14079 * We're not going to create speculation buffers
14080 14080 * because we don't have any ECBs that actually
14081 14081 * speculate -- set the speculation size to 0.
14082 14082 */
14083 14083 opt[DTRACEOPT_SPECSIZE] = 0;
14084 14084 }
14085 14085 }
14086 14086
14087 14087 /*
14088 14088 * The bare minimum size for any buffer that we're actually going to
14089 14089 * do anything to is sizeof (uint64_t).
14090 14090 */
14091 14091 sz = sizeof (uint64_t);
14092 14092
14093 14093 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
14094 14094 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
14095 14095 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
14096 14096 /*
14097 14097 * A buffer size has been explicitly set to 0 (or to a size
14098 14098 * that will be adjusted to 0) and we need the space -- we
14099 14099 * need to return failure. We return ENOSPC to differentiate
14100 14100 * it from failing to allocate a buffer due to failure to meet
14101 14101 * the reserve (for which we return E2BIG).
14102 14102 */
14103 14103 rval = ENOSPC;
14104 14104 goto out;
14105 14105 }
14106 14106
14107 14107 if ((rval = dtrace_state_buffers(state)) != 0)
14108 14108 goto err;
14109 14109
14110 14110 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
14111 14111 sz = dtrace_dstate_defsize;
14112 14112
14113 14113 do {
14114 14114 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
14115 14115
14116 14116 if (rval == 0)
14117 14117 break;
14118 14118
14119 14119 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14120 14120 goto err;
14121 14121 } while (sz >>= 1);
14122 14122
14123 14123 opt[DTRACEOPT_DYNVARSIZE] = sz;
14124 14124
14125 14125 if (rval != 0)
14126 14126 goto err;
14127 14127
14128 14128 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
14129 14129 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
14130 14130
14131 14131 if (opt[DTRACEOPT_CLEANRATE] == 0)
14132 14132 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14133 14133
14134 14134 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
14135 14135 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
14136 14136
14137 14137 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
14138 14138 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14139 14139
14140 14140 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
14141 14141 hdlr.cyh_arg = state;
14142 14142 hdlr.cyh_level = CY_LOW_LEVEL;
14143 14143
14144 14144 when.cyt_when = 0;
14145 14145 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
14146 14146
14147 14147 state->dts_cleaner = cyclic_add(&hdlr, &when);
14148 14148
14149 14149 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
14150 14150 hdlr.cyh_arg = state;
14151 14151 hdlr.cyh_level = CY_LOW_LEVEL;
14152 14152
14153 14153 when.cyt_when = 0;
14154 14154 when.cyt_interval = dtrace_deadman_interval;
14155 14155
14156 14156 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
14157 14157 state->dts_deadman = cyclic_add(&hdlr, &when);
14158 14158
14159 14159 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
14160 14160
14161 14161 if (state->dts_getf != 0 &&
14162 14162 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14163 14163 /*
14164 14164 * We don't have kernel privs but we have at least one call
14165 14165 * to getf(); we need to bump our zone's count, and (if
14166 14166 * this is the first enabling to have an unprivileged call
14167 14167 * to getf()) we need to hook into closef().
14168 14168 */
14169 14169 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
14170 14170
14171 14171 if (dtrace_getf++ == 0) {
14172 14172 ASSERT(dtrace_closef == NULL);
14173 14173 dtrace_closef = dtrace_getf_barrier;
14174 14174 }
14175 14175 }
14176 14176
14177 14177 /*
14178 14178 * Now it's time to actually fire the BEGIN probe. We need to disable
14179 14179 * interrupts here both to record the CPU on which we fired the BEGIN
14180 14180 * probe (the data from this CPU will be processed first at user
14181 14181 * level) and to manually activate the buffer for this CPU.
14182 14182 */
14183 14183 cookie = dtrace_interrupt_disable();
14184 14184 *cpu = CPU->cpu_id;
14185 14185 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
14186 14186 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
14187 14187
14188 14188 dtrace_probe(dtrace_probeid_begin,
14189 14189 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14190 14190 dtrace_interrupt_enable(cookie);
14191 14191 /*
14192 14192 * We may have had an exit action from a BEGIN probe; only change our
14193 14193 * state to ACTIVE if we're still in WARMUP.
14194 14194 */
14195 14195 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
14196 14196 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
14197 14197
14198 14198 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
14199 14199 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
14200 14200
14201 14201 /*
14202 14202 * Regardless of whether we're now in ACTIVE or DRAINING, we
14203 14203 * want each CPU to transition its principal buffer out of the
14204 14204 * INACTIVE state. Doing this assures that no CPU will suddenly begin
14205 14205 * processing an ECB halfway down a probe's ECB chain; all CPUs will
14206 14206 * atomically transition from processing none of a state's ECBs to
14207 14207 * processing all of them.
14208 14208 */
14209 14209 dtrace_xcall(DTRACE_CPUALL,
14210 14210 (dtrace_xcall_t)dtrace_buffer_activate, state);
14211 14211 goto out;
14212 14212
14213 14213 err:
14214 14214 dtrace_buffer_free(state->dts_buffer);
14215 14215 dtrace_buffer_free(state->dts_aggbuffer);
14216 14216
14217 14217 if ((nspec = state->dts_nspeculations) == 0) {
14218 14218 ASSERT(state->dts_speculations == NULL);
14219 14219 goto out;
14220 14220 }
14221 14221
14222 14222 spec = state->dts_speculations;
14223 14223 ASSERT(spec != NULL);
14224 14224
14225 14225 for (i = 0; i < state->dts_nspeculations; i++) {
14226 14226 if ((buf = spec[i].dtsp_buffer) == NULL)
14227 14227 break;
14228 14228
14229 14229 dtrace_buffer_free(buf);
14230 14230 kmem_free(buf, bufsize);
14231 14231 }
14232 14232
14233 14233 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14234 14234 state->dts_nspeculations = 0;
14235 14235 state->dts_speculations = NULL;
14236 14236
14237 14237 out:
14238 14238 mutex_exit(&dtrace_lock);
14239 14239 mutex_exit(&cpu_lock);
14240 14240
14241 14241 return (rval);
14242 14242 }
14243 14243
14244 14244 static int
14245 14245 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
14246 14246 {
14247 14247 dtrace_icookie_t cookie;
14248 14248
14249 14249 ASSERT(MUTEX_HELD(&dtrace_lock));
14250 14250
14251 14251 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
14252 14252 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
14253 14253 return (EINVAL);
14254 14254
14255 14255 /*
14256 14256 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
14257 14257 * to be sure that every CPU has seen it. See below for the details
14258 14258 * on why this is done.
14259 14259 */
14260 14260 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
14261 14261 dtrace_sync();
14262 14262
14263 14263 /*
14264 14264 * By this point, it is impossible for any CPU to be still processing
14265 14265 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
14266 14266 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
14267 14267 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
14268 14268 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
14269 14269 * iff we're in the END probe.
14270 14270 */
14271 14271 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
14272 14272 dtrace_sync();
14273 14273 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
14274 14274
14275 14275 /*
14276 14276 * Finally, we can release the reserve and call the END probe. We
14277 14277 * disable interrupts across calling the END probe to allow us to
14278 14278 * return the CPU on which we actually called the END probe. This
14279 14279 * allows user-land to be sure that this CPU's principal buffer is
14280 14280 * processed last.
14281 14281 */
14282 14282 state->dts_reserve = 0;
14283 14283
14284 14284 cookie = dtrace_interrupt_disable();
14285 14285 *cpu = CPU->cpu_id;
14286 14286 dtrace_probe(dtrace_probeid_end,
14287 14287 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14288 14288 dtrace_interrupt_enable(cookie);
14289 14289
14290 14290 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
14291 14291 dtrace_sync();
14292 14292
14293 14293 if (state->dts_getf != 0 &&
14294 14294 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14295 14295 /*
14296 14296 * We don't have kernel privs but we have at least one call
14297 14297 * to getf(); we need to lower our zone's count, and (if
14298 14298 * this is the last enabling to have an unprivileged call
14299 14299 * to getf()) we need to clear the closef() hook.
14300 14300 */
14301 14301 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
14302 14302 ASSERT(dtrace_closef == dtrace_getf_barrier);
14303 14303 ASSERT(dtrace_getf > 0);
14304 14304
14305 14305 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
14306 14306
14307 14307 if (--dtrace_getf == 0)
14308 14308 dtrace_closef = NULL;
14309 14309 }
14310 14310
14311 14311 return (0);
14312 14312 }
14313 14313
14314 14314 static int
14315 14315 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
14316 14316 dtrace_optval_t val)
14317 14317 {
14318 14318 ASSERT(MUTEX_HELD(&dtrace_lock));
14319 14319
14320 14320 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14321 14321 return (EBUSY);
14322 14322
14323 14323 if (option >= DTRACEOPT_MAX)
14324 14324 return (EINVAL);
14325 14325
14326 14326 if (option != DTRACEOPT_CPU && val < 0)
14327 14327 return (EINVAL);
14328 14328
14329 14329 switch (option) {
14330 14330 case DTRACEOPT_DESTRUCTIVE:
14331 14331 if (dtrace_destructive_disallow)
14332 14332 return (EACCES);
14333 14333
14334 14334 state->dts_cred.dcr_destructive = 1;
14335 14335 break;
14336 14336
14337 14337 case DTRACEOPT_BUFSIZE:
14338 14338 case DTRACEOPT_DYNVARSIZE:
14339 14339 case DTRACEOPT_AGGSIZE:
14340 14340 case DTRACEOPT_SPECSIZE:
14341 14341 case DTRACEOPT_STRSIZE:
14342 14342 if (val < 0)
14343 14343 return (EINVAL);
14344 14344
14345 14345 if (val >= LONG_MAX) {
14346 14346 /*
14347 14347 * If this is an otherwise negative value, set it to
14348 14348 * the highest multiple of 128m less than LONG_MAX.
14349 14349 * Technically, we're adjusting the size without
14350 14350 * regard to the buffer resizing policy, but in fact,
14351 14351 * this has no effect -- if we set the buffer size to
14352 14352 * ~LONG_MAX and the buffer policy is ultimately set to
14353 14353 * be "manual", the buffer allocation is guaranteed to
14354 14354 * fail, if only because the allocation requires two
14355 14355 * buffers. (We set the size to the highest
14356 14356 * multiple of 128m because it ensures that the size
14357 14357 * will remain a multiple of a megabyte when
14358 14358 * repeatedly halved -- all the way down to 15m.)
14359 14359 */
14360 14360 val = LONG_MAX - (1 << 27) + 1;
14361 14361 }
14362 14362 }
14363 14363
14364 14364 state->dts_options[option] = val;
14365 14365
14366 14366 return (0);
14367 14367 }
14368 14368
14369 14369 static void
14370 14370 dtrace_state_destroy(dtrace_state_t *state)
14371 14371 {
14372 14372 dtrace_ecb_t *ecb;
14373 14373 dtrace_vstate_t *vstate = &state->dts_vstate;
14374 14374 minor_t minor = getminor(state->dts_dev);
14375 14375 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14376 14376 dtrace_speculation_t *spec = state->dts_speculations;
14377 14377 int nspec = state->dts_nspeculations;
14378 14378 uint32_t match;
14379 14379
14380 14380 ASSERT(MUTEX_HELD(&dtrace_lock));
14381 14381 ASSERT(MUTEX_HELD(&cpu_lock));
14382 14382
14383 14383 /*
14384 14384 * First, retract any retained enablings for this state.
14385 14385 */
14386 14386 dtrace_enabling_retract(state);
14387 14387 ASSERT(state->dts_nretained == 0);
14388 14388
14389 14389 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14390 14390 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14391 14391 /*
14392 14392 * We have managed to come into dtrace_state_destroy() on a
14393 14393 * hot enabling -- almost certainly because of a disorderly
14394 14394 * shutdown of a consumer. (That is, a consumer that is
14395 14395 * exiting without having called dtrace_stop().) In this case,
14396 14396 * we're going to set our activity to be KILLED, and then
14397 14397 * issue a sync to be sure that everyone is out of probe
14398 14398 * context before we start blowing away ECBs.
14399 14399 */
14400 14400 state->dts_activity = DTRACE_ACTIVITY_KILLED;
14401 14401 dtrace_sync();
14402 14402 }
14403 14403
14404 14404 /*
14405 14405 * Release the credential hold we took in dtrace_state_create().
14406 14406 */
14407 14407 if (state->dts_cred.dcr_cred != NULL)
14408 14408 crfree(state->dts_cred.dcr_cred);
14409 14409
14410 14410 /*
14411 14411 * Now we can safely disable and destroy any enabled probes. Because
14412 14412 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14413 14413 * (especially if they're all enabled), we take two passes through the
14414 14414 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14415 14415 * in the second we disable whatever is left over.
14416 14416 */
14417 14417 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14418 14418 for (i = 0; i < state->dts_necbs; i++) {
14419 14419 if ((ecb = state->dts_ecbs[i]) == NULL)
14420 14420 continue;
14421 14421
14422 14422 if (match && ecb->dte_probe != NULL) {
14423 14423 dtrace_probe_t *probe = ecb->dte_probe;
14424 14424 dtrace_provider_t *prov = probe->dtpr_provider;
14425 14425
14426 14426 if (!(prov->dtpv_priv.dtpp_flags & match))
14427 14427 continue;
14428 14428 }
14429 14429
14430 14430 dtrace_ecb_disable(ecb);
14431 14431 dtrace_ecb_destroy(ecb);
14432 14432 }
14433 14433
14434 14434 if (!match)
14435 14435 break;
14436 14436 }
14437 14437
14438 14438 /*
14439 14439 * Before we free the buffers, perform one more sync to assure that
14440 14440 * every CPU is out of probe context.
14441 14441 */
14442 14442 dtrace_sync();
14443 14443
14444 14444 dtrace_buffer_free(state->dts_buffer);
14445 14445 dtrace_buffer_free(state->dts_aggbuffer);
14446 14446
14447 14447 for (i = 0; i < nspec; i++)
14448 14448 dtrace_buffer_free(spec[i].dtsp_buffer);
14449 14449
14450 14450 if (state->dts_cleaner != CYCLIC_NONE)
14451 14451 cyclic_remove(state->dts_cleaner);
14452 14452
14453 14453 if (state->dts_deadman != CYCLIC_NONE)
14454 14454 cyclic_remove(state->dts_deadman);
14455 14455
14456 14456 dtrace_dstate_fini(&vstate->dtvs_dynvars);
14457 14457 dtrace_vstate_fini(vstate);
14458 14458 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
14459 14459
14460 14460 if (state->dts_aggregations != NULL) {
14461 14461 #ifdef DEBUG
14462 14462 for (i = 0; i < state->dts_naggregations; i++)
14463 14463 ASSERT(state->dts_aggregations[i] == NULL);
14464 14464 #endif
14465 14465 ASSERT(state->dts_naggregations > 0);
14466 14466 kmem_free(state->dts_aggregations,
14467 14467 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
14468 14468 }
14469 14469
14470 14470 kmem_free(state->dts_buffer, bufsize);
14471 14471 kmem_free(state->dts_aggbuffer, bufsize);
14472 14472
14473 14473 for (i = 0; i < nspec; i++)
14474 14474 kmem_free(spec[i].dtsp_buffer, bufsize);
14475 14475
14476 14476 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14477 14477
14478 14478 dtrace_format_destroy(state);
14479 14479
14480 14480 vmem_destroy(state->dts_aggid_arena);
14481 14481 ddi_soft_state_free(dtrace_softstate, minor);
14482 14482 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14483 14483 }
14484 14484
14485 14485 /*
14486 14486 * DTrace Anonymous Enabling Functions
14487 14487 */
14488 14488 static dtrace_state_t *
14489 14489 dtrace_anon_grab(void)
14490 14490 {
14491 14491 dtrace_state_t *state;
14492 14492
14493 14493 ASSERT(MUTEX_HELD(&dtrace_lock));
14494 14494
14495 14495 if ((state = dtrace_anon.dta_state) == NULL) {
14496 14496 ASSERT(dtrace_anon.dta_enabling == NULL);
14497 14497 return (NULL);
14498 14498 }
14499 14499
14500 14500 ASSERT(dtrace_anon.dta_enabling != NULL);
14501 14501 ASSERT(dtrace_retained != NULL);
14502 14502
14503 14503 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
14504 14504 dtrace_anon.dta_enabling = NULL;
14505 14505 dtrace_anon.dta_state = NULL;
14506 14506
14507 14507 return (state);
14508 14508 }
14509 14509
14510 14510 static void
14511 14511 dtrace_anon_property(void)
14512 14512 {
14513 14513 int i, rv;
14514 14514 dtrace_state_t *state;
14515 14515 dof_hdr_t *dof;
14516 14516 char c[32]; /* enough for "dof-data-" + digits */
14517 14517
14518 14518 ASSERT(MUTEX_HELD(&dtrace_lock));
14519 14519 ASSERT(MUTEX_HELD(&cpu_lock));
14520 14520
14521 14521 for (i = 0; ; i++) {
14522 14522 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
14523 14523
14524 14524 dtrace_err_verbose = 1;
14525 14525
14526 14526 if ((dof = dtrace_dof_property(c)) == NULL) {
14527 14527 dtrace_err_verbose = 0;
14528 14528 break;
14529 14529 }
14530 14530
14531 14531 /*
14532 14532 * We want to create anonymous state, so we need to transition
14533 14533 * the kernel debugger to indicate that DTrace is active. If
14534 14534 * this fails (e.g. because the debugger has modified text in
14535 14535 * some way), we won't continue with the processing.
14536 14536 */
14537 14537 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14538 14538 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
14539 14539 "enabling ignored.");
14540 14540 dtrace_dof_destroy(dof);
14541 14541 break;
14542 14542 }
14543 14543
14544 14544 /*
14545 14545 * If we haven't allocated an anonymous state, we'll do so now.
14546 14546 */
14547 14547 if ((state = dtrace_anon.dta_state) == NULL) {
14548 14548 state = dtrace_state_create(NULL, NULL);
14549 14549 dtrace_anon.dta_state = state;
14550 14550
14551 14551 if (state == NULL) {
14552 14552 /*
14553 14553 * This basically shouldn't happen: the only
14554 14554 * failure mode from dtrace_state_create() is a
14555 14555 * failure of ddi_soft_state_zalloc() that
14556 14556 * itself should never happen. Still, the
14557 14557 * interface allows for a failure mode, and
14558 14558 * we want to fail as gracefully as possible:
14559 14559 * we'll emit an error message and cease
14560 14560 * processing anonymous state in this case.
14561 14561 */
14562 14562 cmn_err(CE_WARN, "failed to create "
14563 14563 "anonymous state");
14564 14564 dtrace_dof_destroy(dof);
14565 14565 break;
14566 14566 }
14567 14567 }
14568 14568
14569 14569 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
14570 14570 &dtrace_anon.dta_enabling, 0, B_TRUE);
14571 14571
14572 14572 if (rv == 0)
14573 14573 rv = dtrace_dof_options(dof, state);
14574 14574
14575 14575 dtrace_err_verbose = 0;
14576 14576 dtrace_dof_destroy(dof);
14577 14577
14578 14578 if (rv != 0) {
14579 14579 /*
14580 14580 * This is malformed DOF; chuck any anonymous state
14581 14581 * that we created.
14582 14582 */
14583 14583 ASSERT(dtrace_anon.dta_enabling == NULL);
14584 14584 dtrace_state_destroy(state);
14585 14585 dtrace_anon.dta_state = NULL;
14586 14586 break;
14587 14587 }
14588 14588
14589 14589 ASSERT(dtrace_anon.dta_enabling != NULL);
14590 14590 }
14591 14591
14592 14592 if (dtrace_anon.dta_enabling != NULL) {
14593 14593 int rval;
14594 14594
14595 14595 /*
14596 14596 * dtrace_enabling_retain() can only fail because we are
14597 14597 * trying to retain more enablings than are allowed -- but
14598 14598 * we only have one anonymous enabling, and we are guaranteed
14599 14599 * to be allowed at least one retained enabling; we assert
14600 14600 * that dtrace_enabling_retain() returns success.
14601 14601 */
14602 14602 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14603 14603 ASSERT(rval == 0);
14604 14604
14605 14605 dtrace_enabling_dump(dtrace_anon.dta_enabling);
14606 14606 }
14607 14607 }
14608 14608
14609 14609 /*
14610 14610 * DTrace Helper Functions
14611 14611 */
14612 14612 static void
14613 14613 dtrace_helper_trace(dtrace_helper_action_t *helper,
14614 14614 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14615 14615 {
14616 14616 uint32_t size, next, nnext, i;
14617 14617 dtrace_helptrace_t *ent, *buffer;
14618 14618 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14619 14619
14620 14620 if ((buffer = dtrace_helptrace_buffer) == NULL)
14621 14621 return;
14622 14622
14623 14623 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14624 14624
14625 14625 /*
14626 14626 * What would a tracing framework be without its own tracing
14627 14627 * framework? (Well, a hell of a lot simpler, for starters...)
14628 14628 */
14629 14629 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14630 14630 sizeof (uint64_t) - sizeof (uint64_t);
14631 14631
14632 14632 /*
14633 14633 * Iterate until we can allocate a slot in the trace buffer.
14634 14634 */
14635 14635 do {
14636 14636 next = dtrace_helptrace_next;
14637 14637
14638 14638 if (next + size < dtrace_helptrace_bufsize) {
14639 14639 nnext = next + size;
14640 14640 } else {
14641 14641 nnext = size;
14642 14642 }
14643 14643 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14644 14644
14645 14645 /*
14646 14646 * We have our slot; fill it in.
14647 14647 */
14648 14648 if (nnext == size) {
14649 14649 dtrace_helptrace_wrapped++;
14650 14650 next = 0;
14651 14651 }
14652 14652
14653 14653 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next);
14654 14654 ent->dtht_helper = helper;
14655 14655 ent->dtht_where = where;
14656 14656 ent->dtht_nlocals = vstate->dtvs_nlocals;
14657 14657
14658 14658 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14659 14659 mstate->dtms_fltoffs : -1;
14660 14660 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14661 14661 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
14662 14662
14663 14663 for (i = 0; i < vstate->dtvs_nlocals; i++) {
14664 14664 dtrace_statvar_t *svar;
14665 14665
14666 14666 if ((svar = vstate->dtvs_locals[i]) == NULL)
14667 14667 continue;
14668 14668
14669 14669 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14670 14670 ent->dtht_locals[i] =
14671 14671 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
14672 14672 }
14673 14673 }
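
/*
 * A minimal sketch of the compare-and-swap cursor advance used by
 * dtrace_helper_trace() above (illustrative only; claim_slot() is not a
 * function in this file).  Competing CPUs retry until one of them moves the
 * shared cursor; a record that won't fit at the end of the buffer wraps the
 * cursor back to the start.
 */
static uint32_t
claim_slot(uint32_t *cursor, uint32_t size, uint32_t bufsize)
{
	uint32_t next, nnext;

	do {
		next = *cursor;
		nnext = (next + size < bufsize) ? next + size : size;
	} while (dtrace_cas32(cursor, next, nnext) != next);

	return (nnext == size ? 0 : next);	/* offset of the claimed slot */
}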
14674 14674
14675 14675 static uint64_t
14676 14676 dtrace_helper(int which, dtrace_mstate_t *mstate,
14677 14677 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14678 14678 {
14679 14679 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14680 14680 uint64_t sarg0 = mstate->dtms_arg[0];
14681 14681 uint64_t sarg1 = mstate->dtms_arg[1];
14682 14682 uint64_t rval;
14683 14683 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14684 14684 dtrace_helper_action_t *helper;
14685 14685 dtrace_vstate_t *vstate;
14686 14686 dtrace_difo_t *pred;
14687 14687 int i, trace = dtrace_helptrace_buffer != NULL;
14688 14688
14689 14689 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14690 14690
14691 14691 if (helpers == NULL)
14692 14692 return (0);
14693 14693
14694 14694 if ((helper = helpers->dthps_actions[which]) == NULL)
14695 14695 return (0);
14696 14696
14697 14697 vstate = &helpers->dthps_vstate;
14698 14698 mstate->dtms_arg[0] = arg0;
14699 14699 mstate->dtms_arg[1] = arg1;
14700 14700
14701 14701 /*
14702 14702 * Now iterate over each helper. If its predicate evaluates to 'true',
14703 14703 * we'll call the corresponding actions. Note that the below calls
14704 14704 * to dtrace_dif_emulate() may set faults in machine state. This is
14705 14705 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
14706 14706 * over the stored DIF offset with its own (which is the desired behavior).
14707 14707 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
14708 14708 * from machine state; this is okay, too.
14709 14709 */
14710 14710 for (; helper != NULL; helper = helper->dtha_next) {
14711 14711 if ((pred = helper->dtha_predicate) != NULL) {
14712 14712 if (trace)
14713 14713 dtrace_helper_trace(helper, mstate, vstate, 0);
14714 14714
14715 14715 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14716 14716 goto next;
14717 14717
14718 14718 if (*flags & CPU_DTRACE_FAULT)
14719 14719 goto err;
14720 14720 }
14721 14721
14722 14722 for (i = 0; i < helper->dtha_nactions; i++) {
14723 14723 if (trace)
14724 14724 dtrace_helper_trace(helper,
14725 14725 mstate, vstate, i + 1);
14726 14726
14727 14727 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14728 14728 mstate, vstate, state);
14729 14729
14730 14730 if (*flags & CPU_DTRACE_FAULT)
14731 14731 goto err;
14732 14732 }
14733 14733
14734 14734 next:
14735 14735 if (trace)
14736 14736 dtrace_helper_trace(helper, mstate, vstate,
14737 14737 DTRACE_HELPTRACE_NEXT);
14738 14738 }
14739 14739
14740 14740 if (trace)
14741 14741 dtrace_helper_trace(helper, mstate, vstate,
14742 14742 DTRACE_HELPTRACE_DONE);
14743 14743
14744 14744 /*
14745 14745 * Restore the arg0 that we saved upon entry.
14746 14746 * Restore the args that we saved upon entry.
14747 14747 mstate->dtms_arg[0] = sarg0;
14748 14748 mstate->dtms_arg[1] = sarg1;
14749 14749
14750 14750 return (rval);
14751 14751
14752 14752 err:
14753 14753 if (trace)
14754 14754 dtrace_helper_trace(helper, mstate, vstate,
14755 14755 DTRACE_HELPTRACE_ERR);
14756 14756
14757 14757 /*
14758 14758 * Restore the arg0 that we saved upon entry.
14759 14759 * Restore the args that we saved upon entry.
14760 14760 mstate->dtms_arg[0] = sarg0;
14761 14761 mstate->dtms_arg[1] = sarg1;
14762 14762
14763 14763 return (NULL);
14764 14764 }
14765 14765
14766 14766 static void
14767 14767 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14768 14768 dtrace_vstate_t *vstate)
14769 14769 {
14770 14770 int i;
14771 14771
14772 14772 if (helper->dtha_predicate != NULL)
14773 14773 dtrace_difo_release(helper->dtha_predicate, vstate);
14774 14774
14775 14775 for (i = 0; i < helper->dtha_nactions; i++) {
14776 14776 ASSERT(helper->dtha_actions[i] != NULL);
14777 14777 dtrace_difo_release(helper->dtha_actions[i], vstate);
14778 14778 }
14779 14779
14780 14780 kmem_free(helper->dtha_actions,
14781 14781 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14782 14782 kmem_free(helper, sizeof (dtrace_helper_action_t));
14783 14783 }
14784 14784
14785 14785 static int
14786 14786 dtrace_helper_destroygen(int gen)
14787 14787 {
14788 14788 proc_t *p = curproc;
14789 14789 dtrace_helpers_t *help = p->p_dtrace_helpers;
14790 14790 dtrace_vstate_t *vstate;
14791 14791 int i;
14792 14792
14793 14793 ASSERT(MUTEX_HELD(&dtrace_lock));
14794 14794
14795 14795 if (help == NULL || gen > help->dthps_generation)
14796 14796 return (EINVAL);
14797 14797
14798 14798 vstate = &help->dthps_vstate;
14799 14799
14800 14800 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14801 14801 dtrace_helper_action_t *last = NULL, *h, *next;
14802 14802
14803 14803 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14804 14804 next = h->dtha_next;
14805 14805
14806 14806 if (h->dtha_generation == gen) {
14807 14807 if (last != NULL) {
14808 14808 last->dtha_next = next;
14809 14809 } else {
14810 14810 help->dthps_actions[i] = next;
14811 14811 }
14812 14812
14813 14813 dtrace_helper_action_destroy(h, vstate);
14814 14814 } else {
14815 14815 last = h;
14816 14816 }
14817 14817 }
14818 14818 }
14819 14819
14820 14820 /*
14821 14821 * Iterate until we've cleared out all helper providers with the
14822 14822 * given generation number.
14823 14823 */
14824 14824 for (;;) {
14825 14825 dtrace_helper_provider_t *prov;
14826 14826
14827 14827 /*
14828 14828 * Look for a helper provider with the right generation. We
14829 14829 * have to start back at the beginning of the list each time
14830 14830 * because we drop dtrace_lock. It's unlikely that we'll make
14831 14831 * more than two passes.
14832 14832 */
14833 14833 for (i = 0; i < help->dthps_nprovs; i++) {
14834 14834 prov = help->dthps_provs[i];
14835 14835
14836 14836 if (prov->dthp_generation == gen)
14837 14837 break;
14838 14838 }
14839 14839
14840 14840 /*
14841 14841 * If there were no matches, we're done.
14842 14842 */
14843 14843 if (i == help->dthps_nprovs)
14844 14844 break;
14845 14845
14846 14846 /*
14847 14847 * Move the last helper provider into this slot.
14848 14848 */
14849 14849 help->dthps_nprovs--;
14850 14850 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14851 14851 help->dthps_provs[help->dthps_nprovs] = NULL;
14852 14852
14853 14853 mutex_exit(&dtrace_lock);
14854 14854
14855 14855 /*
14856 14856 * If we have a meta provider, remove this helper provider.
14857 14857 */
14858 14858 mutex_enter(&dtrace_meta_lock);
14859 14859 if (dtrace_meta_pid != NULL) {
14860 14860 ASSERT(dtrace_deferred_pid == NULL);
14861 14861 dtrace_helper_provider_remove(&prov->dthp_prov,
14862 14862 p->p_pid);
14863 14863 }
14864 14864 mutex_exit(&dtrace_meta_lock);
14865 14865
14866 14866 dtrace_helper_provider_destroy(prov);
14867 14867
14868 14868 mutex_enter(&dtrace_lock);
14869 14869 }
14870 14870
14871 14871 return (0);
14872 14872 }
14873 14873
14874 14874 static int
14875 14875 dtrace_helper_validate(dtrace_helper_action_t *helper)
14876 14876 {
14877 14877 int err = 0, i;
14878 14878 dtrace_difo_t *dp;
14879 14879
14880 14880 if ((dp = helper->dtha_predicate) != NULL)
14881 14881 err += dtrace_difo_validate_helper(dp);
14882 14882
14883 14883 for (i = 0; i < helper->dtha_nactions; i++)
14884 14884 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14885 14885
14886 14886 return (err == 0);
14887 14887 }
14888 14888
14889 14889 static int
14890 14890 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14891 14891 {
14892 14892 dtrace_helpers_t *help;
14893 14893 dtrace_helper_action_t *helper, *last;
14894 14894 dtrace_actdesc_t *act;
14895 14895 dtrace_vstate_t *vstate;
14896 14896 dtrace_predicate_t *pred;
14897 14897 int count = 0, nactions = 0, i;
14898 14898
14899 14899 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14900 14900 return (EINVAL);
14901 14901
14902 14902 help = curproc->p_dtrace_helpers;
14903 14903 last = help->dthps_actions[which];
14904 14904 vstate = &help->dthps_vstate;
14905 14905
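	/*
	 * Count the helper actions already present for this type, leaving
	 * 'last' pointing at the tail of the list (or NULL if it is empty)
	 * so that the new action can be appended below.
	 */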
14906 14906 for (count = 0; last != NULL; last = last->dtha_next) {
14907 14907 count++;
14908 14908 if (last->dtha_next == NULL)
14909 14909 break;
14910 14910 }
14911 14911
14912 14912 /*
14913 14913 * If we already have dtrace_helper_actions_max helper actions for this
14914 14914 * helper action type, we'll refuse to add a new one.
14915 14915 */
14916 14916 if (count >= dtrace_helper_actions_max)
14917 14917 return (ENOSPC);
14918 14918
14919 14919 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14920 14920 helper->dtha_generation = help->dthps_generation;
14921 14921
14922 14922 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14923 14923 ASSERT(pred->dtp_difo != NULL);
14924 14924 dtrace_difo_hold(pred->dtp_difo);
14925 14925 helper->dtha_predicate = pred->dtp_difo;
14926 14926 }
14927 14927
14928 14928 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14929 14929 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14930 14930 goto err;
14931 14931
14932 14932 if (act->dtad_difo == NULL)
14933 14933 goto err;
14934 14934
14935 14935 nactions++;
14936 14936 }
14937 14937
14938 14938 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14939 14939 (helper->dtha_nactions = nactions), KM_SLEEP);
14940 14940
14941 14941 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14942 14942 dtrace_difo_hold(act->dtad_difo);
14943 14943 helper->dtha_actions[i++] = act->dtad_difo;
14944 14944 }
14945 14945
14946 14946 if (!dtrace_helper_validate(helper))
14947 14947 goto err;
14948 14948
14949 14949 if (last == NULL) {
14950 14950 help->dthps_actions[which] = helper;
14951 14951 } else {
14952 14952 last->dtha_next = helper;
14953 14953 }
14954 14954
14955 14955 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14956 14956 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14957 14957 dtrace_helptrace_next = 0;
14958 14958 }
14959 14959
14960 14960 return (0);
14961 14961 err:
14962 14962 dtrace_helper_action_destroy(helper, vstate);
14963 14963 return (EINVAL);
14964 14964 }
14965 14965
14966 14966 static void
14967 14967 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14968 14968 dof_helper_t *dofhp)
14969 14969 {
14970 14970 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14971 14971
14972 14972 mutex_enter(&dtrace_meta_lock);
14973 14973 mutex_enter(&dtrace_lock);
14974 14974
14975 14975 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14976 14976 /*
14977 14977 * If the dtrace module is loaded but not attached, or if
14978 14978 		 * there isn't a meta provider registered to deal with
14979 14979 * these provider descriptions, we need to postpone creating
14980 14980 * the actual providers until later.
14981 14981 */
14982 14982
14983 14983 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14984 14984 dtrace_deferred_pid != help) {
14985 14985 help->dthps_deferred = 1;
14986 14986 help->dthps_pid = p->p_pid;
14987 14987 help->dthps_next = dtrace_deferred_pid;
14988 14988 help->dthps_prev = NULL;
14989 14989 if (dtrace_deferred_pid != NULL)
14990 14990 dtrace_deferred_pid->dthps_prev = help;
14991 14991 dtrace_deferred_pid = help;
14992 14992 }
14993 14993
14994 14994 mutex_exit(&dtrace_lock);
14995 14995
14996 14996 } else if (dofhp != NULL) {
14997 14997 /*
14998 14998 * If the dtrace module is loaded and we have a particular
14999 14999 * helper provider description, pass that off to the
15000 15000 * meta provider.
15001 15001 */
15002 15002
15003 15003 mutex_exit(&dtrace_lock);
15004 15004
15005 15005 dtrace_helper_provide(dofhp, p->p_pid);
15006 15006
15007 15007 } else {
15008 15008 /*
15009 15009 * Otherwise, just pass all the helper provider descriptions
15010 15010 * off to the meta provider.
15011 15011 */
15012 15012
15013 15013 int i;
15014 15014 mutex_exit(&dtrace_lock);
15015 15015
15016 15016 for (i = 0; i < help->dthps_nprovs; i++) {
15017 15017 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
15018 15018 p->p_pid);
15019 15019 }
15020 15020 }
15021 15021
15022 15022 mutex_exit(&dtrace_meta_lock);
15023 15023 }
15024 15024
15025 15025 static int
15026 15026 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
15027 15027 {
15028 15028 dtrace_helpers_t *help;
15029 15029 dtrace_helper_provider_t *hprov, **tmp_provs;
15030 15030 uint_t tmp_maxprovs, i;
15031 15031
15032 15032 ASSERT(MUTEX_HELD(&dtrace_lock));
15033 15033
15034 15034 help = curproc->p_dtrace_helpers;
15035 15035 ASSERT(help != NULL);
15036 15036
15037 15037 /*
15038 15038 * If we already have dtrace_helper_providers_max helper providers,
15039 15039 	 * we'll refuse to add a new one.
15040 15040 */
15041 15041 if (help->dthps_nprovs >= dtrace_helper_providers_max)
15042 15042 return (ENOSPC);
15043 15043
15044 15044 /*
15045 15045 * Check to make sure this isn't a duplicate.
15046 15046 */
15047 15047 for (i = 0; i < help->dthps_nprovs; i++) {
15048 15048 if (dofhp->dofhp_addr ==
15049 15049 help->dthps_provs[i]->dthp_prov.dofhp_addr)
15050 15050 return (EALREADY);
15051 15051 }
15052 15052
15053 15053 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15054 15054 hprov->dthp_prov = *dofhp;
15055 15055 hprov->dthp_ref = 1;
15056 15056 hprov->dthp_generation = gen;
15057 15057
15058 15058 /*
15059 15059 * Allocate a bigger table for helper providers if it's already full.
15060 15060 */
15061 15061 if (help->dthps_maxprovs == help->dthps_nprovs) {
15062 15062 tmp_maxprovs = help->dthps_maxprovs;
15063 15063 tmp_provs = help->dthps_provs;
15064 15064
15065 15065 if (help->dthps_maxprovs == 0)
15066 15066 help->dthps_maxprovs = 2;
15067 15067 else
15068 15068 help->dthps_maxprovs *= 2;
15069 15069 if (help->dthps_maxprovs > dtrace_helper_providers_max)
15070 15070 help->dthps_maxprovs = dtrace_helper_providers_max;
15071 15071
15072 15072 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
15073 15073
15074 15074 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
15075 15075 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15076 15076
15077 15077 if (tmp_provs != NULL) {
15078 15078 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
15079 15079 sizeof (dtrace_helper_provider_t *));
15080 15080 kmem_free(tmp_provs, tmp_maxprovs *
15081 15081 sizeof (dtrace_helper_provider_t *));
15082 15082 }
15083 15083 }
15084 15084
15085 15085 help->dthps_provs[help->dthps_nprovs] = hprov;
15086 15086 help->dthps_nprovs++;
15087 15087
15088 15088 return (0);
15089 15089 }
15090 15090
15091 15091 static void
15092 15092 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
15093 15093 {
15094 15094 mutex_enter(&dtrace_lock);
15095 15095
15096 15096 if (--hprov->dthp_ref == 0) {
15097 15097 dof_hdr_t *dof;
15098 15098 mutex_exit(&dtrace_lock);
15099 15099 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
15100 15100 dtrace_dof_destroy(dof);
15101 15101 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
15102 15102 } else {
15103 15103 mutex_exit(&dtrace_lock);
15104 15104 }
15105 15105 }
15106 15106
15107 15107 static int
15108 15108 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
15109 15109 {
15110 15110 uintptr_t daddr = (uintptr_t)dof;
15111 15111 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
15112 15112 dof_provider_t *provider;
15113 15113 dof_probe_t *probe;
15114 15114 uint8_t *arg;
15115 15115 char *strtab, *typestr;
15116 15116 dof_stridx_t typeidx;
15117 15117 size_t typesz;
15118 15118 uint_t nprobes, j, k;
15119 15119
15120 15120 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
15121 15121
15122 15122 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
15123 15123 dtrace_dof_error(dof, "misaligned section offset");
15124 15124 return (-1);
15125 15125 }
15126 15126
15127 15127 /*
15128 15128 * The section needs to be large enough to contain the DOF provider
15129 15129 * structure appropriate for the given version.
15130 15130 */
15131 15131 if (sec->dofs_size <
15132 15132 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
15133 15133 offsetof(dof_provider_t, dofpv_prenoffs) :
15134 15134 sizeof (dof_provider_t))) {
15135 15135 dtrace_dof_error(dof, "provider section too small");
15136 15136 return (-1);
15137 15137 }
15138 15138
15139 15139 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
15140 15140 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
15141 15141 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
15142 15142 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
15143 15143 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
15144 15144
15145 15145 if (str_sec == NULL || prb_sec == NULL ||
15146 15146 arg_sec == NULL || off_sec == NULL)
15147 15147 return (-1);
15148 15148
15149 15149 enoff_sec = NULL;
15150 15150
15151 15151 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
15152 15152 provider->dofpv_prenoffs != DOF_SECT_NONE &&
15153 15153 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
15154 15154 provider->dofpv_prenoffs)) == NULL)
15155 15155 return (-1);
15156 15156
15157 15157 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
15158 15158
15159 15159 if (provider->dofpv_name >= str_sec->dofs_size ||
15160 15160 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
15161 15161 dtrace_dof_error(dof, "invalid provider name");
15162 15162 return (-1);
15163 15163 }
15164 15164
15165 15165 if (prb_sec->dofs_entsize == 0 ||
15166 15166 prb_sec->dofs_entsize > prb_sec->dofs_size) {
15167 15167 dtrace_dof_error(dof, "invalid entry size");
15168 15168 return (-1);
15169 15169 }
15170 15170
15171 15171 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
15172 15172 dtrace_dof_error(dof, "misaligned entry size");
15173 15173 return (-1);
15174 15174 }
15175 15175
15176 15176 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
15177 15177 dtrace_dof_error(dof, "invalid entry size");
15178 15178 return (-1);
15179 15179 }
15180 15180
15181 15181 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
15182 15182 dtrace_dof_error(dof, "misaligned section offset");
15183 15183 return (-1);
15184 15184 }
15185 15185
15186 15186 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
15187 15187 dtrace_dof_error(dof, "invalid entry size");
15188 15188 return (-1);
15189 15189 }
15190 15190
15191 15191 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
15192 15192
15193 15193 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
15194 15194
15195 15195 /*
15196 15196 * Take a pass through the probes to check for errors.
15197 15197 */
15198 15198 for (j = 0; j < nprobes; j++) {
15199 15199 probe = (dof_probe_t *)(uintptr_t)(daddr +
15200 15200 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
15201 15201
15202 15202 if (probe->dofpr_func >= str_sec->dofs_size) {
15203 15203 dtrace_dof_error(dof, "invalid function name");
15204 15204 return (-1);
15205 15205 }
15206 15206
15207 15207 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
15208 15208 dtrace_dof_error(dof, "function name too long");
15209 15209 return (-1);
15210 15210 }
15211 15211
15212 15212 if (probe->dofpr_name >= str_sec->dofs_size ||
15213 15213 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
15214 15214 dtrace_dof_error(dof, "invalid probe name");
15215 15215 return (-1);
15216 15216 }
15217 15217
15218 15218 /*
15219 15219 * The offset count must not wrap the index, and the offsets
15220 15220 * must also not overflow the section's data.
15221 15221 */
15222 15222 if (probe->dofpr_offidx + probe->dofpr_noffs <
15223 15223 probe->dofpr_offidx ||
15224 15224 (probe->dofpr_offidx + probe->dofpr_noffs) *
15225 15225 off_sec->dofs_entsize > off_sec->dofs_size) {
15226 15226 dtrace_dof_error(dof, "invalid probe offset");
15227 15227 return (-1);
15228 15228 }
15229 15229
15230 15230 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
15231 15231 /*
15232 15232 * If there's no is-enabled offset section, make sure
15233 15233 * there aren't any is-enabled offsets. Otherwise
15234 15234 * perform the same checks as for probe offsets
15235 15235 * (immediately above).
15236 15236 */
15237 15237 if (enoff_sec == NULL) {
15238 15238 if (probe->dofpr_enoffidx != 0 ||
15239 15239 probe->dofpr_nenoffs != 0) {
15240 15240 dtrace_dof_error(dof, "is-enabled "
15241 15241 "offsets with null section");
15242 15242 return (-1);
15243 15243 }
15244 15244 } else if (probe->dofpr_enoffidx +
15245 15245 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
15246 15246 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
15247 15247 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
15248 15248 dtrace_dof_error(dof, "invalid is-enabled "
15249 15249 "offset");
15250 15250 return (-1);
15251 15251 }
15252 15252
15253 15253 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
15254 15254 dtrace_dof_error(dof, "zero probe and "
15255 15255 "is-enabled offsets");
15256 15256 return (-1);
15257 15257 }
15258 15258 } else if (probe->dofpr_noffs == 0) {
15259 15259 dtrace_dof_error(dof, "zero probe offsets");
15260 15260 return (-1);
15261 15261 }
15262 15262
15263 15263 if (probe->dofpr_argidx + probe->dofpr_xargc <
15264 15264 probe->dofpr_argidx ||
15265 15265 (probe->dofpr_argidx + probe->dofpr_xargc) *
15266 15266 arg_sec->dofs_entsize > arg_sec->dofs_size) {
15267 15267 dtrace_dof_error(dof, "invalid args");
15268 15268 return (-1);
15269 15269 }
15270 15270
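		/*
		 * Validate the native argument type strings: each must begin
		 * within the string table and be shorter than
		 * DTRACE_ARGTYPELEN.
		 */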
15271 15271 typeidx = probe->dofpr_nargv;
15272 15272 typestr = strtab + probe->dofpr_nargv;
15273 15273 for (k = 0; k < probe->dofpr_nargc; k++) {
15274 15274 if (typeidx >= str_sec->dofs_size) {
15275 15275 dtrace_dof_error(dof, "bad "
15276 15276 "native argument type");
15277 15277 return (-1);
15278 15278 }
15279 15279
15280 15280 typesz = strlen(typestr) + 1;
15281 15281 if (typesz > DTRACE_ARGTYPELEN) {
15282 15282 dtrace_dof_error(dof, "native "
15283 15283 "argument type too long");
15284 15284 return (-1);
15285 15285 }
15286 15286 typeidx += typesz;
15287 15287 typestr += typesz;
15288 15288 }
15289 15289
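		/*
		 * Likewise validate the translated argument type strings, and
		 * check that each native argument mapping index is within the
		 * probe's native argument count.
		 */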
15290 15290 typeidx = probe->dofpr_xargv;
15291 15291 typestr = strtab + probe->dofpr_xargv;
15292 15292 for (k = 0; k < probe->dofpr_xargc; k++) {
15293 15293 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
15294 15294 dtrace_dof_error(dof, "bad "
15295 15295 "native argument index");
15296 15296 return (-1);
15297 15297 }
15298 15298
15299 15299 if (typeidx >= str_sec->dofs_size) {
15300 15300 dtrace_dof_error(dof, "bad "
15301 15301 "translated argument type");
15302 15302 return (-1);
15303 15303 }
15304 15304
15305 15305 typesz = strlen(typestr) + 1;
15306 15306 if (typesz > DTRACE_ARGTYPELEN) {
15307 15307 dtrace_dof_error(dof, "translated argument "
15308 15308 "type too long");
15309 15309 return (-1);
15310 15310 }
15311 15311
15312 15312 typeidx += typesz;
15313 15313 typestr += typesz;
15314 15314 }
15315 15315 }
15316 15316
15317 15317 return (0);
15318 15318 }
15319 15319
15320 15320 static int
15321 15321 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
15322 15322 {
15323 15323 dtrace_helpers_t *help;
15324 15324 dtrace_vstate_t *vstate;
15325 15325 dtrace_enabling_t *enab = NULL;
15326 15326 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
15327 15327 uintptr_t daddr = (uintptr_t)dof;
15328 15328
15329 15329 ASSERT(MUTEX_HELD(&dtrace_lock));
15330 15330
15331 15331 if ((help = curproc->p_dtrace_helpers) == NULL)
15332 15332 help = dtrace_helpers_create(curproc);
15333 15333
15334 15334 vstate = &help->dthps_vstate;
15335 15335
15336 15336 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
15337 15337 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
15338 15338 dtrace_dof_destroy(dof);
15339 15339 return (rv);
15340 15340 }
15341 15341
15342 15342 /*
15343 15343 * Look for helper providers and validate their descriptions.
15344 15344 */
15345 15345 if (dhp != NULL) {
15346 15346 for (i = 0; i < dof->dofh_secnum; i++) {
15347 15347 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15348 15348 dof->dofh_secoff + i * dof->dofh_secsize);
15349 15349
15350 15350 if (sec->dofs_type != DOF_SECT_PROVIDER)
15351 15351 continue;
15352 15352
15353 15353 if (dtrace_helper_provider_validate(dof, sec) != 0) {
15354 15354 dtrace_enabling_destroy(enab);
15355 15355 dtrace_dof_destroy(dof);
15356 15356 return (-1);
15357 15357 }
15358 15358
15359 15359 nprovs++;
15360 15360 }
15361 15361 }
15362 15362
15363 15363 /*
15364 15364 * Now we need to walk through the ECB descriptions in the enabling.
15365 15365 */
15366 15366 for (i = 0; i < enab->dten_ndesc; i++) {
15367 15367 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15368 15368 dtrace_probedesc_t *desc = &ep->dted_probe;
15369 15369
15370 15370 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
15371 15371 continue;
15372 15372
15373 15373 if (strcmp(desc->dtpd_mod, "helper") != 0)
15374 15374 continue;
15375 15375
15376 15376 if (strcmp(desc->dtpd_func, "ustack") != 0)
15377 15377 continue;
15378 15378
15379 15379 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
15380 15380 ep)) != 0) {
15381 15381 /*
15382 15382 * Adding this helper action failed -- we are now going
15383 15383 * to rip out the entire generation and return failure.
15384 15384 */
15385 15385 (void) dtrace_helper_destroygen(help->dthps_generation);
15386 15386 dtrace_enabling_destroy(enab);
15387 15387 dtrace_dof_destroy(dof);
15388 15388 return (-1);
15389 15389 }
15390 15390
15391 15391 nhelpers++;
15392 15392 }
15393 15393
15394 15394 if (nhelpers < enab->dten_ndesc)
15395 15395 dtrace_dof_error(dof, "unmatched helpers");
15396 15396
15397 15397 gen = help->dthps_generation++;
15398 15398 dtrace_enabling_destroy(enab);
15399 15399
15400 15400 if (dhp != NULL && nprovs > 0) {
15401 15401 /*
15402 15402 * Now that this is in-kernel, we change the sense of the
15403 15403 * members: dofhp_dof denotes the in-kernel copy of the DOF
15404 15404 * and dofhp_addr denotes the address at user-level.
15405 15405 */
15406 15406 dhp->dofhp_addr = dhp->dofhp_dof;
15407 15407 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
15408 15408
15409 15409 if (dtrace_helper_provider_add(dhp, gen) == 0) {
15410 15410 mutex_exit(&dtrace_lock);
15411 15411 dtrace_helper_provider_register(curproc, help, dhp);
15412 15412 mutex_enter(&dtrace_lock);
15413 15413
15414 15414 destroy = 0;
15415 15415 }
15416 15416 }
15417 15417
15418 15418 if (destroy)
15419 15419 dtrace_dof_destroy(dof);
15420 15420
15421 15421 return (gen);
15422 15422 }
15423 15423
15424 15424 static dtrace_helpers_t *
15425 15425 dtrace_helpers_create(proc_t *p)
15426 15426 {
15427 15427 dtrace_helpers_t *help;
15428 15428
15429 15429 ASSERT(MUTEX_HELD(&dtrace_lock));
15430 15430 ASSERT(p->p_dtrace_helpers == NULL);
15431 15431
15432 15432 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
15433 15433 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
15434 15434 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
15435 15435
15436 15436 p->p_dtrace_helpers = help;
15437 15437 dtrace_helpers++;
15438 15438
15439 15439 return (help);
15440 15440 }
15441 15441
15442 15442 static void
15443 15443 dtrace_helpers_destroy(void)
15444 15444 {
15445 15445 dtrace_helpers_t *help;
15446 15446 dtrace_vstate_t *vstate;
15447 15447 proc_t *p = curproc;
15448 15448 int i;
15449 15449
15450 15450 mutex_enter(&dtrace_lock);
15451 15451
15452 15452 ASSERT(p->p_dtrace_helpers != NULL);
15453 15453 ASSERT(dtrace_helpers > 0);
15454 15454
15455 15455 help = p->p_dtrace_helpers;
15456 15456 vstate = &help->dthps_vstate;
15457 15457
15458 15458 /*
15459 15459 * We're now going to lose the help from this process.
15460 15460 */
15461 15461 p->p_dtrace_helpers = NULL;
15462 15462 dtrace_sync();
15463 15463
15464 15464 /*
15465 15465 	 * Destroy the helper actions.
15466 15466 */
15467 15467 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15468 15468 dtrace_helper_action_t *h, *next;
15469 15469
15470 15470 for (h = help->dthps_actions[i]; h != NULL; h = next) {
15471 15471 next = h->dtha_next;
15472 15472 dtrace_helper_action_destroy(h, vstate);
15473 15473 h = next;
15474 15474 }
15475 15475 }
15476 15476
15477 15477 mutex_exit(&dtrace_lock);
15478 15478
15479 15479 /*
15480 15480 * Destroy the helper providers.
15481 15481 */
15482 15482 if (help->dthps_maxprovs > 0) {
15483 15483 mutex_enter(&dtrace_meta_lock);
15484 15484 if (dtrace_meta_pid != NULL) {
15485 15485 ASSERT(dtrace_deferred_pid == NULL);
15486 15486
15487 15487 for (i = 0; i < help->dthps_nprovs; i++) {
15488 15488 dtrace_helper_provider_remove(
15489 15489 &help->dthps_provs[i]->dthp_prov, p->p_pid);
15490 15490 }
15491 15491 } else {
15492 15492 mutex_enter(&dtrace_lock);
15493 15493 ASSERT(help->dthps_deferred == 0 ||
15494 15494 help->dthps_next != NULL ||
15495 15495 help->dthps_prev != NULL ||
15496 15496 help == dtrace_deferred_pid);
15497 15497
15498 15498 /*
15499 15499 * Remove the helper from the deferred list.
15500 15500 */
15501 15501 if (help->dthps_next != NULL)
15502 15502 help->dthps_next->dthps_prev = help->dthps_prev;
15503 15503 if (help->dthps_prev != NULL)
15504 15504 help->dthps_prev->dthps_next = help->dthps_next;
15505 15505 if (dtrace_deferred_pid == help) {
15506 15506 dtrace_deferred_pid = help->dthps_next;
15507 15507 ASSERT(help->dthps_prev == NULL);
15508 15508 }
15509 15509
15510 15510 mutex_exit(&dtrace_lock);
15511 15511 }
15512 15512
15513 15513 mutex_exit(&dtrace_meta_lock);
15514 15514
15515 15515 for (i = 0; i < help->dthps_nprovs; i++) {
15516 15516 dtrace_helper_provider_destroy(help->dthps_provs[i]);
15517 15517 }
15518 15518
15519 15519 kmem_free(help->dthps_provs, help->dthps_maxprovs *
15520 15520 sizeof (dtrace_helper_provider_t *));
15521 15521 }
15522 15522
15523 15523 mutex_enter(&dtrace_lock);
15524 15524
15525 15525 dtrace_vstate_fini(&help->dthps_vstate);
15526 15526 kmem_free(help->dthps_actions,
15527 15527 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
15528 15528 kmem_free(help, sizeof (dtrace_helpers_t));
15529 15529
15530 15530 --dtrace_helpers;
15531 15531 mutex_exit(&dtrace_lock);
15532 15532 }
15533 15533
15534 15534 static void
15535 15535 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
15536 15536 {
15537 15537 dtrace_helpers_t *help, *newhelp;
15538 15538 dtrace_helper_action_t *helper, *new, *last;
15539 15539 dtrace_difo_t *dp;
15540 15540 dtrace_vstate_t *vstate;
15541 15541 int i, j, sz, hasprovs = 0;
15542 15542
15543 15543 mutex_enter(&dtrace_lock);
15544 15544 ASSERT(from->p_dtrace_helpers != NULL);
15545 15545 ASSERT(dtrace_helpers > 0);
15546 15546
15547 15547 help = from->p_dtrace_helpers;
15548 15548 newhelp = dtrace_helpers_create(to);
15549 15549 ASSERT(to->p_dtrace_helpers != NULL);
15550 15550
15551 15551 newhelp->dthps_generation = help->dthps_generation;
15552 15552 vstate = &newhelp->dthps_vstate;
15553 15553
15554 15554 /*
15555 15555 * Duplicate the helper actions.
15556 15556 */
15557 15557 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15558 15558 if ((helper = help->dthps_actions[i]) == NULL)
15559 15559 continue;
15560 15560
15561 15561 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
15562 15562 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
15563 15563 KM_SLEEP);
15564 15564 new->dtha_generation = helper->dtha_generation;
15565 15565
15566 15566 if ((dp = helper->dtha_predicate) != NULL) {
15567 15567 dp = dtrace_difo_duplicate(dp, vstate);
15568 15568 new->dtha_predicate = dp;
15569 15569 }
15570 15570
15571 15571 new->dtha_nactions = helper->dtha_nactions;
15572 15572 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
15573 15573 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
15574 15574
15575 15575 for (j = 0; j < new->dtha_nactions; j++) {
15576 15576 dtrace_difo_t *dp = helper->dtha_actions[j];
15577 15577
15578 15578 ASSERT(dp != NULL);
15579 15579 dp = dtrace_difo_duplicate(dp, vstate);
15580 15580 new->dtha_actions[j] = dp;
15581 15581 }
15582 15582
15583 15583 if (last != NULL) {
15584 15584 last->dtha_next = new;
15585 15585 } else {
15586 15586 newhelp->dthps_actions[i] = new;
15587 15587 }
15588 15588
15589 15589 last = new;
15590 15590 }
15591 15591 }
15592 15592
15593 15593 /*
15594 15594 * Duplicate the helper providers and register them with the
15595 15595 * DTrace framework.
15596 15596 */
15597 15597 if (help->dthps_nprovs > 0) {
15598 15598 newhelp->dthps_nprovs = help->dthps_nprovs;
15599 15599 newhelp->dthps_maxprovs = help->dthps_nprovs;
15600 15600 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
15601 15601 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15602 15602 for (i = 0; i < newhelp->dthps_nprovs; i++) {
15603 15603 newhelp->dthps_provs[i] = help->dthps_provs[i];
15604 15604 newhelp->dthps_provs[i]->dthp_ref++;
15605 15605 }
15606 15606
15607 15607 hasprovs = 1;
15608 15608 }
15609 15609
15610 15610 mutex_exit(&dtrace_lock);
15611 15611
15612 15612 if (hasprovs)
15613 15613 dtrace_helper_provider_register(to, newhelp, NULL);
15614 15614 }
15615 15615
15616 15616 /*
15617 15617 * DTrace Hook Functions
15618 15618 */
15619 15619 static void
15620 15620 dtrace_module_loaded(struct modctl *ctl)
15621 15621 {
15622 15622 dtrace_provider_t *prv;
15623 15623
15624 15624 mutex_enter(&dtrace_provider_lock);
15625 15625 mutex_enter(&mod_lock);
15626 15626
15627 15627 ASSERT(ctl->mod_busy);
15628 15628
15629 15629 /*
15630 15630 	 * We're going to call each provider's per-module provide operation
15631 15631 * specifying only this module.
15632 15632 */
15633 15633 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15634 15634 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15635 15635
15636 15636 mutex_exit(&mod_lock);
15637 15637 mutex_exit(&dtrace_provider_lock);
15638 15638
15639 15639 /*
15640 15640 * If we have any retained enablings, we need to match against them.
15641 15641 * Enabling probes requires that cpu_lock be held, and we cannot hold
15642 15642 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15643 15643 * module. (In particular, this happens when loading scheduling
15644 15644 * classes.) So if we have any retained enablings, we need to dispatch
15645 15645 * our task queue to do the match for us.
15646 15646 */
15647 15647 mutex_enter(&dtrace_lock);
15648 15648
15649 15649 if (dtrace_retained == NULL) {
15650 15650 mutex_exit(&dtrace_lock);
15651 15651 return;
15652 15652 }
15653 15653
15654 15654 (void) taskq_dispatch(dtrace_taskq,
15655 15655 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15656 15656
15657 15657 mutex_exit(&dtrace_lock);
15658 15658
15659 15659 /*
15660 15660 * And now, for a little heuristic sleaze: in general, we want to
15661 15661 * match modules as soon as they load. However, we cannot guarantee
15662 15662 * this, because it would lead us to the lock ordering violation
15663 15663 * outlined above. The common case, of course, is that cpu_lock is
15664 15664 * _not_ held -- so we delay here for a clock tick, hoping that that's
15665 15665 * long enough for the task queue to do its work. If it's not, it's
15666 15666 * not a serious problem -- it just means that the module that we
15667 15667 * just loaded may not be immediately instrumentable.
15668 15668 */
15669 15669 delay(1);
15670 15670 }
15671 15671
15672 15672 static void
15673 15673 dtrace_module_unloaded(struct modctl *ctl)
15674 15674 {
15675 15675 dtrace_probe_t template, *probe, *first, *next;
15676 15676 dtrace_provider_t *prov;
15677 15677
15678 15678 template.dtpr_mod = ctl->mod_modname;
15679 15679
15680 15680 mutex_enter(&dtrace_provider_lock);
15681 15681 mutex_enter(&mod_lock);
15682 15682 mutex_enter(&dtrace_lock);
15683 15683
15684 15684 if (dtrace_bymod == NULL) {
15685 15685 /*
15686 15686 * The DTrace module is loaded (obviously) but not attached;
15687 15687 * we don't have any work to do.
15688 15688 */
15689 15689 mutex_exit(&dtrace_provider_lock);
15690 15690 mutex_exit(&mod_lock);
15691 15691 mutex_exit(&dtrace_lock);
15692 15692 return;
15693 15693 }
15694 15694
15695 15695 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15696 15696 probe != NULL; probe = probe->dtpr_nextmod) {
15697 15697 if (probe->dtpr_ecb != NULL) {
15698 15698 mutex_exit(&dtrace_provider_lock);
15699 15699 mutex_exit(&mod_lock);
15700 15700 mutex_exit(&dtrace_lock);
15701 15701
15702 15702 /*
15703 15703 * This shouldn't _actually_ be possible -- we're
15704 15704 * unloading a module that has an enabled probe in it.
15705 15705 * (It's normally up to the provider to make sure that
15706 15706 * this can't happen.) However, because dtps_enable()
15707 15707 * doesn't have a failure mode, there can be an
15708 15708 * enable/unload race. Upshot: we don't want to
15709 15709 * assert, but we're not going to disable the
15710 15710 * probe, either.
15711 15711 */
15712 15712 if (dtrace_err_verbose) {
15713 15713 cmn_err(CE_WARN, "unloaded module '%s' had "
15714 15714 "enabled probes", ctl->mod_modname);
15715 15715 }
15716 15716
15717 15717 return;
15718 15718 }
15719 15719 }
15720 15720
15721 15721 probe = first;
15722 15722
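	/*
	 * Detach each of the module's probes from the hash chains and the
	 * probe array, collecting them on a private list ('first') so that
	 * they can be destroyed once dtrace_sync() has completed below.
	 */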
15723 15723 for (first = NULL; probe != NULL; probe = next) {
15724 15724 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
15725 15725
15726 15726 dtrace_probes[probe->dtpr_id - 1] = NULL;
15727 15727
15728 15728 next = probe->dtpr_nextmod;
15729 15729 dtrace_hash_remove(dtrace_bymod, probe);
15730 15730 dtrace_hash_remove(dtrace_byfunc, probe);
15731 15731 dtrace_hash_remove(dtrace_byname, probe);
15732 15732
15733 15733 if (first == NULL) {
15734 15734 first = probe;
15735 15735 probe->dtpr_nextmod = NULL;
15736 15736 } else {
15737 15737 probe->dtpr_nextmod = first;
15738 15738 first = probe;
15739 15739 }
15740 15740 }
15741 15741
15742 15742 /*
15743 15743 * We've removed all of the module's probes from the hash chains and
15744 15744 * from the probe array. Now issue a dtrace_sync() to be sure that
15745 15745 * everyone has cleared out from any probe array processing.
15746 15746 */
15747 15747 dtrace_sync();
15748 15748
15749 15749 for (probe = first; probe != NULL; probe = first) {
15750 15750 first = probe->dtpr_nextmod;
15751 15751 prov = probe->dtpr_provider;
15752 15752 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15753 15753 probe->dtpr_arg);
15754 15754 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15755 15755 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15756 15756 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15757 15757 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15758 15758 kmem_free(probe, sizeof (dtrace_probe_t));
15759 15759 }
15760 15760
15761 15761 mutex_exit(&dtrace_lock);
15762 15762 mutex_exit(&mod_lock);
15763 15763 mutex_exit(&dtrace_provider_lock);
15764 15764 }
15765 15765
15766 15766 void
15767 15767 dtrace_suspend(void)
15768 15768 {
15769 15769 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15770 15770 }
15771 15771
15772 15772 void
15773 15773 dtrace_resume(void)
15774 15774 {
15775 15775 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15776 15776 }
15777 15777
15778 15778 static int
15779 15779 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15780 15780 {
15781 15781 ASSERT(MUTEX_HELD(&cpu_lock));
15782 15782 mutex_enter(&dtrace_lock);
15783 15783
15784 15784 switch (what) {
15785 15785 case CPU_CONFIG: {
15786 15786 dtrace_state_t *state;
15787 15787 dtrace_optval_t *opt, rs, c;
15788 15788
15789 15789 /*
15790 15790 * For now, we only allocate a new buffer for anonymous state.
15791 15791 */
15792 15792 if ((state = dtrace_anon.dta_state) == NULL)
15793 15793 break;
15794 15794
15795 15795 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15796 15796 break;
15797 15797
15798 15798 opt = state->dts_options;
15799 15799 c = opt[DTRACEOPT_CPU];
15800 15800
15801 15801 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15802 15802 break;
15803 15803
15804 15804 /*
15805 15805 * Regardless of what the actual policy is, we're going to
15806 15806 * temporarily set our resize policy to be manual. We're
15807 15807 * also going to temporarily set our CPU option to denote
15808 15808 * the newly configured CPU.
15809 15809 */
15810 15810 rs = opt[DTRACEOPT_BUFRESIZE];
15811 15811 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15812 15812 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15813 15813
15814 15814 (void) dtrace_state_buffers(state);
15815 15815
15816 15816 opt[DTRACEOPT_BUFRESIZE] = rs;
15817 15817 opt[DTRACEOPT_CPU] = c;
15818 15818
15819 15819 break;
15820 15820 }
15821 15821
15822 15822 case CPU_UNCONFIG:
15823 15823 /*
15824 15824 * We don't free the buffer in the CPU_UNCONFIG case. (The
15825 15825 * buffer will be freed when the consumer exits.)
15826 15826 */
15827 15827 break;
15828 15828
15829 15829 default:
15830 15830 break;
15831 15831 }
15832 15832
15833 15833 mutex_exit(&dtrace_lock);
15834 15834 return (0);
15835 15835 }
15836 15836
15837 15837 static void
15838 15838 dtrace_cpu_setup_initial(processorid_t cpu)
15839 15839 {
15840 15840 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15841 15841 }
15842 15842
15843 15843 static void
15844 15844 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15845 15845 {
15846 15846 if (dtrace_toxranges >= dtrace_toxranges_max) {
15847 15847 int osize, nsize;
15848 15848 dtrace_toxrange_t *range;
15849 15849
15850 15850 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15851 15851
15852 15852 if (osize == 0) {
15853 15853 ASSERT(dtrace_toxrange == NULL);
15854 15854 ASSERT(dtrace_toxranges_max == 0);
15855 15855 dtrace_toxranges_max = 1;
15856 15856 } else {
15857 15857 dtrace_toxranges_max <<= 1;
15858 15858 }
15859 15859
15860 15860 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15861 15861 range = kmem_zalloc(nsize, KM_SLEEP);
15862 15862
15863 15863 if (dtrace_toxrange != NULL) {
15864 15864 ASSERT(osize != 0);
15865 15865 bcopy(dtrace_toxrange, range, osize);
15866 15866 kmem_free(dtrace_toxrange, osize);
15867 15867 }
15868 15868
15869 15869 dtrace_toxrange = range;
15870 15870 }
15871 15871
15872 15872 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
15873 15873 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
15874 15874
15875 15875 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15876 15876 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15877 15877 dtrace_toxranges++;
15878 15878 }
15879 15879
15880 15880 static void
15881 15881 dtrace_getf_barrier()
15882 15882 {
15883 15883 /*
15884 15884 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
15885 15885 * that contain calls to getf(), this routine will be called on every
15886 15886 * closef() before either the underlying vnode is released or the
15887 15887 * file_t itself is freed. By the time we are here, it is essential
15888 15888 * that the file_t can no longer be accessed from a call to getf()
15889 15889 * in probe context -- that assures that a dtrace_sync() can be used
15890 15890 * to clear out any enablings referring to the old structures.
15891 15891 */
15892 15892 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
15893 15893 kcred->cr_zone->zone_dtrace_getf != 0)
15894 15894 dtrace_sync();
15895 15895 }
15896 15896
15897 15897 /*
15898 15898 * DTrace Driver Cookbook Functions
15899 15899 */
15900 15900 /*ARGSUSED*/
15901 15901 static int
15902 15902 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15903 15903 {
15904 15904 dtrace_provider_id_t id;
15905 15905 dtrace_state_t *state = NULL;
15906 15906 dtrace_enabling_t *enab;
15907 15907
15908 15908 mutex_enter(&cpu_lock);
15909 15909 mutex_enter(&dtrace_provider_lock);
15910 15910 mutex_enter(&dtrace_lock);
15911 15911
15912 15912 if (ddi_soft_state_init(&dtrace_softstate,
15913 15913 sizeof (dtrace_state_t), 0) != 0) {
15914 15914 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15915 15915 mutex_exit(&cpu_lock);
15916 15916 mutex_exit(&dtrace_provider_lock);
15917 15917 mutex_exit(&dtrace_lock);
15918 15918 return (DDI_FAILURE);
15919 15919 }
15920 15920
15921 15921 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15922 15922 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15923 15923 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15924 15924 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15925 15925 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15926 15926 ddi_remove_minor_node(devi, NULL);
15927 15927 ddi_soft_state_fini(&dtrace_softstate);
15928 15928 mutex_exit(&cpu_lock);
15929 15929 mutex_exit(&dtrace_provider_lock);
15930 15930 mutex_exit(&dtrace_lock);
15931 15931 return (DDI_FAILURE);
15932 15932 }
15933 15933
15934 15934 ddi_report_dev(devi);
15935 15935 dtrace_devi = devi;
15936 15936
15937 15937 dtrace_modload = dtrace_module_loaded;
15938 15938 dtrace_modunload = dtrace_module_unloaded;
15939 15939 dtrace_cpu_init = dtrace_cpu_setup_initial;
15940 15940 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15941 15941 dtrace_helpers_fork = dtrace_helpers_duplicate;
15942 15942 dtrace_cpustart_init = dtrace_suspend;
15943 15943 dtrace_cpustart_fini = dtrace_resume;
15944 15944 dtrace_debugger_init = dtrace_suspend;
15945 15945 dtrace_debugger_fini = dtrace_resume;
15946 15946
15947 15947 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15948 15948
15949 15949 ASSERT(MUTEX_HELD(&cpu_lock));
15950 15950
15951 15951 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15952 15952 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15953 15953 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15954 15954 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15955 15955 VM_SLEEP | VMC_IDENTIFIER);
15956 15956 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15957 15957 1, INT_MAX, 0);
15958 15958
15959 15959 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15960 15960 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15961 15961 NULL, NULL, NULL, NULL, NULL, 0);
15962 15962
15963 15963 ASSERT(MUTEX_HELD(&cpu_lock));
15964 15964 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15965 15965 offsetof(dtrace_probe_t, dtpr_nextmod),
15966 15966 offsetof(dtrace_probe_t, dtpr_prevmod));
15967 15967
15968 15968 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15969 15969 offsetof(dtrace_probe_t, dtpr_nextfunc),
15970 15970 offsetof(dtrace_probe_t, dtpr_prevfunc));
15971 15971
15972 15972 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15973 15973 offsetof(dtrace_probe_t, dtpr_nextname),
15974 15974 offsetof(dtrace_probe_t, dtpr_prevname));
15975 15975
15976 15976 if (dtrace_retain_max < 1) {
15977 15977 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15978 15978 "setting to 1", dtrace_retain_max);
15979 15979 dtrace_retain_max = 1;
15980 15980 }
15981 15981
15982 15982 /*
15983 15983 * Now discover our toxic ranges.
15984 15984 */
15985 15985 dtrace_toxic_ranges(dtrace_toxrange_add);
15986 15986
15987 15987 /*
15988 15988 * Before we register ourselves as a provider to our own framework,
15989 15989 * we would like to assert that dtrace_provider is NULL -- but that's
15990 15990 * not true if we were loaded as a dependency of a DTrace provider.
15991 15991 * Once we've registered, we can assert that dtrace_provider is our
15992 15992 * pseudo provider.
15993 15993 */
15994 15994 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15995 15995 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15996 15996
15997 15997 ASSERT(dtrace_provider != NULL);
15998 15998 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15999 15999
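	/*
	 * Create the framework's own BEGIN, END and ERROR probes through the
	 * "dtrace" pseudo-provider registered above.
	 */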
16000 16000 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
16001 16001 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
16002 16002 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
16003 16003 dtrace_provider, NULL, NULL, "END", 0, NULL);
16004 16004 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
16005 16005 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
16006 16006
16007 16007 dtrace_anon_property();
16008 16008 mutex_exit(&cpu_lock);
16009 16009
16010 16010 /*
16011 16011 * If there are already providers, we must ask them to provide their
16012 16012 * probes, and then match any anonymous enabling against them. Note
16013 16013 * that there should be no other retained enablings at this time:
16014 16014 * the only retained enablings at this time should be the anonymous
16015 16015 * enabling.
16016 16016 */
16017 16017 if (dtrace_anon.dta_enabling != NULL) {
16018 16018 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
16019 16019
16020 16020 dtrace_enabling_provide(NULL);
16021 16021 state = dtrace_anon.dta_state;
16022 16022
16023 16023 /*
16024 16024 * We couldn't hold cpu_lock across the above call to
16025 16025 * dtrace_enabling_provide(), but we must hold it to actually
16026 16026 * enable the probes. We have to drop all of our locks, pick
16027 16027 * up cpu_lock, and regain our locks before matching the
16028 16028 * retained anonymous enabling.
16029 16029 */
16030 16030 mutex_exit(&dtrace_lock);
16031 16031 mutex_exit(&dtrace_provider_lock);
16032 16032
16033 16033 mutex_enter(&cpu_lock);
16034 16034 mutex_enter(&dtrace_provider_lock);
16035 16035 mutex_enter(&dtrace_lock);
16036 16036
16037 16037 if ((enab = dtrace_anon.dta_enabling) != NULL)
16038 16038 (void) dtrace_enabling_match(enab, NULL);
16039 16039
16040 16040 mutex_exit(&cpu_lock);
16041 16041 }
16042 16042
16043 16043 mutex_exit(&dtrace_lock);
16044 16044 mutex_exit(&dtrace_provider_lock);
16045 16045
16046 16046 if (state != NULL) {
16047 16047 /*
16048 16048 * If we created any anonymous state, set it going now.
16049 16049 */
16050 16050 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
16051 16051 }
16052 16052
16053 16053 return (DDI_SUCCESS);
16054 16054 }
16055 16055
16056 16056 /*ARGSUSED*/
16057 16057 static int
16058 16058 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
16059 16059 {
16060 16060 dtrace_state_t *state;
16061 16061 uint32_t priv;
16062 16062 uid_t uid;
16063 16063 zoneid_t zoneid;
16064 16064
16065 16065 if (getminor(*devp) == DTRACEMNRN_HELPER)
16066 16066 return (0);
16067 16067
16068 16068 /*
16069 16069 * If this wasn't an open with the "helper" minor, then it must be
16070 16070 * the "dtrace" minor.
16071 16071 */
16072 16072 if (getminor(*devp) != DTRACEMNRN_DTRACE)
16073 16073 return (ENXIO);
16074 16074
16075 16075 /*
16076 16076 * If no DTRACE_PRIV_* bits are set in the credential, then the
16077 16077 * caller lacks sufficient permission to do anything with DTrace.
16078 16078 */
16079 16079 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
16080 16080 if (priv == DTRACE_PRIV_NONE)
16081 16081 return (EACCES);
16082 16082
16083 16083 /*
16084 16084 * Ask all providers to provide all their probes.
16085 16085 */
16086 16086 mutex_enter(&dtrace_provider_lock);
16087 16087 dtrace_probe_provide(NULL, NULL);
16088 16088 mutex_exit(&dtrace_provider_lock);
16089 16089
16090 16090 mutex_enter(&cpu_lock);
16091 16091 mutex_enter(&dtrace_lock);
16092 16092 dtrace_opens++;
16093 16093 dtrace_membar_producer();
16094 16094
16095 16095 /*
16096 16096 * If the kernel debugger is active (that is, if the kernel debugger
16097 16097 * modified text in some way), we won't allow the open.
16098 16098 */
16099 16099 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
16100 16100 dtrace_opens--;
16101 16101 mutex_exit(&cpu_lock);
16102 16102 mutex_exit(&dtrace_lock);
16103 16103 return (EBUSY);
16104 16104 }
16105 16105
16106 16106 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) {
16107 16107 /*
16108 16108 * If DTrace helper tracing is enabled, we need to allocate the
16109 16109 * trace buffer and initialize the values.
16110 16110 */
16111 16111 dtrace_helptrace_buffer =
16112 16112 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
16113 16113 dtrace_helptrace_next = 0;
16114 16114 dtrace_helptrace_wrapped = 0;
16115 16115 dtrace_helptrace_enable = 0;
16116 16116 }
16117 16117
16118 16118 state = dtrace_state_create(devp, cred_p);
16119 16119 mutex_exit(&cpu_lock);
16120 16120
16121 16121 if (state == NULL) {
16122 16122 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16123 16123 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16124 16124 mutex_exit(&dtrace_lock);
16125 16125 return (EAGAIN);
16126 16126 }
16127 16127
16128 16128 mutex_exit(&dtrace_lock);
16129 16129
16130 16130 return (0);
16131 16131 }
16132 16132
16133 16133 /*ARGSUSED*/
16134 16134 static int
16135 16135 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
16136 16136 {
16137 16137 minor_t minor = getminor(dev);
16138 16138 dtrace_state_t *state;
16139 16139 dtrace_helptrace_t *buf = NULL;
16140 16140
16141 16141 if (minor == DTRACEMNRN_HELPER)
16142 16142 return (0);
16143 16143
16144 16144 state = ddi_get_soft_state(dtrace_softstate, minor);
16145 16145
16146 16146 mutex_enter(&cpu_lock);
16147 16147 mutex_enter(&dtrace_lock);
16148 16148
16149 16149 if (state->dts_anon) {
16150 16150 /*
16151 16151 * There is anonymous state. Destroy that first.
16152 16152 */
16153 16153 ASSERT(dtrace_anon.dta_state == NULL);
16154 16154 dtrace_state_destroy(state->dts_anon);
16155 16155 }
16156 16156
16157 16157 if (dtrace_helptrace_disable) {
16158 16158 /*
16159 16159 * If we have been told to disable helper tracing, set the
16160 16160 * buffer to NULL before calling into dtrace_state_destroy();
16161 16161 * we take advantage of its dtrace_sync() to know that no
16162 16162 * CPU is in probe context with enabled helper tracing
16163 16163 * after it returns.
16164 16164 */
16165 16165 buf = dtrace_helptrace_buffer;
16166 16166 dtrace_helptrace_buffer = NULL;
16167 16167 }
16168 16168
16169 16169 dtrace_state_destroy(state);
16170 16170 ASSERT(dtrace_opens > 0);
16171 16171
16172 16172 /*
16173 16173 * Only relinquish control of the kernel debugger interface when there
16174 16174 * are no consumers and no anonymous enablings.
16175 16175 */
16176 16176 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16177 16177 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16178 16178
16179 16179 if (buf != NULL) {
16180 16180 kmem_free(buf, dtrace_helptrace_bufsize);
16181 16181 dtrace_helptrace_disable = 0;
16182 16182 }
16183 16183
16184 16184 mutex_exit(&dtrace_lock);
16185 16185 mutex_exit(&cpu_lock);
16186 16186
16187 16187 return (0);
16188 16188 }
16189 16189
16190 16190 /*ARGSUSED*/
16191 16191 static int
16192 16192 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
16193 16193 {
16194 16194 int rval;
16195 16195 dof_helper_t help, *dhp = NULL;
16196 16196
16197 16197 switch (cmd) {
16198 16198 case DTRACEHIOC_ADDDOF:
16199 16199 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
16200 16200 dtrace_dof_error(NULL, "failed to copyin DOF helper");
16201 16201 return (EFAULT);
16202 16202 }
16203 16203
16204 16204 dhp = &help;
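		/*
		 * For DTRACEHIOC_ADDDOF, the user-level DOF address comes from
		 * the helper descriptor; fall through to the ADD path to copy
		 * it in.
		 */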
16205 16205 arg = (intptr_t)help.dofhp_dof;
16206 16206 /*FALLTHROUGH*/
16207 16207
16208 16208 case DTRACEHIOC_ADD: {
16209 16209 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
16210 16210
16211 16211 if (dof == NULL)
16212 16212 return (rval);
16213 16213
16214 16214 mutex_enter(&dtrace_lock);
16215 16215
16216 16216 /*
16217 16217 * dtrace_helper_slurp() takes responsibility for the dof --
16218 16218 * it may free it now or it may save it and free it later.
16219 16219 */
16220 16220 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
16221 16221 *rv = rval;
16222 16222 rval = 0;
16223 16223 } else {
16224 16224 rval = EINVAL;
16225 16225 }
16226 16226
16227 16227 mutex_exit(&dtrace_lock);
16228 16228 return (rval);
16229 16229 }
16230 16230
16231 16231 case DTRACEHIOC_REMOVE: {
16232 16232 mutex_enter(&dtrace_lock);
16233 16233 rval = dtrace_helper_destroygen(arg);
16234 16234 mutex_exit(&dtrace_lock);
16235 16235
16236 16236 return (rval);
16237 16237 }
16238 16238
16239 16239 default:
16240 16240 break;
16241 16241 }
16242 16242
16243 16243 return (ENOTTY);
16244 16244 }
16245 16245
16246 16246 /*ARGSUSED*/
16247 16247 static int
16248 16248 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
16249 16249 {
16250 16250 minor_t minor = getminor(dev);
16251 16251 dtrace_state_t *state;
16252 16252 int rval;
16253 16253
16254 16254 if (minor == DTRACEMNRN_HELPER)
16255 16255 return (dtrace_ioctl_helper(cmd, arg, rv));
16256 16256
16257 16257 state = ddi_get_soft_state(dtrace_softstate, minor);
16258 16258
16259 16259 if (state->dts_anon) {
16260 16260 ASSERT(dtrace_anon.dta_state == NULL);
16261 16261 state = state->dts_anon;
16262 16262 }
16263 16263
16264 16264 switch (cmd) {
16265 16265 case DTRACEIOC_PROVIDER: {
16266 16266 dtrace_providerdesc_t pvd;
16267 16267 dtrace_provider_t *pvp;
16268 16268
16269 16269 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
16270 16270 return (EFAULT);
16271 16271
16272 16272 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
16273 16273 mutex_enter(&dtrace_provider_lock);
16274 16274
16275 16275 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
16276 16276 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
16277 16277 break;
16278 16278 }
16279 16279
16280 16280 mutex_exit(&dtrace_provider_lock);
16281 16281
16282 16282 if (pvp == NULL)
16283 16283 return (ESRCH);
16284 16284
16285 16285 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
16286 16286 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
16287 16287 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
16288 16288 return (EFAULT);
16289 16289
16290 16290 return (0);
16291 16291 }
16292 16292
16293 16293 case DTRACEIOC_EPROBE: {
16294 16294 dtrace_eprobedesc_t epdesc;
16295 16295 dtrace_ecb_t *ecb;
16296 16296 dtrace_action_t *act;
16297 16297 void *buf;
16298 16298 size_t size;
16299 16299 uintptr_t dest;
16300 16300 int nrecs;
16301 16301
16302 16302 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
16303 16303 return (EFAULT);
16304 16304
16305 16305 mutex_enter(&dtrace_lock);
16306 16306
16307 16307 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
16308 16308 mutex_exit(&dtrace_lock);
16309 16309 return (EINVAL);
16310 16310 }
16311 16311
16312 16312 if (ecb->dte_probe == NULL) {
16313 16313 mutex_exit(&dtrace_lock);
16314 16314 return (EINVAL);
16315 16315 }
16316 16316
16317 16317 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
16318 16318 epdesc.dtepd_uarg = ecb->dte_uarg;
16319 16319 epdesc.dtepd_size = ecb->dte_size;
16320 16320
16321 16321 nrecs = epdesc.dtepd_nrecs;
16322 16322 epdesc.dtepd_nrecs = 0;
16323 16323 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16324 16324 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16325 16325 continue;
16326 16326
16327 16327 epdesc.dtepd_nrecs++;
16328 16328 }
16329 16329
16330 16330 /*
16331 16331 * Now that we have the size, we need to allocate a temporary
16332 16332 * buffer in which to store the complete description. We need
16333 16333 * the temporary buffer to be able to drop dtrace_lock()
16334 16334 * across the copyout(), below.
16335 16335 */
16336 16336 size = sizeof (dtrace_eprobedesc_t) +
16337 16337 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
16338 16338
16339 16339 buf = kmem_alloc(size, KM_SLEEP);
16340 16340 dest = (uintptr_t)buf;
16341 16341
16342 16342 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
16343 16343 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
16344 16344
16345 16345 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16346 16346 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16347 16347 continue;
16348 16348
16349 16349 if (nrecs-- == 0)
16350 16350 break;
16351 16351
16352 16352 bcopy(&act->dta_rec, (void *)dest,
16353 16353 sizeof (dtrace_recdesc_t));
16354 16354 dest += sizeof (dtrace_recdesc_t);
16355 16355 }
16356 16356
16357 16357 mutex_exit(&dtrace_lock);
16358 16358
16359 16359 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16360 16360 kmem_free(buf, size);
16361 16361 return (EFAULT);
16362 16362 }
16363 16363
16364 16364 kmem_free(buf, size);
16365 16365 return (0);
16366 16366 }
16367 16367
16368 16368 case DTRACEIOC_AGGDESC: {
16369 16369 dtrace_aggdesc_t aggdesc;
16370 16370 dtrace_action_t *act;
16371 16371 dtrace_aggregation_t *agg;
16372 16372 int nrecs;
16373 16373 uint32_t offs;
16374 16374 dtrace_recdesc_t *lrec;
16375 16375 void *buf;
16376 16376 size_t size;
16377 16377 uintptr_t dest;
16378 16378
16379 16379 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
16380 16380 return (EFAULT);
16381 16381
16382 16382 mutex_enter(&dtrace_lock);
16383 16383
16384 16384 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
16385 16385 mutex_exit(&dtrace_lock);
16386 16386 return (EINVAL);
16387 16387 }
16388 16388
16389 16389 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
16390 16390
16391 16391 nrecs = aggdesc.dtagd_nrecs;
16392 16392 aggdesc.dtagd_nrecs = 0;
16393 16393
16394 16394 offs = agg->dtag_base;
16395 16395 lrec = &agg->dtag_action.dta_rec;
16396 16396 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
16397 16397
16398 16398 for (act = agg->dtag_first; ; act = act->dta_next) {
16399 16399 ASSERT(act->dta_intuple ||
16400 16400 DTRACEACT_ISAGG(act->dta_kind));
16401 16401
16402 16402 /*
16403 16403 * If this action has a record size of zero, it
16404 16404 * denotes an argument to the aggregating action.
16405 16405 * Because the presence of this record doesn't (or
16406 16406 * shouldn't) affect the way the data is interpreted,
16407 16407 * we don't copy it out to save user-level the
16408 16408 * confusion of dealing with a zero-length record.
16409 16409 */
16410 16410 if (act->dta_rec.dtrd_size == 0) {
16411 16411 ASSERT(agg->dtag_hasarg);
16412 16412 continue;
16413 16413 }
16414 16414
16415 16415 aggdesc.dtagd_nrecs++;
16416 16416
16417 16417 if (act == &agg->dtag_action)
16418 16418 break;
16419 16419 }
16420 16420
16421 16421 /*
16422 16422 * Now that we have the size, we need to allocate a temporary
16423 16423 * buffer in which to store the complete description. We need
16424 16424 * the temporary buffer to be able to drop dtrace_lock()
16425 16425 * across the copyout(), below.
16426 16426 */
16427 16427 size = sizeof (dtrace_aggdesc_t) +
16428 16428 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
16429 16429
16430 16430 buf = kmem_alloc(size, KM_SLEEP);
16431 16431 dest = (uintptr_t)buf;
16432 16432
16433 16433 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
16434 16434 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
16435 16435
16436 16436 for (act = agg->dtag_first; ; act = act->dta_next) {
16437 16437 dtrace_recdesc_t rec = act->dta_rec;
16438 16438
16439 16439 /*
16440 16440 * See the comment in the above loop for why we pass
16441 16441 * over zero-length records.
16442 16442 */
16443 16443 if (rec.dtrd_size == 0) {
16444 16444 ASSERT(agg->dtag_hasarg);
16445 16445 continue;
16446 16446 }
16447 16447
16448 16448 if (nrecs-- == 0)
16449 16449 break;
16450 16450
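			/*
			 * Record offsets are rebased so that they are
			 * relative to the start of the aggregation's data
			 * (dtag_base).
			 */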
16451 16451 rec.dtrd_offset -= offs;
16452 16452 bcopy(&rec, (void *)dest, sizeof (rec));
16453 16453 dest += sizeof (dtrace_recdesc_t);
16454 16454
16455 16455 if (act == &agg->dtag_action)
16456 16456 break;
16457 16457 }
16458 16458
16459 16459 mutex_exit(&dtrace_lock);
16460 16460
16461 16461 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16462 16462 kmem_free(buf, size);
16463 16463 return (EFAULT);
16464 16464 }
16465 16465
16466 16466 kmem_free(buf, size);
16467 16467 return (0);
16468 16468 }
16469 16469
16470 16470 case DTRACEIOC_ENABLE: {
16471 16471 dof_hdr_t *dof;
16472 16472 dtrace_enabling_t *enab = NULL;
16473 16473 dtrace_vstate_t *vstate;
16474 16474 int err = 0;
16475 16475
16476 16476 *rv = 0;
16477 16477
16478 16478 /*
16479 16479 * If a NULL argument has been passed, we take this as our
16480 16480 * cue to reevaluate our enablings.
16481 16481 */
16482 16482 if (arg == NULL) {
16483 16483 dtrace_enabling_matchall();
16484 16484
16485 16485 return (0);
16486 16486 }
16487 16487
16488 16488 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
16489 16489 return (rval);
16490 16490
16491 16491 mutex_enter(&cpu_lock);
16492 16492 mutex_enter(&dtrace_lock);
16493 16493 vstate = &state->dts_vstate;
16494 16494
16495 16495 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
16496 16496 mutex_exit(&dtrace_lock);
16497 16497 mutex_exit(&cpu_lock);
16498 16498 dtrace_dof_destroy(dof);
16499 16499 return (EBUSY);
16500 16500 }
16501 16501
16502 16502 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
16503 16503 mutex_exit(&dtrace_lock);
16504 16504 mutex_exit(&cpu_lock);
16505 16505 dtrace_dof_destroy(dof);
16506 16506 return (EINVAL);
16507 16507 }
16508 16508
16509 16509 if ((rval = dtrace_dof_options(dof, state)) != 0) {
16510 16510 dtrace_enabling_destroy(enab);
16511 16511 mutex_exit(&dtrace_lock);
16512 16512 mutex_exit(&cpu_lock);
16513 16513 dtrace_dof_destroy(dof);
16514 16514 return (rval);
16515 16515 }
16516 16516
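		/*
		 * If the enabling matched without error, retain it so that
		 * it can be re-evaluated later (e.g. via
		 * dtrace_enabling_matchall()); otherwise it is destroyed.
		 */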
16517 16517 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
16518 16518 err = dtrace_enabling_retain(enab);
16519 16519 } else {
16520 16520 dtrace_enabling_destroy(enab);
16521 16521 }
16522 16522
16523 16523 mutex_exit(&cpu_lock);
16524 16524 mutex_exit(&dtrace_lock);
16525 16525 dtrace_dof_destroy(dof);
16526 16526
16527 16527 return (err);
16528 16528 }
16529 16529
16530 16530 case DTRACEIOC_REPLICATE: {
16531 16531 dtrace_repldesc_t desc;
16532 16532 dtrace_probedesc_t *match = &desc.dtrpd_match;
16533 16533 dtrace_probedesc_t *create = &desc.dtrpd_create;
16534 16534 int err;
16535 16535
16536 16536 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16537 16537 return (EFAULT);
16538 16538
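		/*
		 * Ensure that the strings in both probe descriptions are
		 * null-terminated before they are used for matching and
		 * creation.
		 */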
16539 16539 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16540 16540 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16541 16541 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16542 16542 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16543 16543
16544 16544 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16545 16545 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16546 16546 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16547 16547 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16548 16548
16549 16549 mutex_enter(&dtrace_lock);
16550 16550 err = dtrace_enabling_replicate(state, match, create);
16551 16551 mutex_exit(&dtrace_lock);
16552 16552
16553 16553 return (err);
16554 16554 }
16555 16555
16556 16556 case DTRACEIOC_PROBEMATCH:
16557 16557 case DTRACEIOC_PROBES: {
16558 16558 dtrace_probe_t *probe = NULL;
16559 16559 dtrace_probedesc_t desc;
16560 16560 dtrace_probekey_t pkey;
16561 16561 dtrace_id_t i;
16562 16562 int m = 0;
16563 16563 uint32_t priv;
16564 16564 uid_t uid;
16565 16565 zoneid_t zoneid;
16566 16566
16567 16567 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16568 16568 return (EFAULT);
16569 16569
16570 16570 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16571 16571 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16572 16572 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16573 16573 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16574 16574
16575 16575 /*
16576 16576 * Before we attempt to match this probe, we want to give
16577 16577 * all providers the opportunity to provide it.
16578 16578 */
16579 16579 if (desc.dtpd_id == DTRACE_IDNONE) {
16580 16580 mutex_enter(&dtrace_provider_lock);
16581 16581 dtrace_probe_provide(&desc, NULL);
16582 16582 mutex_exit(&dtrace_provider_lock);
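			/*
			 * Probe IDs are 1-based (DTRACE_IDNONE is 0), so
			 * bumping the ID here starts the search below at the
			 * first probe.
			 */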
16583 16583 desc.dtpd_id++;
16584 16584 }
16585 16585
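		/*
		 * For DTRACEIOC_PROBEMATCH, convert the description into a
		 * probe key and clear the key's ID:  matching below is done
		 * on the name components while the loop walks probes by ID.
		 */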
16586 16586 if (cmd == DTRACEIOC_PROBEMATCH) {
16587 16587 dtrace_probekey(&desc, &pkey);
16588 16588 pkey.dtpk_id = DTRACE_IDNONE;
16589 16589 }
16590 16590
16591 16591 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
16592 16592
16593 16593 mutex_enter(&dtrace_lock);
16594 16594
16595 16595 if (cmd == DTRACEIOC_PROBEMATCH) {
16596 16596 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16597 16597 if ((probe = dtrace_probes[i - 1]) != NULL &&
16598 16598 (m = dtrace_match_probe(probe, &pkey,
16599 16599 priv, uid, zoneid)) != 0)
16600 16600 break;
16601 16601 }
16602 16602
16603 16603 if (m < 0) {
16604 16604 mutex_exit(&dtrace_lock);
16605 16605 return (EINVAL);
16606 16606 }
16607 16607
16608 16608 } else {
16609 16609 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16610 16610 if ((probe = dtrace_probes[i - 1]) != NULL &&
16611 16611 dtrace_match_priv(probe, priv, uid, zoneid))
16612 16612 break;
16613 16613 }
16614 16614 }
16615 16615
16616 16616 if (probe == NULL) {
16617 16617 mutex_exit(&dtrace_lock);
16618 16618 return (ESRCH);
16619 16619 }
16620 16620
16621 16621 dtrace_probe_description(probe, &desc);
16622 16622 mutex_exit(&dtrace_lock);
16623 16623
16624 16624 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16625 16625 return (EFAULT);
16626 16626
16627 16627 return (0);
16628 16628 }
16629 16629
16630 16630 case DTRACEIOC_PROBEARG: {
16631 16631 dtrace_argdesc_t desc;
16632 16632 dtrace_probe_t *probe;
16633 16633 dtrace_provider_t *prov;
16634 16634
16635 16635 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16636 16636 return (EFAULT);
16637 16637
16638 16638 if (desc.dtargd_id == DTRACE_IDNONE)
16639 16639 return (EINVAL);
16640 16640
16641 16641 if (desc.dtargd_ndx == DTRACE_ARGNONE)
16642 16642 return (EINVAL);
16643 16643
16644 16644 mutex_enter(&dtrace_provider_lock);
16645 16645 mutex_enter(&mod_lock);
16646 16646 mutex_enter(&dtrace_lock);
16647 16647
16648 16648 if (desc.dtargd_id > dtrace_nprobes) {
16649 16649 mutex_exit(&dtrace_lock);
16650 16650 mutex_exit(&mod_lock);
16651 16651 mutex_exit(&dtrace_provider_lock);
16652 16652 return (EINVAL);
16653 16653 }
16654 16654
16655 16655 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
16656 16656 mutex_exit(&dtrace_lock);
16657 16657 mutex_exit(&mod_lock);
16658 16658 mutex_exit(&dtrace_provider_lock);
16659 16659 return (EINVAL);
16660 16660 }
16661 16661
16662 16662 mutex_exit(&dtrace_lock);
16663 16663
16664 16664 prov = probe->dtpr_provider;
16665 16665
16666 16666 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
16667 16667 /*
16668 16668 * There isn't any typed information for this probe.
16669 16669 * Set the argument number to DTRACE_ARGNONE.
16670 16670 */
16671 16671 desc.dtargd_ndx = DTRACE_ARGNONE;
16672 16672 } else {
16673 16673 desc.dtargd_native[0] = '\0';
16674 16674 desc.dtargd_xlate[0] = '\0';
16675 16675 desc.dtargd_mapping = desc.dtargd_ndx;
16676 16676
16677 16677 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
16678 16678 probe->dtpr_id, probe->dtpr_arg, &desc);
16679 16679 }
16680 16680
16681 16681 mutex_exit(&mod_lock);
16682 16682 mutex_exit(&dtrace_provider_lock);
16683 16683
16684 16684 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16685 16685 return (EFAULT);
16686 16686
16687 16687 return (0);
16688 16688 }
16689 16689
16690 16690 case DTRACEIOC_GO: {
16691 16691 processorid_t cpuid;
16692 16692 rval = dtrace_state_go(state, &cpuid);
16693 16693
16694 16694 if (rval != 0)
16695 16695 return (rval);
16696 16696
16697 16697 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16698 16698 return (EFAULT);
16699 16699
16700 16700 return (0);
16701 16701 }
16702 16702
16703 16703 case DTRACEIOC_STOP: {
16704 16704 processorid_t cpuid;
16705 16705
16706 16706 mutex_enter(&dtrace_lock);
16707 16707 rval = dtrace_state_stop(state, &cpuid);
16708 16708 mutex_exit(&dtrace_lock);
16709 16709
16710 16710 if (rval != 0)
16711 16711 return (rval);
16712 16712
16713 16713 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16714 16714 return (EFAULT);
16715 16715
16716 16716 return (0);
16717 16717 }
16718 16718
16719 16719 case DTRACEIOC_DOFGET: {
16720 16720 dof_hdr_t hdr, *dof;
16721 16721 uint64_t len;
16722 16722
16723 16723 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
16724 16724 return (EFAULT);
16725 16725
16726 16726 mutex_enter(&dtrace_lock);
16727 16727 dof = dtrace_dof_create(state);
16728 16728 mutex_exit(&dtrace_lock);
16729 16729
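		/*
		 * Copy out no more than the caller can hold, as indicated by
		 * the load size in the header that it passed in.
		 */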
16730 16730 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
16731 16731 rval = copyout(dof, (void *)arg, len);
16732 16732 dtrace_dof_destroy(dof);
16733 16733
16734 16734 return (rval == 0 ? 0 : EFAULT);
16735 16735 }
16736 16736
16737 16737 case DTRACEIOC_AGGSNAP:
16738 16738 case DTRACEIOC_BUFSNAP: {
16739 16739 dtrace_bufdesc_t desc;
16740 16740 caddr_t cached;
16741 16741 dtrace_buffer_t *buf;
16742 16742
16743 16743 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16744 16744 return (EFAULT);
16745 16745
16746 16746 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
16747 16747 return (EINVAL);
16748 16748
16749 16749 mutex_enter(&dtrace_lock);
16750 16750
16751 16751 if (cmd == DTRACEIOC_BUFSNAP) {
16752 16752 buf = &state->dts_buffer[desc.dtbd_cpu];
16753 16753 } else {
16754 16754 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16755 16755 }
16756 16756
16757 16757 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16758 16758 size_t sz = buf->dtb_offset;
16759 16759
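			/*
			 * Ring and fill buffers may only be snapshotted once
			 * tracing has stopped; anything earlier is answered
			 * with EBUSY.
			 */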
16760 16760 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16761 16761 mutex_exit(&dtrace_lock);
16762 16762 return (EBUSY);
16763 16763 }
16764 16764
16765 16765 /*
16766 16766 * If this buffer has already been consumed, we're
16767 16767 * going to indicate that there's nothing left here
16768 16768 * to consume.
16769 16769 */
16770 16770 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16771 16771 mutex_exit(&dtrace_lock);
16772 16772
16773 16773 desc.dtbd_size = 0;
16774 16774 desc.dtbd_drops = 0;
16775 16775 desc.dtbd_errors = 0;
16776 16776 desc.dtbd_oldest = 0;
16777 16777 sz = sizeof (desc);
16778 16778
16779 16779 if (copyout(&desc, (void *)arg, sz) != 0)
16780 16780 return (EFAULT);
16781 16781
16782 16782 return (0);
16783 16783 }
16784 16784
16785 16785 /*
16786 16786 * If this is a ring buffer that has wrapped, we want
16787 16787 * to copy the whole thing out.
16788 16788 */
16789 16789 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16790 16790 dtrace_buffer_polish(buf);
16791 16791 sz = buf->dtb_size;
16792 16792 }
16793 16793
16794 16794 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16795 16795 mutex_exit(&dtrace_lock);
16796 16796 return (EFAULT);
16797 16797 }
16798 16798
16799 16799 desc.dtbd_size = sz;
16800 16800 desc.dtbd_drops = buf->dtb_drops;
16801 16801 desc.dtbd_errors = buf->dtb_errors;
16802 16802 desc.dtbd_oldest = buf->dtb_xamot_offset;
16803 16803 desc.dtbd_timestamp = dtrace_gethrtime();
16804 16804
16805 16805 mutex_exit(&dtrace_lock);
16806 16806
16807 16807 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16808 16808 return (EFAULT);
16809 16809
16810 16810 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16811 16811
16812 16812 return (0);
16813 16813 }
16814 16814
16815 16815 if (buf->dtb_tomax == NULL) {
16816 16816 ASSERT(buf->dtb_xamot == NULL);
16817 16817 mutex_exit(&dtrace_lock);
16818 16818 return (ENOENT);
16819 16819 }
16820 16820
16821 16821 cached = buf->dtb_tomax;
16822 16822 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16823 16823
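		/*
		 * Cross call to the target CPU to switch its active
		 * (dtb_tomax) and inactive (dtb_xamot) buffers; we then take
		 * our snapshot from the newly inactive buffer while tracing
		 * continues into the other.
		 */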
16824 16824 dtrace_xcall(desc.dtbd_cpu,
16825 16825 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16826 16826
16827 16827 state->dts_errors += buf->dtb_xamot_errors;
16828 16828
16829 16829 /*
16830 16830 * If the buffers did not actually switch, then the cross call
16831 16831 * did not take place -- presumably because the given CPU is
16832 16832 * not in the ready set. If this is the case, we'll return
16833 16833 * ENOENT.
16834 16834 */
16835 16835 if (buf->dtb_tomax == cached) {
16836 16836 ASSERT(buf->dtb_xamot != cached);
16837 16837 mutex_exit(&dtrace_lock);
16838 16838 return (ENOENT);
16839 16839 }
16840 16840
16841 16841 ASSERT(cached == buf->dtb_xamot);
16842 16842
16843 16843 /*
16844 16844 * We have our snapshot; now copy it out.
16845 16845 */
16846 16846 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16847 16847 buf->dtb_xamot_offset) != 0) {
16848 16848 mutex_exit(&dtrace_lock);
16849 16849 return (EFAULT);
16850 16850 }
16851 16851
16852 16852 desc.dtbd_size = buf->dtb_xamot_offset;
16853 16853 desc.dtbd_drops = buf->dtb_xamot_drops;
16854 16854 desc.dtbd_errors = buf->dtb_xamot_errors;
16855 16855 desc.dtbd_oldest = 0;
16856 16856 desc.dtbd_timestamp = buf->dtb_switched;
16857 16857
16858 16858 mutex_exit(&dtrace_lock);
16859 16859
16860 16860 /*
16861 16861 * Finally, copy out the buffer description.
16862 16862 */
16863 16863 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16864 16864 return (EFAULT);
16865 16865
16866 16866 return (0);
16867 16867 }
16868 16868
16869 16869 case DTRACEIOC_CONF: {
16870 16870 dtrace_conf_t conf;
16871 16871
16872 16872 bzero(&conf, sizeof (conf));
16873 16873 conf.dtc_difversion = DIF_VERSION;
16874 16874 conf.dtc_difintregs = DIF_DIR_NREGS;
16875 16875 conf.dtc_diftupregs = DIF_DTR_NREGS;
16876 16876 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16877 16877
16878 16878 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16879 16879 return (EFAULT);
16880 16880
16881 16881 return (0);
16882 16882 }
16883 16883
16884 16884 case DTRACEIOC_STATUS: {
16885 16885 dtrace_status_t stat;
16886 16886 dtrace_dstate_t *dstate;
16887 16887 int i, j;
16888 16888 uint64_t nerrs;
16889 16889
16890 16890 /*
16891 16891 * See the comment in dtrace_state_deadman() for the reason
16892 16892 * for setting dts_laststatus to INT64_MAX before setting
16893 16893 * it to the correct value.
16894 16894 */
16895 16895 state->dts_laststatus = INT64_MAX;
16896 16896 dtrace_membar_producer();
16897 16897 state->dts_laststatus = dtrace_gethrtime();
16898 16898
16899 16899 bzero(&stat, sizeof (stat));
16900 16900
16901 16901 mutex_enter(&dtrace_lock);
16902 16902
16903 16903 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16904 16904 mutex_exit(&dtrace_lock);
16905 16905 return (ENOENT);
16906 16906 }
16907 16907
16908 16908 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16909 16909 stat.dtst_exiting = 1;
16910 16910
16911 16911 nerrs = state->dts_errors;
16912 16912 dstate = &state->dts_vstate.dtvs_dynvars;
16913 16913
16914 16914 for (i = 0; i < NCPU; i++) {
16915 16915 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16916 16916
16917 16917 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16918 16918 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16919 16919 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16920 16920
16921 16921 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16922 16922 stat.dtst_filled++;
16923 16923
16924 16924 nerrs += state->dts_buffer[i].dtb_errors;
16925 16925
16926 16926 for (j = 0; j < state->dts_nspeculations; j++) {
16927 16927 dtrace_speculation_t *spec;
16928 16928 dtrace_buffer_t *buf;
16929 16929
16930 16930 spec = &state->dts_speculations[j];
16931 16931 buf = &spec->dtsp_buffer[i];
16932 16932 stat.dtst_specdrops += buf->dtb_xamot_drops;
16933 16933 }
16934 16934 }
16935 16935
16936 16936 stat.dtst_specdrops_busy = state->dts_speculations_busy;
16937 16937 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
16938 16938 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
16939 16939 stat.dtst_dblerrors = state->dts_dblerrors;
16940 16940 stat.dtst_killed =
16941 16941 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
16942 16942 stat.dtst_errors = nerrs;
16943 16943
16944 16944 mutex_exit(&dtrace_lock);
16945 16945
16946 16946 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
16947 16947 return (EFAULT);
16948 16948
16949 16949 return (0);
16950 16950 }
16951 16951
16952 16952 case DTRACEIOC_FORMAT: {
16953 16953 dtrace_fmtdesc_t fmt;
16954 16954 char *str;
16955 16955 int len;
16956 16956
16957 16957 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
16958 16958 return (EFAULT);
16959 16959
16960 16960 mutex_enter(&dtrace_lock);
16961 16961
16962 16962 if (fmt.dtfd_format == 0 ||
16963 16963 fmt.dtfd_format > state->dts_nformats) {
16964 16964 mutex_exit(&dtrace_lock);
16965 16965 return (EINVAL);
16966 16966 }
16967 16967
16968 16968 /*
16969 16969 * Format strings are allocated contiguously and they are
16970 16970 * never freed; if a format index is less than the number
16971 16971 * of formats, we can assert that the format map is non-NULL
16972 16972 * and that the format for the specified index is non-NULL.
16973 16973 */
16974 16974 ASSERT(state->dts_formats != NULL);
16975 16975 str = state->dts_formats[fmt.dtfd_format - 1];
16976 16976 ASSERT(str != NULL);
16977 16977
16978 16978 len = strlen(str) + 1;
16979 16979
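		/*
		 * If the caller's buffer is too small, copy the descriptor
		 * back out with the required length so that user-level can
		 * retry with a larger buffer.
		 */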
16980 16980 if (len > fmt.dtfd_length) {
16981 16981 fmt.dtfd_length = len;
16982 16982
16983 16983 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16984 16984 mutex_exit(&dtrace_lock);
16985 16985 return (EINVAL);
16986 16986 }
16987 16987 } else {
16988 16988 if (copyout(str, fmt.dtfd_string, len) != 0) {
16989 16989 mutex_exit(&dtrace_lock);
16990 16990 return (EINVAL);
16991 16991 }
16992 16992 }
16993 16993
16994 16994 mutex_exit(&dtrace_lock);
16995 16995 return (0);
16996 16996 }
16997 16997
16998 16998 default:
16999 16999 break;
17000 17000 }
17001 17001
17002 17002 return (ENOTTY);
17003 17003 }
17004 17004
17005 17005 /*ARGSUSED*/
17006 17006 static int
17007 17007 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
17008 17008 {
17009 17009 dtrace_state_t *state;
17010 17010
17011 17011 switch (cmd) {
17012 17012 case DDI_DETACH:
17013 17013 break;
17014 17014
17015 17015 case DDI_SUSPEND:
17016 17016 return (DDI_SUCCESS);
17017 17017
17018 17018 default:
17019 17019 return (DDI_FAILURE);
17020 17020 }
17021 17021
17022 17022 mutex_enter(&cpu_lock);
17023 17023 mutex_enter(&dtrace_provider_lock);
17024 17024 mutex_enter(&dtrace_lock);
17025 17025
17026 17026 ASSERT(dtrace_opens == 0);
17027 17027
17028 17028 if (dtrace_helpers > 0) {
17029 17029 mutex_exit(&dtrace_provider_lock);
17030 17030 mutex_exit(&dtrace_lock);
17031 17031 mutex_exit(&cpu_lock);
17032 17032 return (DDI_FAILURE);
17033 17033 }
17034 17034
17035 17035 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
17036 17036 mutex_exit(&dtrace_provider_lock);
17037 17037 mutex_exit(&dtrace_lock);
17038 17038 mutex_exit(&cpu_lock);
17039 17039 return (DDI_FAILURE);
17040 17040 }
17041 17041
17042 17042 dtrace_provider = NULL;
17043 17043
17044 17044 if ((state = dtrace_anon_grab()) != NULL) {
17045 17045 /*
17046 17046 		 * If there were ECBs on this state, the provider should
17047 17047 		 * not have been allowed to detach; assert that there are
17048 17048 		 * none.
17049 17049 */
17050 17050 ASSERT(state->dts_necbs == 0);
17051 17051 dtrace_state_destroy(state);
17052 17052
17053 17053 /*
17054 17054 * If we're being detached with anonymous state, we need to
17055 17055 * indicate to the kernel debugger that DTrace is now inactive.
17056 17056 */
17057 17057 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17058 17058 }
17059 17059
17060 17060 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
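	/*
	 * Tear down the hooks that the rest of the kernel uses to call into
	 * DTrace.
	 */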
17061 17061 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17062 17062 dtrace_cpu_init = NULL;
17063 17063 dtrace_helpers_cleanup = NULL;
17064 17064 dtrace_helpers_fork = NULL;
17065 17065 dtrace_cpustart_init = NULL;
17066 17066 dtrace_cpustart_fini = NULL;
17067 17067 dtrace_debugger_init = NULL;
17068 17068 dtrace_debugger_fini = NULL;
17069 17069 dtrace_modload = NULL;
17070 17070 dtrace_modunload = NULL;
17071 17071
17072 17072 ASSERT(dtrace_getf == 0);
17073 17073 ASSERT(dtrace_closef == NULL);
17074 17074
17075 17075 mutex_exit(&cpu_lock);
17076 17076
17077 17077 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
17078 17078 dtrace_probes = NULL;
17079 17079 dtrace_nprobes = 0;
17080 17080
17081 17081 dtrace_hash_destroy(dtrace_bymod);
17082 17082 dtrace_hash_destroy(dtrace_byfunc);
17083 17083 dtrace_hash_destroy(dtrace_byname);
17084 17084 dtrace_bymod = NULL;
17085 17085 dtrace_byfunc = NULL;
17086 17086 dtrace_byname = NULL;
17087 17087
17088 17088 kmem_cache_destroy(dtrace_state_cache);
17089 17089 vmem_destroy(dtrace_minor);
17090 17090 vmem_destroy(dtrace_arena);
17091 17091
17092 17092 if (dtrace_toxrange != NULL) {
17093 17093 kmem_free(dtrace_toxrange,
17094 17094 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
17095 17095 dtrace_toxrange = NULL;
17096 17096 dtrace_toxranges = 0;
17097 17097 dtrace_toxranges_max = 0;
17098 17098 }
17099 17099
17100 17100 ddi_remove_minor_node(dtrace_devi, NULL);
17101 17101 dtrace_devi = NULL;
17102 17102
17103 17103 ddi_soft_state_fini(&dtrace_softstate);
17104 17104
17105 17105 ASSERT(dtrace_vtime_references == 0);
17106 17106 ASSERT(dtrace_opens == 0);
17107 17107 ASSERT(dtrace_retained == NULL);
17108 17108
17109 17109 mutex_exit(&dtrace_lock);
17110 17110 mutex_exit(&dtrace_provider_lock);
17111 17111
17112 17112 /*
17113 17113 * We don't destroy the task queue until after we have dropped our
17114 17114 * locks (taskq_destroy() may block on running tasks). To prevent
17115 17115 * attempting to do work after we have effectively detached but before
17116 17116 * the task queue has been destroyed, all tasks dispatched via the
17117 17117 * task queue must check that DTrace is still attached before
17118 17118 * performing any operation.
17119 17119 */
17120 17120 taskq_destroy(dtrace_taskq);
17121 17121 dtrace_taskq = NULL;
17122 17122
17123 17123 return (DDI_SUCCESS);
17124 17124 }
17125 17125
17126 17126 /*ARGSUSED*/
17127 17127 static int
17128 17128 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
17129 17129 {
17130 17130 int error;
17131 17131
17132 17132 switch (infocmd) {
17133 17133 case DDI_INFO_DEVT2DEVINFO:
17134 17134 *result = (void *)dtrace_devi;
17135 17135 error = DDI_SUCCESS;
17136 17136 break;
17137 17137 case DDI_INFO_DEVT2INSTANCE:
17138 17138 *result = (void *)0;
17139 17139 error = DDI_SUCCESS;
17140 17140 break;
17141 17141 default:
17142 17142 error = DDI_FAILURE;
17143 17143 }
17144 17144 return (error);
17145 17145 }
17146 17146
17147 17147 static struct cb_ops dtrace_cb_ops = {
17148 17148 dtrace_open, /* open */
17149 17149 dtrace_close, /* close */
17150 17150 nulldev, /* strategy */
17151 17151 nulldev, /* print */
17152 17152 nodev, /* dump */
17153 17153 nodev, /* read */
17154 17154 nodev, /* write */
17155 17155 dtrace_ioctl, /* ioctl */
17156 17156 nodev, /* devmap */
17157 17157 nodev, /* mmap */
17158 17158 nodev, /* segmap */
17159 17159 nochpoll, /* poll */
17160 17160 ddi_prop_op, /* cb_prop_op */
17161 17161 0, /* streamtab */
17162 17162 D_NEW | D_MP /* Driver compatibility flag */
17163 17163 };
17164 17164
17165 17165 static struct dev_ops dtrace_ops = {
17166 17166 DEVO_REV, /* devo_rev */
17167 17167 0, /* refcnt */
17168 17168 dtrace_info, /* get_dev_info */
17169 17169 nulldev, /* identify */
17170 17170 nulldev, /* probe */
17171 17171 dtrace_attach, /* attach */
17172 17172 dtrace_detach, /* detach */
17173 17173 nodev, /* reset */
17174 17174 &dtrace_cb_ops, /* driver operations */
17175 17175 NULL, /* bus operations */
17176 17176 nodev, /* dev power */
17177 17177 ddi_quiesce_not_needed, /* quiesce */
17178 17178 };
17179 17179
17180 17180 static struct modldrv modldrv = {
17181 17181 &mod_driverops, /* module type (this is a pseudo driver) */
17182 17182 "Dynamic Tracing", /* name of module */
17183 17183 &dtrace_ops, /* driver ops */
17184 17184 };
17185 17185
17186 17186 static struct modlinkage modlinkage = {
17187 17187 MODREV_1,
17188 - (void *)&modldrv,
17189 - NULL
17188 + { (void *)&modldrv, NULL }
17190 17189 };
17191 17190
17192 17191 int
17193 17192 _init(void)
17194 17193 {
17195 17194 return (mod_install(&modlinkage));
17196 17195 }
17197 17196
17198 17197 int
17199 17198 _info(struct modinfo *modinfop)
17200 17199 {
17201 17200 return (mod_info(&modlinkage, modinfop));
17202 17201 }
17203 17202
17204 17203 int
17205 17204 _fini(void)
17206 17205 {
17207 17206 return (mod_remove(&modlinkage));
17208 17207 }