4474 DTrace Userland CTF Support
4475 DTrace userland Keyword
4476 DTrace tests should be better citizens
4479 pid provider types
4480 dof emulation missing checks
Reviewed by: Bryan Cantrill <bryan@joyent.com>
--- old/usr/src/uts/common/dtrace/dtrace.c
+++ new/usr/src/uts/common/dtrace/dtrace.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 25 * Copyright (c) 2012 by Delphix. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * DTrace - Dynamic Tracing for Solaris
30 30 *
31 31 * This is the implementation of the Solaris Dynamic Tracing framework
32 32 * (DTrace). The user-visible interface to DTrace is described at length in
33 33 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
34 34 * library, the in-kernel DTrace framework, and the DTrace providers are
35 35 * described in the block comments in the <sys/dtrace.h> header file. The
36 36 * internal architecture of DTrace is described in the block comments in the
37 37 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
38 38 * implementation very much assume mastery of all of these sources; if one has
39 39 * an unanswered question about the implementation, one should consult them
40 40 * first.
41 41 *
42 42 * The functions here are ordered roughly as follows:
43 43 *
44 44 * - Probe context functions
45 45 * - Probe hashing functions
46 46 * - Non-probe context utility functions
47 47 * - Matching functions
48 48 * - Provider-to-Framework API functions
49 49 * - Probe management functions
50 50 * - DIF object functions
51 51 * - Format functions
52 52 * - Predicate functions
53 53 * - ECB functions
54 54 * - Buffer functions
55 55 * - Enabling functions
56 56 * - DOF functions
57 57 * - Anonymous enabling functions
58 58 * - Consumer state functions
59 59 * - Helper functions
60 60 * - Hook functions
61 61 * - Driver cookbook functions
62 62 *
63 63 * Each group of functions begins with a block comment labelled the "DTrace
64 64 * [Group] Functions", allowing one to find each block by searching forward
65 65 * on capital-f functions.
66 66 */
67 67 #include <sys/errno.h>
68 68 #include <sys/stat.h>
69 69 #include <sys/modctl.h>
70 70 #include <sys/conf.h>
71 71 #include <sys/systm.h>
72 72 #include <sys/ddi.h>
73 73 #include <sys/sunddi.h>
74 74 #include <sys/cpuvar.h>
75 75 #include <sys/kmem.h>
76 76 #include <sys/strsubr.h>
77 77 #include <sys/sysmacros.h>
78 78 #include <sys/dtrace_impl.h>
79 79 #include <sys/atomic.h>
80 80 #include <sys/cmn_err.h>
81 81 #include <sys/mutex_impl.h>
82 82 #include <sys/rwlock_impl.h>
83 83 #include <sys/ctf_api.h>
84 84 #include <sys/panic.h>
85 85 #include <sys/priv_impl.h>
86 86 #include <sys/policy.h>
87 87 #include <sys/cred_impl.h>
88 88 #include <sys/procfs_isa.h>
89 89 #include <sys/taskq.h>
90 90 #include <sys/mkdev.h>
91 91 #include <sys/kdi.h>
92 92 #include <sys/zone.h>
93 93 #include <sys/socket.h>
94 94 #include <netinet/in.h>
95 95 #include "strtolctype.h"
96 96
97 97 /*
98 98 * DTrace Tunable Variables
99 99 *
100 100 * The following variables may be tuned by adding a line to /etc/system that
101 101 * includes both the name of the DTrace module ("dtrace") and the name of the
102 102 * variable. For example:
103 103 *
104 104 * set dtrace:dtrace_destructive_disallow = 1
105 105 *
106 106 * In general, the only variables that one should be tuning this way are those
107 107 * that affect system-wide DTrace behavior, and for which the default behavior
108 108 * is undesirable. Most of these variables are tunable on a per-consumer
109 109 * basis using DTrace options, and need not be tuned on a system-wide basis.
110 110 * When tuning these variables, avoid pathological values; while some attempt
111 111 * is made to verify the integrity of these variables, they are not considered
112 112 * part of the supported interface to DTrace, and they are therefore not
113 113 * checked comprehensively. Further, these variables should not be tuned
114 114 * dynamically via "mdb -kw" or other means; they should only be tuned via
115 115 * /etc/system.
116 116 */
117 117 int dtrace_destructive_disallow = 0;
118 118 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
119 119 size_t dtrace_difo_maxsize = (256 * 1024);
120 120 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
121 121 size_t dtrace_global_maxsize = (16 * 1024);
122 122 size_t dtrace_actions_max = (16 * 1024);
123 123 size_t dtrace_retain_max = 1024;
124 124 dtrace_optval_t dtrace_helper_actions_max = 1024;
125 125 dtrace_optval_t dtrace_helper_providers_max = 32;
126 126 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
127 127 size_t dtrace_strsize_default = 256;
128 128 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
129 129 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
130 130 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
131 131 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
132 132 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
133 133 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
134 134 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
135 135 dtrace_optval_t dtrace_nspec_default = 1;
136 136 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
137 137 dtrace_optval_t dtrace_stackframes_default = 20;
138 138 dtrace_optval_t dtrace_ustackframes_default = 20;
139 139 dtrace_optval_t dtrace_jstackframes_default = 50;
140 140 dtrace_optval_t dtrace_jstackstrsize_default = 512;
141 141 int dtrace_msgdsize_max = 128;
142 142 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
143 143 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
144 144 int dtrace_devdepth_max = 32;
145 145 int dtrace_err_verbose;
146 146 hrtime_t dtrace_deadman_interval = NANOSEC;
147 147 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
148 148 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
149 149 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
150 150
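Any of the tunables declared above can be set with an /etc/system line of the form shown in the preceding comment. For example, doubling the default DOF size limit (the value here is purely illustrative):

	set dtrace:dtrace_dof_maxsize = 524288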
151 151 /*
152 152 * DTrace External Variables
153 153 *
154 154 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
155 155 * available to DTrace consumers via the backtick (`) syntax. One of these,
156 156 * dtrace_zero, is made deliberately so: it is provided as a source of
157 157 * well-known, zero-filled memory. While this variable is not documented,
158 158 * it is used by some translators as an implementation detail.
159 159 */
160 160 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
161 161
162 162 /*
163 163 * DTrace Internal Variables
164 164 */
165 165 static dev_info_t *dtrace_devi; /* device info */
166 166 static vmem_t *dtrace_arena; /* probe ID arena */
167 167 static vmem_t *dtrace_minor; /* minor number arena */
168 168 static taskq_t *dtrace_taskq; /* task queue */
169 169 static dtrace_probe_t **dtrace_probes; /* array of all probes */
170 170 static int dtrace_nprobes; /* number of probes */
171 171 static dtrace_provider_t *dtrace_provider; /* provider list */
172 172 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
173 173 static int dtrace_opens; /* number of opens */
174 174 static int dtrace_helpers; /* number of helpers */
175 175 static int dtrace_getf; /* number of unpriv getf()s */
176 176 static void *dtrace_softstate; /* softstate pointer */
177 177 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
178 178 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
179 179 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
180 180 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
181 181 static int dtrace_toxranges; /* number of toxic ranges */
182 182 static int dtrace_toxranges_max; /* size of toxic range array */
183 183 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
184 184 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
185 185 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
186 186 static kthread_t *dtrace_panicked; /* panicking thread */
187 187 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
188 188 static dtrace_genid_t dtrace_probegen; /* current probe generation */
189 189 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
190 190 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
191 191 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
192 192 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
193 193 static int dtrace_dynvar_failclean; /* dynvars failed to clean */
194 194
195 195 /*
196 196 * DTrace Locking
197 197 * DTrace is protected by three (relatively coarse-grained) locks:
198 198 *
199 199 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
200 200 * including enabling state, probes, ECBs, consumer state, helper state,
201 201 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
202 202 * probe context is lock-free -- synchronization is handled via the
203 203 * dtrace_sync() cross call mechanism.
204 204 *
205 205 * (2) dtrace_provider_lock is required when manipulating provider state, or
206 206 * when provider state must be held constant.
207 207 *
208 208 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
209 209 * when meta provider state must be held constant.
210 210 *
211 211 * The lock ordering between these three locks is dtrace_meta_lock before
212 212 * dtrace_provider_lock before dtrace_lock. (In particular, there are
213 213 * several places where dtrace_provider_lock is held by the framework as it
214 214 * calls into the providers -- which then call back into the framework,
215 215 * grabbing dtrace_lock.)
216 216 *
217 217 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
218 218 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
219 219 * role as a coarse-grained lock; it is acquired before both of these locks.
220 220 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
221 221 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
222 222 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
223 223 * acquired _between_ dtrace_provider_lock and dtrace_lock.
224 224 */
225 225 static kmutex_t dtrace_lock; /* probe state lock */
226 226 static kmutex_t dtrace_provider_lock; /* provider state lock */
227 227 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
228 228
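A minimal sketch of the full acquisition order implied by the "DTrace Locking" comment above, assuming a hypothetical path that needed every lock at once (no single function necessarily takes all five; this only illustrates the ordering constraints among dtrace_meta_lock, cpu_lock, dtrace_provider_lock, mod_lock, and dtrace_lock):

	/* Illustrative ordering sketch only. */
	mutex_enter(&dtrace_meta_lock);
	mutex_enter(&cpu_lock);		/* between the meta lock and other DTrace locks */
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);		/* between the provider lock and dtrace_lock */
	mutex_enter(&dtrace_lock);
	/* ... critical section ... */
	mutex_exit(&dtrace_lock);
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&dtrace_meta_lock);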
229 229 /*
230 230 * DTrace Provider Variables
231 231 *
232 232 * These are the variables relating to DTrace as a provider (that is, the
233 233 * provider of the BEGIN, END, and ERROR probes).
234 234 */
235 235 static dtrace_pattr_t dtrace_provider_attr = {
236 236 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
237 237 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
238 238 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
239 239 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
240 240 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
241 241 };
242 242
243 243 static void
244 244 dtrace_nullop(void)
245 245 {}
246 246
247 247 static int
248 248 dtrace_enable_nullop(void)
249 249 {
250 250 return (0);
251 251 }
252 252
253 253 static dtrace_pops_t dtrace_provider_ops = {
254 254 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
255 255 (void (*)(void *, struct modctl *))dtrace_nullop,
256 256 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
257 257 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
258 258 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
259 259 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
260 260 NULL,
261 261 NULL,
262 262 NULL,
263 263 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
264 264 };
265 265
266 266 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
267 267 static dtrace_id_t dtrace_probeid_end; /* special END probe */
268 268 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
269 269
270 270 /*
271 271 * DTrace Helper Tracing Variables
272 272 */
273 273 uint32_t dtrace_helptrace_next = 0;
274 274 uint32_t dtrace_helptrace_nlocals;
275 275 char *dtrace_helptrace_buffer;
276 276 int dtrace_helptrace_bufsize = 512 * 1024;
277 277
278 278 #ifdef DEBUG
279 279 int dtrace_helptrace_enabled = 1;
280 280 #else
281 281 int dtrace_helptrace_enabled = 0;
282 282 #endif
283 283
284 284 /*
285 285 * DTrace Error Hashing
286 286 *
287 287 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
288 288 * table. This is very useful for checking coverage of tests that are
289 289 * expected to induce DIF or DOF processing errors, and may be useful for
290 290 * debugging problems in the DIF code generator or in DOF generation. The
291 291 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
292 292 */
293 293 #ifdef DEBUG
294 294 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
295 295 static const char *dtrace_errlast;
296 296 static kthread_t *dtrace_errthread;
297 297 static kmutex_t dtrace_errlock;
298 298 #endif
299 299
300 300 /*
301 301 * DTrace Macros and Constants
302 302 *
303 303 * These are various macros that are useful in various spots in the
304 304 * implementation, along with a few random constants that have no meaning
305 305 * outside of the implementation. There is no real structure to this cpp
306 306 * mishmash -- but is there ever?
307 307 */
308 308 #define DTRACE_HASHSTR(hash, probe) \
309 309 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
310 310
311 311 #define DTRACE_HASHNEXT(hash, probe) \
312 312 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
313 313
314 314 #define DTRACE_HASHPREV(hash, probe) \
315 315 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
316 316
317 317 #define DTRACE_HASHEQ(hash, lhs, rhs) \
318 318 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
319 319 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
320 320
321 321 #define DTRACE_AGGHASHSIZE_SLEW 17
322 322
323 323 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
324 324
325 325 /*
326 326 * The key for a thread-local variable consists of the lower 61 bits of the
327 327 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
328 328 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
329 329 * equal to a variable identifier. This is necessary (but not sufficient) to
330 330 * assure that global associative arrays never collide with thread-local
331 331 * variables. To guarantee that they cannot collide, we must also define the
332 332 * order for keying dynamic variables. That order is:
333 333 *
334 334 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
335 335 *
336 336 * Because the variable-key and the tls-key are in orthogonal spaces, there is
337 337 * no way for a global variable key signature to match a thread-local key
338 338 * signature.
339 339 */
340 340 #define DTRACE_TLS_THRKEY(where) { \
341 341 uint_t intr = 0; \
342 342 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
343 343 for (; actv; actv >>= 1) \
344 344 intr++; \
345 345 ASSERT(intr < (1 << 3)); \
346 346 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
347 347 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
348 348 }
349 349
350 350 #define DT_BSWAP_8(x) ((x) & 0xff)
351 351 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
352 352 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
353 353 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
354 354
355 355 #define DT_MASK_LO 0x00000000FFFFFFFFULL
356 356
357 357 #define DTRACE_STORE(type, tomax, offset, what) \
358 358 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
359 359
360 360 #ifndef __x86
361 361 #define DTRACE_ALIGNCHECK(addr, size, flags) \
362 362 if (addr & (size - 1)) { \
363 363 *flags |= CPU_DTRACE_BADALIGN; \
364 364 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
365 365 return (0); \
366 366 }
367 367 #else
368 368 #define DTRACE_ALIGNCHECK(addr, size, flags)
369 369 #endif
370 370
371 371 /*
372 372 * Test whether a range of memory starting at testaddr of size testsz falls
373 373 * within the range of memory described by addr, sz. We take care to avoid
374 374 * problems with overflow and underflow of the unsigned quantities, and
375 375 * disallow all negative sizes. Ranges of size 0 are allowed.
376 376 */
377 377 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
378 378 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
379 379 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
380 380 (testaddr) + (testsz) >= (testaddr))
381 381
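The following user-space sketch (hypothetical code, not taken from dtrace.c) shows why all three clauses of DTRACE_INRANGE are needed; in particular, the final clause rejects a testaddr + testsz wrap-around that the first two clauses alone would accept:

	#include <assert.h>
	#include <stdint.h>
	#include <stddef.h>

	static int
	inrange(uintptr_t testaddr, size_t testsz, uintptr_t baseaddr, size_t basesz)
	{
		return (testaddr - baseaddr < basesz &&
		    testaddr + testsz - baseaddr <= basesz &&
		    testaddr + testsz >= testaddr);
	}

	int
	main(void)
	{
		uintptr_t base = 0x1000;
		size_t bsz = 0x100;

		assert(inrange(0x1000, 0x100, base, bsz));	/* exact fit */
		assert(inrange(0x1080, 0, base, bsz));		/* size 0 is allowed */
		assert(!inrange(0x0fff, 1, base, bsz));		/* starts below base */
		assert(!inrange(0x10ff, 2, base, bsz));		/* spills past the end */
		assert(!inrange(0x1080, (size_t)-1, base, bsz)); /* wraps; only the 3rd clause catches it */
		return (0);
	}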
382 382 /*
383 383 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
384 384 * alloc_sz on the righthand side of the comparison in order to avoid overflow
385 385 * or underflow in the comparison with it. This is simpler than the INRANGE
386 386 * check above, because we know that the dtms_scratch_ptr is valid in the
387 387 * range. Allocations of size zero are allowed.
388 388 */
389 389 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
390 390 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
391 391 (mstate)->dtms_scratch_ptr >= (alloc_sz))
392 392
393 393 #define DTRACE_LOADFUNC(bits) \
394 394 /*CSTYLED*/ \
395 395 uint##bits##_t \
396 396 dtrace_load##bits(uintptr_t addr) \
397 397 { \
398 398 size_t size = bits / NBBY; \
399 399 /*CSTYLED*/ \
400 400 uint##bits##_t rval; \
401 401 int i; \
402 402 volatile uint16_t *flags = (volatile uint16_t *) \
403 403 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
404 404 \
405 405 DTRACE_ALIGNCHECK(addr, size, flags); \
406 406 \
407 407 for (i = 0; i < dtrace_toxranges; i++) { \
408 408 if (addr >= dtrace_toxrange[i].dtt_limit) \
409 409 continue; \
410 410 \
411 411 if (addr + size <= dtrace_toxrange[i].dtt_base) \
412 412 continue; \
413 413 \
414 414 /* \
415 415 * This address falls within a toxic region; return 0. \
416 416 */ \
417 417 *flags |= CPU_DTRACE_BADADDR; \
418 418 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
419 419 return (0); \
420 420 } \
421 421 \
422 422 *flags |= CPU_DTRACE_NOFAULT; \
423 423 /*CSTYLED*/ \
424 424 rval = *((volatile uint##bits##_t *)addr); \
425 425 *flags &= ~CPU_DTRACE_NOFAULT; \
426 426 \
427 427 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
428 428 }
429 429
430 430 #ifdef _LP64
431 431 #define dtrace_loadptr dtrace_load64
432 432 #else
433 433 #define dtrace_loadptr dtrace_load32
434 434 #endif
435 435
436 436 #define DTRACE_DYNHASH_FREE 0
437 437 #define DTRACE_DYNHASH_SINK 1
438 438 #define DTRACE_DYNHASH_VALID 2
439 439
440 440 #define DTRACE_MATCH_FAIL -1
441 441 #define DTRACE_MATCH_NEXT 0
442 442 #define DTRACE_MATCH_DONE 1
443 443 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
444 444 #define DTRACE_STATE_ALIGN 64
445 445
446 446 #define DTRACE_FLAGS2FLT(flags) \
447 447 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
448 448 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
449 449 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
450 450 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
451 451 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
452 452 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
453 453 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
454 454 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
455 455 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
456 456 DTRACEFLT_UNKNOWN)
457 457
458 458 #define DTRACEACT_ISSTRING(act) \
459 459 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
460 460 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
461 461
462 462 static size_t dtrace_strlen(const char *, size_t);
463 463 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
464 464 static void dtrace_enabling_provide(dtrace_provider_t *);
465 465 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
466 466 static void dtrace_enabling_matchall(void);
467 467 static void dtrace_enabling_reap(void);
468 468 static dtrace_state_t *dtrace_anon_grab(void);
469 469 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
470 470 dtrace_state_t *, uint64_t, uint64_t);
471 471 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
472 472 static void dtrace_buffer_drop(dtrace_buffer_t *);
473 473 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
474 474 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
475 475 dtrace_state_t *, dtrace_mstate_t *);
476 476 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
477 477 dtrace_optval_t);
478 478 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
479 479 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
480 480 static int dtrace_priv_proc(dtrace_state_t *, dtrace_mstate_t *);
481 481 static void dtrace_getf_barrier(void);
482 482
483 483 /*
484 484 * DTrace Probe Context Functions
485 485 *
486 486 * These functions are called from probe context. Because probe context is
487 487 * any context in which C may be called, arbitrary locks may be held,
488 488 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
489 489 * As a result, functions called from probe context may only call other DTrace
490 490 * support functions -- they may not interact at all with the system at large.
491 491 * (Note that the ASSERT macro is made probe-context safe by redefining it in
492 492 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
493 493 * loads are to be performed from probe context, they _must_ be in terms of
494 494 * the safe dtrace_load*() variants.
495 495 *
496 496 * Some functions in this block are not actually called from probe context;
497 497 * for these functions, there will be a comment above the function reading
498 498 * "Note: not called from probe context."
499 499 */
500 500 void
501 501 dtrace_panic(const char *format, ...)
502 502 {
503 503 va_list alist;
504 504
505 505 va_start(alist, format);
506 506 dtrace_vpanic(format, alist);
507 507 va_end(alist);
508 508 }
509 509
510 510 int
511 511 dtrace_assfail(const char *a, const char *f, int l)
512 512 {
513 513 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
514 514
515 515 /*
516 516 * We just need something here that even the most clever compiler
517 517 * cannot optimize away.
518 518 */
519 519 return (a[(uintptr_t)f]);
520 520 }
521 521
522 522 /*
523 523 * Atomically increment a specified error counter from probe context.
524 524 */
525 525 static void
526 526 dtrace_error(uint32_t *counter)
527 527 {
528 528 /*
529 529 * Most counters stored to in probe context are per-CPU counters.
530 530 * However, there are some error conditions that are sufficiently
531 531 * arcane that they don't merit per-CPU storage. If these counters
532 532 * are incremented concurrently on different CPUs, scalability will be
533 533 * adversely affected -- but we don't expect them to be white-hot in a
534 534 * correctly constructed enabling...
535 535 */
536 536 uint32_t oval, nval;
537 537
538 538 do {
539 539 oval = *counter;
540 540
541 541 if ((nval = oval + 1) == 0) {
542 542 /*
543 543 * If the counter would wrap, set it to 1 -- assuring
544 544 * that the counter is never zero when we have seen
545 545 * errors. (The counter must be 32-bits because we
546 546 * aren't guaranteed a 64-bit compare&swap operation.)
547 547 * To save this code both the infamy of being fingered
548 548 * by a priggish news story and the indignity of being
549 549 * the target of a neo-puritan witch trial, we're
550 550 * carefully avoiding any colorful description of the
551 551 * likelihood of this condition -- but suffice it to
552 552 * say that it is only slightly more likely than the
553 553 * overflow of predicate cache IDs, as discussed in
554 554 * dtrace_predicate_create().
555 555 */
556 556 nval = 1;
557 557 }
558 558 } while (dtrace_cas32(counter, oval, nval) != oval);
559 559 }
560 560
561 561 /*
562 562 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
563 563 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
564 564 */
565 565 DTRACE_LOADFUNC(8)
566 566 DTRACE_LOADFUNC(16)
567 567 DTRACE_LOADFUNC(32)
568 568 DTRACE_LOADFUNC(64)
569 569
570 570 static int
571 571 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
572 572 {
573 573 if (dest < mstate->dtms_scratch_base)
574 574 return (0);
575 575
576 576 if (dest + size < dest)
577 577 return (0);
578 578
579 579 if (dest + size > mstate->dtms_scratch_ptr)
580 580 return (0);
581 581
582 582 return (1);
583 583 }
584 584
585 585 static int
586 586 dtrace_canstore_statvar(uint64_t addr, size_t sz,
587 587 dtrace_statvar_t **svars, int nsvars)
588 588 {
589 589 int i;
590 590
591 591 for (i = 0; i < nsvars; i++) {
592 592 dtrace_statvar_t *svar = svars[i];
593 593
594 594 if (svar == NULL || svar->dtsv_size == 0)
595 595 continue;
596 596
597 597 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
598 598 return (1);
599 599 }
600 600
601 601 return (0);
602 602 }
603 603
604 604 /*
605 605 * Check to see if the address is within a memory region to which a store may
606 606 * be issued. This includes the DTrace scratch areas, and any DTrace variable
607 607 * region. The caller of dtrace_canstore() is responsible for performing any
608 608 * alignment checks that are needed before stores are actually executed.
609 609 */
610 610 static int
611 611 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
612 612 dtrace_vstate_t *vstate)
613 613 {
614 614 /*
615 615 * First, check to see if the address is in scratch space...
616 616 */
617 617 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
618 618 mstate->dtms_scratch_size))
619 619 return (1);
620 620
621 621 /*
622 622 * Now check to see if it's a dynamic variable. This check will pick
623 623 * up both thread-local variables and any global dynamically-allocated
624 624 * variables.
625 625 */
626 626 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
627 627 vstate->dtvs_dynvars.dtds_size)) {
628 628 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
629 629 uintptr_t base = (uintptr_t)dstate->dtds_base +
630 630 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
631 631 uintptr_t chunkoffs;
632 632
633 633 /*
634 634 * Before we assume that we can store here, we need to make
635 635 * sure that it isn't in our metadata -- storing to our
636 636 * dynamic variable metadata would corrupt our state. For
637 637 * the range to not include any dynamic variable metadata,
638 638 * it must:
639 639 *
640 640 * (1) Start above the hash table that is at the base of
641 641 * the dynamic variable space
642 642 *
643 643 * (2) Have a starting chunk offset that is beyond the
644 644 * dtrace_dynvar_t that is at the base of every chunk
645 645 *
646 646 * (3) Not span a chunk boundary
647 647 *
648 648 */
649 649 if (addr < base)
650 650 return (0);
651 651
652 652 chunkoffs = (addr - base) % dstate->dtds_chunksize;
653 653
654 654 if (chunkoffs < sizeof (dtrace_dynvar_t))
655 655 return (0);
656 656
657 657 if (chunkoffs + sz > dstate->dtds_chunksize)
658 658 return (0);
659 659
660 660 return (1);
661 661 }
662 662
663 663 /*
664 664 * Finally, check the static local and global variables. These checks
665 665 * take the longest, so we perform them last.
666 666 */
667 667 if (dtrace_canstore_statvar(addr, sz,
668 668 vstate->dtvs_locals, vstate->dtvs_nlocals))
669 669 return (1);
670 670
671 671 if (dtrace_canstore_statvar(addr, sz,
672 672 vstate->dtvs_globals, vstate->dtvs_nglobals))
673 673 return (1);
674 674
675 675 return (0);
676 676 }
677 677
678 678
679 679 /*
680 680 * Convenience routine to check to see if the address is within a memory
681 681 * region in which a load may be issued given the user's privilege level;
682 682 * if not, it sets the appropriate error flags and loads 'addr' into the
683 683 * illegal value slot.
684 684 *
685 685 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
686 686 * appropriate memory access protection.
687 687 */
688 688 static int
689 689 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
690 690 dtrace_vstate_t *vstate)
691 691 {
692 692 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
693 693 file_t *fp;
694 694
695 695 /*
696 696 * If we hold the privilege to read from kernel memory, then
697 697 * everything is readable.
698 698 */
699 699 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
700 700 return (1);
701 701
702 702 /*
703 703 * You can obviously read that which you can store.
704 704 */
705 705 if (dtrace_canstore(addr, sz, mstate, vstate))
706 706 return (1);
707 707
708 708 /*
709 709 * We're allowed to read from our own string table.
710 710 */
711 711 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
712 712 mstate->dtms_difo->dtdo_strlen))
713 713 return (1);
714 714
715 715 if (vstate->dtvs_state != NULL &&
716 716 dtrace_priv_proc(vstate->dtvs_state, mstate)) {
717 717 proc_t *p;
718 718
719 719 /*
720 720 * When we have privileges to the current process, there are
721 721 * several context-related kernel structures that are safe to
722 722 * read, even absent the privilege to read from kernel memory.
723 723 * These reads are safe because these structures contain only
724 724 * state that (1) we're permitted to read, (2) is harmless or
725 725 * (3) contains pointers to additional kernel state that we're
726 726 * not permitted to read (and as such, do not present an
727 727 * opportunity for privilege escalation). Finally (and
728 728 * critically), because of the nature of their relation with
729 729 * the current thread context, the memory associated with these
730 730 * structures cannot change over the duration of probe context,
731 731 * and it is therefore impossible for this memory to be
732 732 * deallocated and reallocated as something else while it's
733 733 * being operated upon.
734 734 */
735 735 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t)))
736 736 return (1);
737 737
738 738 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
739 739 sz, curthread->t_procp, sizeof (proc_t))) {
740 740 return (1);
741 741 }
742 742
743 743 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
744 744 curthread->t_cred, sizeof (cred_t))) {
745 745 return (1);
746 746 }
747 747
748 748 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
749 749 &(p->p_pidp->pid_id), sizeof (pid_t))) {
750 750 return (1);
751 751 }
752 752
753 753 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
754 754 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
755 755 return (1);
756 756 }
757 757 }
758 758
759 759 if ((fp = mstate->dtms_getf) != NULL) {
760 760 uintptr_t psz = sizeof (void *);
761 761 vnode_t *vp;
762 762 vnodeops_t *op;
763 763
764 764 /*
765 765 * When getf() returns a file_t, the enabling is implicitly
766 766 * granted the (transient) right to read the returned file_t
767 767 * as well as the v_path and v_op->vnop_name of the underlying
768 768 * vnode. These accesses are allowed after a successful
769 769 * getf() because the members that they refer to cannot change
770 770 * once set -- and the barrier logic in the kernel's closef()
771 771 * path assures that the file_t and its referenced vnode_t
772 772 * cannot themselves be stale (that is, it is impossible for
773 773 * either dtms_getf itself or its f_vnode member to reference
774 774 * freed memory).
775 775 */
776 776 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t)))
777 777 return (1);
778 778
779 779 if ((vp = fp->f_vnode) != NULL) {
780 780 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz))
781 781 return (1);
782 782
783 783 if (vp->v_path != NULL && DTRACE_INRANGE(addr, sz,
784 784 vp->v_path, strlen(vp->v_path) + 1)) {
785 785 return (1);
786 786 }
787 787
788 788 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz))
789 789 return (1);
790 790
791 791 if ((op = vp->v_op) != NULL &&
792 792 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
793 793 return (1);
794 794 }
795 795
796 796 if (op != NULL && op->vnop_name != NULL &&
797 797 DTRACE_INRANGE(addr, sz, op->vnop_name,
798 798 strlen(op->vnop_name) + 1)) {
799 799 return (1);
800 800 }
801 801 }
802 802 }
803 803
804 804 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
805 805 *illval = addr;
806 806 return (0);
807 807 }
808 808
809 809 /*
810 810 * Convenience routine to check to see if a given string is within a memory
811 811 * region in which a load may be issued given the user's privilege level;
812 812 * this exists so that we don't need to issue unnecessary dtrace_strlen()
813 813 * calls in the event that the user has all privileges.
814 814 */
815 815 static int
816 816 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
817 817 dtrace_vstate_t *vstate)
818 818 {
819 819 size_t strsz;
820 820
821 821 /*
822 822 * If we hold the privilege to read from kernel memory, then
823 823 * everything is readable.
824 824 */
825 825 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
826 826 return (1);
827 827
828 828 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
829 829 if (dtrace_canload(addr, strsz, mstate, vstate))
830 830 return (1);
831 831
832 832 return (0);
833 833 }
834 834
835 835 /*
836 836 * Convenience routine to check to see if a given variable is within a memory
837 837 * region in which a load may be issued given the user's privilege level.
838 838 */
839 839 static int
840 840 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
841 841 dtrace_vstate_t *vstate)
842 842 {
843 843 size_t sz;
844 844 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
845 845
846 846 /*
847 847 * If we hold the privilege to read from kernel memory, then
848 848 * everything is readable.
849 849 */
850 850 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
851 851 return (1);
852 852
853 853 if (type->dtdt_kind == DIF_TYPE_STRING)
854 854 sz = dtrace_strlen(src,
855 855 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
856 856 else
857 857 sz = type->dtdt_size;
858 858
859 859 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
860 860 }
861 861
862 862 /*
863 863 * Convert a string to a signed integer using safe loads.
864 864 *
865 865 * NOTE: This function uses various macros from strtolctype.h to manipulate
866 866 * digit values, etc -- these have all been checked to ensure they make
867 867 * no additional function calls.
868 868 */
869 869 static int64_t
870 870 dtrace_strtoll(char *input, int base, size_t limit)
871 871 {
872 872 uintptr_t pos = (uintptr_t)input;
873 873 int64_t val = 0;
874 874 int x;
875 875 boolean_t neg = B_FALSE;
876 876 char c, cc, ccc;
877 877 uintptr_t end = pos + limit;
878 878
879 879 /*
880 880 * Consume any whitespace preceding digits.
881 881 */
882 882 while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
883 883 pos++;
884 884
885 885 /*
886 886 * Handle an explicit sign if one is present.
887 887 */
888 888 if (c == '-' || c == '+') {
889 889 if (c == '-')
890 890 neg = B_TRUE;
891 891 c = dtrace_load8(++pos);
892 892 }
893 893
894 894 /*
895 895 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
896 896 * if present.
897 897 */
898 898 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
899 899 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
900 900 pos += 2;
901 901 c = ccc;
902 902 }
903 903
904 904 /*
905 905 * Read in contiguous digits until the first non-digit character.
906 906 */
907 907 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
908 908 c = dtrace_load8(++pos))
909 909 val = val * base + x;
910 910
911 911 return (neg ? -val : val);
912 912 }
913 913
914 914 /*
915 915 * Compare two strings using safe loads.
916 916 */
917 917 static int
918 918 dtrace_strncmp(char *s1, char *s2, size_t limit)
919 919 {
920 920 uint8_t c1, c2;
921 921 volatile uint16_t *flags;
922 922
923 923 if (s1 == s2 || limit == 0)
924 924 return (0);
925 925
926 926 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
927 927
928 928 do {
929 929 if (s1 == NULL) {
930 930 c1 = '\0';
931 931 } else {
932 932 c1 = dtrace_load8((uintptr_t)s1++);
933 933 }
934 934
935 935 if (s2 == NULL) {
936 936 c2 = '\0';
937 937 } else {
938 938 c2 = dtrace_load8((uintptr_t)s2++);
939 939 }
940 940
941 941 if (c1 != c2)
942 942 return (c1 - c2);
943 943 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
944 944
945 945 return (0);
946 946 }
947 947
948 948 /*
949 949 * Compute strlen(s) for a string using safe memory accesses. The additional
950 950 * len parameter is used to specify a maximum length to ensure completion.
951 951 * lim parameter is used to specify a maximum length to ensure completion.
952 952 static size_t
953 953 dtrace_strlen(const char *s, size_t lim)
954 954 {
955 955 uint_t len;
956 956
957 957 for (len = 0; len != lim; len++) {
958 958 if (dtrace_load8((uintptr_t)s++) == '\0')
959 959 break;
960 960 }
961 961
962 962 return (len);
963 963 }
964 964
965 965 /*
966 966 * Check if an address falls within a toxic region.
967 967 */
968 968 static int
969 969 dtrace_istoxic(uintptr_t kaddr, size_t size)
970 970 {
971 971 uintptr_t taddr, tsize;
972 972 int i;
973 973
974 974 for (i = 0; i < dtrace_toxranges; i++) {
975 975 taddr = dtrace_toxrange[i].dtt_base;
976 976 tsize = dtrace_toxrange[i].dtt_limit - taddr;
977 977
978 978 if (kaddr - taddr < tsize) {
979 979 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
980 980 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
981 981 return (1);
982 982 }
983 983
984 984 if (taddr - kaddr < size) {
985 985 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
986 986 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
987 987 return (1);
988 988 }
989 989 }
990 990
991 991 return (0);
992 992 }
993 993
994 994 /*
995 995 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
996 996 * memory specified by the DIF program. The dst is assumed to be safe memory
997 997 * that we can store to directly because it is managed by DTrace. As with
998 998 * standard bcopy, overlapping copies are handled properly.
999 999 */
1000 1000 static void
1001 1001 dtrace_bcopy(const void *src, void *dst, size_t len)
1002 1002 {
1003 1003 if (len != 0) {
1004 1004 uint8_t *s1 = dst;
1005 1005 const uint8_t *s2 = src;
1006 1006
1007 1007 if (s1 <= s2) {
1008 1008 do {
1009 1009 *s1++ = dtrace_load8((uintptr_t)s2++);
1010 1010 } while (--len != 0);
1011 1011 } else {
1012 1012 s2 += len;
1013 1013 s1 += len;
1014 1014
1015 1015 do {
1016 1016 *--s1 = dtrace_load8((uintptr_t)--s2);
1017 1017 } while (--len != 0);
1018 1018 }
1019 1019 }
1020 1020 }
1021 1021
1022 1022 /*
1023 1023 * Copy src to dst using safe memory accesses, up to either the specified
1024 1024 * length, or the point that a nul byte is encountered. The src is assumed to
1025 1025 * be unsafe memory specified by the DIF program. The dst is assumed to be
1026 1026 * safe memory that we can store to directly because it is managed by DTrace.
1027 1027 * Unlike dtrace_bcopy(), overlapping regions are not handled.
1028 1028 */
1029 1029 static void
1030 1030 dtrace_strcpy(const void *src, void *dst, size_t len)
1031 1031 {
1032 1032 if (len != 0) {
1033 1033 uint8_t *s1 = dst, c;
1034 1034 const uint8_t *s2 = src;
1035 1035
1036 1036 do {
1037 1037 *s1++ = c = dtrace_load8((uintptr_t)s2++);
1038 1038 } while (--len != 0 && c != '\0');
1039 1039 }
1040 1040 }
1041 1041
1042 1042 /*
1043 1043 * Copy src to dst, deriving the size and type from the specified (BYREF)
1044 1044 * variable type. The src is assumed to be unsafe memory specified by the DIF
1045 1045 * program. The dst is assumed to be DTrace variable memory that is of the
1046 1046 * specified type; we assume that we can store to directly.
1047 1047 */
1048 1048 static void
1049 1049 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
1050 1050 {
1051 1051 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1052 1052
1053 1053 if (type->dtdt_kind == DIF_TYPE_STRING) {
1054 1054 dtrace_strcpy(src, dst, type->dtdt_size);
1055 1055 } else {
1056 1056 dtrace_bcopy(src, dst, type->dtdt_size);
1057 1057 }
1058 1058 }
1059 1059
1060 1060 /*
1061 1061 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1062 1062 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1063 1063 * safe memory that we can access directly because it is managed by DTrace.
1064 1064 */
1065 1065 static int
1066 1066 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1067 1067 {
1068 1068 volatile uint16_t *flags;
1069 1069
1070 1070 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1071 1071
1072 1072 if (s1 == s2)
1073 1073 return (0);
1074 1074
1075 1075 if (s1 == NULL || s2 == NULL)
1076 1076 return (1);
1077 1077
1078 1078 if (s1 != s2 && len != 0) {
1079 1079 const uint8_t *ps1 = s1;
1080 1080 const uint8_t *ps2 = s2;
1081 1081
1082 1082 do {
1083 1083 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1084 1084 return (1);
1085 1085 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1086 1086 }
1087 1087 return (0);
1088 1088 }
1089 1089
1090 1090 /*
1091 1091 * Zero the specified region using a simple byte-by-byte loop. Note that this
1092 1092 * is for safe DTrace-managed memory only.
1093 1093 */
1094 1094 static void
1095 1095 dtrace_bzero(void *dst, size_t len)
1096 1096 {
1097 1097 uchar_t *cp;
1098 1098
1099 1099 for (cp = dst; len != 0; len--)
1100 1100 *cp++ = 0;
1101 1101 }
1102 1102
1103 1103 static void
1104 1104 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1105 1105 {
1106 1106 uint64_t result[2];
1107 1107
1108 1108 result[0] = addend1[0] + addend2[0];
1109 1109 result[1] = addend1[1] + addend2[1] +
1110 1110 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1111 1111
1112 1112 sum[0] = result[0];
1113 1113 sum[1] = result[1];
1114 1114 }
1115 1115
1116 1116 /*
1117 1117 * Shift the 128-bit value in a by b. If b is positive, shift left.
1118 1118 * If b is negative, shift right.
1119 1119 */
1120 1120 static void
1121 1121 dtrace_shift_128(uint64_t *a, int b)
1122 1122 {
1123 1123 uint64_t mask;
1124 1124
1125 1125 if (b == 0)
1126 1126 return;
1127 1127
1128 1128 if (b < 0) {
1129 1129 b = -b;
1130 1130 if (b >= 64) {
1131 1131 a[0] = a[1] >> (b - 64);
1132 1132 a[1] = 0;
1133 1133 } else {
1134 1134 a[0] >>= b;
1135 1135 mask = 1LL << (64 - b);
1136 1136 mask -= 1;
1137 1137 a[0] |= ((a[1] & mask) << (64 - b));
1138 1138 a[1] >>= b;
1139 1139 }
1140 1140 } else {
1141 1141 if (b >= 64) {
1142 1142 a[1] = a[0] << (b - 64);
1143 1143 a[0] = 0;
1144 1144 } else {
1145 1145 a[1] <<= b;
1146 1146 mask = a[0] >> (64 - b);
1147 1147 a[1] |= mask;
1148 1148 a[0] <<= b;
1149 1149 }
1150 1150 }
1151 1151 }
1152 1152
1153 1153 /*
1154 1154 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1155 1155 * use native multiplication on those, and then re-combine into the
1156 1156 * resulting 128-bit value.
1157 1157 *
1158 1158 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1159 1159 * hi1 * hi2 << 64 +
1160 1160 * hi1 * lo2 << 32 +
1161 1161 * hi2 * lo1 << 32 +
1162 1162 * lo1 * lo2
1163 1163 */
1164 1164 static void
1165 1165 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1166 1166 {
1167 1167 uint64_t hi1, hi2, lo1, lo2;
1168 1168 uint64_t tmp[2];
1169 1169
1170 1170 hi1 = factor1 >> 32;
1171 1171 hi2 = factor2 >> 32;
1172 1172
1173 1173 lo1 = factor1 & DT_MASK_LO;
1174 1174 lo2 = factor2 & DT_MASK_LO;
1175 1175
1176 1176 product[0] = lo1 * lo2;
1177 1177 product[1] = hi1 * hi2;
1178 1178
1179 1179 tmp[0] = hi1 * lo2;
1180 1180 tmp[1] = 0;
1181 1181 dtrace_shift_128(tmp, 32);
1182 1182 dtrace_add_128(product, tmp, product);
1183 1183
1184 1184 tmp[0] = hi2 * lo1;
1185 1185 tmp[1] = 0;
1186 1186 dtrace_shift_128(tmp, 32);
1187 1187 dtrace_add_128(product, tmp, product);
1188 1188 }
1189 1189
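A short user-space sketch (hypothetical, not part of dtrace.c) that verifies the four-partial-product decomposition described above against a native 128-bit multiply; each partial product is a 32x32 multiplication, so none of the 64-bit intermediates can overflow:

	#include <assert.h>
	#include <stdint.h>

	static unsigned __int128
	mul128(uint64_t f1, uint64_t f2)
	{
		uint64_t hi1 = f1 >> 32, lo1 = (uint32_t)f1;
		uint64_t hi2 = f2 >> 32, lo2 = (uint32_t)f2;

		/* (hi1*hi2) << 64 + (hi1*lo2) << 32 + (hi2*lo1) << 32 + lo1*lo2 */
		return (((unsigned __int128)(hi1 * hi2) << 64) +
		    ((unsigned __int128)(hi1 * lo2) << 32) +
		    ((unsigned __int128)(hi2 * lo1) << 32) +
		    (lo1 * lo2));
	}

	int
	main(void)
	{
		uint64_t a = 0xdeadbeefcafebabeULL, b = 0x0123456789abcdefULL;

		assert(mul128(a, b) == (unsigned __int128)a * b);
		return (0);
	}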
1190 1190 /*
1191 1191 * This privilege check should be used by actions and subroutines to
1192 1192 * verify that the user credentials of the process that enabled the
1193 1193 * invoking ECB match the target credentials
1194 1194 */
1195 1195 static int
1196 1196 dtrace_priv_proc_common_user(dtrace_state_t *state)
1197 1197 {
1198 1198 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1199 1199
1200 1200 /*
1201 1201 * We should always have a non-NULL state cred here, since if cred
1202 1202 * is null (anonymous tracing), we fast-path bypass this routine.
1203 1203 */
1204 1204 ASSERT(s_cr != NULL);
1205 1205
1206 1206 if ((cr = CRED()) != NULL &&
1207 1207 s_cr->cr_uid == cr->cr_uid &&
1208 1208 s_cr->cr_uid == cr->cr_ruid &&
1209 1209 s_cr->cr_uid == cr->cr_suid &&
1210 1210 s_cr->cr_gid == cr->cr_gid &&
1211 1211 s_cr->cr_gid == cr->cr_rgid &&
1212 1212 s_cr->cr_gid == cr->cr_sgid)
1213 1213 return (1);
1214 1214
1215 1215 return (0);
1216 1216 }
1217 1217
1218 1218 /*
1219 1219 * This privilege check should be used by actions and subroutines to
1220 1220 * verify that the zone of the process that enabled the invoking ECB
1221 1221 * matches the target credentials
1222 1222 */
1223 1223 static int
1224 1224 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1225 1225 {
1226 1226 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1227 1227
1228 1228 /*
1229 1229 * We should always have a non-NULL state cred here, since if cred
1230 1230 * is null (anonymous tracing), we fast-path bypass this routine.
1231 1231 */
1232 1232 ASSERT(s_cr != NULL);
1233 1233
1234 1234 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
1235 1235 return (1);
1236 1236
1237 1237 return (0);
1238 1238 }
1239 1239
1240 1240 /*
1241 1241 * This privilege check should be used by actions and subroutines to
1242 1242 * verify that the process has not setuid or changed credentials.
1243 1243 */
1244 1244 static int
1245 1245 dtrace_priv_proc_common_nocd()
1246 1246 {
1247 1247 proc_t *proc;
1248 1248
1249 1249 if ((proc = ttoproc(curthread)) != NULL &&
1250 1250 !(proc->p_flag & SNOCD))
1251 1251 return (1);
1252 1252
1253 1253 return (0);
1254 1254 }
1255 1255
1256 1256 static int
1257 1257 dtrace_priv_proc_destructive(dtrace_state_t *state, dtrace_mstate_t *mstate)
1258 1258 {
1259 1259 int action = state->dts_cred.dcr_action;
1260 1260
1261 1261 if (!(mstate->dtms_access & DTRACE_ACCESS_PROC))
1262 1262 goto bad;
1263 1263
1264 1264 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1265 1265 dtrace_priv_proc_common_zone(state) == 0)
1266 1266 goto bad;
1267 1267
1268 1268 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1269 1269 dtrace_priv_proc_common_user(state) == 0)
1270 1270 goto bad;
1271 1271
1272 1272 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1273 1273 dtrace_priv_proc_common_nocd() == 0)
1274 1274 goto bad;
1275 1275
1276 1276 return (1);
1277 1277
1278 1278 bad:
1279 1279 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1280 1280
1281 1281 return (0);
1282 1282 }
1283 1283
1284 1284 static int
1285 1285 dtrace_priv_proc_control(dtrace_state_t *state, dtrace_mstate_t *mstate)
1286 1286 {
1287 1287 if (mstate->dtms_access & DTRACE_ACCESS_PROC) {
1288 1288 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1289 1289 return (1);
1290 1290
1291 1291 if (dtrace_priv_proc_common_zone(state) &&
1292 1292 dtrace_priv_proc_common_user(state) &&
1293 1293 dtrace_priv_proc_common_nocd())
1294 1294 return (1);
1295 1295 }
1296 1296
1297 1297 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1298 1298
1299 1299 return (0);
1300 1300 }
1301 1301
1302 1302 static int
1303 1303 dtrace_priv_proc(dtrace_state_t *state, dtrace_mstate_t *mstate)
1304 1304 {
1305 1305 if ((mstate->dtms_access & DTRACE_ACCESS_PROC) &&
1306 1306 (state->dts_cred.dcr_action & DTRACE_CRA_PROC))
1307 1307 return (1);
1308 1308
1309 1309 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1310 1310
1311 1311 return (0);
1312 1312 }
1313 1313
1314 1314 static int
1315 1315 dtrace_priv_kernel(dtrace_state_t *state)
1316 1316 {
1317 1317 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1318 1318 return (1);
1319 1319
1320 1320 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1321 1321
1322 1322 return (0);
1323 1323 }
1324 1324
1325 1325 static int
1326 1326 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1327 1327 {
1328 1328 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1329 1329 return (1);
1330 1330
1331 1331 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1332 1332
1333 1333 return (0);
1334 1334 }
1335 1335
1336 1336 /*
1337 1337 * Determine if the dte_cond of the specified ECB allows for processing of
1338 1338 * the current probe to continue. Note that this routine may allow continued
1339 1339 * processing, but with access(es) stripped from the mstate's dtms_access
1340 1340 * field.
1341 1341 */
1342 1342 static int
1343 1343 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1344 1344 dtrace_ecb_t *ecb)
1345 1345 {
1346 1346 dtrace_probe_t *probe = ecb->dte_probe;
1347 1347 dtrace_provider_t *prov = probe->dtpr_provider;
1348 1348 dtrace_pops_t *pops = &prov->dtpv_pops;
1349 1349 int mode = DTRACE_MODE_NOPRIV_DROP;
1350 1350
1351 1351 ASSERT(ecb->dte_cond);
1352 1352
1353 1353 if (pops->dtps_mode != NULL) {
1354 1354 mode = pops->dtps_mode(prov->dtpv_arg,
1355 1355 probe->dtpr_id, probe->dtpr_arg);
1356 1356
1357 1357 ASSERT(mode & (DTRACE_MODE_USER | DTRACE_MODE_KERNEL));
1358 1358 ASSERT(mode & (DTRACE_MODE_NOPRIV_RESTRICT |
1359 1359 DTRACE_MODE_NOPRIV_DROP));
1360 1360 }
1361 1361
1362 1362 /*
1363 1363 * If the dte_cond bits indicate that this consumer is only allowed to
1364 1364 * see user-mode firings of this probe, check that the probe was fired
1365 1365 * while in a user context. If that's not the case, use the policy
1366 1366 * specified by the provider to determine if we drop the probe or
1367 1367 * merely restrict operation.
1368 1368 */
1369 1369 if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1370 1370 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1371 1371
1372 1372 if (!(mode & DTRACE_MODE_USER)) {
1373 1373 if (mode & DTRACE_MODE_NOPRIV_DROP)
1374 1374 return (0);
1375 1375
1376 1376 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1377 1377 }
1378 1378 }
1379 1379
1380 1380 /*
1381 1381 * This is more subtle than it looks. We have to be absolutely certain
1382 1382 * that CRED() isn't going to change out from under us so it's only
1383 1383 * legit to examine that structure if we're in constrained situations.
1384 1384 * Currently, the only time we'll do this check is if a non-super-user
1385 1385 * has enabled the profile or syscall providers -- providers that
1386 1386 * allow visibility of all processes. For the profile case, the check
1387 1387 * above will ensure that we're examining a user context.
1388 1388 */
1389 1389 if (ecb->dte_cond & DTRACE_COND_OWNER) {
1390 1390 cred_t *cr;
1391 1391 cred_t *s_cr = state->dts_cred.dcr_cred;
1392 1392 proc_t *proc;
1393 1393
1394 1394 ASSERT(s_cr != NULL);
1395 1395
1396 1396 if ((cr = CRED()) == NULL ||
1397 1397 s_cr->cr_uid != cr->cr_uid ||
1398 1398 s_cr->cr_uid != cr->cr_ruid ||
1399 1399 s_cr->cr_uid != cr->cr_suid ||
1400 1400 s_cr->cr_gid != cr->cr_gid ||
1401 1401 s_cr->cr_gid != cr->cr_rgid ||
1402 1402 s_cr->cr_gid != cr->cr_sgid ||
1403 1403 (proc = ttoproc(curthread)) == NULL ||
1404 1404 (proc->p_flag & SNOCD)) {
1405 1405 if (mode & DTRACE_MODE_NOPRIV_DROP)
1406 1406 return (0);
1407 1407
1408 1408 mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1409 1409 }
1410 1410 }
1411 1411
1412 1412 /*
1413 1413 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1414 1414 * in our zone, check to see if our mode policy is to restrict rather
1415 1415 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1416 1416 * and DTRACE_ACCESS_ARGS
1417 1417 */
1418 1418 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1419 1419 cred_t *cr;
1420 1420 cred_t *s_cr = state->dts_cred.dcr_cred;
1421 1421
1422 1422 ASSERT(s_cr != NULL);
1423 1423
1424 1424 if ((cr = CRED()) == NULL ||
1425 1425 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1426 1426 if (mode & DTRACE_MODE_NOPRIV_DROP)
1427 1427 return (0);
1428 1428
1429 1429 mstate->dtms_access &=
1430 1430 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1431 1431 }
1432 1432 }
1433 1433
1434 1434 /*
1435 1435 * By merits of being in this code path at all, we have limited
1436 1436 * privileges. If the provider has indicated that limited privileges
1437 1437 * are to denote restricted operation, strip off the ability to access
1438 1438 * arguments.
1439 1439 */
1440 1440 if (mode & DTRACE_MODE_LIMITEDPRIV_RESTRICT)
1441 1441 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1442 1442
1443 1443 return (1);
1444 1444 }
1445 1445
1446 1446 /*
1447 1447 * Note: not called from probe context. This function is called
1448 1448 * asynchronously (and at a regular interval) from outside of probe context to
1449 1449 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1450 1450 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1451 1451 */
1452 1452 void
1453 1453 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1454 1454 {
1455 1455 dtrace_dynvar_t *dirty;
1456 1456 dtrace_dstate_percpu_t *dcpu;
1457 1457 dtrace_dynvar_t **rinsep;
1458 1458 int i, j, work = 0;
1459 1459
1460 1460 for (i = 0; i < NCPU; i++) {
1461 1461 dcpu = &dstate->dtds_percpu[i];
1462 1462 rinsep = &dcpu->dtdsc_rinsing;
1463 1463
1464 1464 /*
1465 1465 * If the dirty list is NULL, there is no dirty work to do.
1466 1466 */
1467 1467 if (dcpu->dtdsc_dirty == NULL)
1468 1468 continue;
1469 1469
1470 1470 if (dcpu->dtdsc_rinsing != NULL) {
1471 1471 /*
1472 1472 * If the rinsing list is non-NULL, then it is because
1473 1473 * this CPU was selected to accept another CPU's
1474 1474 * dirty list -- and since that time, dirty buffers
1475 1475 * have accumulated. This is a highly unlikely
1476 1476 * condition, but we choose to ignore the dirty
1477 1477 * buffers -- they'll be picked up in a future cleanse.
1478 1478 */
1479 1479 continue;
1480 1480 }
1481 1481
1482 1482 if (dcpu->dtdsc_clean != NULL) {
1483 1483 /*
1484 1484 * If the clean list is non-NULL, then we're in a
1485 1485 * situation where a CPU has done deallocations (we
1486 1486 * have a non-NULL dirty list) but no allocations (we
1487 1487 * also have a non-NULL clean list). We can't simply
1488 1488 * move the dirty list into the clean list on this
1489 1489 * CPU, yet we also don't want to allow this condition
1490 1490 * to persist, lest a short clean list prevent a
1491 1491 * massive dirty list from being cleaned (which in
1492 1492 * turn could lead to otherwise avoidable dynamic
1493 1493 * drops). To deal with this, we look for some CPU
1494 1494 * with a NULL clean list, NULL dirty list, and NULL
1495 1495 * rinsing list -- and then we borrow this CPU to
1496 1496 * rinse our dirty list.
1497 1497 */
1498 1498 for (j = 0; j < NCPU; j++) {
1499 1499 dtrace_dstate_percpu_t *rinser;
1500 1500
1501 1501 rinser = &dstate->dtds_percpu[j];
1502 1502
1503 1503 if (rinser->dtdsc_rinsing != NULL)
1504 1504 continue;
1505 1505
1506 1506 if (rinser->dtdsc_dirty != NULL)
1507 1507 continue;
1508 1508
1509 1509 if (rinser->dtdsc_clean != NULL)
1510 1510 continue;
1511 1511
1512 1512 rinsep = &rinser->dtdsc_rinsing;
1513 1513 break;
1514 1514 }
1515 1515
1516 1516 if (j == NCPU) {
1517 1517 /*
1518 1518 * We were unable to find another CPU that
1519 1519 * could accept this dirty list -- we are
1520 1520 * therefore unable to clean it now.
1521 1521 */
1522 1522 dtrace_dynvar_failclean++;
1523 1523 continue;
1524 1524 }
1525 1525 }
1526 1526
1527 1527 work = 1;
1528 1528
1529 1529 /*
1530 1530 * Atomically move the dirty list aside.
1531 1531 */
1532 1532 do {
1533 1533 dirty = dcpu->dtdsc_dirty;
1534 1534
1535 1535 /*
1536 1536 * Before we zap the dirty list, set the rinsing list.
1537 1537 * (This allows for a potential assertion in
1538 1538 * dtrace_dynvar(): if a free dynamic variable appears
1539 1539 * on a hash chain, either the dirty list or the
1540 1540 * rinsing list for some CPU must be non-NULL.)
1541 1541 */
1542 1542 *rinsep = dirty;
1543 1543 dtrace_membar_producer();
1544 1544 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1545 1545 dirty, NULL) != dirty);
1546 1546 }
1547 1547
1548 1548 if (!work) {
1549 1549 /*
1550 1550 * We have no work to do; we can simply return.
1551 1551 */
1552 1552 return;
1553 1553 }
1554 1554
1555 1555 dtrace_sync();
1556 1556
1557 1557 for (i = 0; i < NCPU; i++) {
1558 1558 dcpu = &dstate->dtds_percpu[i];
1559 1559
1560 1560 if (dcpu->dtdsc_rinsing == NULL)
1561 1561 continue;
1562 1562
1563 1563 /*
1564 1564 * We are now guaranteed that no hash chain contains a pointer
1565 1565 * into this dirty list; we can make it clean.
1566 1566 */
1567 1567 ASSERT(dcpu->dtdsc_clean == NULL);
1568 1568 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1569 1569 dcpu->dtdsc_rinsing = NULL;
1570 1570 }
1571 1571
1572 1572 /*
1573 1573 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1574 1574 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1575 1575 * This prevents a race whereby a CPU incorrectly decides that
1576 1576 * the state should be something other than DTRACE_DSTATE_CLEAN
1577 1577 * after dtrace_dynvar_clean() has completed.
1578 1578 */
1579 1579 dtrace_sync();
1580 1580
1581 1581 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1582 1582 }
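
The detach above is the delicate step: the rinsing pointer must be published before the dirty pointer is zapped, or there would be a window in which a freed variable sits on a hash chain with neither a dirty nor a rinsing list to account for it. A minimal user-land sketch of the same detach, using C11 atomics in place of dtrace_casptr() and dtrace_membar_producer() (the dnode_t type and detach_dirty() name are hypothetical, not part of this file):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct dnode {
            struct dnode *dn_next;
    } dnode_t;

    /*
     * Atomically detach an entire dirty list.  The rinsing pointer is
     * (re)published before each CAS attempt, so there is never a moment
     * when the detached elements are on neither list.
     */
    static dnode_t *
    detach_dirty(_Atomic(dnode_t *) *dirtyp, dnode_t **rinsep)
    {
            dnode_t *dirty = atomic_load(dirtyp);

            do {
                    *rinsep = dirty;
            } while (!atomic_compare_exchange_weak(dirtyp, &dirty, NULL));

            return (dirty);
    }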
1583 1583
1584 1584 /*
1585 1585 * Depending on the value of the op parameter, this function looks up,
1586 1586 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1587 1587 * allocation is requested, this function will return a pointer to a
1588 1588 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1589 1589 * variable can be allocated. If NULL is returned, the appropriate counter
1590 1590 * will be incremented.
1591 1591 */
1592 1592 dtrace_dynvar_t *
1593 1593 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1594 1594 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1595 1595 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1596 1596 {
1597 1597 uint64_t hashval = DTRACE_DYNHASH_VALID;
1598 1598 dtrace_dynhash_t *hash = dstate->dtds_hash;
1599 1599 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1600 1600 processorid_t me = CPU->cpu_id, cpu = me;
1601 1601 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1602 1602 size_t bucket, ksize;
1603 1603 size_t chunksize = dstate->dtds_chunksize;
1604 1604 uintptr_t kdata, lock, nstate;
1605 1605 uint_t i;
1606 1606
1607 1607 ASSERT(nkeys != 0);
1608 1608
1609 1609 /*
1610 1610 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1611 1611 * algorithm. For the by-value portions, we perform the algorithm in
1612 1612 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1613 1613 * bit, and seems to have only a minute effect on distribution. For
1614 1614 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1615 1615 * over each referenced byte. It's painful to do this, but it's much
1616 1616 * better than pathological hash distribution. The efficacy of the
1617 1617 * hashing algorithm (and a comparison with other algorithms) may be
1618 1618 * found by running the ::dtrace_dynstat MDB dcmd.
1619 1619 */
1620 1620 for (i = 0; i < nkeys; i++) {
1621 1621 if (key[i].dttk_size == 0) {
1622 1622 uint64_t val = key[i].dttk_value;
1623 1623
1624 1624 hashval += (val >> 48) & 0xffff;
1625 1625 hashval += (hashval << 10);
1626 1626 hashval ^= (hashval >> 6);
1627 1627
1628 1628 hashval += (val >> 32) & 0xffff;
1629 1629 hashval += (hashval << 10);
1630 1630 hashval ^= (hashval >> 6);
1631 1631
1632 1632 hashval += (val >> 16) & 0xffff;
1633 1633 hashval += (hashval << 10);
1634 1634 hashval ^= (hashval >> 6);
1635 1635
1636 1636 hashval += val & 0xffff;
1637 1637 hashval += (hashval << 10);
1638 1638 hashval ^= (hashval >> 6);
1639 1639 } else {
1640 1640 /*
1641 1641 * This is incredibly painful, but it beats the hell
1642 1642 * out of the alternative.
1643 1643 */
1644 1644 uint64_t j, size = key[i].dttk_size;
1645 1645 uintptr_t base = (uintptr_t)key[i].dttk_value;
1646 1646
1647 1647 if (!dtrace_canload(base, size, mstate, vstate))
1648 1648 break;
1649 1649
1650 1650 for (j = 0; j < size; j++) {
1651 1651 hashval += dtrace_load8(base + j);
1652 1652 hashval += (hashval << 10);
1653 1653 hashval ^= (hashval >> 6);
1654 1654 }
1655 1655 }
1656 1656 }
1657 1657
1658 1658 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1659 1659 return (NULL);
1660 1660
1661 1661 hashval += (hashval << 3);
1662 1662 hashval ^= (hashval >> 11);
1663 1663 hashval += (hashval << 15);
1664 1664
1665 1665 /*
1666 1666 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1667 1667 * comes out to be one of our two sentinel hash values. If this
1668 1668 * actually happens, we set the hashval to be a value known to be a
1669 1669 * non-sentinel value.
1670 1670 */
1671 1671 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1672 1672 hashval = DTRACE_DYNHASH_VALID;
1673 1673
1674 1674 /*
1675 1675 * Yes, it's painful to do a divide here. If the cycle count becomes
1676 1676 * important here, tricks can be pulled to reduce it. (However, it's
1677 1677 * critical that hash collisions be kept to an absolute minimum;
1678 1678 * they're much more painful than a divide.) It's better to have a
1679 1679 * solution that generates few collisions and still keeps things
1680 1680 * relatively simple.
1681 1681 */
1682 1682 bucket = hashval % dstate->dtds_hashsize;
1683 1683
1684 1684 if (op == DTRACE_DYNVAR_DEALLOC) {
1685 1685 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1686 1686
1687 1687 for (;;) {
1688 1688 while ((lock = *lockp) & 1)
1689 1689 continue;
1690 1690
1691 1691 if (dtrace_casptr((void *)lockp,
1692 1692 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1693 1693 break;
1694 1694 }
1695 1695
1696 1696 dtrace_membar_producer();
1697 1697 }
1698 1698
1699 1699 top:
1700 1700 prev = NULL;
1701 1701 lock = hash[bucket].dtdh_lock;
1702 1702
1703 1703 dtrace_membar_consumer();
1704 1704
1705 1705 start = hash[bucket].dtdh_chain;
1706 1706 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1707 1707 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1708 1708 op != DTRACE_DYNVAR_DEALLOC));
1709 1709
1710 1710 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1711 1711 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1712 1712 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1713 1713
1714 1714 if (dvar->dtdv_hashval != hashval) {
1715 1715 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1716 1716 /*
1717 1717 * We've reached the sink, and therefore the
1718 1718 * end of the hash chain; we can kick out of
1719 1719 * the loop knowing that we have seen a valid
1720 1720 * snapshot of state.
1721 1721 */
1722 1722 ASSERT(dvar->dtdv_next == NULL);
1723 1723 ASSERT(dvar == &dtrace_dynhash_sink);
1724 1724 break;
1725 1725 }
1726 1726
1727 1727 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1728 1728 /*
1729 1729 * We've gone off the rails: somewhere along
1730 1730 * the line, one of the members of this hash
1731 1731 * chain was deleted. Note that we could also
1732 1732 * detect this by simply letting this loop run
1733 1733 * to completion, as we would eventually hit
1734 1734 * the end of the dirty list. However, we
1735 1735 * want to avoid running the length of the
1736 1736 * dirty list unnecessarily (it might be quite
1737 1737 * long), so we catch this as early as
1738 1738 * possible by detecting the hash marker. In
1739 1739 * this case, we simply set dvar to NULL and
1740 1740 * break; the conditional after the loop will
1741 1741 * send us back to top.
1742 1742 */
1743 1743 dvar = NULL;
1744 1744 break;
1745 1745 }
1746 1746
1747 1747 goto next;
1748 1748 }
1749 1749
1750 1750 if (dtuple->dtt_nkeys != nkeys)
1751 1751 goto next;
1752 1752
1753 1753 for (i = 0; i < nkeys; i++, dkey++) {
1754 1754 if (dkey->dttk_size != key[i].dttk_size)
1755 1755 goto next; /* size or type mismatch */
1756 1756
1757 1757 if (dkey->dttk_size != 0) {
1758 1758 if (dtrace_bcmp(
1759 1759 (void *)(uintptr_t)key[i].dttk_value,
1760 1760 (void *)(uintptr_t)dkey->dttk_value,
1761 1761 dkey->dttk_size))
1762 1762 goto next;
1763 1763 } else {
1764 1764 if (dkey->dttk_value != key[i].dttk_value)
1765 1765 goto next;
1766 1766 }
1767 1767 }
1768 1768
1769 1769 if (op != DTRACE_DYNVAR_DEALLOC)
1770 1770 return (dvar);
1771 1771
1772 1772 ASSERT(dvar->dtdv_next == NULL ||
1773 1773 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1774 1774
1775 1775 if (prev != NULL) {
1776 1776 ASSERT(hash[bucket].dtdh_chain != dvar);
1777 1777 ASSERT(start != dvar);
1778 1778 ASSERT(prev->dtdv_next == dvar);
1779 1779 prev->dtdv_next = dvar->dtdv_next;
1780 1780 } else {
1781 1781 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1782 1782 start, dvar->dtdv_next) != start) {
1783 1783 /*
1784 1784 * We have failed to atomically swing the
1785 1785 * hash table head pointer, presumably because
1786 1786 * of a conflicting allocation on another CPU.
1787 1787 * We need to reread the hash chain and try
1788 1788 * again.
1789 1789 */
1790 1790 goto top;
1791 1791 }
1792 1792 }
1793 1793
1794 1794 dtrace_membar_producer();
1795 1795
1796 1796 /*
1797 1797 * Now set the hash value to indicate that it's free.
1798 1798 */
1799 1799 ASSERT(hash[bucket].dtdh_chain != dvar);
1800 1800 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1801 1801
1802 1802 dtrace_membar_producer();
1803 1803
1804 1804 /*
1805 1805 * Set the next pointer to point at the dirty list, and
1806 1806 * atomically swing the dirty pointer to the newly freed dvar.
1807 1807 */
1808 1808 do {
1809 1809 next = dcpu->dtdsc_dirty;
1810 1810 dvar->dtdv_next = next;
1811 1811 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1812 1812
1813 1813 /*
1814 1814 * Finally, unlock this hash bucket.
1815 1815 */
1816 1816 ASSERT(hash[bucket].dtdh_lock == lock);
1817 1817 ASSERT(lock & 1);
1818 1818 hash[bucket].dtdh_lock++;
1819 1819
1820 1820 return (NULL);
1821 1821 next:
1822 1822 prev = dvar;
1823 1823 continue;
1824 1824 }
1825 1825
1826 1826 if (dvar == NULL) {
1827 1827 /*
1828 1828 * If dvar is NULL, it is because we went off the rails:
1829 1829 * one of the elements that we traversed in the hash chain
1830 1830 * was deleted while we were traversing it. In this case,
1831 1831 * we assert that we aren't doing a dealloc (deallocs lock
1832 1832 * the hash bucket to prevent themselves from racing with
1833 1833 * one another), and retry the hash chain traversal.
1834 1834 */
1835 1835 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1836 1836 goto top;
1837 1837 }
1838 1838
1839 1839 if (op != DTRACE_DYNVAR_ALLOC) {
1840 1840 /*
1841 1841 * If we are not to allocate a new variable, we want to
1842 1842 * return NULL now. Before we return, check that the value
1843 1843 * of the lock word hasn't changed. If it has, we may have
1844 1844 * seen an inconsistent snapshot.
1845 1845 */
1846 1846 if (op == DTRACE_DYNVAR_NOALLOC) {
1847 1847 if (hash[bucket].dtdh_lock != lock)
1848 1848 goto top;
1849 1849 } else {
1850 1850 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1851 1851 ASSERT(hash[bucket].dtdh_lock == lock);
1852 1852 ASSERT(lock & 1);
1853 1853 hash[bucket].dtdh_lock++;
1854 1854 }
1855 1855
1856 1856 return (NULL);
1857 1857 }
1858 1858
1859 1859 /*
1860 1860 * We need to allocate a new dynamic variable. The size we need is the
1861 1861 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1862 1862 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1863 1863 * the size of any referred-to data (dsize). We then round the final
1864 1864 * size up to the chunksize for allocation.
1865 1865 */
1866 1866 for (ksize = 0, i = 0; i < nkeys; i++)
1867 1867 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1868 1868
1869 1869 /*
1870 1870 * This should be pretty much impossible, but could happen if, say,
1871 1871 * strange DIF specified the tuple. Ideally, this should be an
1872 1872 * assertion and not an error condition -- but that requires that the
1873 1873 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1874 1874 * bullet-proof. (That is, it must not be able to be fooled by
1875 1875 * malicious DIF.) Given the lack of backwards branches in DIF,
1876 1876 * solving this would presumably not amount to solving the Halting
1877 1877 * Problem -- but it still seems awfully hard.
1878 1878 */
1879 1879 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1880 1880 ksize + dsize > chunksize) {
1881 1881 dcpu->dtdsc_drops++;
1882 1882 return (NULL);
1883 1883 }
1884 1884
1885 1885 nstate = DTRACE_DSTATE_EMPTY;
1886 1886
1887 1887 do {
1888 1888 retry:
1889 1889 free = dcpu->dtdsc_free;
1890 1890
1891 1891 if (free == NULL) {
1892 1892 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1893 1893 void *rval;
1894 1894
1895 1895 if (clean == NULL) {
1896 1896 /*
1897 1897 * We're out of dynamic variable space on
1898 1898 * this CPU. Unless we have tried all CPUs,
1899 1899 * we'll try to allocate from a different
1900 1900 * CPU.
1901 1901 */
1902 1902 switch (dstate->dtds_state) {
1903 1903 case DTRACE_DSTATE_CLEAN: {
1904 1904 void *sp = &dstate->dtds_state;
1905 1905
1906 1906 if (++cpu >= NCPU)
1907 1907 cpu = 0;
1908 1908
1909 1909 if (dcpu->dtdsc_dirty != NULL &&
1910 1910 nstate == DTRACE_DSTATE_EMPTY)
1911 1911 nstate = DTRACE_DSTATE_DIRTY;
1912 1912
1913 1913 if (dcpu->dtdsc_rinsing != NULL)
1914 1914 nstate = DTRACE_DSTATE_RINSING;
1915 1915
1916 1916 dcpu = &dstate->dtds_percpu[cpu];
1917 1917
1918 1918 if (cpu != me)
1919 1919 goto retry;
1920 1920
1921 1921 (void) dtrace_cas32(sp,
1922 1922 DTRACE_DSTATE_CLEAN, nstate);
1923 1923
1924 1924 /*
1925 1925 * To increment the correct bean
1926 1926 * counter, take another lap.
1927 1927 */
1928 1928 goto retry;
1929 1929 }
1930 1930
1931 1931 case DTRACE_DSTATE_DIRTY:
1932 1932 dcpu->dtdsc_dirty_drops++;
1933 1933 break;
1934 1934
1935 1935 case DTRACE_DSTATE_RINSING:
1936 1936 dcpu->dtdsc_rinsing_drops++;
1937 1937 break;
1938 1938
1939 1939 case DTRACE_DSTATE_EMPTY:
1940 1940 dcpu->dtdsc_drops++;
1941 1941 break;
1942 1942 }
1943 1943
1944 1944 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1945 1945 return (NULL);
1946 1946 }
1947 1947
1948 1948 /*
1949 1949 * The clean list appears to be non-empty. We want to
1950 1950 * move the clean list to the free list; we start by
1951 1951 * moving the clean pointer aside.
1952 1952 */
1953 1953 if (dtrace_casptr(&dcpu->dtdsc_clean,
1954 1954 clean, NULL) != clean) {
1955 1955 /*
1956 1956 * We are in one of two situations:
1957 1957 *
1958 1958 * (a) The clean list was switched to the
1959 1959 * free list by another CPU.
1960 1960 *
1961 1961 * (b) The clean list was added to by the
1962 1962 * cleansing cyclic.
1963 1963 *
1964 1964 * In either of these situations, we can
1965 1965 * just reattempt the free list allocation.
1966 1966 */
1967 1967 goto retry;
1968 1968 }
1969 1969
1970 1970 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1971 1971
1972 1972 /*
1973 1973 * Now we'll move the clean list to our free list.
1974 1974 * It's impossible for this to fail: the only way
1975 1975 * the free list can be updated is through this
1976 1976 * code path, and only one CPU can own the clean list.
1977 1977 * Thus, it would only be possible for this to fail if
1978 1978 * this code were racing with dtrace_dynvar_clean().
1979 1979 * (That is, if dtrace_dynvar_clean() updated the clean
1980 1980 * list, and we ended up racing to update the free
1981 1981 * list.) This race is prevented by the dtrace_sync()
1982 1982 * in dtrace_dynvar_clean() -- which flushes the
1983 1983 * owners of the clean lists out before resetting
1984 1984 * the clean lists.
1985 1985 */
1986 1986 dcpu = &dstate->dtds_percpu[me];
1987 1987 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1988 1988 ASSERT(rval == NULL);
1989 1989 goto retry;
1990 1990 }
1991 1991
1992 1992 dvar = free;
1993 1993 new_free = dvar->dtdv_next;
1994 1994 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1995 1995
1996 1996 /*
1997 1997 * We have now allocated a new chunk. We copy the tuple keys into the
1998 1998 * tuple array and copy any referenced key data into the data space
1999 1999 * following the tuple array. As we do this, we relocate dttk_value
2000 2000 * in the final tuple to point to the key data address in the chunk.
2001 2001 */
2002 2002 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2003 2003 dvar->dtdv_data = (void *)(kdata + ksize);
2004 2004 dvar->dtdv_tuple.dtt_nkeys = nkeys;
2005 2005
2006 2006 for (i = 0; i < nkeys; i++) {
2007 2007 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2008 2008 size_t kesize = key[i].dttk_size;
2009 2009
2010 2010 if (kesize != 0) {
2011 2011 dtrace_bcopy(
2012 2012 (const void *)(uintptr_t)key[i].dttk_value,
2013 2013 (void *)kdata, kesize);
2014 2014 dkey->dttk_value = kdata;
2015 2015 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2016 2016 } else {
2017 2017 dkey->dttk_value = key[i].dttk_value;
2018 2018 }
2019 2019
2020 2020 dkey->dttk_size = kesize;
2021 2021 }
2022 2022
2023 2023 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2024 2024 dvar->dtdv_hashval = hashval;
2025 2025 dvar->dtdv_next = start;
2026 2026
2027 2027 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2028 2028 return (dvar);
2029 2029
2030 2030 /*
2031 2031 * The cas has failed. Either another CPU is adding an element to
2032 2032 * this hash chain, or another CPU is deleting an element from this
2033 2033 * hash chain. The simplest way to deal with both of these cases
2034 2034 * (though not necessarily the most efficient) is to free our
2035 2035 * allocated block and tail-call ourselves. Note that the free is
2036 2036 * to the dirty list and _not_ to the free list. This is to prevent
2037 2037 * races with allocators, above.
2038 2038 */
2039 2039 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2040 2040
2041 2041 dtrace_membar_producer();
2042 2042
2043 2043 do {
2044 2044 free = dcpu->dtdsc_dirty;
2045 2045 dvar->dtdv_next = free;
2046 2046 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2047 2047
2048 2048 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
2049 2049 }
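
The mixing in the key loop is Bob Jenkins' "one-at-a-time" hash -- fed 16-bit chunks for by-value keys, single bytes for by-reference keys -- with the standard final avalanche applied afterward. Isolated as a standalone sketch (the seed constant here is arbitrary; the kernel seeds with DTRACE_DYNHASH_VALID):

    #include <stdint.h>

    static uint64_t
    oaat_hash64(uint64_t val)
    {
            uint64_t h = 2;         /* any non-sentinel seed */
            int shift;

            /* Mix the value in four 16-bit chunks, high to low. */
            for (shift = 48; shift >= 0; shift -= 16) {
                    h += (val >> shift) & 0xffff;
                    h += (h << 10);
                    h ^= (h >> 6);
            }

            /* Final avalanche, as after the key loop above. */
            h += (h << 3);
            h ^= (h >> 11);
            h += (h << 15);

            return (h);
    }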
2050 2050
2051 2051 /*ARGSUSED*/
2052 2052 static void
2053 2053 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2054 2054 {
2055 2055 if ((int64_t)nval < (int64_t)*oval)
2056 2056 *oval = nval;
2057 2057 }
2058 2058
2059 2059 /*ARGSUSED*/
2060 2060 static void
2061 2061 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2062 2062 {
2063 2063 if ((int64_t)nval > (int64_t)*oval)
2064 2064 *oval = nval;
2065 2065 }
2066 2066
2067 2067 static void
2068 2068 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2069 2069 {
2070 2070 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2071 2071 int64_t val = (int64_t)nval;
2072 2072
2073 2073 if (val < 0) {
2074 2074 for (i = 0; i < zero; i++) {
2075 2075 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2076 2076 quanta[i] += incr;
2077 2077 return;
2078 2078 }
2079 2079 }
2080 2080 } else {
2081 2081 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2082 2082 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2083 2083 quanta[i - 1] += incr;
2084 2084 return;
2085 2085 }
2086 2086 }
2087 2087
2088 2088 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2089 2089 return;
2090 2090 }
2091 2091
2092 2092 ASSERT(0);
2093 2093 }
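
Concretely, a positive value is counted in the row holding its largest power-of-two lower bound: when val < DTRACE_QUANTIZE_BUCKETVAL(i) first holds, quanta[i - 1] -- the row below -- is bumped. A toy positive-side version (assumes val >= 1 and bucket values 1, 2, 4, ...; the real macro also covers the zero bucket and the mirrored negative buckets):

    #include <stdint.h>

    /* E.g. val == 6 advances past 1 and 2, stops at 4 (since 6 < 8),
     * and is counted in the row labelled 4. */
    static int64_t
    toy_quantize_row(int64_t val)
    {
            int64_t bound = 1;

            while (val >= (bound << 1) && bound <= (INT64_MAX >> 2))
                    bound <<= 1;

            return (bound);
    }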
2094 2094
2095 2095 static void
2096 2096 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2097 2097 {
2098 2098 uint64_t arg = *lquanta++;
2099 2099 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2100 2100 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2101 2101 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2102 2102 int32_t val = (int32_t)nval, level;
2103 2103
2104 2104 ASSERT(step != 0);
2105 2105 ASSERT(levels != 0);
2106 2106
2107 2107 if (val < base) {
2108 2108 /*
2109 2109 * This is an underflow.
2110 2110 */
2111 2111 lquanta[0] += incr;
2112 2112 return;
2113 2113 }
2114 2114
2115 2115 level = (val - base) / step;
2116 2116
2117 2117 if (level < levels) {
2118 2118 lquanta[level + 1] += incr;
2119 2119 return;
2120 2120 }
2121 2121
2122 2122 /*
2123 2123 * This is an overflow.
2124 2124 */
2125 2125 lquanta[levels + 1] += incr;
2126 2126 }
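
A worked instance of the slot arithmetic: lquantize(x, 0, 50, 10) decodes to base 0, step 10, and levels 5 (the encoding macros themselves are not reproduced here). Slot 0 is the underflow bucket and slot levels + 1 the overflow:

    #include <stdint.h>

    static int
    lquantize_slot(int32_t val)
    {
            const int32_t base = 0;
            const int step = 10, levels = 5;
            int level;

            if (val < base)
                    return (0);             /* underflow: val == -3 -> slot 0 */

            level = (val - base) / step;

            if (level < levels)
                    return (level + 1);     /* val == 37 -> level 3 -> slot 4 */

            return (levels + 1);            /* overflow: val == 50 -> slot 6 */
    }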
2127 2127
2128 2128 static int
2129 2129 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2130 2130 uint16_t high, uint16_t nsteps, int64_t value)
2131 2131 {
2132 2132 int64_t this = 1, last, next;
2133 2133 int base = 1, order;
2134 2134
2135 2135 ASSERT(factor <= nsteps);
2136 2136 ASSERT(nsteps % factor == 0);
2137 2137
2138 2138 for (order = 0; order < low; order++)
2139 2139 this *= factor;
2140 2140
2141 2141 /*
2142 2142 * If our value is less than our factor taken to the power of the
2143 2143 * low order of magnitude, it goes into the zeroth bucket.
2144 2144 */
2145 2145 if (value < (last = this))
2146 2146 return (0);
2147 2147
2148 2148 for (this *= factor; order <= high; order++) {
2149 2149 int nbuckets = this > nsteps ? nsteps : this;
2150 2150
2151 2151 if ((next = this * factor) < this) {
2152 2152 /*
2153 2153 * We should not generally get log/linear quantizations
2154 2154 * with a high magnitude that allows 64-bits to
2155 2155 * overflow, but we nonetheless protect against this
2156 2156 * by explicitly checking for overflow, and clamping
2157 2157 * our value accordingly.
2158 2158 */
2159 2159 value = this - 1;
2160 2160 }
2161 2161
2162 2162 if (value < this) {
2163 2163 /*
2164 2164 * If our value lies within this order of magnitude,
2165 2165 * determine its position by taking the offset within
2166 2166 * the order of magnitude, dividing by the bucket
2167 2167 * width, and adding to our (accumulated) base.
2168 2168 */
2169 2169 return (base + (value - last) / (this / nbuckets));
2170 2170 }
2171 2171
2172 2172 base += nbuckets - (nbuckets / factor);
2173 2173 last = this;
2174 2174 this = next;
2175 2175 }
2176 2176
2177 2177 /*
2178 2178 * Our value is greater than or equal to our factor taken to the
2179 2179 * power of one plus the high magnitude -- return the top bucket.
2180 2180 */
2181 2181 return (base);
2182 2182 }
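
Some spot-checks, computed by hand from the loop above for llquantize(x, 10, 0, 2, 10) -- factor 10, orders 10^0 through 10^2, ten steps per order. Bucket 0 holds everything below 10^0; each order contributes nsteps - nsteps/factor = 9 buckets; bucket 28 holds everything at or above 10^3. (The routine is static, so this harness function is purely illustrative.)

    #include <assert.h>

    static void
    llquantize_spot_checks(void)
    {
            assert(dtrace_aggregate_llquantize_bucket(10, 0, 2, 10, 0) == 0);
            assert(dtrace_aggregate_llquantize_bucket(10, 0, 2, 10, 7) == 7);
            assert(dtrace_aggregate_llquantize_bucket(10, 0, 2, 10, 42) == 13);
            assert(dtrace_aggregate_llquantize_bucket(10, 0, 2, 10, 640) == 24);
            assert(dtrace_aggregate_llquantize_bucket(10, 0, 2, 10, 1000) == 28);
    }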
2183 2183
2184 2184 static void
2185 2185 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2186 2186 {
2187 2187 uint64_t arg = *llquanta++;
2188 2188 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2189 2189 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2190 2190 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2191 2191 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2192 2192
2193 2193 llquanta[dtrace_aggregate_llquantize_bucket(factor,
2194 2194 low, high, nsteps, nval)] += incr;
2195 2195 }
2196 2196
2197 2197 /*ARGSUSED*/
2198 2198 static void
2199 2199 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2200 2200 {
2201 2201 data[0]++;
2202 2202 data[1] += nval;
2203 2203 }
2204 2204
2205 2205 /*ARGSUSED*/
2206 2206 static void
2207 2207 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2208 2208 {
2209 2209 int64_t snval = (int64_t)nval;
2210 2210 uint64_t tmp[2];
2211 2211
2212 2212 data[0]++;
2213 2213 data[1] += nval;
2214 2214
2215 2215 /*
2216 2216 * What we want to say here is:
2217 2217 *
2218 2218 * data[2] += nval * nval;
2219 2219 *
2220 2220 * But given that nval is 64-bit, we could easily overflow, so
2221 2221 * we do this as 128-bit arithmetic.
2222 2222 */
2223 2223 if (snval < 0)
2224 2224 snval = -snval;
2225 2225
2226 2226 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2227 2227 dtrace_add_128(data + 2, tmp, data + 2);
2228 2228 }
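
The accumulated state is thus data[0] = n, data[1] = the sum of x, and data[2..3] = a 128-bit sum of x^2; the standard deviation itself is computed later, by the consumer, as sqrt(E[x^2] - E[x]^2). A sketch of that reduction (floating point is used here purely for illustration -- the real consumer reduces the 128-bit integer state directly):

    #include <math.h>
    #include <stdint.h>

    static double
    stddev_from_state(uint64_t n, int64_t sum, long double sumsq)
    {
            long double mean = (long double)sum / n;

            return (sqrt((double)(sumsq / n - mean * mean)));
    }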
2229 2229
2230 2230 /*ARGSUSED*/
2231 2231 static void
2232 2232 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2233 2233 {
2234 2234 *oval = *oval + 1;
2235 2235 }
2236 2236
2237 2237 /*ARGSUSED*/
2238 2238 static void
2239 2239 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2240 2240 {
2241 2241 *oval += nval;
2242 2242 }
2243 2243
2244 2244 /*
2245 2245 * Aggregate given the tuple in the principal data buffer, and the aggregating
2246 2246 * action denoted by the specified dtrace_aggregation_t. The aggregation
2247 2247 * buffer is specified as the buf parameter. This routine does not return
2248 2248 * failure; if there is no space in the aggregation buffer, the data will be
2249 2249 * dropped, and a corresponding counter incremented.
2250 2250 */
2251 2251 static void
2252 2252 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2253 2253 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2254 2254 {
2255 2255 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2256 2256 uint32_t i, ndx, size, fsize;
2257 2257 uint32_t align = sizeof (uint64_t) - 1;
2258 2258 dtrace_aggbuffer_t *agb;
2259 2259 dtrace_aggkey_t *key;
2260 2260 uint32_t hashval = 0, limit, isstr;
2261 2261 caddr_t tomax, data, kdata;
2262 2262 dtrace_actkind_t action;
2263 2263 dtrace_action_t *act;
2264 2264 uintptr_t offs;
2265 2265
2266 2266 if (buf == NULL)
2267 2267 return;
2268 2268
2269 2269 if (!agg->dtag_hasarg) {
2270 2270 /*
2271 2271 * Currently, only quantize(), lquantize(), and llquantize() take
2272 2272 * additional arguments, and they have the same semantics: an increment
2273 2273 * value that defaults to 1 when not present. If additional
2274 2274 * aggregating actions take arguments, the setting of the
2275 2275 * default argument value will presumably have to become more
2276 2276 * sophisticated...
2277 2277 */
2278 2278 arg = 1;
2279 2279 }
2280 2280
2281 2281 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2282 2282 size = rec->dtrd_offset - agg->dtag_base;
2283 2283 fsize = size + rec->dtrd_size;
2284 2284
2285 2285 ASSERT(dbuf->dtb_tomax != NULL);
2286 2286 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2287 2287
2288 2288 if ((tomax = buf->dtb_tomax) == NULL) {
2289 2289 dtrace_buffer_drop(buf);
2290 2290 return;
2291 2291 }
2292 2292
2293 2293 /*
2294 2294 * The metastructure is always at the bottom of the buffer.
2295 2295 */
2296 2296 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2297 2297 sizeof (dtrace_aggbuffer_t));
2298 2298
2299 2299 if (buf->dtb_offset == 0) {
2300 2300 /*
2301 2301 * We just kludge up approximately 1/8th of the size to be
2302 2302 * buckets. If this guess ends up being routinely
2303 2303 * off-the-mark, we may need to dynamically readjust this
2304 2304 * based on past performance.
2305 2305 */
2306 2306 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2307 2307
2308 2308 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2309 2309 (uintptr_t)tomax || hashsize == 0) {
2310 2310 /*
2311 2311 * We've been given a ludicrously small buffer;
2312 2312 * increment our drop count and leave.
2313 2313 */
2314 2314 dtrace_buffer_drop(buf);
2315 2315 return;
2316 2316 }
2317 2317
2318 2318 /*
2319 2319 * And now, a pathetic attempt to try to get an odd (or
2320 2320 * perchance, a prime) hash size for better hash distribution.
2321 2321 */
2322 2322 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2323 2323 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2324 2324
2325 2325 agb->dtagb_hashsize = hashsize;
2326 2326 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2327 2327 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2328 2328 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2329 2329
2330 2330 for (i = 0; i < agb->dtagb_hashsize; i++)
2331 2331 agb->dtagb_hash[i] = NULL;
2332 2332 }
2333 2333
2334 2334 ASSERT(agg->dtag_first != NULL);
2335 2335 ASSERT(agg->dtag_first->dta_intuple);
2336 2336
2337 2337 /*
2338 2338 * Calculate the hash value based on the key. Note that we _don't_
2339 2339 * include the aggid in the hashing (but we will store it as part of
2340 2340 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2341 2341 * algorithm: a simple, quick algorithm that has no known funnels, and
2342 2342 * gets good distribution in practice. The efficacy of the hashing
2343 2343 * algorithm (and a comparison with other algorithms) may be found by
2344 2344 * running the ::dtrace_aggstat MDB dcmd.
2345 2345 */
2346 2346 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2347 2347 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2348 2348 limit = i + act->dta_rec.dtrd_size;
2349 2349 ASSERT(limit <= size);
2350 2350 isstr = DTRACEACT_ISSTRING(act);
2351 2351
2352 2352 for (; i < limit; i++) {
2353 2353 hashval += data[i];
2354 2354 hashval += (hashval << 10);
2355 2355 hashval ^= (hashval >> 6);
2356 2356
2357 2357 if (isstr && data[i] == '\0')
2358 2358 break;
2359 2359 }
2360 2360 }
2361 2361
2362 2362 hashval += (hashval << 3);
2363 2363 hashval ^= (hashval >> 11);
2364 2364 hashval += (hashval << 15);
2365 2365
2366 2366 /*
2367 2367 * Yes, the divide here is expensive -- but it's generally the least
2368 2368 * of the performance issues given the amount of data that we iterate
2369 2369 * over to compute hash values, compare data, etc.
2370 2370 */
2371 2371 ndx = hashval % agb->dtagb_hashsize;
2372 2372
2373 2373 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2374 2374 ASSERT((caddr_t)key >= tomax);
2375 2375 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2376 2376
2377 2377 if (hashval != key->dtak_hashval || key->dtak_size != size)
2378 2378 continue;
2379 2379
2380 2380 kdata = key->dtak_data;
2381 2381 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2382 2382
2383 2383 for (act = agg->dtag_first; act->dta_intuple;
2384 2384 act = act->dta_next) {
2385 2385 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2386 2386 limit = i + act->dta_rec.dtrd_size;
2387 2387 ASSERT(limit <= size);
2388 2388 isstr = DTRACEACT_ISSTRING(act);
2389 2389
2390 2390 for (; i < limit; i++) {
2391 2391 if (kdata[i] != data[i])
2392 2392 goto next;
2393 2393
2394 2394 if (isstr && data[i] == '\0')
2395 2395 break;
2396 2396 }
2397 2397 }
2398 2398
2399 2399 if (action != key->dtak_action) {
2400 2400 /*
2401 2401 * We are aggregating on the same value in the same
2402 2402 * aggregation with two different aggregating actions.
2403 2403 * (This should have been picked up in the compiler,
2404 2404 * so we may be dealing with errant or devious DIF.)
2405 2405 * This is an error condition; we indicate as much,
2406 2406 * and return.
2407 2407 */
2408 2408 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2409 2409 return;
2410 2410 }
2411 2411
2412 2412 /*
2413 2413 * This is a hit: we need to apply the aggregator to
2414 2414 * the value at this key.
2415 2415 */
2416 2416 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2417 2417 return;
2418 2418 next:
2419 2419 continue;
2420 2420 }
2421 2421
2422 2422 /*
2423 2423 * We didn't find it. We need to allocate some zero-filled space,
2424 2424 * link it into the hash table appropriately, and apply the aggregator
2425 2425 * to the (zero-filled) value.
2426 2426 */
2427 2427 offs = buf->dtb_offset;
2428 2428 while (offs & (align - 1))
2429 2429 offs += sizeof (uint32_t);
2430 2430
2431 2431 /*
2432 2432 * If we don't have enough room to both allocate a new key _and_
2433 2433 * its associated data, increment the drop count and return.
2434 2434 */
2435 2435 if ((uintptr_t)tomax + offs + fsize >
2436 2436 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2437 2437 dtrace_buffer_drop(buf);
2438 2438 return;
2439 2439 }
2440 2440
2441 2441 /*CONSTCOND*/
2442 2442 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2443 2443 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2444 2444 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2445 2445
2446 2446 key->dtak_data = kdata = tomax + offs;
2447 2447 buf->dtb_offset = offs + fsize;
2448 2448
2449 2449 /*
2450 2450 * Now copy the data across.
2451 2451 */
2452 2452 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2453 2453
2454 2454 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2455 2455 kdata[i] = data[i];
2456 2456
2457 2457 /*
2458 2458 * Because strings are not zeroed out by default, we need to iterate
2459 2459 * looking for actions that store strings, and we need to explicitly
2460 2460 * pad these strings out with zeroes.
2461 2461 */
2462 2462 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2463 2463 int nul;
2464 2464
2465 2465 if (!DTRACEACT_ISSTRING(act))
2466 2466 continue;
2467 2467
2468 2468 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2469 2469 limit = i + act->dta_rec.dtrd_size;
2470 2470 ASSERT(limit <= size);
2471 2471
2472 2472 for (nul = 0; i < limit; i++) {
2473 2473 if (nul) {
2474 2474 kdata[i] = '\0';
2475 2475 continue;
2476 2476 }
2477 2477
2478 2478 if (data[i] != '\0')
2479 2479 continue;
2480 2480
2481 2481 nul = 1;
2482 2482 }
2483 2483 }
2484 2484
2485 2485 for (i = size; i < fsize; i++)
2486 2486 kdata[i] = 0;
2487 2487
2488 2488 key->dtak_hashval = hashval;
2489 2489 key->dtak_size = size;
2490 2490 key->dtak_action = action;
2491 2491 key->dtak_next = agb->dtagb_hash[ndx];
2492 2492 agb->dtagb_hash[ndx] = key;
2493 2493
2494 2494 /*
2495 2495 * Finally, apply the aggregator.
2496 2496 */
2497 2497 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2498 2498 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2499 2499 }
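
The net effect is a buffer that fills from both ends: records (the aggid, then the tuple data, then the running value) grow up from the bottom as dtb_offset advances, while dtrace_aggkey_t structures grow down from the hash array and metastructure pinned at the top, with dtrace_buffer_drop() firing when the two would meet. Roughly (a sketch, not to scale):

    tomax                                            tomax + dtb_size
      [aggid|tuple|value] [aggid|tuple|value] -->
                  <-- [dtak] [dtak] | dtagb_hash[] | dtrace_aggbuffer_t
      (dtb_offset grows upward)       (dtagb_free grows downward)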
2500 2500
2501 2501 /*
2502 2502 * Given consumer state, this routine finds a speculation in the INACTIVE
2503 2503 * state and transitions it into the ACTIVE state. If there is no speculation
2504 2504 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2505 2505 * incremented -- it is up to the caller to take appropriate action.
2506 2506 */
2507 2507 static int
2508 2508 dtrace_speculation(dtrace_state_t *state)
2509 2509 {
2510 2510 int i = 0;
2511 2511 dtrace_speculation_state_t current;
2512 2512 uint32_t *stat = &state->dts_speculations_unavail, count;
2513 2513
2514 2514 while (i < state->dts_nspeculations) {
2515 2515 dtrace_speculation_t *spec = &state->dts_speculations[i];
2516 2516
2517 2517 current = spec->dtsp_state;
2518 2518
2519 2519 if (current != DTRACESPEC_INACTIVE) {
2520 2520 if (current == DTRACESPEC_COMMITTINGMANY ||
2521 2521 current == DTRACESPEC_COMMITTING ||
2522 2522 current == DTRACESPEC_DISCARDING)
2523 2523 stat = &state->dts_speculations_busy;
2524 2524 i++;
2525 2525 continue;
2526 2526 }
2527 2527
2528 2528 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2529 2529 current, DTRACESPEC_ACTIVE) == current)
2530 2530 return (i + 1);
2531 2531 }
2532 2532
2533 2533 /*
2534 2534 * We couldn't find a speculation. If we found as much as a single
2535 2535 * busy speculation buffer, we'll attribute this failure as "busy"
2536 2536 * instead of "unavail".
2537 2537 */
2538 2538 do {
2539 2539 count = *stat;
2540 2540 } while (dtrace_cas32(stat, count, count + 1) != count);
2541 2541
2542 2542 return (0);
2543 2543 }
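
A user-land model of the claim loop, with a C11 CAS standing in for dtrace_cas32() (names hypothetical). As in the kernel loop, a CAS failure does not advance the index: the slot is simply re-examined, since the racing winner may have left it in any state:

    #include <stdatomic.h>
    #include <stdint.h>

    enum { SPEC_INACTIVE = 0, SPEC_ACTIVE = 1 };

    /* Returns a 1-based speculation id, or 0 if none was available. */
    static int
    spec_claim(_Atomic uint32_t *states, int nspec)
    {
            int i = 0;
            uint32_t cur;

            while (i < nspec) {
                    cur = atomic_load(&states[i]);

                    if (cur != SPEC_INACTIVE) {
                            i++;            /* busy; move to the next slot */
                            continue;
                    }

                    if (atomic_compare_exchange_strong(&states[i],
                        &cur, SPEC_ACTIVE))
                            return (i + 1);
            }

            return (0);
    }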
2544 2544
2545 2545 /*
2546 2546 * This routine commits an active speculation. If the specified speculation
2547 2547 * is not in a valid state to perform a commit(), this routine will silently do
2548 2548 * nothing. The state of the specified speculation is transitioned according
2549 2549 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2550 2550 */
2551 2551 static void
2552 2552 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2553 2553 dtrace_specid_t which)
2554 2554 {
2555 2555 dtrace_speculation_t *spec;
2556 2556 dtrace_buffer_t *src, *dest;
2557 2557 uintptr_t daddr, saddr, dlimit, slimit;
2558 2558 dtrace_speculation_state_t current, new;
2559 2559 intptr_t offs;
2560 2560 uint64_t timestamp;
2561 2561
2562 2562 if (which == 0)
2563 2563 return;
2564 2564
2565 2565 if (which > state->dts_nspeculations) {
2566 2566 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2567 2567 return;
2568 2568 }
2569 2569
2570 2570 spec = &state->dts_speculations[which - 1];
2571 2571 src = &spec->dtsp_buffer[cpu];
2572 2572 dest = &state->dts_buffer[cpu];
2573 2573
2574 2574 do {
2575 2575 current = spec->dtsp_state;
2576 2576
2577 2577 if (current == DTRACESPEC_COMMITTINGMANY)
2578 2578 break;
2579 2579
2580 2580 switch (current) {
2581 2581 case DTRACESPEC_INACTIVE:
2582 2582 case DTRACESPEC_DISCARDING:
2583 2583 return;
2584 2584
2585 2585 case DTRACESPEC_COMMITTING:
2586 2586 /*
2587 2587 * This is only possible if we are (a) commit()'ing
2588 2588 * without having done a prior speculate() on this CPU
2589 2589 * and (b) racing with another commit() on a different
2590 2590 * CPU. There's nothing to do -- we just assert that
2591 2591 * our offset is 0.
2592 2592 */
2593 2593 ASSERT(src->dtb_offset == 0);
2594 2594 return;
2595 2595
2596 2596 case DTRACESPEC_ACTIVE:
2597 2597 new = DTRACESPEC_COMMITTING;
2598 2598 break;
2599 2599
2600 2600 case DTRACESPEC_ACTIVEONE:
2601 2601 /*
2602 2602 * This speculation is active on one CPU. If our
2603 2603 * buffer offset is non-zero, we know that the one CPU
2604 2604 * must be us. Otherwise, we are committing on a
2605 2605 * different CPU from the speculate(), and we must
2606 2606 * rely on being asynchronously cleaned.
2607 2607 */
2608 2608 if (src->dtb_offset != 0) {
2609 2609 new = DTRACESPEC_COMMITTING;
2610 2610 break;
2611 2611 }
2612 2612 /*FALLTHROUGH*/
2613 2613
2614 2614 case DTRACESPEC_ACTIVEMANY:
2615 2615 new = DTRACESPEC_COMMITTINGMANY;
2616 2616 break;
2617 2617
2618 2618 default:
2619 2619 ASSERT(0);
2620 2620 }
2621 2621 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2622 2622 current, new) != current);
2623 2623
2624 2624 /*
2625 2625 * We have set the state to indicate that we are committing this
2626 2626 * speculation. Now reserve the necessary space in the destination
2627 2627 * buffer.
2628 2628 */
2629 2629 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2630 2630 sizeof (uint64_t), state, NULL)) < 0) {
2631 2631 dtrace_buffer_drop(dest);
2632 2632 goto out;
2633 2633 }
2634 2634
2635 2635 /*
2636 2636 * We have sufficient space to copy the speculative buffer into the
2637 2637 * primary buffer. First, modify the speculative buffer, filling
2638 2638 * in the timestamp of all entries with the current time. The data
2639 2639 * must have the commit() time rather than the time it was traced,
2640 2640 * so that all entries in the primary buffer are in timestamp order.
2641 2641 */
2642 2642 timestamp = dtrace_gethrtime();
2643 2643 saddr = (uintptr_t)src->dtb_tomax;
2644 2644 slimit = saddr + src->dtb_offset;
2645 2645 while (saddr < slimit) {
2646 2646 size_t size;
2647 2647 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2648 2648
2649 2649 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2650 2650 saddr += sizeof (dtrace_epid_t);
2651 2651 continue;
2652 2652 }
2653 2653 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2654 2654 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2655 2655
2656 2656 ASSERT3U(saddr + size, <=, slimit);
2657 2657 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2658 2658 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2659 2659
2660 2660 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2661 2661
2662 2662 saddr += size;
2663 2663 }
2664 2664
2665 2665 /*
2666 2666 * Copy the buffer across. (Note that this is a
2667 2667 * highly suboptimal bcopy(); in the unlikely event that this becomes
2668 2668 * a serious performance issue, a high-performance DTrace-specific
2669 2669 * bcopy() should obviously be invented.)
2670 2670 */
2671 2671 daddr = (uintptr_t)dest->dtb_tomax + offs;
2672 2672 dlimit = daddr + src->dtb_offset;
2673 2673 saddr = (uintptr_t)src->dtb_tomax;
2674 2674
2675 2675 /*
2676 2676 * First, the aligned portion.
2677 2677 */
2678 2678 while (dlimit - daddr >= sizeof (uint64_t)) {
2679 2679 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2680 2680
2681 2681 daddr += sizeof (uint64_t);
2682 2682 saddr += sizeof (uint64_t);
2683 2683 }
2684 2684
2685 2685 /*
2686 2686 * Now any left-over bit...
2687 2687 */
2688 2688 while (dlimit - daddr)
2689 2689 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2690 2690
2691 2691 /*
2692 2692 * Finally, commit the reserved space in the destination buffer.
2693 2693 */
2694 2694 dest->dtb_offset = offs + src->dtb_offset;
2695 2695
2696 2696 out:
2697 2697 /*
2698 2698 * If we're lucky enough to be the only active CPU on this speculation
2699 2699 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2700 2700 */
2701 2701 if (current == DTRACESPEC_ACTIVE ||
2702 2702 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2703 2703 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2704 2704 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2705 2705
2706 2706 ASSERT(rval == DTRACESPEC_COMMITTING);
2707 2707 }
2708 2708
2709 2709 src->dtb_offset = 0;
2710 2710 src->dtb_xamot_drops += src->dtb_drops;
2711 2711 src->dtb_drops = 0;
2712 2712 }
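
The copy loop in isolation: whole 64-bit words while a full word remains, then single bytes for the tail. (A sketch; it assumes the destination shares the source's 8-byte alignment, which the dtrace_buffer_reserve() call above guarantees by reserving with sizeof (uint64_t) alignment.)

    #include <stddef.h>
    #include <stdint.h>

    static void
    word_then_byte_copy(void *dst, const void *src, size_t len)
    {
            uintptr_t d = (uintptr_t)dst, s = (uintptr_t)src;
            uintptr_t dlimit = d + len;

            /* First, the aligned portion... */
            while (dlimit - d >= sizeof (uint64_t)) {
                    *(uint64_t *)d = *(uint64_t *)s;
                    d += sizeof (uint64_t);
                    s += sizeof (uint64_t);
            }

            /* ...then any left-over bytes. */
            while (dlimit - d)
                    *(uint8_t *)d++ = *(uint8_t *)s++;
    }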
2713 2713
2714 2714 /*
2715 2715 * This routine discards an active speculation. If the specified speculation
2716 2716 * is not in a valid state to perform a discard(), this routine will silently
2717 2717 * do nothing. The state of the specified speculation is transitioned
2718 2718 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2719 2719 */
2720 2720 static void
2721 2721 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2722 2722 dtrace_specid_t which)
2723 2723 {
2724 2724 dtrace_speculation_t *spec;
2725 2725 dtrace_speculation_state_t current, new;
2726 2726 dtrace_buffer_t *buf;
2727 2727
2728 2728 if (which == 0)
2729 2729 return;
2730 2730
2731 2731 if (which > state->dts_nspeculations) {
2732 2732 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2733 2733 return;
2734 2734 }
2735 2735
2736 2736 spec = &state->dts_speculations[which - 1];
2737 2737 buf = &spec->dtsp_buffer[cpu];
2738 2738
2739 2739 do {
2740 2740 current = spec->dtsp_state;
2741 2741
2742 2742 switch (current) {
2743 2743 case DTRACESPEC_INACTIVE:
2744 2744 case DTRACESPEC_COMMITTINGMANY:
2745 2745 case DTRACESPEC_COMMITTING:
2746 2746 case DTRACESPEC_DISCARDING:
2747 2747 return;
2748 2748
2749 2749 case DTRACESPEC_ACTIVE:
2750 2750 case DTRACESPEC_ACTIVEMANY:
2751 2751 new = DTRACESPEC_DISCARDING;
2752 2752 break;
2753 2753
2754 2754 case DTRACESPEC_ACTIVEONE:
2755 2755 if (buf->dtb_offset != 0) {
2756 2756 new = DTRACESPEC_INACTIVE;
2757 2757 } else {
2758 2758 new = DTRACESPEC_DISCARDING;
2759 2759 }
2760 2760 break;
2761 2761
2762 2762 default:
2763 2763 ASSERT(0);
2764 2764 }
2765 2765 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2766 2766 current, new) != current);
2767 2767
2768 2768 buf->dtb_offset = 0;
2769 2769 buf->dtb_drops = 0;
2770 2770 }
2771 2771
2772 2772 /*
2773 2773 * Note: not called from probe context. This function is called
2774 2774 * asynchronously from cross call context to clean any speculations that are
2775 2775 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2776 2776 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2777 2777 * speculation.
2778 2778 */
2779 2779 static void
2780 2780 dtrace_speculation_clean_here(dtrace_state_t *state)
2781 2781 {
2782 2782 dtrace_icookie_t cookie;
2783 2783 processorid_t cpu = CPU->cpu_id;
2784 2784 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2785 2785 dtrace_specid_t i;
2786 2786
2787 2787 cookie = dtrace_interrupt_disable();
2788 2788
2789 2789 if (dest->dtb_tomax == NULL) {
2790 2790 dtrace_interrupt_enable(cookie);
2791 2791 return;
2792 2792 }
2793 2793
2794 2794 for (i = 0; i < state->dts_nspeculations; i++) {
2795 2795 dtrace_speculation_t *spec = &state->dts_speculations[i];
2796 2796 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2797 2797
2798 2798 if (src->dtb_tomax == NULL)
2799 2799 continue;
2800 2800
2801 2801 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2802 2802 src->dtb_offset = 0;
2803 2803 continue;
2804 2804 }
2805 2805
2806 2806 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2807 2807 continue;
2808 2808
2809 2809 if (src->dtb_offset == 0)
2810 2810 continue;
2811 2811
2812 2812 dtrace_speculation_commit(state, cpu, i + 1);
2813 2813 }
2814 2814
2815 2815 dtrace_interrupt_enable(cookie);
2816 2816 }
2817 2817
2818 2818 /*
2819 2819 * Note: not called from probe context. This function is called
2820 2820 * asynchronously (and at a regular interval) to clean any speculations that
2821 2821 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2822 2822 * is work to be done, it cross calls all CPUs to perform that work;
2823 2823 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2824 2824 * INACTIVE state until they have been cleaned by all CPUs.
2825 2825 */
2826 2826 static void
2827 2827 dtrace_speculation_clean(dtrace_state_t *state)
2828 2828 {
2829 2829 int work = 0, rv;
2830 2830 dtrace_specid_t i;
2831 2831
2832 2832 for (i = 0; i < state->dts_nspeculations; i++) {
2833 2833 dtrace_speculation_t *spec = &state->dts_speculations[i];
2834 2834
2835 2835 ASSERT(!spec->dtsp_cleaning);
2836 2836
2837 2837 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2838 2838 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2839 2839 continue;
2840 2840
2841 2841 work++;
2842 2842 spec->dtsp_cleaning = 1;
2843 2843 }
2844 2844
2845 2845 if (!work)
2846 2846 return;
2847 2847
2848 2848 dtrace_xcall(DTRACE_CPUALL,
2849 2849 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2850 2850
2851 2851 /*
2852 2852 * We now know that all CPUs have committed or discarded their
2853 2853 * speculation buffers, as appropriate. We can now set the state
2854 2854 * to inactive.
2855 2855 */
2856 2856 for (i = 0; i < state->dts_nspeculations; i++) {
2857 2857 dtrace_speculation_t *spec = &state->dts_speculations[i];
2858 2858 dtrace_speculation_state_t current, new;
2859 2859
2860 2860 if (!spec->dtsp_cleaning)
2861 2861 continue;
2862 2862
2863 2863 current = spec->dtsp_state;
2864 2864 ASSERT(current == DTRACESPEC_DISCARDING ||
2865 2865 current == DTRACESPEC_COMMITTINGMANY);
2866 2866
2867 2867 new = DTRACESPEC_INACTIVE;
2868 2868
2869 2869 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2870 2870 ASSERT(rv == current);
2871 2871 spec->dtsp_cleaning = 0;
2872 2872 }
2873 2873 }
2874 2874
2875 2875 /*
2876 2876 * Called as part of a speculate() to get the speculative buffer associated
2877 2877 * with a given speculation. Returns NULL if the specified speculation is not
2878 2878 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2879 2879 * the active CPU is not the specified CPU -- the speculation will be
2880 2880 * atomically transitioned into the ACTIVEMANY state.
2881 2881 */
2882 2882 static dtrace_buffer_t *
2883 2883 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2884 2884 dtrace_specid_t which)
2885 2885 {
2886 2886 dtrace_speculation_t *spec;
2887 2887 dtrace_speculation_state_t current, new;
2888 2888 dtrace_buffer_t *buf;
2889 2889
2890 2890 if (which == 0)
2891 2891 return (NULL);
2892 2892
2893 2893 if (which > state->dts_nspeculations) {
2894 2894 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2895 2895 return (NULL);
2896 2896 }
2897 2897
2898 2898 spec = &state->dts_speculations[which - 1];
2899 2899 buf = &spec->dtsp_buffer[cpuid];
2900 2900
2901 2901 do {
2902 2902 current = spec->dtsp_state;
2903 2903
2904 2904 switch (current) {
2905 2905 case DTRACESPEC_INACTIVE:
2906 2906 case DTRACESPEC_COMMITTINGMANY:
2907 2907 case DTRACESPEC_DISCARDING:
2908 2908 return (NULL);
2909 2909
2910 2910 case DTRACESPEC_COMMITTING:
2911 2911 ASSERT(buf->dtb_offset == 0);
2912 2912 return (NULL);
2913 2913
2914 2914 case DTRACESPEC_ACTIVEONE:
2915 2915 /*
2916 2916 * This speculation is currently active on one CPU.
2917 2917 * Check the offset in the buffer; if it's non-zero,
2918 2918 * that CPU must be us (and we leave the state alone).
2919 2919 * If it's zero, assume that we're starting on a new
2920 2920 * CPU -- and change the state to indicate that the
2921 2921 * speculation is active on more than one CPU.
2922 2922 */
2923 2923 if (buf->dtb_offset != 0)
2924 2924 return (buf);
2925 2925
2926 2926 new = DTRACESPEC_ACTIVEMANY;
2927 2927 break;
2928 2928
2929 2929 case DTRACESPEC_ACTIVEMANY:
2930 2930 return (buf);
2931 2931
2932 2932 case DTRACESPEC_ACTIVE:
2933 2933 new = DTRACESPEC_ACTIVEONE;
2934 2934 break;
2935 2935
2936 2936 default:
2937 2937 ASSERT(0);
2938 2938 }
2939 2939 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2940 2940 current, new) != current);
2941 2941
2942 2942 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2943 2943 return (buf);
2944 2944 }
2945 2945
2946 2946 /*
2947 2947 * Return a string. In the event that the user lacks the privilege to access
2948 2948 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2949 2949 * don't fail access checking.
2950 2950 *
2951 2951 * dtrace_dif_variable() uses this routine as a helper for various
2952 2952 * builtin values such as 'execname' and 'probefunc.'
2953 2953 */
2954 2954 uintptr_t
2955 2955 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2956 2956 dtrace_mstate_t *mstate)
2957 2957 {
2958 2958 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2959 2959 uintptr_t ret;
2960 2960 size_t strsz;
2961 2961
2962 2962 /*
2963 2963 * The easy case: this probe is allowed to read all of memory, so
2964 2964 * we can just return this as a vanilla pointer.
2965 2965 */
2966 2966 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2967 2967 return (addr);
2968 2968
2969 2969 /*
2970 2970 * This is the tougher case: we copy the string in question from
2971 2971 * kernel memory into scratch memory and return it that way: this
2972 2972 * ensures that we won't trip up when access checking tests the
2973 2973 * BYREF return value.
2974 2974 */
2975 2975 strsz = dtrace_strlen((char *)addr, size) + 1;
2976 2976
2977 2977 if (mstate->dtms_scratch_ptr + strsz >
2978 2978 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2979 2979 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2980 2980 return (NULL);
2981 2981 }
2982 2982
2983 2983 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2984 2984 strsz);
2985 2985 ret = mstate->dtms_scratch_ptr;
2986 2986 mstate->dtms_scratch_ptr += strsz;
2987 2987 return (ret);
2988 2988 }
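
The bounds-check-then-bump pattern above recurs wherever DIF consumes scratch space. In miniature (a hypothetical helper; the int flag stands in for DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH)):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns a pointer for sz bytes of scratch, or 0 -- setting
     * *noscratch -- when the request would run past the region. */
    static uintptr_t
    scratch_alloc(uintptr_t *ptrp, uintptr_t base, size_t size,
        size_t sz, int *noscratch)
    {
            uintptr_t ret = *ptrp;

            if (ret + sz > base + size) {
                    *noscratch = 1;
                    return (0);
            }

            *ptrp += sz;
            return (ret);
    }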
2989 2989
2990 2990 /*
2991 2991 * This function implements the DIF emulator's variable lookups. The emulator
2992 2992 * passes a reserved variable identifier and optional built-in array index.
2993 2993 */
2994 2994 static uint64_t
2995 2995 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2996 2996 uint64_t ndx)
2997 2997 {
2998 2998 /*
2999 2999 * If we're accessing one of the uncached arguments, we'll turn this
3000 3000 * into a reference in the args array.
3001 3001 */
3002 3002 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3003 3003 ndx = v - DIF_VAR_ARG0;
3004 3004 v = DIF_VAR_ARGS;
3005 3005 }
3006 3006
3007 3007 switch (v) {
3008 3008 case DIF_VAR_ARGS:
3009 3009 if (!(mstate->dtms_access & DTRACE_ACCESS_ARGS)) {
3010 3010 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |=
3011 3011 CPU_DTRACE_KPRIV;
3012 3012 return (0);
3013 3013 }
3014 3014
3015 3015 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3016 3016 if (ndx >= sizeof (mstate->dtms_arg) /
3017 3017 sizeof (mstate->dtms_arg[0])) {
3018 3018 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3019 3019 dtrace_provider_t *pv;
3020 3020 uint64_t val;
3021 3021
3022 3022 pv = mstate->dtms_probe->dtpr_provider;
3023 3023 if (pv->dtpv_pops.dtps_getargval != NULL)
3024 3024 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3025 3025 mstate->dtms_probe->dtpr_id,
3026 3026 mstate->dtms_probe->dtpr_arg, ndx, aframes);
3027 3027 else
3028 3028 val = dtrace_getarg(ndx, aframes);
3029 3029
3030 3030 /*
3031 3031 * This is regrettably required to keep the compiler
3032 3032 * from tail-optimizing the call to dtrace_getarg().
3033 3033 * The condition always evaluates to true, but the
3034 3034 * compiler has no way of figuring that out a priori.
3035 3035 * (None of this would be necessary if the compiler
3036 3036 * could be relied upon to _always_ tail-optimize
3037 3037 * the call to dtrace_getarg() -- but it can't.)
3038 3038 */
3039 3039 if (mstate->dtms_probe != NULL)
3040 3040 return (val);
3041 3041
3042 3042 ASSERT(0);
3043 3043 }
3044 3044
3045 3045 return (mstate->dtms_arg[ndx]);
3046 3046
3047 3047 case DIF_VAR_UREGS: {
3048 3048 klwp_t *lwp;
3049 3049
3050 3050 if (!dtrace_priv_proc(state, mstate))
3051 3051 return (0);
3052 3052
3053 3053 if ((lwp = curthread->t_lwp) == NULL) {
3054 3054 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3055 3055 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
3056 3056 return (0);
3057 3057 }
3058 3058
3059 3059 return (dtrace_getreg(lwp->lwp_regs, ndx));
3060 3060 }
3061 3061
3062 3062 case DIF_VAR_VMREGS: {
3063 3063 uint64_t rval;
3064 3064
3065 3065 if (!dtrace_priv_kernel(state))
3066 3066 return (0);
3067 3067
3068 3068 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3069 3069
3070 3070 rval = dtrace_getvmreg(ndx,
3071 3071 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags);
3072 3072
3073 3073 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3074 3074
3075 3075 return (rval);
3076 3076 }
3077 3077
3078 3078 case DIF_VAR_CURTHREAD:
3079 3079 if (!dtrace_priv_proc(state, mstate))
3080 3080 return (0);
3081 3081 return ((uint64_t)(uintptr_t)curthread);
3082 3082
3083 3083 case DIF_VAR_TIMESTAMP:
3084 3084 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3085 3085 mstate->dtms_timestamp = dtrace_gethrtime();
3086 3086 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3087 3087 }
3088 3088 return (mstate->dtms_timestamp);
3089 3089
3090 3090 case DIF_VAR_VTIMESTAMP:
3091 3091 ASSERT(dtrace_vtime_references != 0);
3092 3092 return (curthread->t_dtrace_vtime);
3093 3093
3094 3094 case DIF_VAR_WALLTIMESTAMP:
3095 3095 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3096 3096 mstate->dtms_walltimestamp = dtrace_gethrestime();
3097 3097 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3098 3098 }
3099 3099 return (mstate->dtms_walltimestamp);
3100 3100
3101 3101 case DIF_VAR_IPL:
3102 3102 if (!dtrace_priv_kernel(state))
3103 3103 return (0);
3104 3104 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3105 3105 mstate->dtms_ipl = dtrace_getipl();
3106 3106 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3107 3107 }
3108 3108 return (mstate->dtms_ipl);
3109 3109
3110 3110 case DIF_VAR_EPID:
3111 3111 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3112 3112 return (mstate->dtms_epid);
3113 3113
3114 3114 case DIF_VAR_ID:
3115 3115 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3116 3116 return (mstate->dtms_probe->dtpr_id);
3117 3117
3118 3118 case DIF_VAR_STACKDEPTH:
3119 3119 if (!dtrace_priv_kernel(state))
3120 3120 return (0);
3121 3121 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3122 3122 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3123 3123
3124 3124 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3125 3125 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3126 3126 }
3127 3127 return (mstate->dtms_stackdepth);
3128 3128
3129 3129 case DIF_VAR_USTACKDEPTH:
3130 3130 if (!dtrace_priv_proc(state, mstate))
3131 3131 return (0);
3132 3132 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3133 3133 /*
3134 3134 * See comment in DIF_VAR_PID.
3135 3135 */
3136 3136 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3137 3137 CPU_ON_INTR(CPU)) {
3138 3138 mstate->dtms_ustackdepth = 0;
3139 3139 } else {
3140 3140 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3141 3141 mstate->dtms_ustackdepth =
3142 3142 dtrace_getustackdepth();
3143 3143 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3144 3144 }
3145 3145 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3146 3146 }
3147 3147 return (mstate->dtms_ustackdepth);
3148 3148
3149 3149 case DIF_VAR_CALLER:
3150 3150 if (!dtrace_priv_kernel(state))
3151 3151 return (0);
3152 3152 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3153 3153 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3154 3154
3155 3155 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3156 3156 /*
3157 3157 * If this is an unanchored probe, we are
3158 3158 * required to go through the slow path:
3159 3159 * dtrace_caller() only guarantees correct
3160 3160 * results for anchored probes.
3161 3161 */
3162 3162 pc_t caller[2];
3163 3163
3164 3164 dtrace_getpcstack(caller, 2, aframes,
3165 3165 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3166 3166 mstate->dtms_caller = caller[1];
3167 3167 } else if ((mstate->dtms_caller =
3168 3168 dtrace_caller(aframes)) == -1) {
3169 3169 /*
3170 3170 * We have failed to do this the quick way;
3171 3171 * we must resort to the slower approach of
3172 3172 * calling dtrace_getpcstack().
3173 3173 */
3174 3174 pc_t caller;
3175 3175
3176 3176 dtrace_getpcstack(&caller, 1, aframes, NULL);
3177 3177 mstate->dtms_caller = caller;
3178 3178 }
3179 3179
3180 3180 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3181 3181 }
3182 3182 return (mstate->dtms_caller);
3183 3183
3184 3184 case DIF_VAR_UCALLER:
3185 3185 if (!dtrace_priv_proc(state, mstate))
3186 3186 return (0);
3187 3187
3188 3188 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3189 3189 uint64_t ustack[3];
3190 3190
3191 3191 /*
3192 3192 * dtrace_getupcstack() fills in the first uint64_t
3193 3193 * with the current PID. The second uint64_t will
3194 3194 * be the program counter at user-level. The third
3195 3195 * uint64_t will contain the caller, which is what
3196 3196 * we're after.
3197 3197 */
3198 3198 ustack[2] = NULL;
3199 3199 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3200 3200 dtrace_getupcstack(ustack, 3);
3201 3201 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3202 3202 mstate->dtms_ucaller = ustack[2];
3203 3203 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3204 3204 }
3205 3205
3206 3206 return (mstate->dtms_ucaller);
3207 3207
3208 3208 case DIF_VAR_PROBEPROV:
3209 3209 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3210 3210 return (dtrace_dif_varstr(
3211 3211 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3212 3212 state, mstate));
3213 3213
3214 3214 case DIF_VAR_PROBEMOD:
3215 3215 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3216 3216 return (dtrace_dif_varstr(
3217 3217 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3218 3218 state, mstate));
3219 3219
3220 3220 case DIF_VAR_PROBEFUNC:
3221 3221 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3222 3222 return (dtrace_dif_varstr(
3223 3223 (uintptr_t)mstate->dtms_probe->dtpr_func,
3224 3224 state, mstate));
3225 3225
3226 3226 case DIF_VAR_PROBENAME:
3227 3227 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3228 3228 return (dtrace_dif_varstr(
3229 3229 (uintptr_t)mstate->dtms_probe->dtpr_name,
3230 3230 state, mstate));
3231 3231
3232 3232 case DIF_VAR_PID:
3233 3233 if (!dtrace_priv_proc(state, mstate))
3234 3234 return (0);
3235 3235
3236 3236 /*
3237 3237 * Note that we are assuming that an unanchored probe is
3238 3238 * always due to a high-level interrupt. (And we're assuming
3239 3239 * that there is only a single high level interrupt.)
3240 3240 */
3241 3241 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3242 3242 return (pid0.pid_id);
3243 3243
3244 3244 /*
3245 3245 * It is always safe to dereference one's own t_procp pointer:
3246 3246 * it always points to a valid, allocated proc structure.
3247 3247 * Further, it is always safe to dereference the p_pidp member
3248 3248 			 * of one's own proc structure. (These are truisms because
3249 3249 * threads and processes don't clean up their own state --
3250 3250 * they leave that task to whomever reaps them.)
3251 3251 */
3252 3252 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3253 3253
3254 3254 case DIF_VAR_PPID:
3255 3255 if (!dtrace_priv_proc(state, mstate))
3256 3256 return (0);
3257 3257
3258 3258 /*
3259 3259 * See comment in DIF_VAR_PID.
3260 3260 */
3261 3261 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3262 3262 return (pid0.pid_id);
3263 3263
3264 3264 /*
3265 3265 * It is always safe to dereference one's own t_procp pointer:
3266 3266 * it always points to a valid, allocated proc structure.
3267 3267 * (This is true because threads don't clean up their own
3268 3268 * state -- they leave that task to whomever reaps them.)
3269 3269 */
3270 3270 return ((uint64_t)curthread->t_procp->p_ppid);
3271 3271
3272 3272 case DIF_VAR_TID:
3273 3273 /*
3274 3274 * See comment in DIF_VAR_PID.
3275 3275 */
3276 3276 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3277 3277 return (0);
3278 3278
3279 3279 return ((uint64_t)curthread->t_tid);
3280 3280
3281 3281 case DIF_VAR_EXECNAME:
3282 3282 if (!dtrace_priv_proc(state, mstate))
3283 3283 return (0);
3284 3284
3285 3285 /*
3286 3286 * See comment in DIF_VAR_PID.
3287 3287 */
3288 3288 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3289 3289 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3290 3290
3291 3291 /*
3292 3292 * It is always safe to dereference one's own t_procp pointer:
3293 3293 * it always points to a valid, allocated proc structure.
3294 3294 * (This is true because threads don't clean up their own
3295 3295 * state -- they leave that task to whomever reaps them.)
3296 3296 */
3297 3297 return (dtrace_dif_varstr(
3298 3298 (uintptr_t)curthread->t_procp->p_user.u_comm,
3299 3299 state, mstate));
3300 3300
3301 3301 case DIF_VAR_ZONENAME:
3302 3302 if (!dtrace_priv_proc(state, mstate))
3303 3303 return (0);
3304 3304
3305 3305 /*
3306 3306 * See comment in DIF_VAR_PID.
3307 3307 */
3308 3308 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3309 3309 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3310 3310
3311 3311 /*
3312 3312 * It is always safe to dereference one's own t_procp pointer:
3313 3313 * it always points to a valid, allocated proc structure.
3314 3314 * (This is true because threads don't clean up their own
3315 3315 * state -- they leave that task to whomever reaps them.)
3316 3316 */
3317 3317 return (dtrace_dif_varstr(
3318 3318 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3319 3319 state, mstate));
3320 3320
3321 3321 case DIF_VAR_UID:
3322 3322 if (!dtrace_priv_proc(state, mstate))
3323 3323 return (0);
3324 3324
3325 3325 /*
3326 3326 * See comment in DIF_VAR_PID.
3327 3327 */
3328 3328 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3329 3329 return ((uint64_t)p0.p_cred->cr_uid);
3330 3330
3331 3331 /*
3332 3332 * It is always safe to dereference one's own t_procp pointer:
3333 3333 * it always points to a valid, allocated proc structure.
3334 3334 * (This is true because threads don't clean up their own
3335 3335 * state -- they leave that task to whomever reaps them.)
3336 3336 *
3337 3337 * Additionally, it is safe to dereference one's own process
3338 3338 * credential, since this is never NULL after process birth.
3339 3339 */
3340 3340 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3341 3341
3342 3342 case DIF_VAR_GID:
3343 3343 if (!dtrace_priv_proc(state, mstate))
3344 3344 return (0);
3345 3345
3346 3346 /*
3347 3347 * See comment in DIF_VAR_PID.
3348 3348 */
3349 3349 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3350 3350 return ((uint64_t)p0.p_cred->cr_gid);
3351 3351
3352 3352 /*
3353 3353 * It is always safe to dereference one's own t_procp pointer:
3354 3354 * it always points to a valid, allocated proc structure.
3355 3355 * (This is true because threads don't clean up their own
3356 3356 * state -- they leave that task to whomever reaps them.)
3357 3357 *
3358 3358 * Additionally, it is safe to dereference one's own process
3359 3359 * credential, since this is never NULL after process birth.
3360 3360 */
3361 3361 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3362 3362
3363 3363 case DIF_VAR_ERRNO: {
3364 3364 klwp_t *lwp;
3365 3365 if (!dtrace_priv_proc(state, mstate))
3366 3366 return (0);
3367 3367
3368 3368 /*
3369 3369 * See comment in DIF_VAR_PID.
3370 3370 */
3371 3371 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3372 3372 return (0);
3373 3373
3374 3374 /*
3375 3375 * It is always safe to dereference one's own t_lwp pointer in
3376 3376 * the event that this pointer is non-NULL. (This is true
3377 3377 * because threads and lwps don't clean up their own state --
3378 3378 * they leave that task to whomever reaps them.)
3379 3379 */
3380 3380 if ((lwp = curthread->t_lwp) == NULL)
3381 3381 return (0);
3382 3382
3383 3383 return ((uint64_t)lwp->lwp_errno);
3384 3384 }
3385 3385 default:
3386 3386 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3387 3387 return (0);
3388 3388 }
3389 3389 }
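
/*
 * For illustration, a minimal (hypothetical) D clause that exercises several
 * of the built-in variables resolved in the switch above -- execname, pid
 * and tid:
 *
 *	syscall::read:entry
 *	/execname == "sshd"/
 *	{
 *		printf("pid %d, tid %d\n", pid, tid);
 *	}
 *
 * Each such reference is compiled to a DIF load of the corresponding
 * DIF_VAR_* identifier and is resolved by this function when the probe
 * fires.
 */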
3390 3390
3391 3391
3392 3392 typedef enum dtrace_json_state {
3393 3393 DTRACE_JSON_REST = 1,
3394 3394 DTRACE_JSON_OBJECT,
3395 3395 DTRACE_JSON_STRING,
3396 3396 DTRACE_JSON_STRING_ESCAPE,
3397 3397 DTRACE_JSON_STRING_ESCAPE_UNICODE,
3398 3398 DTRACE_JSON_COLON,
3399 3399 DTRACE_JSON_COMMA,
3400 3400 DTRACE_JSON_VALUE,
3401 3401 DTRACE_JSON_IDENTIFIER,
3402 3402 DTRACE_JSON_NUMBER,
3403 3403 DTRACE_JSON_NUMBER_FRAC,
3404 3404 DTRACE_JSON_NUMBER_EXP,
3405 3405 DTRACE_JSON_COLLECT_OBJECT
3406 3406 } dtrace_json_state_t;
3407 3407
3408 3408 /*
3409 3409 * This function possesses just enough knowledge about JSON to extract a single
3410 3410 * value from a JSON string and store it in the scratch buffer. It is able
3411 3411 * to extract nested object values, and members of arrays by index.
3412 3412 *
3413 3413 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3414 3414 * be looked up as we descend into the object tree. e.g.
3415 3415 *
3416 3416 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3417 3417 * with nelems = 5.
3418 3418 *
3419 3419 * The run time of this function must be bounded above by strsize to limit the
3420 3420 * amount of work done in probe context. As such, it is implemented as a
3421 3421 * simple state machine, reading one character at a time using safe loads
3422 3422 * until we find the requested element, hit a parsing error or run off the
3423 3423 * end of the object or string.
3424 3424 *
3425 3425 * As there is no way for a subroutine to return an error without interrupting
3426 3426 * clause execution, we simply return NULL in the event of a missing key or any
3427 3427 * other error condition. Each NULL return in this function is commented with
3428 3428 * the error condition it represents -- parsing or otherwise.
3429 3429 *
3430 3430 * The set of states for the state machine closely matches the JSON
3431 3431 * specification (http://json.org/). Briefly:
3432 3432 *
3433 3433 * DTRACE_JSON_REST:
3434 3434 * Skip whitespace until we find either a top-level Object, moving
3435 3435 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3436 3436 *
3437 3437 * DTRACE_JSON_OBJECT:
3438 3438 * Locate the next key String in an Object. Sets a flag to denote
3439 3439 * the next String as a key string and moves to DTRACE_JSON_STRING.
3440 3440 *
3441 3441 * DTRACE_JSON_COLON:
3442 3442 * Skip whitespace until we find the colon that separates key Strings
3443 3443 * from their values. Once found, move to DTRACE_JSON_VALUE.
3444 3444 *
3445 3445 * DTRACE_JSON_VALUE:
3446 3446 * Detects the type of the next value (String, Number, Identifier, Object
3447 3447 * or Array) and routes to the states that process that type. Here we also
3448 3448 * deal with the element selector list if we are requested to traverse down
3449 3449 * into the object tree.
3450 3450 *
3451 3451 * DTRACE_JSON_COMMA:
3452 3452 * Skip whitespace until we find the comma that separates key-value pairs
3453 3453 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3454 3454 * (similarly DTRACE_JSON_VALUE). All following literal value processing
3455 3455 * states return to this state at the end of their value, unless otherwise
3456 3456 * noted.
3457 3457 *
3458 3458 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3459 3459 * Processes a Number literal from the JSON, including any exponent
3460 3460 * component that may be present. Numbers are returned as strings, which
3461 3461 * may be passed to strtoll() if an integer is required.
3462 3462 *
3463 3463 * DTRACE_JSON_IDENTIFIER:
3464 3464 * Processes a "true", "false" or "null" literal in the JSON.
3465 3465 *
3466 3466 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3467 3467 * DTRACE_JSON_STRING_ESCAPE_UNICODE:
3468 3468 * Processes a String literal from the JSON, whether the String denotes
3469 3469 * a key, a value or part of a larger Object. Handles all escape sequences
3470 3470 * present in the specification, including four-digit unicode characters,
3471 3471 * but merely includes the escape sequence without converting it to the
3472 3472 * actual escaped character. If the String is flagged as a key, we
3473 3473 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3474 3474 *
3475 3475 * DTRACE_JSON_COLLECT_OBJECT:
3476 3476 * This state collects an entire Object (or Array), correctly handling
3477 3477 * embedded strings. If the full element selector list matches this nested
3478 3478 * object, we return the Object in full as a string. If not, we use this
3479 3479 * state to skip to the next value at this level and continue processing.
3480 3480 *
3481 3481 * NOTE: This function uses various macros from strtolctype.h to manipulate
3482 3482 * digit values, etc -- these have all been checked to ensure they make
3483 3483 * no additional function calls.
3484 3484 */
3485 3485 static char *
3486 3486 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3487 3487 char *dest)
3488 3488 {
3489 3489 dtrace_json_state_t state = DTRACE_JSON_REST;
3490 3490 int64_t array_elem = INT64_MIN;
3491 3491 int64_t array_pos = 0;
3492 3492 uint8_t escape_unicount = 0;
3493 3493 boolean_t string_is_key = B_FALSE;
3494 3494 boolean_t collect_object = B_FALSE;
3495 3495 boolean_t found_key = B_FALSE;
3496 3496 boolean_t in_array = B_FALSE;
3497 3497 uint32_t braces = 0, brackets = 0;
3498 3498 char *elem = elemlist;
3499 3499 char *dd = dest;
3500 3500 uintptr_t cur;
3501 3501
3502 3502 for (cur = json; cur < json + size; cur++) {
3503 3503 char cc = dtrace_load8(cur);
3504 3504 if (cc == '\0')
3505 3505 return (NULL);
3506 3506
3507 3507 switch (state) {
3508 3508 case DTRACE_JSON_REST:
3509 3509 if (isspace(cc))
3510 3510 break;
3511 3511
3512 3512 if (cc == '{') {
3513 3513 state = DTRACE_JSON_OBJECT;
3514 3514 break;
3515 3515 }
3516 3516
3517 3517 if (cc == '[') {
3518 3518 in_array = B_TRUE;
3519 3519 array_pos = 0;
3520 3520 array_elem = dtrace_strtoll(elem, 10, size);
3521 3521 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3522 3522 state = DTRACE_JSON_VALUE;
3523 3523 break;
3524 3524 }
3525 3525
3526 3526 /*
3527 3527 * ERROR: expected to find a top-level object or array.
3528 3528 */
3529 3529 return (NULL);
3530 3530 case DTRACE_JSON_OBJECT:
3531 3531 if (isspace(cc))
3532 3532 break;
3533 3533
3534 3534 if (cc == '"') {
3535 3535 state = DTRACE_JSON_STRING;
3536 3536 string_is_key = B_TRUE;
3537 3537 break;
3538 3538 }
3539 3539
3540 3540 /*
3541 3541 * ERROR: either the object did not start with a key
3542 3542 * string, or we've run off the end of the object
3543 3543 * without finding the requested key.
3544 3544 */
3545 3545 return (NULL);
3546 3546 case DTRACE_JSON_STRING:
3547 3547 if (cc == '\\') {
3548 3548 *dd++ = '\\';
3549 3549 state = DTRACE_JSON_STRING_ESCAPE;
3550 3550 break;
3551 3551 }
3552 3552
3553 3553 if (cc == '"') {
3554 3554 if (collect_object) {
3555 3555 /*
3556 3556 * We don't reset the dest here, as
3557 3557 * the string is part of a larger
3558 3558 * object being collected.
3559 3559 */
3560 3560 *dd++ = cc;
3561 3561 collect_object = B_FALSE;
3562 3562 state = DTRACE_JSON_COLLECT_OBJECT;
3563 3563 break;
3564 3564 }
3565 3565 *dd = '\0';
3566 3566 dd = dest; /* reset string buffer */
3567 3567 if (string_is_key) {
3568 3568 if (dtrace_strncmp(dest, elem,
3569 3569 size) == 0)
3570 3570 found_key = B_TRUE;
3571 3571 } else if (found_key) {
3572 3572 if (nelems > 1) {
3573 3573 /*
3574 3574 * We expected an object, not
3575 3575 * this string.
3576 3576 */
3577 3577 return (NULL);
3578 3578 }
3579 3579 return (dest);
3580 3580 }
3581 3581 state = string_is_key ? DTRACE_JSON_COLON :
3582 3582 DTRACE_JSON_COMMA;
3583 3583 string_is_key = B_FALSE;
3584 3584 break;
3585 3585 }
3586 3586
3587 3587 *dd++ = cc;
3588 3588 break;
3589 3589 case DTRACE_JSON_STRING_ESCAPE:
3590 3590 *dd++ = cc;
3591 3591 if (cc == 'u') {
3592 3592 escape_unicount = 0;
3593 3593 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3594 3594 } else {
3595 3595 state = DTRACE_JSON_STRING;
3596 3596 }
3597 3597 break;
3598 3598 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3599 3599 if (!isxdigit(cc)) {
3600 3600 /*
3601 3601 * ERROR: invalid unicode escape, expected
3602 3602 * four valid hexidecimal digits.
3603 3603 				 * four valid hexadecimal digits.
3604 3604 return (NULL);
3605 3605 }
3606 3606
3607 3607 *dd++ = cc;
3608 3608 if (++escape_unicount == 4)
3609 3609 state = DTRACE_JSON_STRING;
3610 3610 break;
3611 3611 case DTRACE_JSON_COLON:
3612 3612 if (isspace(cc))
3613 3613 break;
3614 3614
3615 3615 if (cc == ':') {
3616 3616 state = DTRACE_JSON_VALUE;
3617 3617 break;
3618 3618 }
3619 3619
3620 3620 /*
3621 3621 * ERROR: expected a colon.
3622 3622 */
3623 3623 return (NULL);
3624 3624 case DTRACE_JSON_COMMA:
3625 3625 if (isspace(cc))
3626 3626 break;
3627 3627
3628 3628 if (cc == ',') {
3629 3629 if (in_array) {
3630 3630 state = DTRACE_JSON_VALUE;
3631 3631 if (++array_pos == array_elem)
3632 3632 found_key = B_TRUE;
3633 3633 } else {
3634 3634 state = DTRACE_JSON_OBJECT;
3635 3635 }
3636 3636 break;
3637 3637 }
3638 3638
3639 3639 /*
3640 3640 * ERROR: either we hit an unexpected character, or
3641 3641 * we reached the end of the object or array without
3642 3642 * finding the requested key.
3643 3643 */
3644 3644 return (NULL);
3645 3645 case DTRACE_JSON_IDENTIFIER:
3646 3646 if (islower(cc)) {
3647 3647 *dd++ = cc;
3648 3648 break;
3649 3649 }
3650 3650
3651 3651 *dd = '\0';
3652 3652 dd = dest; /* reset string buffer */
3653 3653
3654 3654 if (dtrace_strncmp(dest, "true", 5) == 0 ||
3655 3655 dtrace_strncmp(dest, "false", 6) == 0 ||
3656 3656 dtrace_strncmp(dest, "null", 5) == 0) {
3657 3657 if (found_key) {
3658 3658 if (nelems > 1) {
3659 3659 /*
3660 3660 * ERROR: We expected an object,
3661 3661 * not this identifier.
3662 3662 */
3663 3663 return (NULL);
3664 3664 }
3665 3665 return (dest);
3666 3666 } else {
3667 3667 cur--;
3668 3668 state = DTRACE_JSON_COMMA;
3669 3669 break;
3670 3670 }
3671 3671 }
3672 3672
3673 3673 /*
3674 3674 * ERROR: we did not recognise the identifier as one
3675 3675 * of those in the JSON specification.
3676 3676 */
3677 3677 return (NULL);
3678 3678 case DTRACE_JSON_NUMBER:
3679 3679 if (cc == '.') {
3680 3680 *dd++ = cc;
3681 3681 state = DTRACE_JSON_NUMBER_FRAC;
3682 3682 break;
3683 3683 }
3684 3684
3685 3685 if (cc == 'x' || cc == 'X') {
3686 3686 /*
3687 3687 * ERROR: specification explicitly excludes
3688 3688 				 * hexadecimal or octal numbers.
3689 3689 */
3690 3690 return (NULL);
3691 3691 }
3692 3692
3693 3693 /* FALLTHRU */
3694 3694 case DTRACE_JSON_NUMBER_FRAC:
3695 3695 if (cc == 'e' || cc == 'E') {
3696 3696 *dd++ = cc;
3697 3697 state = DTRACE_JSON_NUMBER_EXP;
3698 3698 break;
3699 3699 }
3700 3700
3701 3701 if (cc == '+' || cc == '-') {
3702 3702 /*
3703 3703 * ERROR: expect sign as part of exponent only.
3704 3704 */
3705 3705 return (NULL);
3706 3706 }
3707 3707 /* FALLTHRU */
3708 3708 case DTRACE_JSON_NUMBER_EXP:
3709 3709 if (isdigit(cc) || cc == '+' || cc == '-') {
3710 3710 *dd++ = cc;
3711 3711 break;
3712 3712 }
3713 3713
3714 3714 *dd = '\0';
3715 3715 dd = dest; /* reset string buffer */
3716 3716 if (found_key) {
3717 3717 if (nelems > 1) {
3718 3718 /*
3719 3719 * ERROR: We expected an object, not
3720 3720 * this number.
3721 3721 */
3722 3722 return (NULL);
3723 3723 }
3724 3724 return (dest);
3725 3725 }
3726 3726
3727 3727 cur--;
3728 3728 state = DTRACE_JSON_COMMA;
3729 3729 break;
3730 3730 case DTRACE_JSON_VALUE:
3731 3731 if (isspace(cc))
3732 3732 break;
3733 3733
3734 3734 if (cc == '{' || cc == '[') {
3735 3735 if (nelems > 1 && found_key) {
3736 3736 in_array = cc == '[' ? B_TRUE : B_FALSE;
3737 3737 /*
3738 3738 * If our element selector directs us
3739 3739 * to descend into this nested object,
3740 3740 * then move to the next selector
3741 3741 * element in the list and restart the
3742 3742 * state machine.
3743 3743 */
3744 3744 while (*elem != '\0')
3745 3745 elem++;
3746 3746 elem++; /* skip the inter-element NUL */
3747 3747 nelems--;
3748 3748 dd = dest;
3749 3749 if (in_array) {
3750 3750 state = DTRACE_JSON_VALUE;
3751 3751 array_pos = 0;
3752 3752 array_elem = dtrace_strtoll(
3753 3753 elem, 10, size);
3754 3754 found_key = array_elem == 0 ?
3755 3755 B_TRUE : B_FALSE;
3756 3756 } else {
3757 3757 found_key = B_FALSE;
3758 3758 state = DTRACE_JSON_OBJECT;
3759 3759 }
3760 3760 break;
3761 3761 }
3762 3762
3763 3763 /*
3764 3764 * Otherwise, we wish to either skip this
3765 3765 * nested object or return it in full.
3766 3766 */
3767 3767 if (cc == '[')
3768 3768 brackets = 1;
3769 3769 else
3770 3770 braces = 1;
3771 3771 *dd++ = cc;
3772 3772 state = DTRACE_JSON_COLLECT_OBJECT;
3773 3773 break;
3774 3774 }
3775 3775
3776 3776 if (cc == '"') {
3777 3777 state = DTRACE_JSON_STRING;
3778 3778 break;
3779 3779 }
3780 3780
3781 3781 if (islower(cc)) {
3782 3782 /*
3783 3783 * Here we deal with true, false and null.
3784 3784 */
3785 3785 *dd++ = cc;
3786 3786 state = DTRACE_JSON_IDENTIFIER;
3787 3787 break;
3788 3788 }
3789 3789
3790 3790 if (cc == '-' || isdigit(cc)) {
3791 3791 *dd++ = cc;
3792 3792 state = DTRACE_JSON_NUMBER;
3793 3793 break;
3794 3794 }
3795 3795
3796 3796 /*
3797 3797 * ERROR: unexpected character at start of value.
3798 3798 */
3799 3799 return (NULL);
3800 3800 case DTRACE_JSON_COLLECT_OBJECT:
3801 3801 if (cc == '\0')
3802 3802 /*
3803 3803 * ERROR: unexpected end of input.
3804 3804 */
3805 3805 return (NULL);
3806 3806
3807 3807 *dd++ = cc;
3808 3808 if (cc == '"') {
3809 3809 collect_object = B_TRUE;
3810 3810 state = DTRACE_JSON_STRING;
3811 3811 break;
3812 3812 }
3813 3813
3814 3814 if (cc == ']') {
3815 3815 if (brackets-- == 0) {
3816 3816 /*
3817 3817 * ERROR: unbalanced brackets.
3818 3818 */
3819 3819 return (NULL);
3820 3820 }
3821 3821 } else if (cc == '}') {
3822 3822 if (braces-- == 0) {
3823 3823 /*
3824 3824 * ERROR: unbalanced braces.
3825 3825 */
3826 3826 return (NULL);
3827 3827 }
3828 3828 } else if (cc == '{') {
3829 3829 braces++;
3830 3830 } else if (cc == '[') {
3831 3831 brackets++;
3832 3832 }
3833 3833
3834 3834 if (brackets == 0 && braces == 0) {
3835 3835 if (found_key) {
3836 3836 *dd = '\0';
3837 3837 return (dest);
3838 3838 }
3839 3839 dd = dest; /* reset string buffer */
3840 3840 state = DTRACE_JSON_COMMA;
3841 3841 }
3842 3842 break;
3843 3843 }
3844 3844 }
3845 3845 return (NULL);
3846 3846 }
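
/*
 * For illustration: dtrace_json() is reached from D via the json()
 * subroutine (see DIF_SUBR_JSON below).  A hypothetical clause evaluating
 *
 *	json(copyinstr(arg0), "foo[0].bar")
 *
 * has its element selector split into the packed list
 *
 *	"foo" NUL "0" NUL "bar" NUL		(nelems = 3)
 *
 * before dtrace_json() is invoked; the result is the string form of that
 * nested member, or NULL on a parse error or missing key.
 */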
3847 3847
3848 3848 /*
3849 3849  * Emulate the execution of DIF subroutines invoked by the call opcode.
3850 3850 * Notice that we don't bother validating the proper number of arguments or
3851 3851 * their types in the tuple stack. This isn't needed because all argument
3852 3852 * interpretation is safe because of our load safety -- the worst that can
3853 3853 * happen is that a bogus program can obtain bogus results.
3854 3854 */
3855 3855 static void
3856 3856 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3857 3857 dtrace_key_t *tupregs, int nargs,
3858 3858 dtrace_mstate_t *mstate, dtrace_state_t *state)
3859 3859 {
3860 3860 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
3861 3861 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
3862 3862 dtrace_vstate_t *vstate = &state->dts_vstate;
3863 3863
3864 3864 union {
3865 3865 mutex_impl_t mi;
3866 3866 uint64_t mx;
3867 3867 } m;
3868 3868
3869 3869 union {
3870 3870 krwlock_t ri;
3871 3871 uintptr_t rw;
3872 3872 } r;
3873 3873
3874 3874 switch (subr) {
3875 3875 case DIF_SUBR_RAND:
3876 3876 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3877 3877 break;
3878 3878
3879 3879 case DIF_SUBR_MUTEX_OWNED:
3880 3880 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3881 3881 mstate, vstate)) {
3882 3882 regs[rd] = NULL;
3883 3883 break;
3884 3884 }
3885 3885
3886 3886 m.mx = dtrace_load64(tupregs[0].dttk_value);
3887 3887 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3888 3888 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3889 3889 else
3890 3890 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3891 3891 break;
3892 3892
3893 3893 case DIF_SUBR_MUTEX_OWNER:
3894 3894 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3895 3895 mstate, vstate)) {
3896 3896 regs[rd] = NULL;
3897 3897 break;
3898 3898 }
3899 3899
3900 3900 m.mx = dtrace_load64(tupregs[0].dttk_value);
3901 3901 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3902 3902 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3903 3903 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3904 3904 else
3905 3905 regs[rd] = 0;
3906 3906 break;
3907 3907
3908 3908 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3909 3909 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3910 3910 mstate, vstate)) {
3911 3911 regs[rd] = NULL;
3912 3912 break;
3913 3913 }
3914 3914
3915 3915 m.mx = dtrace_load64(tupregs[0].dttk_value);
3916 3916 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3917 3917 break;
3918 3918
3919 3919 case DIF_SUBR_MUTEX_TYPE_SPIN:
3920 3920 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3921 3921 mstate, vstate)) {
3922 3922 regs[rd] = NULL;
3923 3923 break;
3924 3924 }
3925 3925
3926 3926 m.mx = dtrace_load64(tupregs[0].dttk_value);
3927 3927 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3928 3928 break;
3929 3929
3930 3930 case DIF_SUBR_RW_READ_HELD: {
3931 3931 uintptr_t tmp;
3932 3932
3933 3933 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3934 3934 mstate, vstate)) {
3935 3935 regs[rd] = NULL;
3936 3936 break;
3937 3937 }
3938 3938
3939 3939 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3940 3940 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3941 3941 break;
3942 3942 }
3943 3943
3944 3944 case DIF_SUBR_RW_WRITE_HELD:
3945 3945 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3946 3946 mstate, vstate)) {
3947 3947 regs[rd] = NULL;
3948 3948 break;
3949 3949 }
3950 3950
3951 3951 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3952 3952 regs[rd] = _RW_WRITE_HELD(&r.ri);
3953 3953 break;
3954 3954
3955 3955 case DIF_SUBR_RW_ISWRITER:
3956 3956 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3957 3957 mstate, vstate)) {
3958 3958 regs[rd] = NULL;
3959 3959 break;
3960 3960 }
3961 3961
3962 3962 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3963 3963 regs[rd] = _RW_ISWRITER(&r.ri);
3964 3964 break;
3965 3965
3966 3966 case DIF_SUBR_BCOPY: {
3967 3967 /*
3968 3968 * We need to be sure that the destination is in the scratch
3969 3969 * region -- no other region is allowed.
3970 3970 */
3971 3971 uintptr_t src = tupregs[0].dttk_value;
3972 3972 uintptr_t dest = tupregs[1].dttk_value;
3973 3973 size_t size = tupregs[2].dttk_value;
3974 3974
3975 3975 if (!dtrace_inscratch(dest, size, mstate)) {
3976 3976 *flags |= CPU_DTRACE_BADADDR;
3977 3977 *illval = regs[rd];
3978 3978 break;
3979 3979 }
3980 3980
3981 3981 if (!dtrace_canload(src, size, mstate, vstate)) {
3982 3982 regs[rd] = NULL;
3983 3983 break;
3984 3984 }
3985 3985
3986 3986 dtrace_bcopy((void *)src, (void *)dest, size);
3987 3987 break;
3988 3988 }
3989 3989
3990 3990 case DIF_SUBR_ALLOCA:
3991 3991 case DIF_SUBR_COPYIN: {
3992 3992 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3993 3993 uint64_t size =
3994 3994 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3995 3995 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3996 3996
3997 3997 /*
3998 3998 * This action doesn't require any credential checks since
3999 3999 * probes will not activate in user contexts to which the
4000 4000 * enabling user does not have permissions.
4001 4001 */
4002 4002
4003 4003 /*
4004 4004 * Rounding up the user allocation size could have overflowed
4005 4005 * a large, bogus allocation (like -1ULL) to 0.
4006 4006 */
4007 4007 if (scratch_size < size ||
4008 4008 !DTRACE_INSCRATCH(mstate, scratch_size)) {
4009 4009 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4010 4010 regs[rd] = NULL;
4011 4011 break;
4012 4012 }
4013 4013
4014 4014 if (subr == DIF_SUBR_COPYIN) {
4015 4015 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4016 4016 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4017 4017 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4018 4018 }
4019 4019
4020 4020 mstate->dtms_scratch_ptr += scratch_size;
4021 4021 regs[rd] = dest;
4022 4022 break;
4023 4023 }
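
	/*
	 * To illustrate the overflow check above: suppose dtms_scratch_ptr
	 * ends in ...4, so P2ROUNDUP() advances dest by 4 bytes, and a bogus
	 * size of 0xffffffffffffffff is requested.  Then
	 *
	 *	scratch_size = 4 + 0xffffffffffffffff = 3	(mod 2^64)
	 *
	 * which has wrapped below size; the "scratch_size < size" test
	 * catches exactly this case before DTRACE_INSCRATCH() is consulted.
	 */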
4024 4024
4025 4025 case DIF_SUBR_COPYINTO: {
4026 4026 uint64_t size = tupregs[1].dttk_value;
4027 4027 uintptr_t dest = tupregs[2].dttk_value;
4028 4028
4029 4029 /*
4030 4030 * This action doesn't require any credential checks since
4031 4031 * probes will not activate in user contexts to which the
4032 4032 * enabling user does not have permissions.
4033 4033 */
4034 4034 if (!dtrace_inscratch(dest, size, mstate)) {
4035 4035 *flags |= CPU_DTRACE_BADADDR;
4036 4036 *illval = regs[rd];
4037 4037 break;
4038 4038 }
4039 4039
4040 4040 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4041 4041 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4042 4042 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4043 4043 break;
4044 4044 }
4045 4045
4046 4046 case DIF_SUBR_COPYINSTR: {
4047 4047 uintptr_t dest = mstate->dtms_scratch_ptr;
4048 4048 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4049 4049
4050 4050 if (nargs > 1 && tupregs[1].dttk_value < size)
4051 4051 size = tupregs[1].dttk_value + 1;
4052 4052
4053 4053 /*
4054 4054 * This action doesn't require any credential checks since
4055 4055 * probes will not activate in user contexts to which the
4056 4056 * enabling user does not have permissions.
4057 4057 */
4058 4058 if (!DTRACE_INSCRATCH(mstate, size)) {
4059 4059 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4060 4060 regs[rd] = NULL;
4061 4061 break;
4062 4062 }
4063 4063
4064 4064 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4065 4065 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4066 4066 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4067 4067
4068 4068 ((char *)dest)[size - 1] = '\0';
4069 4069 mstate->dtms_scratch_ptr += size;
4070 4070 regs[rd] = dest;
4071 4071 break;
4072 4072 }
4073 4073
4074 4074 case DIF_SUBR_MSGSIZE:
4075 4075 case DIF_SUBR_MSGDSIZE: {
4076 4076 uintptr_t baddr = tupregs[0].dttk_value, daddr;
4077 4077 uintptr_t wptr, rptr;
4078 4078 size_t count = 0;
4079 4079 int cont = 0;
4080 4080
4081 4081 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4082 4082
4083 4083 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4084 4084 vstate)) {
4085 4085 regs[rd] = NULL;
4086 4086 break;
4087 4087 }
4088 4088
4089 4089 wptr = dtrace_loadptr(baddr +
4090 4090 offsetof(mblk_t, b_wptr));
4091 4091
4092 4092 rptr = dtrace_loadptr(baddr +
4093 4093 offsetof(mblk_t, b_rptr));
4094 4094
4095 4095 if (wptr < rptr) {
4096 4096 *flags |= CPU_DTRACE_BADADDR;
4097 4097 *illval = tupregs[0].dttk_value;
4098 4098 break;
4099 4099 }
4100 4100
4101 4101 daddr = dtrace_loadptr(baddr +
4102 4102 offsetof(mblk_t, b_datap));
4103 4103
4104 4104 baddr = dtrace_loadptr(baddr +
4105 4105 offsetof(mblk_t, b_cont));
4106 4106
4107 4107 /*
4108 4108 			 * We want to protect against denial-of-service here,
4109 4109 * so we're only going to search the list for
4110 4110 * dtrace_msgdsize_max mblks.
4111 4111 */
4112 4112 if (cont++ > dtrace_msgdsize_max) {
4113 4113 *flags |= CPU_DTRACE_ILLOP;
4114 4114 break;
4115 4115 }
4116 4116
4117 4117 if (subr == DIF_SUBR_MSGDSIZE) {
4118 4118 if (dtrace_load8(daddr +
4119 4119 offsetof(dblk_t, db_type)) != M_DATA)
4120 4120 continue;
4121 4121 }
4122 4122
4123 4123 count += wptr - rptr;
4124 4124 }
4125 4125
4126 4126 if (!(*flags & CPU_DTRACE_FAULT))
4127 4127 regs[rd] = count;
4128 4128
4129 4129 break;
4130 4130 }
4131 4131
4132 4132 case DIF_SUBR_PROGENYOF: {
4133 4133 pid_t pid = tupregs[0].dttk_value;
4134 4134 proc_t *p;
4135 4135 int rval = 0;
4136 4136
4137 4137 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4138 4138
4139 4139 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4140 4140 if (p->p_pidp->pid_id == pid) {
4141 4141 rval = 1;
4142 4142 break;
4143 4143 }
4144 4144 }
4145 4145
4146 4146 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4147 4147
4148 4148 regs[rd] = rval;
4149 4149 break;
4150 4150 }
4151 4151
4152 4152 case DIF_SUBR_SPECULATION:
4153 4153 regs[rd] = dtrace_speculation(state);
4154 4154 break;
4155 4155
4156 4156 case DIF_SUBR_COPYOUT: {
4157 4157 uintptr_t kaddr = tupregs[0].dttk_value;
4158 4158 uintptr_t uaddr = tupregs[1].dttk_value;
4159 4159 uint64_t size = tupregs[2].dttk_value;
4160 4160
4161 4161 if (!dtrace_destructive_disallow &&
4162 4162 dtrace_priv_proc_control(state, mstate) &&
4163 4163 !dtrace_istoxic(kaddr, size)) {
4164 4164 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4165 4165 dtrace_copyout(kaddr, uaddr, size, flags);
4166 4166 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4167 4167 }
4168 4168 break;
4169 4169 }
4170 4170
4171 4171 case DIF_SUBR_COPYOUTSTR: {
4172 4172 uintptr_t kaddr = tupregs[0].dttk_value;
4173 4173 uintptr_t uaddr = tupregs[1].dttk_value;
4174 4174 uint64_t size = tupregs[2].dttk_value;
4175 4175
4176 4176 if (!dtrace_destructive_disallow &&
4177 4177 dtrace_priv_proc_control(state, mstate) &&
4178 4178 !dtrace_istoxic(kaddr, size)) {
4179 4179 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4180 4180 dtrace_copyoutstr(kaddr, uaddr, size, flags);
4181 4181 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4182 4182 }
4183 4183 break;
4184 4184 }
4185 4185
4186 4186 case DIF_SUBR_STRLEN: {
4187 4187 size_t sz;
4188 4188 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4189 4189 sz = dtrace_strlen((char *)addr,
4190 4190 state->dts_options[DTRACEOPT_STRSIZE]);
4191 4191
4192 4192 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
4193 4193 regs[rd] = NULL;
4194 4194 break;
4195 4195 }
4196 4196
4197 4197 regs[rd] = sz;
4198 4198
4199 4199 break;
4200 4200 }
4201 4201
4202 4202 case DIF_SUBR_STRCHR:
4203 4203 case DIF_SUBR_STRRCHR: {
4204 4204 /*
4205 4205 * We're going to iterate over the string looking for the
4206 4206 * specified character. We will iterate until we have reached
4207 4207 * the string length or we have found the character. If this
4208 4208 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4209 4209 * of the specified character instead of the first.
4210 4210 */
4211 4211 uintptr_t saddr = tupregs[0].dttk_value;
4212 4212 uintptr_t addr = tupregs[0].dttk_value;
4213 4213 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
4214 4214 char c, target = (char)tupregs[1].dttk_value;
4215 4215
4216 4216 for (regs[rd] = NULL; addr < limit; addr++) {
4217 4217 if ((c = dtrace_load8(addr)) == target) {
4218 4218 regs[rd] = addr;
4219 4219
4220 4220 if (subr == DIF_SUBR_STRCHR)
4221 4221 break;
4222 4222 }
4223 4223
4224 4224 if (c == '\0')
4225 4225 break;
4226 4226 }
4227 4227
4228 4228 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
4229 4229 regs[rd] = NULL;
4230 4230 break;
4231 4231 }
4232 4232
4233 4233 break;
4234 4234 }
4235 4235
4236 4236 case DIF_SUBR_STRSTR:
4237 4237 case DIF_SUBR_INDEX:
4238 4238 case DIF_SUBR_RINDEX: {
4239 4239 /*
4240 4240 * We're going to iterate over the string looking for the
4241 4241 * specified string. We will iterate until we have reached
4242 4242 * the string length or we have found the string. (Yes, this
4243 4243 * is done in the most naive way possible -- but considering
4244 4244 * that the string we're searching for is likely to be
4245 4245 * relatively short, the complexity of Rabin-Karp or similar
4246 4246 * hardly seems merited.)
4247 4247 */
4248 4248 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4249 4249 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4250 4250 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4251 4251 size_t len = dtrace_strlen(addr, size);
4252 4252 size_t sublen = dtrace_strlen(substr, size);
4253 4253 char *limit = addr + len, *orig = addr;
4254 4254 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4255 4255 int inc = 1;
4256 4256
4257 4257 regs[rd] = notfound;
4258 4258
4259 4259 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4260 4260 regs[rd] = NULL;
4261 4261 break;
4262 4262 }
4263 4263
4264 4264 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4265 4265 vstate)) {
4266 4266 regs[rd] = NULL;
4267 4267 break;
4268 4268 }
4269 4269
4270 4270 /*
4271 4271 * strstr() and index()/rindex() have similar semantics if
4272 4272 * both strings are the empty string: strstr() returns a
4273 4273 * pointer to the (empty) string, and index() and rindex()
4274 4274 * both return index 0 (regardless of any position argument).
4275 4275 */
4276 4276 if (sublen == 0 && len == 0) {
4277 4277 if (subr == DIF_SUBR_STRSTR)
4278 4278 regs[rd] = (uintptr_t)addr;
4279 4279 else
4280 4280 regs[rd] = 0;
4281 4281 break;
4282 4282 }
4283 4283
4284 4284 if (subr != DIF_SUBR_STRSTR) {
4285 4285 if (subr == DIF_SUBR_RINDEX) {
4286 4286 limit = orig - 1;
4287 4287 addr += len;
4288 4288 inc = -1;
4289 4289 }
4290 4290
4291 4291 /*
4292 4292 * Both index() and rindex() take an optional position
4293 4293 * argument that denotes the starting position.
4294 4294 */
4295 4295 if (nargs == 3) {
4296 4296 int64_t pos = (int64_t)tupregs[2].dttk_value;
4297 4297
4298 4298 /*
4299 4299 * If the position argument to index() is
4300 4300 * negative, Perl implicitly clamps it at
4301 4301 * zero. This semantic is a little surprising
4302 4302 * given the special meaning of negative
4303 4303 * positions to similar Perl functions like
4304 4304 * substr(), but it appears to reflect a
4305 4305 * notion that index() can start from a
4306 4306 * negative index and increment its way up to
4307 4307 * the string. Given this notion, Perl's
4308 4308 * rindex() is at least self-consistent in
4309 4309 * that it implicitly clamps positions greater
4310 4310 * than the string length to be the string
4311 4311 * length. Where Perl completely loses
4312 4312 * coherence, however, is when the specified
4313 4313 * substring is the empty string (""). In
4314 4314 * this case, even if the position is
4315 4315 * negative, rindex() returns 0 -- and even if
4316 4316 * the position is greater than the length,
4317 4317 * index() returns the string length. These
4318 4318 * semantics violate the notion that index()
4319 4319 * should never return a value less than the
4320 4320 * specified position and that rindex() should
4321 4321 * never return a value greater than the
4322 4322 * specified position. (One assumes that
4323 4323 * these semantics are artifacts of Perl's
4324 4324 * implementation and not the results of
4325 4325 * deliberate design -- it beggars belief that
4326 4326 * even Larry Wall could desire such oddness.)
4327 4327 * While in the abstract one would wish for
4328 4328 * consistent position semantics across
4329 4329 * substr(), index() and rindex() -- or at the
4330 4330 * very least self-consistent position
4331 4331 * semantics for index() and rindex() -- we
4332 4332 * instead opt to keep with the extant Perl
4333 4333 * semantics, in all their broken glory. (Do
4334 4334 * we have more desire to maintain Perl's
4335 4335 * semantics than Perl does? Probably.)
4336 4336 */
4337 4337 if (subr == DIF_SUBR_RINDEX) {
4338 4338 if (pos < 0) {
4339 4339 if (sublen == 0)
4340 4340 regs[rd] = 0;
4341 4341 break;
4342 4342 }
4343 4343
4344 4344 if (pos > len)
4345 4345 pos = len;
4346 4346 } else {
4347 4347 if (pos < 0)
4348 4348 pos = 0;
4349 4349
4350 4350 if (pos >= len) {
4351 4351 if (sublen == 0)
4352 4352 regs[rd] = len;
4353 4353 break;
4354 4354 }
4355 4355 }
4356 4356
4357 4357 addr = orig + pos;
4358 4358 }
4359 4359 }
4360 4360
4361 4361 for (regs[rd] = notfound; addr != limit; addr += inc) {
4362 4362 if (dtrace_strncmp(addr, substr, sublen) == 0) {
4363 4363 if (subr != DIF_SUBR_STRSTR) {
4364 4364 /*
4365 4365 * As D index() and rindex() are
4366 4366 * modeled on Perl (and not on awk),
4367 4367 * we return a zero-based (and not a
4368 4368 * one-based) index. (For you Perl
4369 4369 * weenies: no, we're not going to add
4370 4370 * $[ -- and shouldn't you be at a con
4371 4371 * or something?)
4372 4372 */
4373 4373 regs[rd] = (uintptr_t)(addr - orig);
4374 4374 break;
4375 4375 }
4376 4376
4377 4377 ASSERT(subr == DIF_SUBR_STRSTR);
4378 4378 regs[rd] = (uintptr_t)addr;
4379 4379 break;
4380 4380 }
4381 4381 }
4382 4382
4383 4383 break;
4384 4384 }
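
	/*
	 * Some illustrative (hypothetical) invocations of the Perl-derived
	 * semantics implemented above; indices are zero-based:
	 *
	 *	strstr("foobarbaz", "bar")	=> "barbaz"
	 *	index("foobarbaz", "bar")	=> 3
	 *	index("foobarbaz", "bar", 4)	=> -1	(no match at or after 4)
	 *	rindex("foobarbaz", "ba")	=> 6
	 *	rindex("foobarbaz", "ba", 5)	=> 3
	 */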
4385 4385
4386 4386 case DIF_SUBR_STRTOK: {
4387 4387 uintptr_t addr = tupregs[0].dttk_value;
4388 4388 uintptr_t tokaddr = tupregs[1].dttk_value;
4389 4389 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4390 4390 uintptr_t limit, toklimit = tokaddr + size;
4391 4391 uint8_t c, tokmap[32]; /* 256 / 8 */
4392 4392 char *dest = (char *)mstate->dtms_scratch_ptr;
4393 4393 int i;
4394 4394
4395 4395 /*
4396 4396 * Check both the token buffer and (later) the input buffer,
4397 4397 * since both could be non-scratch addresses.
4398 4398 */
4399 4399 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
4400 4400 regs[rd] = NULL;
4401 4401 break;
4402 4402 }
4403 4403
4404 4404 if (!DTRACE_INSCRATCH(mstate, size)) {
4405 4405 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4406 4406 regs[rd] = NULL;
4407 4407 break;
4408 4408 }
4409 4409
4410 4410 if (addr == NULL) {
4411 4411 /*
4412 4412 * If the address specified is NULL, we use our saved
4413 4413 * strtok pointer from the mstate. Note that this
4414 4414 * means that the saved strtok pointer is _only_
4415 4415 * valid within multiple enablings of the same probe --
4416 4416 * it behaves like an implicit clause-local variable.
4417 4417 */
4418 4418 addr = mstate->dtms_strtok;
4419 4419 } else {
4420 4420 /*
4421 4421 * If the user-specified address is non-NULL we must
4422 4422 * access check it. This is the only time we have
4423 4423 * a chance to do so, since this address may reside
4424 4424 			 * in the string table of this clause -- future calls
4425 4425 * (when we fetch addr from mstate->dtms_strtok)
4426 4426 * would fail this access check.
4427 4427 */
4428 4428 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
4429 4429 regs[rd] = NULL;
4430 4430 break;
4431 4431 }
4432 4432 }
4433 4433
4434 4434 /*
4435 4435 * First, zero the token map, and then process the token
4436 4436 * string -- setting a bit in the map for every character
4437 4437 * found in the token string.
4438 4438 */
4439 4439 for (i = 0; i < sizeof (tokmap); i++)
4440 4440 tokmap[i] = 0;
4441 4441
4442 4442 for (; tokaddr < toklimit; tokaddr++) {
4443 4443 if ((c = dtrace_load8(tokaddr)) == '\0')
4444 4444 break;
4445 4445
4446 4446 ASSERT((c >> 3) < sizeof (tokmap));
4447 4447 tokmap[c >> 3] |= (1 << (c & 0x7));
4448 4448 }
4449 4449
4450 4450 for (limit = addr + size; addr < limit; addr++) {
4451 4451 /*
4452 4452 * We're looking for a character that is _not_ contained
4453 4453 * in the token string.
4454 4454 */
4455 4455 if ((c = dtrace_load8(addr)) == '\0')
4456 4456 break;
4457 4457
4458 4458 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4459 4459 break;
4460 4460 }
4461 4461
4462 4462 if (c == '\0') {
4463 4463 /*
4464 4464 * We reached the end of the string without finding
4465 4465 * any character that was not in the token string.
4466 4466 * We return NULL in this case, and we set the saved
4467 4467 * address to NULL as well.
4468 4468 */
4469 4469 regs[rd] = NULL;
4470 4470 mstate->dtms_strtok = NULL;
4471 4471 break;
4472 4472 }
4473 4473
4474 4474 /*
4475 4475 * From here on, we're copying into the destination string.
4476 4476 */
4477 4477 for (i = 0; addr < limit && i < size - 1; addr++) {
4478 4478 if ((c = dtrace_load8(addr)) == '\0')
4479 4479 break;
4480 4480
4481 4481 if (tokmap[c >> 3] & (1 << (c & 0x7)))
4482 4482 break;
4483 4483
4484 4484 ASSERT(i < size);
4485 4485 dest[i++] = c;
4486 4486 }
4487 4487
4488 4488 ASSERT(i < size);
4489 4489 dest[i] = '\0';
4490 4490 regs[rd] = (uintptr_t)dest;
4491 4491 mstate->dtms_scratch_ptr += size;
4492 4492 mstate->dtms_strtok = addr;
4493 4493 break;
4494 4494 }
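
	/*
	 * Note that tokmap above is a 256-bit membership set, one bit per
	 * possible byte value: for a delimiter character c, the bit lives at
	 * tokmap[c >> 3] under the mask (1 << (c & 0x7)).  For example, for
	 * c = ':' (0x3a = 58), the set bit is bit 2 of tokmap[7], so a ':'
	 * subsequently loaded from the input tests non-zero and terminates
	 * the token.
	 */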
4495 4495
4496 4496 case DIF_SUBR_SUBSTR: {
4497 4497 uintptr_t s = tupregs[0].dttk_value;
4498 4498 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4499 4499 char *d = (char *)mstate->dtms_scratch_ptr;
4500 4500 int64_t index = (int64_t)tupregs[1].dttk_value;
4501 4501 int64_t remaining = (int64_t)tupregs[2].dttk_value;
4502 4502 size_t len = dtrace_strlen((char *)s, size);
4503 4503 int64_t i;
4504 4504
4505 4505 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4506 4506 regs[rd] = NULL;
4507 4507 break;
4508 4508 }
4509 4509
4510 4510 if (!DTRACE_INSCRATCH(mstate, size)) {
4511 4511 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4512 4512 regs[rd] = NULL;
4513 4513 break;
4514 4514 }
4515 4515
4516 4516 if (nargs <= 2)
4517 4517 remaining = (int64_t)size;
4518 4518
4519 4519 if (index < 0) {
4520 4520 index += len;
4521 4521
4522 4522 if (index < 0 && index + remaining > 0) {
4523 4523 remaining += index;
4524 4524 index = 0;
4525 4525 }
4526 4526 }
4527 4527
4528 4528 if (index >= len || index < 0) {
4529 4529 remaining = 0;
4530 4530 } else if (remaining < 0) {
4531 4531 remaining += len - index;
4532 4532 } else if (index + remaining > size) {
4533 4533 remaining = size - index;
4534 4534 }
4535 4535
4536 4536 for (i = 0; i < remaining; i++) {
4537 4537 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4538 4538 break;
4539 4539 }
4540 4540
4541 4541 d[i] = '\0';
4542 4542
4543 4543 mstate->dtms_scratch_ptr += size;
4544 4544 regs[rd] = (uintptr_t)d;
4545 4545 break;
4546 4546 }
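
	/*
	 * Some illustrative (hypothetical) invocations of the index and
	 * length clamping above, assuming the default strsize:
	 *
	 *	substr("dtrace", 1, 3)	=> "tra"
	 *	substr("dtrace", -3)	=> "ace"	(negative index counts from the end)
	 *	substr("dtrace", 4, 10)	=> "ce"		(the copy stops at the NUL)
	 */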
4547 4547
4548 4548 case DIF_SUBR_JSON: {
4549 4549 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4550 4550 uintptr_t json = tupregs[0].dttk_value;
4551 4551 size_t jsonlen = dtrace_strlen((char *)json, size);
4552 4552 uintptr_t elem = tupregs[1].dttk_value;
4553 4553 size_t elemlen = dtrace_strlen((char *)elem, size);
4554 4554
4555 4555 char *dest = (char *)mstate->dtms_scratch_ptr;
4556 4556 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
4557 4557 char *ee = elemlist;
4558 4558 int nelems = 1;
4559 4559 uintptr_t cur;
4560 4560
4561 4561 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
4562 4562 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
4563 4563 regs[rd] = NULL;
4564 4564 break;
4565 4565 }
4566 4566
4567 4567 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
4568 4568 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4569 4569 regs[rd] = NULL;
4570 4570 break;
4571 4571 }
4572 4572
4573 4573 /*
4574 4574 * Read the element selector and split it up into a packed list
4575 4575 * of strings.
4576 4576 */
4577 4577 for (cur = elem; cur < elem + elemlen; cur++) {
4578 4578 char cc = dtrace_load8(cur);
4579 4579
4580 4580 if (cur == elem && cc == '[') {
4581 4581 /*
4582 4582 * If the first element selector key is
4583 4583 * actually an array index then ignore the
4584 4584 * bracket.
4585 4585 */
4586 4586 continue;
4587 4587 }
4588 4588
4589 4589 if (cc == ']')
4590 4590 continue;
4591 4591
4592 4592 if (cc == '.' || cc == '[') {
4593 4593 nelems++;
4594 4594 cc = '\0';
4595 4595 }
4596 4596
4597 4597 *ee++ = cc;
4598 4598 }
4599 4599 *ee++ = '\0';
4600 4600
4601 4601 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
4602 4602 nelems, dest)) != NULL)
4603 4603 mstate->dtms_scratch_ptr += jsonlen + 1;
4604 4604 break;
4605 4605 }
4606 4606
4607 4607 case DIF_SUBR_TOUPPER:
4608 4608 case DIF_SUBR_TOLOWER: {
4609 4609 uintptr_t s = tupregs[0].dttk_value;
4610 4610 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4611 4611 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4612 4612 size_t len = dtrace_strlen((char *)s, size);
4613 4613 char lower, upper, convert;
4614 4614 int64_t i;
4615 4615
4616 4616 if (subr == DIF_SUBR_TOUPPER) {
4617 4617 lower = 'a';
4618 4618 upper = 'z';
4619 4619 convert = 'A';
4620 4620 } else {
4621 4621 lower = 'A';
4622 4622 upper = 'Z';
4623 4623 convert = 'a';
4624 4624 }
4625 4625
4626 4626 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4627 4627 regs[rd] = NULL;
4628 4628 break;
4629 4629 }
4630 4630
4631 4631 if (!DTRACE_INSCRATCH(mstate, size)) {
4632 4632 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4633 4633 regs[rd] = NULL;
4634 4634 break;
4635 4635 }
4636 4636
4637 4637 for (i = 0; i < size - 1; i++) {
4638 4638 if ((c = dtrace_load8(s + i)) == '\0')
4639 4639 break;
4640 4640
4641 4641 if (c >= lower && c <= upper)
4642 4642 c = convert + (c - lower);
4643 4643
4644 4644 dest[i] = c;
4645 4645 }
4646 4646
4647 4647 ASSERT(i < size);
4648 4648 dest[i] = '\0';
4649 4649 regs[rd] = (uintptr_t)dest;
4650 4650 mstate->dtms_scratch_ptr += size;
4651 4651 break;
4652 4652 }
4653 4653
4654 4654 case DIF_SUBR_GETMAJOR:
4655 4655 #ifdef _LP64
4656 4656 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4657 4657 #else
4658 4658 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4659 4659 #endif
4660 4660 break;
4661 4661
4662 4662 case DIF_SUBR_GETMINOR:
4663 4663 #ifdef _LP64
4664 4664 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4665 4665 #else
4666 4666 regs[rd] = tupregs[0].dttk_value & MAXMIN;
4667 4667 #endif
4668 4668 break;
4669 4669
4670 4670 case DIF_SUBR_DDI_PATHNAME: {
4671 4671 /*
4672 4672 * This one is a galactic mess. We are going to roughly
4673 4673 * emulate ddi_pathname(), but it's made more complicated
4674 4674 * by the fact that we (a) want to include the minor name and
4675 4675 * (b) must proceed iteratively instead of recursively.
4676 4676 */
4677 4677 uintptr_t dest = mstate->dtms_scratch_ptr;
4678 4678 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4679 4679 char *start = (char *)dest, *end = start + size - 1;
4680 4680 uintptr_t daddr = tupregs[0].dttk_value;
4681 4681 int64_t minor = (int64_t)tupregs[1].dttk_value;
4682 4682 char *s;
4683 4683 int i, len, depth = 0;
4684 4684
4685 4685 /*
4686 4686 * Due to all the pointer jumping we do and context we must
4687 4687 * rely upon, we just mandate that the user must have kernel
4688 4688 * read privileges to use this routine.
4689 4689 */
4690 4690 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4691 4691 *flags |= CPU_DTRACE_KPRIV;
4692 4692 *illval = daddr;
4693 4693 regs[rd] = NULL;
4694 4694 }
4695 4695
4696 4696 if (!DTRACE_INSCRATCH(mstate, size)) {
4697 4697 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4698 4698 regs[rd] = NULL;
4699 4699 break;
4700 4700 }
4701 4701
4702 4702 *end = '\0';
4703 4703
4704 4704 /*
4705 4705 * We want to have a name for the minor. In order to do this,
4706 4706 * we need to walk the minor list from the devinfo. We want
4707 4707 * to be sure that we don't infinitely walk a circular list,
4708 4708 * so we check for circularity by sending a scout pointer
4709 4709 * ahead two elements for every element that we iterate over;
4710 4710 * if the list is circular, these will ultimately point to the
4711 4711 * same element. You may recognize this little trick as the
4712 4712 * answer to a stupid interview question -- one that always
4713 4713 * seems to be asked by those who had to have it laboriously
4714 4714 * explained to them, and who can't even concisely describe
4715 4715 * the conditions under which one would be forced to resort to
4716 4716 * this technique. Needless to say, those conditions are
4717 4717 * found here -- and probably only here. Is this the only use
4718 4718 * of this infamous trick in shipping, production code? If it
4719 4719 * isn't, it probably should be...
4720 4720 */
4721 4721 if (minor != -1) {
4722 4722 uintptr_t maddr = dtrace_loadptr(daddr +
4723 4723 offsetof(struct dev_info, devi_minor));
4724 4724
4725 4725 uintptr_t next = offsetof(struct ddi_minor_data, next);
4726 4726 uintptr_t name = offsetof(struct ddi_minor_data,
4727 4727 d_minor) + offsetof(struct ddi_minor, name);
4728 4728 uintptr_t dev = offsetof(struct ddi_minor_data,
4729 4729 d_minor) + offsetof(struct ddi_minor, dev);
4730 4730 uintptr_t scout;
4731 4731
4732 4732 if (maddr != NULL)
4733 4733 scout = dtrace_loadptr(maddr + next);
4734 4734
4735 4735 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4736 4736 uint64_t m;
4737 4737 #ifdef _LP64
4738 4738 m = dtrace_load64(maddr + dev) & MAXMIN64;
4739 4739 #else
4740 4740 m = dtrace_load32(maddr + dev) & MAXMIN;
4741 4741 #endif
4742 4742 if (m != minor) {
4743 4743 maddr = dtrace_loadptr(maddr + next);
4744 4744
4745 4745 if (scout == NULL)
4746 4746 continue;
4747 4747
4748 4748 scout = dtrace_loadptr(scout + next);
4749 4749
4750 4750 if (scout == NULL)
4751 4751 continue;
4752 4752
4753 4753 scout = dtrace_loadptr(scout + next);
4754 4754
4755 4755 if (scout == NULL)
4756 4756 continue;
4757 4757
4758 4758 if (scout == maddr) {
4759 4759 *flags |= CPU_DTRACE_ILLOP;
4760 4760 break;
4761 4761 }
4762 4762
4763 4763 continue;
4764 4764 }
4765 4765
4766 4766 /*
4767 4767 * We have the minor data. Now we need to
4768 4768 * copy the minor's name into the end of the
4769 4769 * pathname.
4770 4770 */
4771 4771 s = (char *)dtrace_loadptr(maddr + name);
4772 4772 len = dtrace_strlen(s, size);
4773 4773
4774 4774 if (*flags & CPU_DTRACE_FAULT)
4775 4775 break;
4776 4776
4777 4777 if (len != 0) {
4778 4778 if ((end -= (len + 1)) < start)
4779 4779 break;
4780 4780
4781 4781 *end = ':';
4782 4782 }
4783 4783
4784 4784 for (i = 1; i <= len; i++)
4785 4785 end[i] = dtrace_load8((uintptr_t)s++);
4786 4786 break;
4787 4787 }
4788 4788 }
4789 4789
4790 4790 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4791 4791 ddi_node_state_t devi_state;
4792 4792
4793 4793 devi_state = dtrace_load32(daddr +
4794 4794 offsetof(struct dev_info, devi_node_state));
4795 4795
4796 4796 if (*flags & CPU_DTRACE_FAULT)
4797 4797 break;
4798 4798
4799 4799 if (devi_state >= DS_INITIALIZED) {
4800 4800 s = (char *)dtrace_loadptr(daddr +
4801 4801 offsetof(struct dev_info, devi_addr));
4802 4802 len = dtrace_strlen(s, size);
4803 4803
4804 4804 if (*flags & CPU_DTRACE_FAULT)
4805 4805 break;
4806 4806
4807 4807 if (len != 0) {
4808 4808 if ((end -= (len + 1)) < start)
4809 4809 break;
4810 4810
4811 4811 *end = '@';
4812 4812 }
4813 4813
4814 4814 for (i = 1; i <= len; i++)
4815 4815 end[i] = dtrace_load8((uintptr_t)s++);
4816 4816 }
4817 4817
4818 4818 /*
4819 4819 * Now for the node name...
4820 4820 */
4821 4821 s = (char *)dtrace_loadptr(daddr +
4822 4822 offsetof(struct dev_info, devi_node_name));
4823 4823
4824 4824 daddr = dtrace_loadptr(daddr +
4825 4825 offsetof(struct dev_info, devi_parent));
4826 4826
4827 4827 /*
4828 4828 * If our parent is NULL (that is, if we're the root
4829 4829 * node), we're going to use the special path
4830 4830 * "devices".
4831 4831 */
4832 4832 if (daddr == NULL)
4833 4833 s = "devices";
4834 4834
4835 4835 len = dtrace_strlen(s, size);
4836 4836 if (*flags & CPU_DTRACE_FAULT)
4837 4837 break;
4838 4838
4839 4839 if ((end -= (len + 1)) < start)
4840 4840 break;
4841 4841
4842 4842 for (i = 1; i <= len; i++)
4843 4843 end[i] = dtrace_load8((uintptr_t)s++);
4844 4844 *end = '/';
4845 4845
4846 4846 if (depth++ > dtrace_devdepth_max) {
4847 4847 *flags |= CPU_DTRACE_ILLOP;
4848 4848 break;
4849 4849 }
4850 4850 }
4851 4851
4852 4852 if (end < start)
4853 4853 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4854 4854
4855 4855 if (daddr == NULL) {
4856 4856 regs[rd] = (uintptr_t)end;
4857 4857 mstate->dtms_scratch_ptr += size;
4858 4858 }
4859 4859
4860 4860 break;
4861 4861 }
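
	/*
	 * Stripped of the safe-load machinery, the "scout" logic above is the
	 * classic two-pointer cycle check over a singly-linked list:
	 *
	 *	slow = head;
	 *	fast = (head == NULL) ? NULL : head->next;
	 *	while (slow != NULL) {
	 *		slow = slow->next;
	 *		if (fast == NULL || (fast = fast->next) == NULL)
	 *			continue;
	 *		if ((fast = fast->next) == NULL)
	 *			continue;
	 *		if (fast == slow)
	 *			break;		(circular list detected)
	 *	}
	 *
	 * Because the scout advances two links for every one link the walk
	 * advances, a circular list guarantees the two pointers eventually
	 * meet, which the code above reports as CPU_DTRACE_ILLOP.
	 */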
4862 4862
4863 4863 case DIF_SUBR_STRJOIN: {
4864 4864 char *d = (char *)mstate->dtms_scratch_ptr;
4865 4865 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4866 4866 uintptr_t s1 = tupregs[0].dttk_value;
4867 4867 uintptr_t s2 = tupregs[1].dttk_value;
4868 4868 int i = 0;
4869 4869
4870 4870 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4871 4871 !dtrace_strcanload(s2, size, mstate, vstate)) {
4872 4872 regs[rd] = NULL;
4873 4873 break;
4874 4874 }
4875 4875
4876 4876 if (!DTRACE_INSCRATCH(mstate, size)) {
4877 4877 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4878 4878 regs[rd] = NULL;
4879 4879 break;
4880 4880 }
4881 4881
4882 4882 for (;;) {
4883 4883 if (i >= size) {
4884 4884 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4885 4885 regs[rd] = NULL;
4886 4886 break;
4887 4887 }
4888 4888
4889 4889 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4890 4890 i--;
4891 4891 break;
4892 4892 }
4893 4893 }
4894 4894
4895 4895 for (;;) {
4896 4896 if (i >= size) {
4897 4897 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4898 4898 regs[rd] = NULL;
4899 4899 break;
4900 4900 }
4901 4901
4902 4902 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4903 4903 break;
4904 4904 }
4905 4905
4906 4906 if (i < size) {
4907 4907 mstate->dtms_scratch_ptr += i;
4908 4908 regs[rd] = (uintptr_t)d;
4909 4909 }
4910 4910
4911 4911 break;
4912 4912 }
4913 4913
4914 4914 case DIF_SUBR_STRTOLL: {
4915 4915 uintptr_t s = tupregs[0].dttk_value;
4916 4916 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4917 4917 int base = 10;
4918 4918
4919 4919 if (nargs > 1) {
4920 4920 if ((base = tupregs[1].dttk_value) <= 1 ||
4921 4921 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4922 4922 *flags |= CPU_DTRACE_ILLOP;
4923 4923 break;
4924 4924 }
4925 4925 }
4926 4926
4927 4927 if (!dtrace_strcanload(s, size, mstate, vstate)) {
4928 4928 regs[rd] = INT64_MIN;
4929 4929 break;
4930 4930 }
4931 4931
4932 4932 regs[rd] = dtrace_strtoll((char *)s, base, size);
4933 4933 break;
4934 4934 }
4935 4935
4936 4936 case DIF_SUBR_LLTOSTR: {
4937 4937 int64_t i = (int64_t)tupregs[0].dttk_value;
4938 4938 uint64_t val, digit;
4939 4939 uint64_t size = 65; /* enough room for 2^64 in binary */
4940 4940 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4941 4941 int base = 10;
4942 4942
4943 4943 if (nargs > 1) {
4944 4944 if ((base = tupregs[1].dttk_value) <= 1 ||
4945 4945 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4946 4946 *flags |= CPU_DTRACE_ILLOP;
4947 4947 break;
4948 4948 }
4949 4949 }
4950 4950
4951 4951 val = (base == 10 && i < 0) ? i * -1 : i;
4952 4952
4953 4953 if (!DTRACE_INSCRATCH(mstate, size)) {
4954 4954 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4955 4955 regs[rd] = NULL;
4956 4956 break;
4957 4957 }
4958 4958
4959 4959 for (*end-- = '\0'; val; val /= base) {
4960 4960 if ((digit = val % base) <= '9' - '0') {
4961 4961 *end-- = '0' + digit;
4962 4962 } else {
4963 4963 *end-- = 'a' + (digit - ('9' - '0') - 1);
4964 4964 }
4965 4965 }
4966 4966
4967 4967 if (i == 0 && base == 16)
4968 4968 *end-- = '0';
4969 4969
4970 4970 if (base == 16)
4971 4971 *end-- = 'x';
4972 4972
4973 4973 if (i == 0 || base == 8 || base == 16)
4974 4974 *end-- = '0';
4975 4975
4976 4976 if (i < 0 && base == 10)
4977 4977 *end-- = '-';
4978 4978
4979 4979 regs[rd] = (uintptr_t)end + 1;
4980 4980 mstate->dtms_scratch_ptr += size;
4981 4981 break;
4982 4982 }
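/*
 * [Editor's illustrative sketch -- not part of this webrev.]  A minimal
 * userland rendering of the backward digit-emission approach used by
 * lltostr() above: digits are written from the tail of a caller-supplied
 * buffer and a pointer to the first character is returned.  The name
 * lltostr_sketch() is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static char *
lltostr_sketch(int64_t i, int base, char *buf, size_t size)
{
	char *end = buf + size - 1;
	uint64_t val = (uint64_t)i, digit;

	if (base == 10 && i < 0)
		val = -val;		/* unsigned negation is well-defined */

	for (*end-- = '\0'; val; val /= base) {
		if ((digit = val % base) <= 9)
			*end-- = '0' + digit;
		else
			*end-- = 'a' + (digit - 10);
	}

	if (i == 0 && base == 16)
		*end-- = '0';
	if (base == 16)
		*end-- = 'x';
	if (i == 0 || base == 8 || base == 16)
		*end-- = '0';
	if (i < 0 && base == 10)
		*end-- = '-';

	return (end + 1);
}

int
main(void)
{
	char buf[65];	/* 65: room for 2^64 rendered in binary, plus NUL */

	printf("%s\n", lltostr_sketch(-1234, 10, buf, sizeof (buf)));	/* -1234 */
	printf("%s\n", lltostr_sketch(255, 16, buf, sizeof (buf)));	/* 0xff */
	return (0);
}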
4983 4983
4984 4984 case DIF_SUBR_HTONS:
4985 4985 case DIF_SUBR_NTOHS:
4986 4986 #ifdef _BIG_ENDIAN
4987 4987 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4988 4988 #else
4989 4989 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4990 4990 #endif
4991 4991 break;
4992 4992
4993 4993
4994 4994 case DIF_SUBR_HTONL:
4995 4995 case DIF_SUBR_NTOHL:
4996 4996 #ifdef _BIG_ENDIAN
4997 4997 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4998 4998 #else
4999 4999 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5000 5000 #endif
5001 5001 break;
5002 5002
5003 5003
5004 5004 case DIF_SUBR_HTONLL:
5005 5005 case DIF_SUBR_NTOHLL:
5006 5006 #ifdef _BIG_ENDIAN
5007 5007 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5008 5008 #else
5009 5009 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5010 5010 #endif
5011 5011 break;
5012 5012
5013 5013
5014 5014 case DIF_SUBR_DIRNAME:
5015 5015 case DIF_SUBR_BASENAME: {
5016 5016 char *dest = (char *)mstate->dtms_scratch_ptr;
5017 5017 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5018 5018 uintptr_t src = tupregs[0].dttk_value;
5019 5019 int i, j, len = dtrace_strlen((char *)src, size);
5020 5020 int lastbase = -1, firstbase = -1, lastdir = -1;
5021 5021 int start, end;
5022 5022
5023 5023 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5024 5024 regs[rd] = NULL;
5025 5025 break;
5026 5026 }
5027 5027
5028 5028 if (!DTRACE_INSCRATCH(mstate, size)) {
5029 5029 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5030 5030 regs[rd] = NULL;
5031 5031 break;
5032 5032 }
5033 5033
5034 5034 /*
5035 5035 		 * The basename and dirname of a zero-length string are
5036 5036 		 * defined to be ".".
5037 5037 */
5038 5038 if (len == 0) {
5039 5039 len = 1;
5040 5040 src = (uintptr_t)".";
5041 5041 }
5042 5042
5043 5043 /*
5044 5044 * Start from the back of the string, moving back toward the
5045 5045 * front until we see a character that isn't a slash. That
5046 5046 * character is the last character in the basename.
5047 5047 */
5048 5048 for (i = len - 1; i >= 0; i--) {
5049 5049 if (dtrace_load8(src + i) != '/')
5050 5050 break;
5051 5051 }
5052 5052
5053 5053 if (i >= 0)
5054 5054 lastbase = i;
5055 5055
5056 5056 /*
5057 5057 * Starting from the last character in the basename, move
5058 5058 * towards the front until we find a slash. The character
5059 5059 * that we processed immediately before that is the first
5060 5060 * character in the basename.
5061 5061 */
5062 5062 for (; i >= 0; i--) {
5063 5063 if (dtrace_load8(src + i) == '/')
5064 5064 break;
5065 5065 }
5066 5066
5067 5067 if (i >= 0)
5068 5068 firstbase = i + 1;
5069 5069
5070 5070 /*
5071 5071 * Now keep going until we find a non-slash character. That
5072 5072 * character is the last character in the dirname.
5073 5073 */
5074 5074 for (; i >= 0; i--) {
5075 5075 if (dtrace_load8(src + i) != '/')
5076 5076 break;
5077 5077 }
5078 5078
5079 5079 if (i >= 0)
5080 5080 lastdir = i;
5081 5081
5082 5082 ASSERT(!(lastbase == -1 && firstbase != -1));
5083 5083 ASSERT(!(firstbase == -1 && lastdir != -1));
5084 5084
5085 5085 if (lastbase == -1) {
5086 5086 /*
5087 5087 * We didn't find a non-slash character. We know that
5088 5088 * the length is non-zero, so the whole string must be
5089 5089 * slashes. In either the dirname or the basename
5090 5090 * case, we return '/'.
5091 5091 */
5092 5092 ASSERT(firstbase == -1);
5093 5093 firstbase = lastbase = lastdir = 0;
5094 5094 }
5095 5095
5096 5096 if (firstbase == -1) {
5097 5097 /*
5098 5098 * The entire string consists only of a basename
5099 5099 * component. If we're looking for dirname, we need
5100 5100 * to change our string to be just "."; if we're
5101 5101 * looking for a basename, we'll just set the first
5102 5102 * character of the basename to be 0.
5103 5103 */
5104 5104 if (subr == DIF_SUBR_DIRNAME) {
5105 5105 ASSERT(lastdir == -1);
5106 5106 src = (uintptr_t)".";
5107 5107 lastdir = 0;
5108 5108 } else {
5109 5109 firstbase = 0;
5110 5110 }
5111 5111 }
5112 5112
5113 5113 if (subr == DIF_SUBR_DIRNAME) {
5114 5114 if (lastdir == -1) {
5115 5115 /*
5116 5116 * We know that we have a slash in the name --
5117 5117 * or lastdir would be set to 0, above. And
5118 5118 * because lastdir is -1, we know that this
5119 5119 * slash must be the first character. (That
5120 5120 * is, the full string must be of the form
5121 5121 * "/basename".) In this case, the last
5122 5122 * character of the directory name is 0.
5123 5123 */
5124 5124 lastdir = 0;
5125 5125 }
5126 5126
5127 5127 start = 0;
5128 5128 end = lastdir;
5129 5129 } else {
5130 5130 ASSERT(subr == DIF_SUBR_BASENAME);
5131 5131 ASSERT(firstbase != -1 && lastbase != -1);
5132 5132 start = firstbase;
5133 5133 end = lastbase;
5134 5134 }
5135 5135
5136 5136 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5137 5137 dest[j] = dtrace_load8(src + i);
5138 5138
5139 5139 dest[j] = '\0';
5140 5140 regs[rd] = (uintptr_t)dest;
5141 5141 mstate->dtms_scratch_ptr += size;
5142 5142 break;
5143 5143 }
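/*
 * [Editor's illustrative sketch -- not part of this webrev.]  The three
 * backward scans that dirname()/basename() perform above, shown on a
 * plain NUL-terminated string: find the last character of the basename,
 * its first character, and the last character of the dirname.  The name
 * split_path() is hypothetical.
 */
#include <stdio.h>
#include <string.h>

static void
split_path(const char *path)
{
	int len = (int)strlen(path);
	int lastbase = -1, firstbase = -1, lastdir = -1;
	int i;

	for (i = len - 1; i >= 0; i--)		/* skip trailing slashes */
		if (path[i] != '/')
			break;
	if (i >= 0)
		lastbase = i;

	for (; i >= 0; i--)			/* back up to the preceding slash */
		if (path[i] == '/')
			break;
	if (i >= 0)
		firstbase = i + 1;

	for (; i >= 0; i--)			/* skip slashes before the dirname */
		if (path[i] != '/')
			break;
	if (i >= 0)
		lastdir = i;

	printf("%-12s lastbase=%d firstbase=%d lastdir=%d\n",
	    path, lastbase, firstbase, lastdir);
}

int
main(void)
{
	split_path("/foo/bar//");	/* lastbase=7 firstbase=5 lastdir=3 */
	split_path("bar");		/* lastbase=2 firstbase=-1 lastdir=-1 */
	return (0);
}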
5144 5144
5145 5145 case DIF_SUBR_GETF: {
5146 5146 uintptr_t fd = tupregs[0].dttk_value;
5147 5147 uf_info_t *finfo = &curthread->t_procp->p_user.u_finfo;
5148 5148 file_t *fp;
5149 5149
5150 5150 if (!dtrace_priv_proc(state, mstate)) {
5151 5151 regs[rd] = NULL;
5152 5152 break;
5153 5153 }
5154 5154
5155 5155 /*
5156 5156 * This is safe because fi_nfiles only increases, and the
5157 5157 * fi_list array is not freed when the array size doubles.
5158 5158 * (See the comment in flist_grow() for details on the
5159 5159 * management of the u_finfo structure.)
5160 5160 */
5161 5161 fp = fd < finfo->fi_nfiles ? finfo->fi_list[fd].uf_file : NULL;
5162 5162
5163 5163 mstate->dtms_getf = fp;
5164 5164 regs[rd] = (uintptr_t)fp;
5165 5165 break;
5166 5166 }
5167 5167
5168 5168 case DIF_SUBR_CLEANPATH: {
5169 5169 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5170 5170 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5171 5171 uintptr_t src = tupregs[0].dttk_value;
5172 5172 int i = 0, j = 0;
5173 5173 zone_t *z;
5174 5174
5175 5175 if (!dtrace_strcanload(src, size, mstate, vstate)) {
5176 5176 regs[rd] = NULL;
5177 5177 break;
5178 5178 }
5179 5179
5180 5180 if (!DTRACE_INSCRATCH(mstate, size)) {
5181 5181 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5182 5182 regs[rd] = NULL;
5183 5183 break;
5184 5184 }
5185 5185
5186 5186 /*
5187 5187 * Move forward, loading each character.
5188 5188 */
5189 5189 do {
5190 5190 c = dtrace_load8(src + i++);
5191 5191 next:
5192 5192 			if (j + 5 >= size)	/* 5 = sizeof ("/..c") */
5193 5193 break;
5194 5194
5195 5195 if (c != '/') {
5196 5196 dest[j++] = c;
5197 5197 continue;
5198 5198 }
5199 5199
5200 5200 c = dtrace_load8(src + i++);
5201 5201
5202 5202 if (c == '/') {
5203 5203 /*
5204 5204 * We have two slashes -- we can just advance
5205 5205 * to the next character.
5206 5206 */
5207 5207 goto next;
5208 5208 }
5209 5209
5210 5210 if (c != '.') {
5211 5211 /*
5212 5212 * This is not "." and it's not ".." -- we can
5213 5213 * just store the "/" and this character and
5214 5214 * drive on.
5215 5215 */
5216 5216 dest[j++] = '/';
5217 5217 dest[j++] = c;
5218 5218 continue;
5219 5219 }
5220 5220
5221 5221 c = dtrace_load8(src + i++);
5222 5222
5223 5223 if (c == '/') {
5224 5224 /*
5225 5225 * This is a "/./" component. We're not going
5226 5226 * to store anything in the destination buffer;
5227 5227 * we're just going to go to the next component.
5228 5228 */
5229 5229 goto next;
5230 5230 }
5231 5231
5232 5232 if (c != '.') {
5233 5233 /*
5234 5234 * This is not ".." -- we can just store the
5235 5235 * "/." and this character and continue
5236 5236 * processing.
5237 5237 */
5238 5238 dest[j++] = '/';
5239 5239 dest[j++] = '.';
5240 5240 dest[j++] = c;
5241 5241 continue;
5242 5242 }
5243 5243
5244 5244 c = dtrace_load8(src + i++);
5245 5245
5246 5246 if (c != '/' && c != '\0') {
5247 5247 /*
5248 5248 * This is not ".." -- it's "..[mumble]".
5249 5249 * We'll store the "/.." and this character
5250 5250 * and continue processing.
5251 5251 */
5252 5252 dest[j++] = '/';
5253 5253 dest[j++] = '.';
5254 5254 dest[j++] = '.';
5255 5255 dest[j++] = c;
5256 5256 continue;
5257 5257 }
5258 5258
5259 5259 /*
5260 5260 * This is "/../" or "/..\0". We need to back up
5261 5261 * our destination pointer until we find a "/".
5262 5262 */
5263 5263 i--;
5264 5264 while (j != 0 && dest[--j] != '/')
5265 5265 continue;
5266 5266
5267 5267 if (c == '\0')
5268 5268 dest[++j] = '/';
5269 5269 } while (c != '\0');
5270 5270
5271 5271 dest[j] = '\0';
5272 5272
5273 5273 if (mstate->dtms_getf != NULL &&
5274 5274 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
5275 5275 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
5276 5276 /*
5277 5277 * If we've done a getf() as a part of this ECB and we
5278 5278 * don't have kernel access (and we're not in the global
5279 5279 * zone), check if the path we cleaned up begins with
5280 5280 * the zone's root path, and trim it off if so. Note
5281 5281 * that this is an output cleanliness issue, not a
5282 5282 * security issue: knowing one's zone root path does
5283 5283 * not enable privilege escalation.
5284 5284 */
5285 5285 if (strstr(dest, z->zone_rootpath) == dest)
5286 5286 dest += strlen(z->zone_rootpath) - 1;
5287 5287 }
5288 5288
5289 5289 regs[rd] = (uintptr_t)dest;
5290 5290 mstate->dtms_scratch_ptr += size;
5291 5291 break;
5292 5292 }
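/*
 * [Editor's illustrative sketch -- not part of this webrev.]  A
 * simplified, component-based userland cleaner that collapses "//",
 * "/./" and "/../" the way cleanpath() does above.  It is written for
 * clarity rather than fidelity and does not reproduce every edge case
 * of the in-kernel, character-at-a-time routine; cleanpath_sketch()
 * is a hypothetical name.
 */
#include <stdio.h>
#include <string.h>

static void
cleanpath_sketch(const char *src, char *dest, size_t size)
{
	const char *starts[64];		/* start of each kept component */
	size_t lens[64];
	int ncomp = 0;
	size_t j = 0;

	while (*src != '\0') {
		while (*src == '/')
			src++;
		if (*src == '\0')
			break;

		const char *start = src;
		while (*src != '/' && *src != '\0')
			src++;
		size_t len = (size_t)(src - start);

		if (len == 1 && start[0] == '.')
			continue;		/* "." -- drop it */
		if (len == 2 && start[0] == '.' && start[1] == '.') {
			if (ncomp > 0)
				ncomp--;	/* ".." -- pop the previous one */
			continue;
		}
		if (ncomp < 64) {
			starts[ncomp] = start;
			lens[ncomp] = len;
			ncomp++;
		}
	}

	for (int i = 0; i < ncomp && j + lens[i] + 2 < size; i++) {
		dest[j++] = '/';
		memcpy(dest + j, starts[i], lens[i]);
		j += lens[i];
	}
	if (j == 0)
		dest[j++] = '/';
	dest[j] = '\0';
}

int
main(void)
{
	char buf[256];

	cleanpath_sketch("/foo//bar/./baz/../qux", buf, sizeof (buf));
	printf("%s\n", buf);		/* /foo/bar/qux */
	return (0);
}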
5293 5293
5294 5294 case DIF_SUBR_INET_NTOA:
5295 5295 case DIF_SUBR_INET_NTOA6:
5296 5296 case DIF_SUBR_INET_NTOP: {
5297 5297 size_t size;
5298 5298 int af, argi, i;
5299 5299 char *base, *end;
5300 5300
5301 5301 if (subr == DIF_SUBR_INET_NTOP) {
5302 5302 af = (int)tupregs[0].dttk_value;
5303 5303 argi = 1;
5304 5304 } else {
5305 5305 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
5306 5306 argi = 0;
5307 5307 }
5308 5308
5309 5309 if (af == AF_INET) {
5310 5310 ipaddr_t ip4;
5311 5311 uint8_t *ptr8, val;
5312 5312
5313 5313 /*
5314 5314 * Safely load the IPv4 address.
5315 5315 */
5316 5316 ip4 = dtrace_load32(tupregs[argi].dttk_value);
5317 5317
5318 5318 /*
5319 5319 			 * Check that an IPv4 string will fit in scratch.
5320 5320 */
5321 5321 size = INET_ADDRSTRLEN;
5322 5322 if (!DTRACE_INSCRATCH(mstate, size)) {
5323 5323 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5324 5324 regs[rd] = NULL;
5325 5325 break;
5326 5326 }
5327 5327 base = (char *)mstate->dtms_scratch_ptr;
5328 5328 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5329 5329
5330 5330 /*
5331 5331 * Stringify as a dotted decimal quad.
5332 5332 */
5333 5333 *end-- = '\0';
5334 5334 ptr8 = (uint8_t *)&ip4;
5335 5335 for (i = 3; i >= 0; i--) {
5336 5336 val = ptr8[i];
5337 5337
5338 5338 if (val == 0) {
5339 5339 *end-- = '0';
5340 5340 } else {
5341 5341 for (; val; val /= 10) {
5342 5342 *end-- = '0' + (val % 10);
5343 5343 }
5344 5344 }
5345 5345
5346 5346 if (i > 0)
5347 5347 *end-- = '.';
5348 5348 }
5349 5349 ASSERT(end + 1 >= base);
5350 5350
5351 5351 } else if (af == AF_INET6) {
5352 5352 struct in6_addr ip6;
5353 5353 int firstzero, tryzero, numzero, v6end;
5354 5354 uint16_t val;
5355 5355 const char digits[] = "0123456789abcdef";
5356 5356
5357 5357 /*
5358 5358 * Stringify using RFC 1884 convention 2 - 16 bit
5359 5359 * hexadecimal values with a zero-run compression.
5360 5360 * Lower case hexadecimal digits are used.
5361 5361 * eg, fe80::214:4fff:fe0b:76c8.
5362 5362 * The IPv4 embedded form is returned for inet_ntop,
5363 5363 * just the IPv4 string is returned for inet_ntoa6.
5364 5364 */
5365 5365
5366 5366 /*
5367 5367 * Safely load the IPv6 address.
5368 5368 */
5369 5369 dtrace_bcopy(
5370 5370 (void *)(uintptr_t)tupregs[argi].dttk_value,
5371 5371 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5372 5372
5373 5373 /*
5374 5374 			 * Check that an IPv6 string will fit in scratch.
5375 5375 */
5376 5376 size = INET6_ADDRSTRLEN;
5377 5377 if (!DTRACE_INSCRATCH(mstate, size)) {
5378 5378 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5379 5379 regs[rd] = NULL;
5380 5380 break;
5381 5381 }
5382 5382 base = (char *)mstate->dtms_scratch_ptr;
5383 5383 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5384 5384 *end-- = '\0';
5385 5385
5386 5386 /*
5387 5387 * Find the longest run of 16 bit zero values
5388 5388 * for the single allowed zero compression - "::".
5389 5389 */
5390 5390 firstzero = -1;
5391 5391 tryzero = -1;
5392 5392 numzero = 1;
5393 5393 for (i = 0; i < sizeof (struct in6_addr); i++) {
5394 5394 if (ip6._S6_un._S6_u8[i] == 0 &&
5395 5395 tryzero == -1 && i % 2 == 0) {
5396 5396 tryzero = i;
5397 5397 continue;
5398 5398 }
5399 5399
5400 5400 if (tryzero != -1 &&
5401 5401 (ip6._S6_un._S6_u8[i] != 0 ||
5402 5402 i == sizeof (struct in6_addr) - 1)) {
5403 5403
5404 5404 if (i - tryzero <= numzero) {
5405 5405 tryzero = -1;
5406 5406 continue;
5407 5407 }
5408 5408
5409 5409 firstzero = tryzero;
5410 5410 numzero = i - i % 2 - tryzero;
5411 5411 tryzero = -1;
5412 5412
5413 5413 if (ip6._S6_un._S6_u8[i] == 0 &&
5414 5414 i == sizeof (struct in6_addr) - 1)
5415 5415 numzero += 2;
5416 5416 }
5417 5417 }
5418 5418 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
5419 5419
5420 5420 /*
5421 5421 * Check for an IPv4 embedded address.
5422 5422 */
5423 5423 v6end = sizeof (struct in6_addr) - 2;
5424 5424 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5425 5425 IN6_IS_ADDR_V4COMPAT(&ip6)) {
5426 5426 for (i = sizeof (struct in6_addr) - 1;
5427 5427 i >= DTRACE_V4MAPPED_OFFSET; i--) {
5428 5428 ASSERT(end >= base);
5429 5429
5430 5430 val = ip6._S6_un._S6_u8[i];
5431 5431
5432 5432 if (val == 0) {
5433 5433 *end-- = '0';
5434 5434 } else {
5435 5435 for (; val; val /= 10) {
5436 5436 *end-- = '0' + val % 10;
5437 5437 }
5438 5438 }
5439 5439
5440 5440 if (i > DTRACE_V4MAPPED_OFFSET)
5441 5441 *end-- = '.';
5442 5442 }
5443 5443
5444 5444 if (subr == DIF_SUBR_INET_NTOA6)
5445 5445 goto inetout;
5446 5446
5447 5447 /*
5448 5448 * Set v6end to skip the IPv4 address that
5449 5449 * we have already stringified.
5450 5450 */
5451 5451 v6end = 10;
5452 5452 }
5453 5453
5454 5454 /*
5455 5455 * Build the IPv6 string by working through the
5456 5456 * address in reverse.
5457 5457 */
5458 5458 for (i = v6end; i >= 0; i -= 2) {
5459 5459 ASSERT(end >= base);
5460 5460
5461 5461 if (i == firstzero + numzero - 2) {
5462 5462 *end-- = ':';
5463 5463 *end-- = ':';
5464 5464 i -= numzero - 2;
5465 5465 continue;
5466 5466 }
5467 5467
5468 5468 if (i < 14 && i != firstzero - 2)
5469 5469 *end-- = ':';
5470 5470
5471 5471 val = (ip6._S6_un._S6_u8[i] << 8) +
5472 5472 ip6._S6_un._S6_u8[i + 1];
5473 5473
5474 5474 if (val == 0) {
5475 5475 *end-- = '0';
5476 5476 } else {
5477 5477 for (; val; val /= 16) {
5478 5478 *end-- = digits[val % 16];
5479 5479 }
5480 5480 }
5481 5481 }
5482 5482 ASSERT(end + 1 >= base);
5483 5483
5484 5484 } else {
5485 5485 /*
5486 5486 			 * The user didn't use AF_INET or AF_INET6.
5487 5487 */
5488 5488 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5489 5489 regs[rd] = NULL;
5490 5490 break;
5491 5491 }
5492 5492
5493 5493 inetout: regs[rd] = (uintptr_t)end + 1;
5494 5494 mstate->dtms_scratch_ptr += size;
5495 5495 break;
5496 5496 }
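/*
 * [Editor's illustrative sketch -- not part of this webrev.]  A
 * simplified version of the "longest run of zero 16-bit groups" search
 * that the inet_ntop() case performs above before emitting the single
 * "::" compression.  It tracks the best run in one forward pass rather
 * than mirroring the in-kernel bookkeeping; longest_zero_run() is a
 * hypothetical name.
 */
#include <stdio.h>
#include <stdint.h>

static int
longest_zero_run(const uint16_t groups[8], int *lenp)
{
	int best = -1, bestlen = 0;
	int cur = -1, curlen = 0;

	for (int i = 0; i < 8; i++) {
		if (groups[i] == 0) {
			if (cur == -1)
				cur = i;
			curlen++;
			if (curlen > bestlen) {
				best = cur;
				bestlen = curlen;
			}
		} else {
			cur = -1;
			curlen = 0;
		}
	}

	*lenp = bestlen;
	return (best);
}

int
main(void)
{
	/* fe80::214:4fff:fe0b:76c8, as eight 16-bit groups */
	uint16_t addr[8] = { 0xfe80, 0, 0, 0, 0x0214, 0x4fff, 0xfe0b, 0x76c8 };
	int len, start = longest_zero_run(addr, &len);

	printf("zero run starts at group %d, length %d\n", start, len);	/* 1, 3 */
	return (0);
}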
5497 5497
5498 5498 }
5499 5499 }
5500 5500
5501 5501 /*
5502 5502 * Emulate the execution of DTrace IR instructions specified by the given
5503 5503 * DIF object. This function is deliberately void of assertions as all of
5504 5504 * the necessary checks are handled by a call to dtrace_difo_validate().
5505 5505 */
5506 5506 static uint64_t
5507 5507 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5508 5508 dtrace_vstate_t *vstate, dtrace_state_t *state)
5509 5509 {
5510 5510 const dif_instr_t *text = difo->dtdo_buf;
5511 5511 const uint_t textlen = difo->dtdo_len;
5512 5512 const char *strtab = difo->dtdo_strtab;
5513 5513 const uint64_t *inttab = difo->dtdo_inttab;
5514 5514
5515 5515 uint64_t rval = 0;
5516 5516 dtrace_statvar_t *svar;
5517 5517 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5518 5518 dtrace_difv_t *v;
5519 5519 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5520 5520 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
5521 5521
5522 5522 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5523 5523 uint64_t regs[DIF_DIR_NREGS];
5524 5524 uint64_t *tmp;
5525 5525
5526 5526 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5527 5527 int64_t cc_r;
5528 5528 uint_t pc = 0, id, opc;
5529 5529 uint8_t ttop = 0;
5530 5530 dif_instr_t instr;
5531 5531 uint_t r1, r2, rd;
5532 5532
5533 5533 /*
5534 5534 * We stash the current DIF object into the machine state: we need it
5535 5535 * for subsequent access checking.
5536 5536 */
5537 5537 mstate->dtms_difo = difo;
5538 5538
5539 5539 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
5540 5540
5541 5541 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5542 5542 opc = pc;
5543 5543
5544 5544 instr = text[pc++];
5545 5545 r1 = DIF_INSTR_R1(instr);
5546 5546 r2 = DIF_INSTR_R2(instr);
5547 5547 rd = DIF_INSTR_RD(instr);
5548 5548
5549 5549 switch (DIF_INSTR_OP(instr)) {
5550 5550 case DIF_OP_OR:
5551 5551 regs[rd] = regs[r1] | regs[r2];
5552 5552 break;
5553 5553 case DIF_OP_XOR:
5554 5554 regs[rd] = regs[r1] ^ regs[r2];
5555 5555 break;
5556 5556 case DIF_OP_AND:
5557 5557 regs[rd] = regs[r1] & regs[r2];
5558 5558 break;
5559 5559 case DIF_OP_SLL:
5560 5560 regs[rd] = regs[r1] << regs[r2];
5561 5561 break;
5562 5562 case DIF_OP_SRL:
5563 5563 regs[rd] = regs[r1] >> regs[r2];
5564 5564 break;
5565 5565 case DIF_OP_SUB:
5566 5566 regs[rd] = regs[r1] - regs[r2];
5567 5567 break;
5568 5568 case DIF_OP_ADD:
5569 5569 regs[rd] = regs[r1] + regs[r2];
5570 5570 break;
5571 5571 case DIF_OP_MUL:
5572 5572 regs[rd] = regs[r1] * regs[r2];
5573 5573 break;
5574 5574 case DIF_OP_SDIV:
5575 5575 if (regs[r2] == 0) {
5576 5576 regs[rd] = 0;
5577 5577 *flags |= CPU_DTRACE_DIVZERO;
5578 5578 } else {
5579 5579 regs[rd] = (int64_t)regs[r1] /
5580 5580 (int64_t)regs[r2];
5581 5581 }
5582 5582 break;
5583 5583
5584 5584 case DIF_OP_UDIV:
5585 5585 if (regs[r2] == 0) {
5586 5586 regs[rd] = 0;
5587 5587 *flags |= CPU_DTRACE_DIVZERO;
5588 5588 } else {
5589 5589 regs[rd] = regs[r1] / regs[r2];
5590 5590 }
5591 5591 break;
5592 5592
5593 5593 case DIF_OP_SREM:
5594 5594 if (regs[r2] == 0) {
5595 5595 regs[rd] = 0;
5596 5596 *flags |= CPU_DTRACE_DIVZERO;
5597 5597 } else {
5598 5598 regs[rd] = (int64_t)regs[r1] %
5599 5599 (int64_t)regs[r2];
5600 5600 }
5601 5601 break;
5602 5602
5603 5603 case DIF_OP_UREM:
5604 5604 if (regs[r2] == 0) {
5605 5605 regs[rd] = 0;
5606 5606 *flags |= CPU_DTRACE_DIVZERO;
5607 5607 } else {
5608 5608 regs[rd] = regs[r1] % regs[r2];
5609 5609 }
5610 5610 break;
5611 5611
5612 5612 case DIF_OP_NOT:
5613 5613 regs[rd] = ~regs[r1];
5614 5614 break;
5615 5615 case DIF_OP_MOV:
5616 5616 regs[rd] = regs[r1];
5617 5617 break;
5618 5618 case DIF_OP_CMP:
5619 5619 cc_r = regs[r1] - regs[r2];
5620 5620 cc_n = cc_r < 0;
5621 5621 cc_z = cc_r == 0;
5622 5622 cc_v = 0;
5623 5623 cc_c = regs[r1] < regs[r2];
5624 5624 break;
5625 5625 case DIF_OP_TST:
5626 5626 cc_n = cc_v = cc_c = 0;
5627 5627 cc_z = regs[r1] == 0;
5628 5628 break;
5629 5629 case DIF_OP_BA:
5630 5630 pc = DIF_INSTR_LABEL(instr);
5631 5631 break;
5632 5632 case DIF_OP_BE:
5633 5633 if (cc_z)
5634 5634 pc = DIF_INSTR_LABEL(instr);
5635 5635 break;
5636 5636 case DIF_OP_BNE:
5637 5637 if (cc_z == 0)
5638 5638 pc = DIF_INSTR_LABEL(instr);
5639 5639 break;
5640 5640 case DIF_OP_BG:
5641 5641 if ((cc_z | (cc_n ^ cc_v)) == 0)
5642 5642 pc = DIF_INSTR_LABEL(instr);
5643 5643 break;
5644 5644 case DIF_OP_BGU:
5645 5645 if ((cc_c | cc_z) == 0)
5646 5646 pc = DIF_INSTR_LABEL(instr);
5647 5647 break;
5648 5648 case DIF_OP_BGE:
5649 5649 if ((cc_n ^ cc_v) == 0)
5650 5650 pc = DIF_INSTR_LABEL(instr);
5651 5651 break;
5652 5652 case DIF_OP_BGEU:
5653 5653 if (cc_c == 0)
5654 5654 pc = DIF_INSTR_LABEL(instr);
5655 5655 break;
5656 5656 case DIF_OP_BL:
5657 5657 if (cc_n ^ cc_v)
5658 5658 pc = DIF_INSTR_LABEL(instr);
5659 5659 break;
5660 5660 case DIF_OP_BLU:
5661 5661 if (cc_c)
5662 5662 pc = DIF_INSTR_LABEL(instr);
5663 5663 break;
5664 5664 case DIF_OP_BLE:
5665 5665 if (cc_z | (cc_n ^ cc_v))
5666 5666 pc = DIF_INSTR_LABEL(instr);
5667 5667 break;
5668 5668 case DIF_OP_BLEU:
5669 5669 if (cc_c | cc_z)
5670 5670 pc = DIF_INSTR_LABEL(instr);
5671 5671 break;
5672 5672 case DIF_OP_RLDSB:
5673 5673 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5674 5674 break;
5675 5675 /*FALLTHROUGH*/
5676 5676 case DIF_OP_LDSB:
5677 5677 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5678 5678 break;
5679 5679 case DIF_OP_RLDSH:
5680 5680 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5681 5681 break;
5682 5682 /*FALLTHROUGH*/
5683 5683 case DIF_OP_LDSH:
5684 5684 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5685 5685 break;
5686 5686 case DIF_OP_RLDSW:
5687 5687 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5688 5688 break;
5689 5689 /*FALLTHROUGH*/
5690 5690 case DIF_OP_LDSW:
5691 5691 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5692 5692 break;
5693 5693 case DIF_OP_RLDUB:
5694 5694 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5695 5695 break;
5696 5696 /*FALLTHROUGH*/
5697 5697 case DIF_OP_LDUB:
5698 5698 regs[rd] = dtrace_load8(regs[r1]);
5699 5699 break;
5700 5700 case DIF_OP_RLDUH:
5701 5701 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5702 5702 break;
5703 5703 /*FALLTHROUGH*/
5704 5704 case DIF_OP_LDUH:
5705 5705 regs[rd] = dtrace_load16(regs[r1]);
5706 5706 break;
5707 5707 case DIF_OP_RLDUW:
5708 5708 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5709 5709 break;
5710 5710 /*FALLTHROUGH*/
5711 5711 case DIF_OP_LDUW:
5712 5712 regs[rd] = dtrace_load32(regs[r1]);
5713 5713 break;
5714 5714 case DIF_OP_RLDX:
5715 5715 if (!dtrace_canload(regs[r1], 8, mstate, vstate))
5716 5716 break;
5717 5717 /*FALLTHROUGH*/
5718 5718 case DIF_OP_LDX:
5719 5719 regs[rd] = dtrace_load64(regs[r1]);
5720 5720 break;
5721 5721 case DIF_OP_ULDSB:
5722 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5722 5723 regs[rd] = (int8_t)
5723 5724 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5725 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5724 5726 break;
5725 5727 case DIF_OP_ULDSH:
5728 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5726 5729 regs[rd] = (int16_t)
5727 5730 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5731 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5728 5732 break;
5729 5733 case DIF_OP_ULDSW:
5734 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5730 5735 regs[rd] = (int32_t)
5731 5736 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5737 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5732 5738 break;
5733 5739 case DIF_OP_ULDUB:
5740 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5734 5741 regs[rd] =
5735 5742 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5743 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5736 5744 break;
5737 5745 case DIF_OP_ULDUH:
5746 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5738 5747 regs[rd] =
5739 5748 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5749 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5740 5750 break;
5741 5751 case DIF_OP_ULDUW:
5752 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5742 5753 regs[rd] =
5743 5754 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5755 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5744 5756 break;
5745 5757 case DIF_OP_ULDX:
5758 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5746 5759 regs[rd] =
5747 5760 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5761 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5748 5762 break;
5749 5763 case DIF_OP_RET:
5750 5764 rval = regs[rd];
5751 5765 pc = textlen;
5752 5766 break;
5753 5767 case DIF_OP_NOP:
5754 5768 break;
5755 5769 case DIF_OP_SETX:
5756 5770 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5757 5771 break;
5758 5772 case DIF_OP_SETS:
5759 5773 regs[rd] = (uint64_t)(uintptr_t)
5760 5774 (strtab + DIF_INSTR_STRING(instr));
5761 5775 break;
5762 5776 case DIF_OP_SCMP: {
5763 5777 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5764 5778 uintptr_t s1 = regs[r1];
5765 5779 uintptr_t s2 = regs[r2];
5766 5780
5767 5781 if (s1 != NULL &&
5768 5782 !dtrace_strcanload(s1, sz, mstate, vstate))
5769 5783 break;
5770 5784 if (s2 != NULL &&
5771 5785 !dtrace_strcanload(s2, sz, mstate, vstate))
5772 5786 break;
5773 5787
5774 5788 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5775 5789
5776 5790 cc_n = cc_r < 0;
5777 5791 cc_z = cc_r == 0;
5778 5792 cc_v = cc_c = 0;
5779 5793 break;
5780 5794 }
5781 5795 case DIF_OP_LDGA:
5782 5796 regs[rd] = dtrace_dif_variable(mstate, state,
5783 5797 r1, regs[r2]);
5784 5798 break;
5785 5799 case DIF_OP_LDGS:
5786 5800 id = DIF_INSTR_VAR(instr);
5787 5801
5788 5802 if (id >= DIF_VAR_OTHER_UBASE) {
5789 5803 uintptr_t a;
5790 5804
5791 5805 id -= DIF_VAR_OTHER_UBASE;
5792 5806 svar = vstate->dtvs_globals[id];
5793 5807 ASSERT(svar != NULL);
5794 5808 v = &svar->dtsv_var;
5795 5809
5796 5810 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5797 5811 regs[rd] = svar->dtsv_data;
5798 5812 break;
5799 5813 }
5800 5814
5801 5815 a = (uintptr_t)svar->dtsv_data;
5802 5816
5803 5817 if (*(uint8_t *)a == UINT8_MAX) {
5804 5818 /*
5805 5819 * If the 0th byte is set to UINT8_MAX
5806 5820 * then this is to be treated as a
5807 5821 * reference to a NULL variable.
5808 5822 */
5809 5823 regs[rd] = NULL;
5810 5824 } else {
5811 5825 regs[rd] = a + sizeof (uint64_t);
5812 5826 }
5813 5827
5814 5828 break;
5815 5829 }
5816 5830
5817 5831 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5818 5832 break;
5819 5833
5820 5834 case DIF_OP_STGS:
5821 5835 id = DIF_INSTR_VAR(instr);
5822 5836
5823 5837 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5824 5838 id -= DIF_VAR_OTHER_UBASE;
5825 5839
5826 5840 svar = vstate->dtvs_globals[id];
5827 5841 ASSERT(svar != NULL);
5828 5842 v = &svar->dtsv_var;
5829 5843
5830 5844 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5831 5845 uintptr_t a = (uintptr_t)svar->dtsv_data;
5832 5846
5833 5847 ASSERT(a != NULL);
5834 5848 ASSERT(svar->dtsv_size != 0);
5835 5849
5836 5850 if (regs[rd] == NULL) {
5837 5851 *(uint8_t *)a = UINT8_MAX;
5838 5852 break;
5839 5853 } else {
5840 5854 *(uint8_t *)a = 0;
5841 5855 a += sizeof (uint64_t);
5842 5856 }
5843 5857 if (!dtrace_vcanload(
5844 5858 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5845 5859 mstate, vstate))
5846 5860 break;
5847 5861
5848 5862 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5849 5863 (void *)a, &v->dtdv_type);
5850 5864 break;
5851 5865 }
5852 5866
5853 5867 svar->dtsv_data = regs[rd];
5854 5868 break;
5855 5869
5856 5870 case DIF_OP_LDTA:
5857 5871 /*
5858 5872 * There are no DTrace built-in thread-local arrays at
5859 5873 * present. This opcode is saved for future work.
5860 5874 */
5861 5875 *flags |= CPU_DTRACE_ILLOP;
5862 5876 regs[rd] = 0;
5863 5877 break;
5864 5878
5865 5879 case DIF_OP_LDLS:
5866 5880 id = DIF_INSTR_VAR(instr);
5867 5881
5868 5882 if (id < DIF_VAR_OTHER_UBASE) {
5869 5883 /*
5870 5884 * For now, this has no meaning.
5871 5885 */
5872 5886 regs[rd] = 0;
5873 5887 break;
5874 5888 }
5875 5889
5876 5890 id -= DIF_VAR_OTHER_UBASE;
5877 5891
5878 5892 ASSERT(id < vstate->dtvs_nlocals);
5879 5893 ASSERT(vstate->dtvs_locals != NULL);
5880 5894
5881 5895 svar = vstate->dtvs_locals[id];
5882 5896 ASSERT(svar != NULL);
5883 5897 v = &svar->dtsv_var;
5884 5898
5885 5899 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5886 5900 uintptr_t a = (uintptr_t)svar->dtsv_data;
5887 5901 size_t sz = v->dtdv_type.dtdt_size;
5888 5902
5889 5903 sz += sizeof (uint64_t);
5890 5904 ASSERT(svar->dtsv_size == NCPU * sz);
5891 5905 a += CPU->cpu_id * sz;
5892 5906
5893 5907 if (*(uint8_t *)a == UINT8_MAX) {
5894 5908 /*
5895 5909 * If the 0th byte is set to UINT8_MAX
5896 5910 * then this is to be treated as a
5897 5911 * reference to a NULL variable.
5898 5912 */
5899 5913 regs[rd] = NULL;
5900 5914 } else {
5901 5915 regs[rd] = a + sizeof (uint64_t);
5902 5916 }
5903 5917
5904 5918 break;
5905 5919 }
5906 5920
5907 5921 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5908 5922 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5909 5923 regs[rd] = tmp[CPU->cpu_id];
5910 5924 break;
5911 5925
5912 5926 case DIF_OP_STLS:
5913 5927 id = DIF_INSTR_VAR(instr);
5914 5928
5915 5929 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5916 5930 id -= DIF_VAR_OTHER_UBASE;
5917 5931 ASSERT(id < vstate->dtvs_nlocals);
5918 5932
5919 5933 ASSERT(vstate->dtvs_locals != NULL);
5920 5934 svar = vstate->dtvs_locals[id];
5921 5935 ASSERT(svar != NULL);
5922 5936 v = &svar->dtsv_var;
5923 5937
5924 5938 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5925 5939 uintptr_t a = (uintptr_t)svar->dtsv_data;
5926 5940 size_t sz = v->dtdv_type.dtdt_size;
5927 5941
5928 5942 sz += sizeof (uint64_t);
5929 5943 ASSERT(svar->dtsv_size == NCPU * sz);
5930 5944 a += CPU->cpu_id * sz;
5931 5945
5932 5946 if (regs[rd] == NULL) {
5933 5947 *(uint8_t *)a = UINT8_MAX;
5934 5948 break;
5935 5949 } else {
5936 5950 *(uint8_t *)a = 0;
5937 5951 a += sizeof (uint64_t);
5938 5952 }
5939 5953
5940 5954 if (!dtrace_vcanload(
5941 5955 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5942 5956 mstate, vstate))
5943 5957 break;
5944 5958
5945 5959 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5946 5960 (void *)a, &v->dtdv_type);
5947 5961 break;
5948 5962 }
5949 5963
5950 5964 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5951 5965 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5952 5966 tmp[CPU->cpu_id] = regs[rd];
5953 5967 break;
5954 5968
5955 5969 case DIF_OP_LDTS: {
5956 5970 dtrace_dynvar_t *dvar;
5957 5971 dtrace_key_t *key;
5958 5972
5959 5973 id = DIF_INSTR_VAR(instr);
5960 5974 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5961 5975 id -= DIF_VAR_OTHER_UBASE;
5962 5976 v = &vstate->dtvs_tlocals[id];
5963 5977
5964 5978 key = &tupregs[DIF_DTR_NREGS];
5965 5979 key[0].dttk_value = (uint64_t)id;
5966 5980 key[0].dttk_size = 0;
5967 5981 DTRACE_TLS_THRKEY(key[1].dttk_value);
5968 5982 key[1].dttk_size = 0;
5969 5983
5970 5984 dvar = dtrace_dynvar(dstate, 2, key,
5971 5985 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5972 5986 mstate, vstate);
5973 5987
5974 5988 if (dvar == NULL) {
5975 5989 regs[rd] = 0;
5976 5990 break;
5977 5991 }
5978 5992
5979 5993 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5980 5994 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5981 5995 } else {
5982 5996 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5983 5997 }
5984 5998
5985 5999 break;
5986 6000 }
5987 6001
5988 6002 case DIF_OP_STTS: {
5989 6003 dtrace_dynvar_t *dvar;
5990 6004 dtrace_key_t *key;
5991 6005
5992 6006 id = DIF_INSTR_VAR(instr);
5993 6007 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5994 6008 id -= DIF_VAR_OTHER_UBASE;
5995 6009
5996 6010 key = &tupregs[DIF_DTR_NREGS];
5997 6011 key[0].dttk_value = (uint64_t)id;
5998 6012 key[0].dttk_size = 0;
5999 6013 DTRACE_TLS_THRKEY(key[1].dttk_value);
6000 6014 key[1].dttk_size = 0;
6001 6015 v = &vstate->dtvs_tlocals[id];
6002 6016
6003 6017 dvar = dtrace_dynvar(dstate, 2, key,
6004 6018 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6005 6019 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6006 6020 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6007 6021 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6008 6022
6009 6023 /*
6010 6024 * Given that we're storing to thread-local data,
6011 6025 * we need to flush our predicate cache.
6012 6026 */
6013 6027 curthread->t_predcache = NULL;
6014 6028
6015 6029 if (dvar == NULL)
6016 6030 break;
6017 6031
6018 6032 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6019 6033 if (!dtrace_vcanload(
6020 6034 (void *)(uintptr_t)regs[rd],
6021 6035 &v->dtdv_type, mstate, vstate))
6022 6036 break;
6023 6037
6024 6038 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6025 6039 dvar->dtdv_data, &v->dtdv_type);
6026 6040 } else {
6027 6041 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6028 6042 }
6029 6043
6030 6044 break;
6031 6045 }
6032 6046
6033 6047 case DIF_OP_SRA:
6034 6048 regs[rd] = (int64_t)regs[r1] >> regs[r2];
6035 6049 break;
6036 6050
6037 6051 case DIF_OP_CALL:
6038 6052 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6039 6053 regs, tupregs, ttop, mstate, state);
6040 6054 break;
6041 6055
6042 6056 case DIF_OP_PUSHTR:
6043 6057 if (ttop == DIF_DTR_NREGS) {
6044 6058 *flags |= CPU_DTRACE_TUPOFLOW;
6045 6059 break;
6046 6060 }
6047 6061
6048 6062 if (r1 == DIF_TYPE_STRING) {
6049 6063 /*
6050 6064 * If this is a string type and the size is 0,
6051 6065 * we'll use the system-wide default string
6052 6066 * size. Note that we are _not_ looking at
6053 6067 * the value of the DTRACEOPT_STRSIZE option;
6054 6068 * had this been set, we would expect to have
6055 6069 * a non-zero size value in the "pushtr".
6056 6070 */
6057 6071 tupregs[ttop].dttk_size =
6058 6072 dtrace_strlen((char *)(uintptr_t)regs[rd],
6059 6073 regs[r2] ? regs[r2] :
6060 6074 dtrace_strsize_default) + 1;
6061 6075 } else {
6062 6076 tupregs[ttop].dttk_size = regs[r2];
6063 6077 }
6064 6078
6065 6079 tupregs[ttop++].dttk_value = regs[rd];
6066 6080 break;
6067 6081
6068 6082 case DIF_OP_PUSHTV:
6069 6083 if (ttop == DIF_DTR_NREGS) {
6070 6084 *flags |= CPU_DTRACE_TUPOFLOW;
6071 6085 break;
6072 6086 }
6073 6087
6074 6088 tupregs[ttop].dttk_value = regs[rd];
6075 6089 tupregs[ttop++].dttk_size = 0;
6076 6090 break;
6077 6091
6078 6092 case DIF_OP_POPTS:
6079 6093 if (ttop != 0)
6080 6094 ttop--;
6081 6095 break;
6082 6096
6083 6097 case DIF_OP_FLUSHTS:
6084 6098 ttop = 0;
6085 6099 break;
6086 6100
6087 6101 case DIF_OP_LDGAA:
6088 6102 case DIF_OP_LDTAA: {
6089 6103 dtrace_dynvar_t *dvar;
6090 6104 dtrace_key_t *key = tupregs;
6091 6105 uint_t nkeys = ttop;
6092 6106
6093 6107 id = DIF_INSTR_VAR(instr);
6094 6108 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6095 6109 id -= DIF_VAR_OTHER_UBASE;
6096 6110
6097 6111 key[nkeys].dttk_value = (uint64_t)id;
6098 6112 key[nkeys++].dttk_size = 0;
6099 6113
6100 6114 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6101 6115 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6102 6116 key[nkeys++].dttk_size = 0;
6103 6117 v = &vstate->dtvs_tlocals[id];
6104 6118 } else {
6105 6119 v = &vstate->dtvs_globals[id]->dtsv_var;
6106 6120 }
6107 6121
6108 6122 dvar = dtrace_dynvar(dstate, nkeys, key,
6109 6123 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6110 6124 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6111 6125 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6112 6126
6113 6127 if (dvar == NULL) {
6114 6128 regs[rd] = 0;
6115 6129 break;
6116 6130 }
6117 6131
6118 6132 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6119 6133 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6120 6134 } else {
6121 6135 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6122 6136 }
6123 6137
6124 6138 break;
6125 6139 }
6126 6140
6127 6141 case DIF_OP_STGAA:
6128 6142 case DIF_OP_STTAA: {
6129 6143 dtrace_dynvar_t *dvar;
6130 6144 dtrace_key_t *key = tupregs;
6131 6145 uint_t nkeys = ttop;
6132 6146
6133 6147 id = DIF_INSTR_VAR(instr);
6134 6148 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6135 6149 id -= DIF_VAR_OTHER_UBASE;
6136 6150
6137 6151 key[nkeys].dttk_value = (uint64_t)id;
6138 6152 key[nkeys++].dttk_size = 0;
6139 6153
6140 6154 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6141 6155 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6142 6156 key[nkeys++].dttk_size = 0;
6143 6157 v = &vstate->dtvs_tlocals[id];
6144 6158 } else {
6145 6159 v = &vstate->dtvs_globals[id]->dtsv_var;
6146 6160 }
6147 6161
6148 6162 dvar = dtrace_dynvar(dstate, nkeys, key,
6149 6163 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6150 6164 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6151 6165 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6152 6166 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6153 6167
6154 6168 if (dvar == NULL)
6155 6169 break;
6156 6170
6157 6171 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6158 6172 if (!dtrace_vcanload(
6159 6173 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6160 6174 mstate, vstate))
6161 6175 break;
6162 6176
6163 6177 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6164 6178 dvar->dtdv_data, &v->dtdv_type);
6165 6179 } else {
6166 6180 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6167 6181 }
6168 6182
6169 6183 break;
6170 6184 }
6171 6185
6172 6186 case DIF_OP_ALLOCS: {
6173 6187 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6174 6188 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6175 6189
6176 6190 /*
6177 6191 * Rounding up the user allocation size could have
6178 6192 * overflowed large, bogus allocations (like -1ULL) to
6179 6193 * 0.
6180 6194 */
6181 6195 if (size < regs[r1] ||
6182 6196 !DTRACE_INSCRATCH(mstate, size)) {
6183 6197 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6184 6198 regs[rd] = NULL;
6185 6199 break;
6186 6200 }
6187 6201
6188 6202 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6189 6203 mstate->dtms_scratch_ptr += size;
6190 6204 regs[rd] = ptr;
6191 6205 break;
6192 6206 }
6193 6207
6194 6208 case DIF_OP_COPYS:
6195 6209 if (!dtrace_canstore(regs[rd], regs[r2],
6196 6210 mstate, vstate)) {
6197 6211 *flags |= CPU_DTRACE_BADADDR;
6198 6212 *illval = regs[rd];
6199 6213 break;
6200 6214 }
6201 6215
6202 6216 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6203 6217 break;
6204 6218
6205 6219 dtrace_bcopy((void *)(uintptr_t)regs[r1],
6206 6220 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6207 6221 break;
6208 6222
6209 6223 case DIF_OP_STB:
6210 6224 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6211 6225 *flags |= CPU_DTRACE_BADADDR;
6212 6226 *illval = regs[rd];
6213 6227 break;
6214 6228 }
6215 6229 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6216 6230 break;
6217 6231
6218 6232 case DIF_OP_STH:
6219 6233 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6220 6234 *flags |= CPU_DTRACE_BADADDR;
6221 6235 *illval = regs[rd];
6222 6236 break;
6223 6237 }
6224 6238 if (regs[rd] & 1) {
6225 6239 *flags |= CPU_DTRACE_BADALIGN;
6226 6240 *illval = regs[rd];
6227 6241 break;
6228 6242 }
6229 6243 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6230 6244 break;
6231 6245
6232 6246 case DIF_OP_STW:
6233 6247 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6234 6248 *flags |= CPU_DTRACE_BADADDR;
6235 6249 *illval = regs[rd];
6236 6250 break;
6237 6251 }
6238 6252 if (regs[rd] & 3) {
6239 6253 *flags |= CPU_DTRACE_BADALIGN;
6240 6254 *illval = regs[rd];
6241 6255 break;
6242 6256 }
6243 6257 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6244 6258 break;
6245 6259
6246 6260 case DIF_OP_STX:
6247 6261 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6248 6262 *flags |= CPU_DTRACE_BADADDR;
6249 6263 *illval = regs[rd];
6250 6264 break;
6251 6265 }
6252 6266 if (regs[rd] & 7) {
6253 6267 *flags |= CPU_DTRACE_BADALIGN;
6254 6268 *illval = regs[rd];
6255 6269 break;
6256 6270 }
6257 6271 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6258 6272 break;
6259 6273 }
6260 6274 }
6261 6275
6262 6276 if (!(*flags & CPU_DTRACE_FAULT))
6263 6277 return (rval);
6264 6278
6265 6279 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6266 6280 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6267 6281
6268 6282 return (0);
6269 6283 }
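/*
 * [Editor's illustrative sketch -- not part of this webrev.]  How the
 * condition codes set by DIF_OP_CMP above drive the signed (BG/BL) and
 * unsigned (BGU/BLU) branches: cc_v is always clear for CMP, so the
 * signed tests reduce to the sign and zero bits of the subtraction,
 * while the unsigned tests use the borrow in cc_c.  show_cmp() is a
 * hypothetical name.
 */
#include <stdio.h>
#include <stdint.h>

static void
show_cmp(uint64_t r1, uint64_t r2)
{
	int64_t cc_r = (int64_t)(r1 - r2);
	int cc_n = cc_r < 0;			/* negative */
	int cc_z = cc_r == 0;			/* zero */
	int cc_v = 0;				/* overflow: always clear for CMP */
	int cc_c = r1 < r2;			/* carry: unsigned borrow */

	printf("%lld vs %lld: BG=%d BGU=%d BL=%d BLU=%d\n",
	    (long long)r1, (long long)r2,
	    (cc_z | (cc_n ^ cc_v)) == 0,	/* signed greater-than */
	    (cc_c | cc_z) == 0,			/* unsigned greater-than */
	    cc_n ^ cc_v,			/* signed less-than */
	    cc_c);				/* unsigned less-than */
}

int
main(void)
{
	show_cmp(5, 3);			/* BG=1 BGU=1 BL=0 BLU=0 */
	show_cmp((uint64_t)-1, 1);	/* signed: -1 < 1; unsigned: UINT64_MAX > 1 */
	return (0);
}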
6270 6284
6271 6285 static void
6272 6286 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6273 6287 {
6274 6288 dtrace_probe_t *probe = ecb->dte_probe;
6275 6289 dtrace_provider_t *prov = probe->dtpr_provider;
6276 6290 char c[DTRACE_FULLNAMELEN + 80], *str;
6277 6291 char *msg = "dtrace: breakpoint action at probe ";
6278 6292 char *ecbmsg = " (ecb ";
6279 6293 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6280 6294 uintptr_t val = (uintptr_t)ecb;
6281 6295 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6282 6296
6283 6297 if (dtrace_destructive_disallow)
6284 6298 return;
6285 6299
6286 6300 /*
6287 6301 * It's impossible to be taking action on the NULL probe.
6288 6302 */
6289 6303 ASSERT(probe != NULL);
6290 6304
6291 6305 /*
6292 6306 * This is a poor man's (destitute man's?) sprintf(): we want to
6293 6307 * print the provider name, module name, function name and name of
6294 6308 * the probe, along with the hex address of the ECB with the breakpoint
6295 6309 * action -- all of which we must place in the character buffer by
6296 6310 * hand.
6297 6311 */
6298 6312 while (*msg != '\0')
6299 6313 c[i++] = *msg++;
6300 6314
6301 6315 for (str = prov->dtpv_name; *str != '\0'; str++)
6302 6316 c[i++] = *str;
6303 6317 c[i++] = ':';
6304 6318
6305 6319 for (str = probe->dtpr_mod; *str != '\0'; str++)
6306 6320 c[i++] = *str;
6307 6321 c[i++] = ':';
6308 6322
6309 6323 for (str = probe->dtpr_func; *str != '\0'; str++)
6310 6324 c[i++] = *str;
6311 6325 c[i++] = ':';
6312 6326
6313 6327 for (str = probe->dtpr_name; *str != '\0'; str++)
6314 6328 c[i++] = *str;
6315 6329
6316 6330 while (*ecbmsg != '\0')
6317 6331 c[i++] = *ecbmsg++;
6318 6332
6319 6333 while (shift >= 0) {
6320 6334 mask = (uintptr_t)0xf << shift;
6321 6335
6322 6336 if (val >= ((uintptr_t)1 << shift))
6323 6337 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6324 6338 shift -= 4;
6325 6339 }
6326 6340
6327 6341 c[i++] = ')';
6328 6342 c[i] = '\0';
6329 6343
6330 6344 debug_enter(c);
6331 6345 }
6332 6346
6333 6347 static void
6334 6348 dtrace_action_panic(dtrace_ecb_t *ecb)
6335 6349 {
6336 6350 dtrace_probe_t *probe = ecb->dte_probe;
6337 6351
6338 6352 /*
6339 6353 * It's impossible to be taking action on the NULL probe.
6340 6354 */
6341 6355 ASSERT(probe != NULL);
6342 6356
6343 6357 if (dtrace_destructive_disallow)
6344 6358 return;
6345 6359
6346 6360 if (dtrace_panicked != NULL)
6347 6361 return;
6348 6362
6349 6363 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
6350 6364 return;
6351 6365
6352 6366 /*
6353 6367 * We won the right to panic. (We want to be sure that only one
6354 6368 * thread calls panic() from dtrace_probe(), and that panic() is
6355 6369 * called exactly once.)
6356 6370 */
6357 6371 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6358 6372 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6359 6373 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6360 6374 }
6361 6375
6362 6376 static void
6363 6377 dtrace_action_raise(uint64_t sig)
6364 6378 {
6365 6379 if (dtrace_destructive_disallow)
6366 6380 return;
6367 6381
6368 6382 if (sig >= NSIG) {
6369 6383 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6370 6384 return;
6371 6385 }
6372 6386
6373 6387 /*
6374 6388 * raise() has a queue depth of 1 -- we ignore all subsequent
6375 6389 * invocations of the raise() action.
6376 6390 */
6377 6391 if (curthread->t_dtrace_sig == 0)
6378 6392 curthread->t_dtrace_sig = (uint8_t)sig;
6379 6393
6380 6394 curthread->t_sig_check = 1;
6381 6395 aston(curthread);
6382 6396 }
6383 6397
6384 6398 static void
6385 6399 dtrace_action_stop(void)
6386 6400 {
6387 6401 if (dtrace_destructive_disallow)
6388 6402 return;
6389 6403
6390 6404 if (!curthread->t_dtrace_stop) {
6391 6405 curthread->t_dtrace_stop = 1;
6392 6406 curthread->t_sig_check = 1;
6393 6407 aston(curthread);
6394 6408 }
6395 6409 }
6396 6410
6397 6411 static void
6398 6412 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6399 6413 {
6400 6414 hrtime_t now;
6401 6415 volatile uint16_t *flags;
6402 6416 cpu_t *cpu = CPU;
6403 6417
6404 6418 if (dtrace_destructive_disallow)
6405 6419 return;
6406 6420
6407 6421 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
6408 6422
6409 6423 now = dtrace_gethrtime();
6410 6424
6411 6425 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6412 6426 /*
6413 6427 * We need to advance the mark to the current time.
6414 6428 */
6415 6429 cpu->cpu_dtrace_chillmark = now;
6416 6430 cpu->cpu_dtrace_chilled = 0;
6417 6431 }
6418 6432
6419 6433 /*
6420 6434 * Now check to see if the requested chill time would take us over
6421 6435 * the maximum amount of time allowed in the chill interval. (Or
6422 6436 * worse, if the calculation itself induces overflow.)
6423 6437 */
6424 6438 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6425 6439 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6426 6440 *flags |= CPU_DTRACE_ILLOP;
6427 6441 return;
6428 6442 }
6429 6443
6430 6444 while (dtrace_gethrtime() - now < val)
6431 6445 continue;
6432 6446
6433 6447 /*
6434 6448 * Normally, we assure that the value of the variable "timestamp" does
6435 6449 * not change within an ECB. The presence of chill() represents an
6436 6450 * exception to this rule, however.
6437 6451 */
6438 6452 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6439 6453 cpu->cpu_dtrace_chilled += val;
6440 6454 }
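/*
 * [Editor's illustrative sketch -- not part of this webrev.]  The limit
 * test used by chill() above: reject the request if adding the new chill
 * time would exceed the per-interval maximum, or if the addition itself
 * wrapped around.  chill_would_exceed() is a hypothetical name.
 */
#include <stdio.h>
#include <stdint.h>

static int
chill_would_exceed(uint64_t chilled, uint64_t val, uint64_t max)
{
	return (chilled + val > max || chilled + val < chilled);
}

int
main(void)
{
	printf("%d\n", chill_would_exceed(100, 50, 500));	/* 0 */
	printf("%d\n", chill_would_exceed(400, 200, 500));	/* 1: over the max */
	printf("%d\n", chill_would_exceed(1, UINT64_MAX, 500));	/* 1: wrapped around */
	return (0);
}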
6441 6455
6442 6456 static void
6443 6457 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6444 6458 uint64_t *buf, uint64_t arg)
6445 6459 {
6446 6460 int nframes = DTRACE_USTACK_NFRAMES(arg);
6447 6461 int strsize = DTRACE_USTACK_STRSIZE(arg);
6448 6462 uint64_t *pcs = &buf[1], *fps;
6449 6463 char *str = (char *)&pcs[nframes];
6450 6464 int size, offs = 0, i, j;
6451 6465 uintptr_t old = mstate->dtms_scratch_ptr, saved;
6452 6466 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6453 6467 char *sym;
6454 6468
6455 6469 /*
6456 6470 * Should be taking a faster path if string space has not been
6457 6471 * allocated.
6458 6472 */
6459 6473 ASSERT(strsize != 0);
6460 6474
6461 6475 /*
6462 6476 * We will first allocate some temporary space for the frame pointers.
6463 6477 */
6464 6478 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6465 6479 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6466 6480 (nframes * sizeof (uint64_t));
6467 6481
6468 6482 if (!DTRACE_INSCRATCH(mstate, size)) {
6469 6483 /*
6470 6484 * Not enough room for our frame pointers -- need to indicate
6471 6485 * that we ran out of scratch space.
6472 6486 */
6473 6487 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6474 6488 return;
6475 6489 }
6476 6490
6477 6491 mstate->dtms_scratch_ptr += size;
6478 6492 saved = mstate->dtms_scratch_ptr;
6479 6493
6480 6494 /*
6481 6495 * Now get a stack with both program counters and frame pointers.
6482 6496 */
6483 6497 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6484 6498 dtrace_getufpstack(buf, fps, nframes + 1);
6485 6499 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6486 6500
6487 6501 /*
6488 6502 * If that faulted, we're cooked.
6489 6503 */
6490 6504 if (*flags & CPU_DTRACE_FAULT)
6491 6505 goto out;
6492 6506
6493 6507 /*
6494 6508 * Now we want to walk up the stack, calling the USTACK helper. For
6495 6509 * each iteration, we restore the scratch pointer.
6496 6510 */
6497 6511 for (i = 0; i < nframes; i++) {
6498 6512 mstate->dtms_scratch_ptr = saved;
6499 6513
6500 6514 if (offs >= strsize)
6501 6515 break;
6502 6516
6503 6517 sym = (char *)(uintptr_t)dtrace_helper(
6504 6518 DTRACE_HELPER_ACTION_USTACK,
6505 6519 mstate, state, pcs[i], fps[i]);
6506 6520
6507 6521 /*
6508 6522 * If we faulted while running the helper, we're going to
6509 6523 * clear the fault and null out the corresponding string.
6510 6524 */
6511 6525 if (*flags & CPU_DTRACE_FAULT) {
6512 6526 *flags &= ~CPU_DTRACE_FAULT;
6513 6527 str[offs++] = '\0';
6514 6528 continue;
6515 6529 }
6516 6530
6517 6531 if (sym == NULL) {
6518 6532 str[offs++] = '\0';
6519 6533 continue;
6520 6534 }
6521 6535
6522 6536 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6523 6537
6524 6538 /*
6525 6539 * Now copy in the string that the helper returned to us.
6526 6540 */
6527 6541 for (j = 0; offs + j < strsize; j++) {
6528 6542 if ((str[offs + j] = sym[j]) == '\0')
6529 6543 break;
6530 6544 }
6531 6545
6532 6546 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6533 6547
6534 6548 offs += j + 1;
6535 6549 }
6536 6550
6537 6551 if (offs >= strsize) {
6538 6552 /*
6539 6553 * If we didn't have room for all of the strings, we don't
6540 6554 * abort processing -- this needn't be a fatal error -- but we
6541 6555 * still want to increment a counter (dts_stkstroverflows) to
6542 6556 * allow this condition to be warned about. (If this is from
6543 6557 * a jstack() action, it is easily tuned via jstackstrsize.)
6544 6558 */
6545 6559 dtrace_error(&state->dts_stkstroverflows);
6546 6560 }
6547 6561
6548 6562 while (offs < strsize)
6549 6563 str[offs++] = '\0';
6550 6564
6551 6565 out:
6552 6566 mstate->dtms_scratch_ptr = old;
6553 6567 }
6554 6568
6569 +static void
6570 +dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6571 + size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6572 +{
6573 + volatile uint16_t *flags;
6574 + uint64_t val = *valp;
6575 + size_t valoffs = *valoffsp;
6576 +
6577 + flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6578 + ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6579 +
6580 + /*
6581 + * If this is a string, we're going to only load until we find the zero
6582 + * byte -- after which we'll store zero bytes.
6583 + */
6584 + if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6585 + char c = '\0' + 1;
6586 + size_t s;
6587 +
6588 + for (s = 0; s < size; s++) {
6589 + if (c != '\0' && dtkind == DIF_TF_BYREF) {
6590 + c = dtrace_load8(val++);
6591 + } else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6592 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6593 + c = dtrace_fuword8((void *)(uintptr_t)val++);
6594 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6595 + if (*flags & CPU_DTRACE_FAULT)
6596 + break;
6597 + }
6598 +
6599 + DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6600 +
6601 + if (c == '\0' && intuple)
6602 + break;
6603 + }
6604 + } else {
6605 + uint8_t c;
6606 + while (valoffs < end) {
6607 + if (dtkind == DIF_TF_BYREF) {
6608 + c = dtrace_load8(val++);
6609 + } else if (dtkind == DIF_TF_BYUREF) {
6610 + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6611 + c = dtrace_fuword8((void *)(uintptr_t)val++);
6612 + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6613 + if (*flags & CPU_DTRACE_FAULT)
6614 + break;
6615 + }
6616 +
6617 + DTRACE_STORE(uint8_t, tomax,
6618 + valoffs++, c);
6619 + }
6620 + }
6621 +
6622 + *valp = val;
6623 + *valoffsp = valoffs;
6624 +}
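/*
 * [Editor's illustrative sketch -- not part of this webrev.]  The string
 * behaviour of dtrace_store_by_ref() above, in userland form: bytes are
 * copied from the source only until its terminator has been seen, after
 * which zero bytes are stored so the record is always exactly 'size'
 * bytes wide (in the tuple case, the copy instead stops at the NUL).
 * store_string_fixed() is a hypothetical name.
 */
#include <stdio.h>

static void
store_string_fixed(const char *src, unsigned char *tomax, size_t size)
{
	char c = '\0' + 1;		/* anything non-zero to prime the loop */

	for (size_t s = 0; s < size; s++) {
		if (c != '\0')
			c = *src++;	/* still inside the string: load a byte */
		tomax[s] = (unsigned char)c;	/* store it (or a padding zero) */
	}
}

int
main(void)
{
	unsigned char rec[8];

	store_string_fixed("hi", rec, sizeof (rec));
	for (size_t i = 0; i < sizeof (rec); i++)
		printf("%02x ", rec[i]);	/* 68 69 00 00 00 00 00 00 */
	printf("\n");
	return (0);
}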
6625 +
6555 6626 /*
6556 6627 * If you're looking for the epicenter of DTrace, you just found it. This
6557 6628 * is the function called by the provider to fire a probe -- from which all
6558 6629 * subsequent probe-context DTrace activity emanates.
6559 6630 */
6560 6631 void
6561 6632 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
6562 6633 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
6563 6634 {
6564 6635 processorid_t cpuid;
6565 6636 dtrace_icookie_t cookie;
6566 6637 dtrace_probe_t *probe;
6567 6638 dtrace_mstate_t mstate;
6568 6639 dtrace_ecb_t *ecb;
6569 6640 dtrace_action_t *act;
6570 6641 intptr_t offs;
6571 6642 size_t size;
6572 6643 int vtime, onintr;
6573 6644 volatile uint16_t *flags;
6574 6645 hrtime_t now, end;
6575 6646
6576 6647 /*
6577 6648 * Kick out immediately if this CPU is still being born (in which case
6578 6649 * curthread will be set to -1) or the current thread can't allow
6579 6650 * probes in its current context.
6580 6651 */
6581 6652 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
6582 6653 return;
6583 6654
6584 6655 cookie = dtrace_interrupt_disable();
6585 6656 probe = dtrace_probes[id - 1];
6586 6657 cpuid = CPU->cpu_id;
6587 6658 onintr = CPU_ON_INTR(CPU);
6588 6659
6589 6660 CPU->cpu_dtrace_probes++;
6590 6661
6591 6662 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
6592 6663 probe->dtpr_predcache == curthread->t_predcache) {
6593 6664 /*
6594 6665 * We have hit in the predicate cache; we know that
6595 6666 * this predicate would evaluate to be false.
6596 6667 */
6597 6668 dtrace_interrupt_enable(cookie);
6598 6669 return;
6599 6670 }
6600 6671
6601 6672 if (panic_quiesce) {
6602 6673 /*
6603 6674 * We don't trace anything if we're panicking.
6604 6675 */
6605 6676 dtrace_interrupt_enable(cookie);
6606 6677 return;
6607 6678 }
6608 6679
6609 6680 now = dtrace_gethrtime();
6610 6681 vtime = dtrace_vtime_references != 0;
6611 6682
6612 6683 if (vtime && curthread->t_dtrace_start)
6613 6684 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
6614 6685
6615 6686 mstate.dtms_difo = NULL;
6616 6687 mstate.dtms_probe = probe;
6617 6688 mstate.dtms_strtok = NULL;
6618 6689 mstate.dtms_arg[0] = arg0;
6619 6690 mstate.dtms_arg[1] = arg1;
6620 6691 mstate.dtms_arg[2] = arg2;
6621 6692 mstate.dtms_arg[3] = arg3;
6622 6693 mstate.dtms_arg[4] = arg4;
6623 6694
6624 6695 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6625 6696
6626 6697 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6627 6698 dtrace_predicate_t *pred = ecb->dte_predicate;
6628 6699 dtrace_state_t *state = ecb->dte_state;
6629 6700 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6630 6701 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6631 6702 dtrace_vstate_t *vstate = &state->dts_vstate;
6632 6703 dtrace_provider_t *prov = probe->dtpr_provider;
6633 6704 uint64_t tracememsize = 0;
6634 6705 int committed = 0;
6635 6706 caddr_t tomax;
6636 6707
6637 6708 /*
6638 6709 * A little subtlety with the following (seemingly innocuous)
6639 6710 * declaration of the automatic 'val': by looking at the
6640 6711 * code, you might think that it could be declared in the
6641 6712 * action processing loop, below. (That is, it's only used in
6642 6713 * the action processing loop.) However, it must be declared
6643 6714 * out of that scope because in the case of DIF expression
6644 6715 * arguments to aggregating actions, one iteration of the
6645 6716 * action loop will use the last iteration's value.
6646 6717 */
6647 6718 #ifdef lint
6648 6719 uint64_t val = 0;
6649 6720 #else
6650 6721 uint64_t val;
6651 6722 #endif
6652 6723
6653 6724 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6654 6725 mstate.dtms_access = DTRACE_ACCESS_ARGS | DTRACE_ACCESS_PROC;
6655 6726 mstate.dtms_getf = NULL;
6656 6727
6657 6728 *flags &= ~CPU_DTRACE_ERROR;
6658 6729
6659 6730 if (prov == dtrace_provider) {
6660 6731 /*
6661 6732 * If dtrace itself is the provider of this probe,
6662 6733 * we're only going to continue processing the ECB if
6663 6734 * arg0 (the dtrace_state_t) is equal to the ECB's
6664 6735 * creating state. (This prevents disjoint consumers
6665 6736 * from seeing one another's metaprobes.)
6666 6737 */
6667 6738 if (arg0 != (uint64_t)(uintptr_t)state)
6668 6739 continue;
6669 6740 }
6670 6741
6671 6742 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6672 6743 /*
6673 6744 * We're not currently active. If our provider isn't
6674 6745 * the dtrace pseudo provider, we're not interested.
6675 6746 */
6676 6747 if (prov != dtrace_provider)
6677 6748 continue;
6678 6749
6679 6750 /*
6680 6751 * Now we must further check if we are in the BEGIN
6681 6752 * probe. If we are, we will only continue processing
6682 6753 * if we're still in WARMUP -- if one BEGIN enabling
6683 6754 * has invoked the exit() action, we don't want to
6684 6755 * evaluate subsequent BEGIN enablings.
6685 6756 */
6686 6757 if (probe->dtpr_id == dtrace_probeid_begin &&
6687 6758 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
6688 6759 ASSERT(state->dts_activity ==
6689 6760 DTRACE_ACTIVITY_DRAINING);
6690 6761 continue;
6691 6762 }
6692 6763 }
6693 6764
6694 6765 if (ecb->dte_cond && !dtrace_priv_probe(state, &mstate, ecb))
6695 6766 continue;
6696 6767
6697 6768 if (now - state->dts_alive > dtrace_deadman_timeout) {
6698 6769 /*
6699 6770 * We seem to be dead. Unless we (a) have kernel
6700 6771 		 * destructive permissions, (b) have explicitly enabled
6701 6772 		 * destructive actions, and (c) destructive actions have
6702 6773 * not been disabled, we're going to transition into
6703 6774 * the KILLED state, from which no further processing
6704 6775 * on this state will be performed.
6705 6776 */
6706 6777 if (!dtrace_priv_kernel_destructive(state) ||
6707 6778 !state->dts_cred.dcr_destructive ||
6708 6779 dtrace_destructive_disallow) {
6709 6780 void *activity = &state->dts_activity;
6710 6781 dtrace_activity_t current;
6711 6782
6712 6783 do {
6713 6784 current = state->dts_activity;
6714 6785 } while (dtrace_cas32(activity, current,
6715 6786 DTRACE_ACTIVITY_KILLED) != current);
6716 6787
6717 6788 continue;
6718 6789 }
6719 6790 }
6720 6791
6721 6792 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6722 6793 ecb->dte_alignment, state, &mstate)) < 0)
6723 6794 continue;
6724 6795
6725 6796 tomax = buf->dtb_tomax;
6726 6797 ASSERT(tomax != NULL);
6727 6798
6728 6799 if (ecb->dte_size != 0) {
6729 6800 dtrace_rechdr_t dtrh;
6730 6801 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
6731 6802 mstate.dtms_timestamp = dtrace_gethrtime();
6732 6803 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
6733 6804 }
6734 6805 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
6735 6806 dtrh.dtrh_epid = ecb->dte_epid;
6736 6807 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
6737 6808 mstate.dtms_timestamp);
6738 6809 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
6739 6810 }
6740 6811
6741 6812 mstate.dtms_epid = ecb->dte_epid;
6742 6813 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6743 6814
6744 6815 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6745 6816 mstate.dtms_access |= DTRACE_ACCESS_KERNEL;
6746 6817
6747 6818 if (pred != NULL) {
6748 6819 dtrace_difo_t *dp = pred->dtp_difo;
6749 6820 int rval;
6750 6821
6751 6822 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6752 6823
6753 6824 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6754 6825 dtrace_cacheid_t cid = probe->dtpr_predcache;
6755 6826
6756 6827 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6757 6828 /*
6758 6829 * Update the predicate cache...
6759 6830 */
6760 6831 ASSERT(cid == pred->dtp_cacheid);
6761 6832 curthread->t_predcache = cid;
6762 6833 }
6763 6834
6764 6835 continue;
6765 6836 }
6766 6837 }
6767 6838
6768 6839 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6769 6840 act != NULL; act = act->dta_next) {
6770 6841 size_t valoffs;
6771 6842 dtrace_difo_t *dp;
6772 6843 dtrace_recdesc_t *rec = &act->dta_rec;
6773 6844
6774 6845 size = rec->dtrd_size;
6775 6846 valoffs = offs + rec->dtrd_offset;
6776 6847
6777 6848 if (DTRACEACT_ISAGG(act->dta_kind)) {
6778 6849 uint64_t v = 0xbad;
6779 6850 dtrace_aggregation_t *agg;
6780 6851
6781 6852 agg = (dtrace_aggregation_t *)act;
6782 6853
6783 6854 if ((dp = act->dta_difo) != NULL)
6784 6855 v = dtrace_dif_emulate(dp,
6785 6856 &mstate, vstate, state);
6786 6857
6787 6858 if (*flags & CPU_DTRACE_ERROR)
6788 6859 continue;
6789 6860
6790 6861 /*
6791 6862 * Note that we always pass the expression
6792 6863 * value from the previous iteration of the
6793 6864 * action loop. This value will only be used
6794 6865 * if there is an expression argument to the
6795 6866 * aggregating action, denoted by the
6796 6867 * dtag_hasarg field.
6797 6868 */
6798 6869 dtrace_aggregate(agg, buf,
6799 6870 offs, aggbuf, v, val);
6800 6871 continue;
6801 6872 }
6802 6873
6803 6874 switch (act->dta_kind) {
6804 6875 case DTRACEACT_STOP:
6805 6876 if (dtrace_priv_proc_destructive(state,
6806 6877 &mstate))
6807 6878 dtrace_action_stop();
6808 6879 continue;
6809 6880
6810 6881 case DTRACEACT_BREAKPOINT:
6811 6882 if (dtrace_priv_kernel_destructive(state))
6812 6883 dtrace_action_breakpoint(ecb);
6813 6884 continue;
6814 6885
6815 6886 case DTRACEACT_PANIC:
6816 6887 if (dtrace_priv_kernel_destructive(state))
6817 6888 dtrace_action_panic(ecb);
6818 6889 continue;
6819 6890
6820 6891 case DTRACEACT_STACK:
6821 6892 if (!dtrace_priv_kernel(state))
6822 6893 continue;
6823 6894
6824 6895 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6825 6896 size / sizeof (pc_t), probe->dtpr_aframes,
6826 6897 DTRACE_ANCHORED(probe) ? NULL :
6827 6898 (uint32_t *)arg0);
6828 6899
6829 6900 continue;
6830 6901
6831 6902 case DTRACEACT_JSTACK:
6832 6903 case DTRACEACT_USTACK:
6833 6904 if (!dtrace_priv_proc(state, &mstate))
6834 6905 continue;
6835 6906
6836 6907 /*
6837 6908 * See comment in DIF_VAR_PID.
6838 6909 */
6839 6910 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6840 6911 CPU_ON_INTR(CPU)) {
6841 6912 int depth = DTRACE_USTACK_NFRAMES(
6842 6913 rec->dtrd_arg) + 1;
6843 6914
6844 6915 dtrace_bzero((void *)(tomax + valoffs),
6845 6916 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6846 6917 + depth * sizeof (uint64_t));
6847 6918
6848 6919 continue;
6849 6920 }
6850 6921
6851 6922 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6852 6923 curproc->p_dtrace_helpers != NULL) {
6853 6924 /*
6854 6925 * This is the slow path -- we have
6855 6926 * allocated string space, and we're
6856 6927 * getting the stack of a process that
6857 6928 * has helpers. Call into a separate
6858 6929 * routine to perform this processing.
6859 6930 */
6860 6931 dtrace_action_ustack(&mstate, state,
6861 6932 (uint64_t *)(tomax + valoffs),
6862 6933 rec->dtrd_arg);
6863 6934 continue;
6864 6935 }
6865 6936
6866 6937 /*
6867 6938 * Clear the string space, since there's no
6868 6939 * helper to do it for us.
6869 6940 */
6870 6941 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0) {
6871 6942 int depth = DTRACE_USTACK_NFRAMES(
6872 6943 rec->dtrd_arg);
6873 6944 size_t strsize = DTRACE_USTACK_STRSIZE(
6874 6945 rec->dtrd_arg);
6875 6946 uint64_t *buf = (uint64_t *)(tomax +
6876 6947 valoffs);
6877 6948 void *strspace = &buf[depth + 1];
6878 6949
6879 6950 dtrace_bzero(strspace,
6880 6951 MIN(depth, strsize));
6881 6952 }
6882 6953
6883 6954 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6884 6955 dtrace_getupcstack((uint64_t *)
6885 6956 (tomax + valoffs),
6886 6957 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6887 6958 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6888 6959 continue;
6889 6960
6890 6961 default:
6891 6962 break;
6892 6963 }
6893 6964
6894 6965 dp = act->dta_difo;
6895 6966 ASSERT(dp != NULL);
6896 6967
6897 6968 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6898 6969
6899 6970 if (*flags & CPU_DTRACE_ERROR)
6900 6971 continue;
6901 6972
6902 6973 switch (act->dta_kind) {
6903 6974 case DTRACEACT_SPECULATE: {
6904 6975 dtrace_rechdr_t *dtrh;
6905 6976
6906 6977 ASSERT(buf == &state->dts_buffer[cpuid]);
6907 6978 buf = dtrace_speculation_buffer(state,
6908 6979 cpuid, val);
6909 6980
6910 6981 if (buf == NULL) {
6911 6982 *flags |= CPU_DTRACE_DROP;
6912 6983 continue;
6913 6984 }
6914 6985
6915 6986 offs = dtrace_buffer_reserve(buf,
6916 6987 ecb->dte_needed, ecb->dte_alignment,
6917 6988 state, NULL);
6918 6989
6919 6990 if (offs < 0) {
6920 6991 *flags |= CPU_DTRACE_DROP;
6921 6992 continue;
6922 6993 }
6923 6994
6924 6995 tomax = buf->dtb_tomax;
6925 6996 ASSERT(tomax != NULL);
6926 6997
6927 6998 if (ecb->dte_size == 0)
6928 6999 continue;
6929 7000
6930 7001 ASSERT3U(ecb->dte_size, >=,
6931 7002 sizeof (dtrace_rechdr_t));
6932 7003 dtrh = ((void *)(tomax + offs));
6933 7004 dtrh->dtrh_epid = ecb->dte_epid;
6934 7005 /*
6935 7006 * When the speculation is committed, all of
6936 7007 * the records in the speculative buffer will
6937 7008 * have their timestamps set to the commit
6938 7009 			 * time. Until then, the timestamp is set to a
6939 7010 			 * sentinel value, for debuggability.
6940 7011 */
6941 7012 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
6942 7013 continue;
6943 7014 }
6944 7015
6945 7016 case DTRACEACT_CHILL:
6946 7017 if (dtrace_priv_kernel_destructive(state))
6947 7018 dtrace_action_chill(&mstate, val);
6948 7019 continue;
6949 7020
6950 7021 case DTRACEACT_RAISE:
6951 7022 if (dtrace_priv_proc_destructive(state,
6952 7023 &mstate))
6953 7024 dtrace_action_raise(val);
6954 7025 continue;
6955 7026
6956 7027 case DTRACEACT_COMMIT:
6957 7028 ASSERT(!committed);
6958 7029
6959 7030 /*
6960 7031 * We need to commit our buffer state.
6961 7032 */
6962 7033 if (ecb->dte_size)
6963 7034 buf->dtb_offset = offs + ecb->dte_size;
6964 7035 buf = &state->dts_buffer[cpuid];
6965 7036 dtrace_speculation_commit(state, cpuid, val);
6966 7037 committed = 1;
6967 7038 continue;
6968 7039
6969 7040 case DTRACEACT_DISCARD:
6970 7041 dtrace_speculation_discard(state, cpuid, val);
6971 7042 continue;
6972 7043
6973 7044 case DTRACEACT_DIFEXPR:
6974 7045 case DTRACEACT_LIBACT:
6975 7046 case DTRACEACT_PRINTF:
6976 7047 case DTRACEACT_PRINTA:
6977 7048 case DTRACEACT_SYSTEM:
6978 7049 case DTRACEACT_FREOPEN:
6979 7050 case DTRACEACT_TRACEMEM:
6980 7051 break;
6981 7052
6982 7053 case DTRACEACT_TRACEMEM_DYNSIZE:
6983 7054 tracememsize = val;
6984 7055 break;
6985 7056
6986 7057 case DTRACEACT_SYM:
6987 7058 case DTRACEACT_MOD:
6988 7059 if (!dtrace_priv_kernel(state))
6989 7060 continue;
6990 7061 break;
6991 7062
6992 7063 case DTRACEACT_USYM:
6993 7064 case DTRACEACT_UMOD:
6994 7065 case DTRACEACT_UADDR: {
6995 7066 struct pid *pid = curthread->t_procp->p_pidp;
6996 7067
6997 7068 if (!dtrace_priv_proc(state, &mstate))
6998 7069 continue;
6999 7070
7000 7071 DTRACE_STORE(uint64_t, tomax,
7001 7072 valoffs, (uint64_t)pid->pid_id);
7002 7073 DTRACE_STORE(uint64_t, tomax,
7003 7074 valoffs + sizeof (uint64_t), val);
7004 7075
7005 7076 continue;
7006 7077 }
7007 7078
7008 7079 case DTRACEACT_EXIT: {
7009 7080 /*
7010 7081 * For the exit action, we are going to attempt
7011 7082 * to atomically set our activity to be
7012 7083 * draining. If this fails (either because
7013 7084 * another CPU has beat us to the exit action,
7014 7085 * or because our current activity is something
7015 7086 * other than ACTIVE or WARMUP), we will
7016 7087 * continue. This assures that the exit action
7017 7088 * can be successfully recorded at most once
7018 7089 * when we're in the ACTIVE state. If we're
7019 7090 * encountering the exit() action while in
7020 7091 * COOLDOWN, however, we want to honor the new
7021 7092 * status code. (We know that we're the only
7022 7093 * thread in COOLDOWN, so there is no race.)
7023 7094 */
7024 7095 void *activity = &state->dts_activity;
7025 7096 dtrace_activity_t current = state->dts_activity;
7026 7097
7027 7098 if (current == DTRACE_ACTIVITY_COOLDOWN)
7028 7099 break;
7029 7100
7030 7101 if (current != DTRACE_ACTIVITY_WARMUP)
7031 7102 current = DTRACE_ACTIVITY_ACTIVE;
7032 7103
7033 7104 if (dtrace_cas32(activity, current,
7034 7105 DTRACE_ACTIVITY_DRAINING) != current) {
7035 7106 *flags |= CPU_DTRACE_DROP;
7036 7107 continue;
7037 7108 }
7038 7109
7039 7110 break;
7040 7111 }
7041 7112
7042 7113 default:
7043 7114 ASSERT(0);
7044 7115 }
7045 7116
7046 - if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
7117 + if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
7118 + dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
7047 7119 uintptr_t end = valoffs + size;
7048 7120
7049 7121 if (tracememsize != 0 &&
7050 7122 valoffs + tracememsize < end) {
7051 7123 end = valoffs + tracememsize;
7052 7124 tracememsize = 0;
7053 7125 }
7054 7126
7055 - if (!dtrace_vcanload((void *)(uintptr_t)val,
7127 + if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7128 + !dtrace_vcanload((void *)(uintptr_t)val,
7056 7129 &dp->dtdo_rtype, &mstate, vstate))
7057 7130 continue;
7058 7131
7059 - /*
7060 - * If this is a string, we're going to only
7061 - * load until we find the zero byte -- after
7062 - * which we'll store zero bytes.
7063 - */
7064 - if (dp->dtdo_rtype.dtdt_kind ==
7065 - DIF_TYPE_STRING) {
7066 - char c = '\0' + 1;
7067 - int intuple = act->dta_intuple;
7068 - size_t s;
7069 -
7070 - for (s = 0; s < size; s++) {
7071 - if (c != '\0')
7072 - c = dtrace_load8(val++);
7073 -
7074 - DTRACE_STORE(uint8_t, tomax,
7075 - valoffs++, c);
7076 -
7077 - if (c == '\0' && intuple)
7078 - break;
7079 - }
7080 -
7081 - continue;
7082 - }
7083 -
7084 - while (valoffs < end) {
7085 - DTRACE_STORE(uint8_t, tomax, valoffs++,
7086 - dtrace_load8(val++));
7087 - }
7088 -
7132 + dtrace_store_by_ref(dp, tomax, size, &valoffs,
7133 + &val, end, act->dta_intuple,
7134 + dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7135 + DIF_TF_BYREF: DIF_TF_BYUREF);
7089 7136 continue;
7090 7137 }
7091 7138
7092 7139 switch (size) {
7093 7140 case 0:
7094 7141 break;
7095 7142
7096 7143 case sizeof (uint8_t):
7097 7144 DTRACE_STORE(uint8_t, tomax, valoffs, val);
7098 7145 break;
7099 7146 case sizeof (uint16_t):
7100 7147 DTRACE_STORE(uint16_t, tomax, valoffs, val);
7101 7148 break;
7102 7149 case sizeof (uint32_t):
7103 7150 DTRACE_STORE(uint32_t, tomax, valoffs, val);
7104 7151 break;
7105 7152 case sizeof (uint64_t):
7106 7153 DTRACE_STORE(uint64_t, tomax, valoffs, val);
7107 7154 break;
7108 7155 default:
7109 7156 /*
7110 7157 * Any other size should have been returned by
7111 7158 * reference, not by value.
7112 7159 */
7113 7160 ASSERT(0);
7114 7161 break;
7115 7162 }
7116 7163 }
7117 7164
7118 7165 if (*flags & CPU_DTRACE_DROP)
7119 7166 continue;
7120 7167
7121 7168 if (*flags & CPU_DTRACE_FAULT) {
7122 7169 int ndx;
7123 7170 dtrace_action_t *err;
7124 7171
7125 7172 buf->dtb_errors++;
7126 7173
7127 7174 if (probe->dtpr_id == dtrace_probeid_error) {
7128 7175 /*
7129 7176 * There's nothing we can do -- we had an
7130 7177 * error on the error probe. We bump an
7131 7178 * error counter to at least indicate that
7132 7179 * this condition happened.
7133 7180 */
7134 7181 dtrace_error(&state->dts_dblerrors);
7135 7182 continue;
7136 7183 }
7137 7184
7138 7185 if (vtime) {
7139 7186 /*
7140 7187 * Before recursing on dtrace_probe(), we
7141 7188 * need to explicitly clear out our start
7142 7189 * time to prevent it from being accumulated
7143 7190 * into t_dtrace_vtime.
7144 7191 */
7145 7192 curthread->t_dtrace_start = 0;
7146 7193 }
7147 7194
7148 7195 /*
7149 7196 * Iterate over the actions to figure out which action
7150 7197 * we were processing when we experienced the error.
7151 7198 * Note that act points _past_ the faulting action; if
7152 7199 * act is ecb->dte_action, the fault was in the
7153 7200 * predicate, if it's ecb->dte_action->dta_next it's
7154 7201 * in action #1, and so on.
7155 7202 */
7156 7203 for (err = ecb->dte_action, ndx = 0;
7157 7204 err != act; err = err->dta_next, ndx++)
7158 7205 continue;
7159 7206
7160 7207 dtrace_probe_error(state, ecb->dte_epid, ndx,
7161 7208 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7162 7209 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7163 7210 cpu_core[cpuid].cpuc_dtrace_illval);
7164 7211
7165 7212 continue;
7166 7213 }
7167 7214
7168 7215 if (!committed)
7169 7216 buf->dtb_offset = offs + ecb->dte_size;
7170 7217 }
7171 7218
7172 7219 end = dtrace_gethrtime();
7173 7220 if (vtime)
7174 7221 curthread->t_dtrace_start = end;
7175 7222
7176 7223 CPU->cpu_dtrace_nsec += end - now;
7177 7224
7178 7225 dtrace_interrupt_enable(cookie);
7179 7226 }
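
Editor's sketch: the change above replaces the inline by-ref copy loop (the removed lines) with a call to dtrace_store_by_ref(), which also covers the new user-space (DIF_TF_BYUREF) case. A minimal user-space rendition of the string semantics the removed lines implemented, with plain memory accesses standing in for dtrace_load8() and DTRACE_STORE():

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Sketch only: copy a by-ref string into the trace buffer, loading
	 * until the terminating NUL and storing zero bytes thereafter; a
	 * tuple member (intuple) stops at the NUL instead of padding out
	 * to the full record size.
	 */
	static void
	store_string_by_ref(uint8_t *tomax, size_t valoffs, const char *val,
	    size_t size, int intuple)
	{
		char c = '\0' + 1;	/* any non-NUL value primes the loop */
		size_t s;

		for (s = 0; s < size; s++) {
			if (c != '\0')
				c = *val++;		/* dtrace_load8(val++) */

			tomax[valoffs++] = c;		/* DTRACE_STORE(uint8_t, ...) */

			if (c == '\0' && intuple)
				break;
		}
	}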
7180 7227
7181 7228 /*
7182 7229 * DTrace Probe Hashing Functions
7183 7230 *
7184 7231  * The functions in this section (and indeed, the functions in the remaining
7185 7232  * sections) are not _called_ from probe context. (Any exceptions to this are
7186 7233  * marked with a "Note:".) Rather, they are called from elsewhere in the
7187 7234  * DTrace framework to look up probes in, add probes to, and remove probes from
7188 7235 * the DTrace probe hashes. (Each probe is hashed by each element of the
7189 7236 * probe tuple -- allowing for fast lookups, regardless of what was
7190 7237 * specified.)
7191 7238 */
7192 7239 static uint_t
7193 7240 dtrace_hash_str(char *p)
7194 7241 {
7195 7242 unsigned int g;
7196 7243 uint_t hval = 0;
7197 7244
7198 7245 while (*p) {
7199 7246 hval = (hval << 4) + *p++;
7200 7247 if ((g = (hval & 0xf0000000)) != 0)
7201 7248 hval ^= g >> 24;
7202 7249 hval &= ~g;
7203 7250 }
7204 7251 return (hval);
7205 7252 }
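
Editor's sketch: dtrace_hash_str() is the classic PJW/ELF-style string hash. A self-contained user-space rendition (main() and the sample strings are illustrative):

	#include <stdio.h>

	static unsigned int
	hash_str(const char *p)
	{
		unsigned int g, hval = 0;

		while (*p) {
			hval = (hval << 4) + *p++;
			if ((g = (hval & 0xf0000000)) != 0)
				hval ^= g >> 24;
			hval &= ~g;
		}
		return (hval);
	}

	int
	main(void)
	{
		/* Two probe-tuple strings; equal hashes would share a chain. */
		printf("%x %x\n", hash_str("kmem_alloc"), hash_str("kmem_free"));
		return (0);
	}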
7206 7253
7207 7254 static dtrace_hash_t *
7208 7255 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
7209 7256 {
7210 7257 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7211 7258
7212 7259 hash->dth_stroffs = stroffs;
7213 7260 hash->dth_nextoffs = nextoffs;
7214 7261 hash->dth_prevoffs = prevoffs;
7215 7262
7216 7263 hash->dth_size = 1;
7217 7264 hash->dth_mask = hash->dth_size - 1;
7218 7265
7219 7266 hash->dth_tab = kmem_zalloc(hash->dth_size *
7220 7267 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7221 7268
7222 7269 return (hash);
7223 7270 }
7224 7271
7225 7272 static void
7226 7273 dtrace_hash_destroy(dtrace_hash_t *hash)
7227 7274 {
7228 7275 #ifdef DEBUG
7229 7276 int i;
7230 7277
7231 7278 for (i = 0; i < hash->dth_size; i++)
7232 7279 ASSERT(hash->dth_tab[i] == NULL);
7233 7280 #endif
7234 7281
7235 7282 kmem_free(hash->dth_tab,
7236 7283 hash->dth_size * sizeof (dtrace_hashbucket_t *));
7237 7284 kmem_free(hash, sizeof (dtrace_hash_t));
7238 7285 }
7239 7286
7240 7287 static void
7241 7288 dtrace_hash_resize(dtrace_hash_t *hash)
7242 7289 {
7243 7290 int size = hash->dth_size, i, ndx;
7244 7291 int new_size = hash->dth_size << 1;
7245 7292 int new_mask = new_size - 1;
7246 7293 dtrace_hashbucket_t **new_tab, *bucket, *next;
7247 7294
7248 7295 ASSERT((new_size & new_mask) == 0);
7249 7296
7250 7297 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7251 7298
7252 7299 for (i = 0; i < size; i++) {
7253 7300 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
7254 7301 dtrace_probe_t *probe = bucket->dthb_chain;
7255 7302
7256 7303 ASSERT(probe != NULL);
7257 7304 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
7258 7305
7259 7306 next = bucket->dthb_next;
7260 7307 bucket->dthb_next = new_tab[ndx];
7261 7308 new_tab[ndx] = bucket;
7262 7309 }
7263 7310 }
7264 7311
7265 7312 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7266 7313 hash->dth_tab = new_tab;
7267 7314 hash->dth_size = new_size;
7268 7315 hash->dth_mask = new_mask;
7269 7316 }
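
Editor's note: the resize doubles dth_size, so the table size stays a power of two and ASSERT((new_size & new_mask) == 0) holds; that is what lets every lookup use hashval & mask in place of a modulo. A tiny self-contained check of that identity:

	#include <assert.h>

	int
	main(void)
	{
		unsigned int size = 8, mask = size - 1;	/* 0b1000, 0b0111 */
		unsigned int h;

		assert((size & mask) == 0);
		for (h = 0; h < 1024; h++)
			assert((h & mask) == h % size);
		return (0);
	}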
7270 7317
7271 7318 static void
7272 7319 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
7273 7320 {
7274 7321 int hashval = DTRACE_HASHSTR(hash, new);
7275 7322 int ndx = hashval & hash->dth_mask;
7276 7323 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7277 7324 dtrace_probe_t **nextp, **prevp;
7278 7325
7279 7326 for (; bucket != NULL; bucket = bucket->dthb_next) {
7280 7327 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7281 7328 goto add;
7282 7329 }
7283 7330
7284 7331 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7285 7332 dtrace_hash_resize(hash);
7286 7333 dtrace_hash_add(hash, new);
7287 7334 return;
7288 7335 }
7289 7336
7290 7337 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7291 7338 bucket->dthb_next = hash->dth_tab[ndx];
7292 7339 hash->dth_tab[ndx] = bucket;
7293 7340 hash->dth_nbuckets++;
7294 7341
7295 7342 add:
7296 7343 nextp = DTRACE_HASHNEXT(hash, new);
7297 7344 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7298 7345 *nextp = bucket->dthb_chain;
7299 7346
7300 7347 if (bucket->dthb_chain != NULL) {
7301 7348 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7302 7349 ASSERT(*prevp == NULL);
7303 7350 *prevp = new;
7304 7351 }
7305 7352
7306 7353 bucket->dthb_chain = new;
7307 7354 bucket->dthb_len++;
7308 7355 }
7309 7356
7310 7357 static dtrace_probe_t *
7311 7358 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
7312 7359 {
7313 7360 int hashval = DTRACE_HASHSTR(hash, template);
7314 7361 int ndx = hashval & hash->dth_mask;
7315 7362 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7316 7363
7317 7364 for (; bucket != NULL; bucket = bucket->dthb_next) {
7318 7365 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7319 7366 return (bucket->dthb_chain);
7320 7367 }
7321 7368
7322 7369 return (NULL);
7323 7370 }
7324 7371
7325 7372 static int
7326 7373 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
7327 7374 {
7328 7375 int hashval = DTRACE_HASHSTR(hash, template);
7329 7376 int ndx = hashval & hash->dth_mask;
7330 7377 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7331 7378
7332 7379 for (; bucket != NULL; bucket = bucket->dthb_next) {
7333 7380 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7334 7381 return (bucket->dthb_len);
7335 7382 }
7336 7383
7337 7384 	return (0);
7338 7385 }
7339 7386
7340 7387 static void
7341 7388 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
7342 7389 {
7343 7390 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
7344 7391 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7345 7392
7346 7393 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
7347 7394 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
7348 7395
7349 7396 /*
7350 7397 * Find the bucket that we're removing this probe from.
7351 7398 */
7352 7399 for (; bucket != NULL; bucket = bucket->dthb_next) {
7353 7400 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
7354 7401 break;
7355 7402 }
7356 7403
7357 7404 ASSERT(bucket != NULL);
7358 7405
7359 7406 if (*prevp == NULL) {
7360 7407 if (*nextp == NULL) {
7361 7408 /*
7362 7409 * The removed probe was the only probe on this
7363 7410 * bucket; we need to remove the bucket.
7364 7411 */
7365 7412 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7366 7413
7367 7414 ASSERT(bucket->dthb_chain == probe);
7368 7415 ASSERT(b != NULL);
7369 7416
7370 7417 if (b == bucket) {
7371 7418 hash->dth_tab[ndx] = bucket->dthb_next;
7372 7419 } else {
7373 7420 while (b->dthb_next != bucket)
7374 7421 b = b->dthb_next;
7375 7422 b->dthb_next = bucket->dthb_next;
7376 7423 }
7377 7424
7378 7425 ASSERT(hash->dth_nbuckets > 0);
7379 7426 hash->dth_nbuckets--;
7380 7427 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
7381 7428 return;
7382 7429 }
7383 7430
7384 7431 bucket->dthb_chain = *nextp;
7385 7432 } else {
7386 7433 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
7387 7434 }
7388 7435
7389 7436 if (*nextp != NULL)
7390 7437 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
7391 7438 }
7392 7439
7393 7440 /*
7394 7441 * DTrace Utility Functions
7395 7442 *
7396 7443 * These are random utility functions that are _not_ called from probe context.
7397 7444 */
7398 7445 static int
7399 7446 dtrace_badattr(const dtrace_attribute_t *a)
7400 7447 {
7401 7448 return (a->dtat_name > DTRACE_STABILITY_MAX ||
7402 7449 a->dtat_data > DTRACE_STABILITY_MAX ||
7403 7450 a->dtat_class > DTRACE_CLASS_MAX);
7404 7451 }
7405 7452
7406 7453 /*
7407 7454  * Return a duplicate of the specified string. If the string is NULL,
7408 7455 * this function returns a zero-length string.
7409 7456 */
7410 7457 static char *
7411 7458 dtrace_strdup(const char *str)
7412 7459 {
7413 7460 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
7414 7461
7415 7462 if (str != NULL)
7416 7463 (void) strcpy(new, str);
7417 7464
7418 7465 return (new);
7419 7466 }
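
Editor's sketch of dtrace_strdup()'s contract, namely that NULL input yields a zero-length string rather than a NULL pointer, with calloc() standing in for kmem_zalloc(..., KM_SLEEP):

	#include <stdlib.h>
	#include <string.h>

	static char *
	xstrdup(const char *str)
	{
		/*
		 * calloc() stands in for kmem_zalloc(..., KM_SLEEP); the
		 * kernel allocation cannot fail, so no error path is shown.
		 */
		char *new = calloc(1, (str != NULL ? strlen(str) : 0) + 1);

		if (str != NULL)
			(void) strcpy(new, str);

		return (new);	/* "" for NULL input, as in the kernel version */
	}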
7420 7467
7421 7468 #define DTRACE_ISALPHA(c) \
7422 7469 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
7423 7470
7424 7471 static int
7425 7472 dtrace_badname(const char *s)
7426 7473 {
7427 7474 char c;
7428 7475
7429 7476 if (s == NULL || (c = *s++) == '\0')
7430 7477 return (0);
7431 7478
7432 7479 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
7433 7480 return (1);
7434 7481
7435 7482 while ((c = *s++) != '\0') {
7436 7483 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
7437 7484 c != '-' && c != '_' && c != '.' && c != '`')
7438 7485 return (1);
7439 7486 }
7440 7487
7441 7488 return (0);
7442 7489 }
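
Editor's note: the grammar dtrace_badname() enforces is that the first character must be alphabetic, '-', '_', or '.', and subsequent characters may also be digits or '`' (the module separator). A self-contained check; the test names are illustrative:

	#include <assert.h>

	#define	ISALPHA(c) \
		(((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))

	static int
	badname(const char *s)
	{
		char c;

		if (s == NULL || (c = *s++) == '\0')
			return (0);

		if (!ISALPHA(c) && c != '-' && c != '_' && c != '.')
			return (1);

		while ((c = *s++) != '\0') {
			if (!ISALPHA(c) && (c < '0' || c > '9') &&
			    c != '-' && c != '_' && c != '.' && c != '`')
				return (1);
		}

		return (0);
	}

	int
	main(void)
	{
		assert(badname("fbt") == 0);
		assert(badname("genunix`kmem_alloc") == 0);	/* '`' legal past char 1 */
		assert(badname("0day") == 1);	/* may not start with a digit */
		assert(badname("has space") == 1);
		assert(badname("") == 0);	/* emptiness is vetted separately by callers */
		return (0);
	}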
7443 7490
7444 7491 static void
7445 7492 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
7446 7493 {
7447 7494 uint32_t priv;
7448 7495
7449 7496 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
7450 7497 /*
7451 7498 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
7452 7499 */
7453 7500 priv = DTRACE_PRIV_ALL;
7454 7501 } else {
7455 7502 *uidp = crgetuid(cr);
7456 7503 *zoneidp = crgetzoneid(cr);
7457 7504
7458 7505 priv = 0;
7459 7506 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
7460 7507 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
7461 7508 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
7462 7509 priv |= DTRACE_PRIV_USER;
7463 7510 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
7464 7511 priv |= DTRACE_PRIV_PROC;
7465 7512 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
7466 7513 priv |= DTRACE_PRIV_OWNER;
7467 7514 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
7468 7515 priv |= DTRACE_PRIV_ZONEOWNER;
7469 7516 }
7470 7517
7471 7518 *privp = priv;
7472 7519 }
7473 7520
7474 7521 #ifdef DTRACE_ERRDEBUG
7475 7522 static void
7476 7523 dtrace_errdebug(const char *str)
7477 7524 {
7478 7525 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
7479 7526 int occupied = 0;
7480 7527
7481 7528 mutex_enter(&dtrace_errlock);
7482 7529 dtrace_errlast = str;
7483 7530 dtrace_errthread = curthread;
7484 7531
7485 7532 while (occupied++ < DTRACE_ERRHASHSZ) {
7486 7533 if (dtrace_errhash[hval].dter_msg == str) {
7487 7534 dtrace_errhash[hval].dter_count++;
7488 7535 goto out;
7489 7536 }
7490 7537
7491 7538 if (dtrace_errhash[hval].dter_msg != NULL) {
7492 7539 hval = (hval + 1) % DTRACE_ERRHASHSZ;
7493 7540 continue;
7494 7541 }
7495 7542
7496 7543 dtrace_errhash[hval].dter_msg = str;
7497 7544 dtrace_errhash[hval].dter_count = 1;
7498 7545 goto out;
7499 7546 }
7500 7547
7501 7548 panic("dtrace: undersized error hash");
7502 7549 out:
7503 7550 mutex_exit(&dtrace_errlock);
7504 7551 }
7505 7552 #endif
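
Editor's sketch: dtrace_errdebug() is an open-addressed hash with linear probing, keyed on the message pointer itself rather than its contents (error strings are compile-time constants, so pointer identity suffices). A user-space rendition; the table size and entry layout are illustrative stand-ins:

	#include <stddef.h>

	#define	ERRHASHSZ	64	/* illustrative; DTRACE_ERRHASHSZ in the kernel */

	static struct {
		const char	*msg;
		unsigned long	count;
	} errhash[ERRHASHSZ];

	static void
	errdebug(const char *str, unsigned int hval)
	{
		int occupied = 0;

		hval %= ERRHASHSZ;

		while (occupied++ < ERRHASHSZ) {
			if (errhash[hval].msg == str) {		/* pointer identity */
				errhash[hval].count++;
				return;
			}

			if (errhash[hval].msg != NULL) {	/* slot taken: probe on */
				hval = (hval + 1) % ERRHASHSZ;
				continue;
			}

			errhash[hval].msg = str;
			errhash[hval].count = 1;
			return;
		}

		/* Table full; the kernel version panics on an undersized hash. */
	}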
7506 7553
7507 7554 /*
7508 7555 * DTrace Matching Functions
7509 7556 *
7510 7557 * These functions are used to match groups of probes, given some elements of
7511 7558 * a probe tuple, or some globbed expressions for elements of a probe tuple.
7512 7559 */
7513 7560 static int
7514 7561 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7515 7562 zoneid_t zoneid)
7516 7563 {
7517 7564 if (priv != DTRACE_PRIV_ALL) {
7518 7565 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7519 7566 uint32_t match = priv & ppriv;
7520 7567
7521 7568 /*
7522 7569 * No PRIV_DTRACE_* privileges...
7523 7570 */
7524 7571 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7525 7572 DTRACE_PRIV_KERNEL)) == 0)
7526 7573 return (0);
7527 7574
7528 7575 /*
7529 7576 * No matching bits, but there were bits to match...
7530 7577 */
7531 7578 if (match == 0 && ppriv != 0)
7532 7579 return (0);
7533 7580
7534 7581 /*
7535 7582 * Need to have permissions to the process, but don't...
7536 7583 */
7537 7584 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7538 7585 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7539 7586 return (0);
7540 7587 }
7541 7588
7542 7589 /*
7543 7590 * Need to be in the same zone unless we possess the
7544 7591 * privilege to examine all zones.
7545 7592 */
7546 7593 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7547 7594 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7548 7595 return (0);
7549 7596 }
7550 7597 }
7551 7598
7552 7599 return (1);
7553 7600 }
7554 7601
7555 7602 /*
7556 7603 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7557 7604 * consists of input pattern strings and an ops-vector to evaluate them.
7558 7605 * This function returns >0 for match, 0 for no match, and <0 for error.
7559 7606 */
7560 7607 static int
7561 7608 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7562 7609 uint32_t priv, uid_t uid, zoneid_t zoneid)
7563 7610 {
7564 7611 dtrace_provider_t *pvp = prp->dtpr_provider;
7565 7612 int rv;
7566 7613
7567 7614 if (pvp->dtpv_defunct)
7568 7615 return (0);
7569 7616
7570 7617 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7571 7618 return (rv);
7572 7619
7573 7620 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7574 7621 return (rv);
7575 7622
7576 7623 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7577 7624 return (rv);
7578 7625
7579 7626 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7580 7627 return (rv);
7581 7628
7582 7629 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7583 7630 return (0);
7584 7631
7585 7632 return (rv);
7586 7633 }
7587 7634
7588 7635 /*
7589 7636 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7590 7637 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7591 7638 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7592 7639 * In addition, all of the recursion cases except for '*' matching have been
7593 7640 * unwound. For '*', we still implement recursive evaluation, but a depth
7594 7641 * counter is maintained and matching is aborted if we recurse too deep.
7595 7642 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7596 7643 */
7597 7644 static int
7598 7645 dtrace_match_glob(const char *s, const char *p, int depth)
7599 7646 {
7600 7647 const char *olds;
7601 7648 char s1, c;
7602 7649 int gs;
7603 7650
7604 7651 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7605 7652 return (-1);
7606 7653
7607 7654 if (s == NULL)
7608 7655 s = ""; /* treat NULL as empty string */
7609 7656
7610 7657 top:
7611 7658 olds = s;
7612 7659 s1 = *s++;
7613 7660
7614 7661 if (p == NULL)
7615 7662 return (0);
7616 7663
7617 7664 if ((c = *p++) == '\0')
7618 7665 return (s1 == '\0');
7619 7666
7620 7667 switch (c) {
7621 7668 case '[': {
7622 7669 int ok = 0, notflag = 0;
7623 7670 char lc = '\0';
7624 7671
7625 7672 if (s1 == '\0')
7626 7673 return (0);
7627 7674
7628 7675 if (*p == '!') {
7629 7676 notflag = 1;
7630 7677 p++;
7631 7678 }
7632 7679
7633 7680 if ((c = *p++) == '\0')
7634 7681 return (0);
7635 7682
7636 7683 do {
7637 7684 if (c == '-' && lc != '\0' && *p != ']') {
7638 7685 if ((c = *p++) == '\0')
7639 7686 return (0);
7640 7687 if (c == '\\' && (c = *p++) == '\0')
7641 7688 return (0);
7642 7689
7643 7690 if (notflag) {
7644 7691 if (s1 < lc || s1 > c)
7645 7692 ok++;
7646 7693 else
7647 7694 return (0);
7648 7695 } else if (lc <= s1 && s1 <= c)
7649 7696 ok++;
7650 7697
7651 7698 } else if (c == '\\' && (c = *p++) == '\0')
7652 7699 return (0);
7653 7700
7654 7701 lc = c; /* save left-hand 'c' for next iteration */
7655 7702
7656 7703 if (notflag) {
7657 7704 if (s1 != c)
7658 7705 ok++;
7659 7706 else
7660 7707 return (0);
7661 7708 } else if (s1 == c)
7662 7709 ok++;
7663 7710
7664 7711 if ((c = *p++) == '\0')
7665 7712 return (0);
7666 7713
7667 7714 } while (c != ']');
7668 7715
7669 7716 if (ok)
7670 7717 goto top;
7671 7718
7672 7719 return (0);
7673 7720 }
7674 7721
7675 7722 case '\\':
7676 7723 if ((c = *p++) == '\0')
7677 7724 return (0);
7678 7725 /*FALLTHRU*/
7679 7726
7680 7727 default:
7681 7728 if (c != s1)
7682 7729 return (0);
7683 7730 /*FALLTHRU*/
7684 7731
7685 7732 case '?':
7686 7733 if (s1 != '\0')
7687 7734 goto top;
7688 7735 return (0);
7689 7736
7690 7737 case '*':
7691 7738 while (*p == '*')
7692 7739 p++; /* consecutive *'s are identical to a single one */
7693 7740
7694 7741 if (*p == '\0')
7695 7742 return (1);
7696 7743
7697 7744 for (s = olds; *s != '\0'; s++) {
7698 7745 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7699 7746 return (gs);
7700 7747 }
7701 7748
7702 7749 return (0);
7703 7750 }
7704 7751 }
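
Editor's sketch: a few illustrative calls showing the matcher's semantics, assuming dtrace_match_glob() above is compiled into the same unit (a NULL input string is treated as empty):

	#include <assert.h>

	int
	main(void)
	{
		assert(dtrace_match_glob("kmem_alloc", "kmem_*", 0) > 0);
		assert(dtrace_match_glob("entry", "e?tr[xy]", 0) > 0);
		assert(dtrace_match_glob("return", "entry", 0) == 0);
		assert(dtrace_match_glob(NULL, "*", 0) > 0);	/* NULL matches "*" */
		return (0);
	}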
7705 7752
7706 7753 /*ARGSUSED*/
7707 7754 static int
7708 7755 dtrace_match_string(const char *s, const char *p, int depth)
7709 7756 {
7710 7757 return (s != NULL && strcmp(s, p) == 0);
7711 7758 }
7712 7759
7713 7760 /*ARGSUSED*/
7714 7761 static int
7715 7762 dtrace_match_nul(const char *s, const char *p, int depth)
7716 7763 {
7717 7764 return (1); /* always match the empty pattern */
7718 7765 }
7719 7766
7720 7767 /*ARGSUSED*/
7721 7768 static int
7722 7769 dtrace_match_nonzero(const char *s, const char *p, int depth)
7723 7770 {
7724 7771 return (s != NULL && s[0] != '\0');
7725 7772 }
7726 7773
7727 7774 static int
7728 7775 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7729 7776 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7730 7777 {
7731 7778 dtrace_probe_t template, *probe;
7732 7779 dtrace_hash_t *hash = NULL;
7733 7780 int len, rc, best = INT_MAX, nmatched = 0;
7734 7781 dtrace_id_t i;
7735 7782
7736 7783 ASSERT(MUTEX_HELD(&dtrace_lock));
7737 7784
7738 7785 /*
7739 7786 * If the probe ID is specified in the key, just lookup by ID and
7740 7787 * invoke the match callback once if a matching probe is found.
7741 7788 */
7742 7789 if (pkp->dtpk_id != DTRACE_IDNONE) {
7743 7790 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7744 7791 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7745 7792 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
7746 7793 return (DTRACE_MATCH_FAIL);
7747 7794 nmatched++;
7748 7795 }
7749 7796 return (nmatched);
7750 7797 }
7751 7798
7752 7799 template.dtpr_mod = (char *)pkp->dtpk_mod;
7753 7800 template.dtpr_func = (char *)pkp->dtpk_func;
7754 7801 template.dtpr_name = (char *)pkp->dtpk_name;
7755 7802
7756 7803 /*
7757 7804 * We want to find the most distinct of the module name, function
7758 7805 * name, and name. So for each one that is not a glob pattern or
7759 7806 * empty string, we perform a lookup in the corresponding hash and
7760 7807 * use the hash table with the fewest collisions to do our search.
7761 7808 */
7762 7809 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7763 7810 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7764 7811 best = len;
7765 7812 hash = dtrace_bymod;
7766 7813 }
7767 7814
7768 7815 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7769 7816 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7770 7817 best = len;
7771 7818 hash = dtrace_byfunc;
7772 7819 }
7773 7820
7774 7821 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7775 7822 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7776 7823 best = len;
7777 7824 hash = dtrace_byname;
7778 7825 }
7779 7826
7780 7827 /*
7781 7828 * If we did not select a hash table, iterate over every probe and
7782 7829 * invoke our callback for each one that matches our input probe key.
7783 7830 */
7784 7831 if (hash == NULL) {
7785 7832 for (i = 0; i < dtrace_nprobes; i++) {
7786 7833 if ((probe = dtrace_probes[i]) == NULL ||
7787 7834 dtrace_match_probe(probe, pkp, priv, uid,
7788 7835 zoneid) <= 0)
7789 7836 continue;
7790 7837
7791 7838 nmatched++;
7792 7839
7793 7840 if ((rc = (*matched)(probe, arg)) !=
7794 7841 DTRACE_MATCH_NEXT) {
7795 7842 if (rc == DTRACE_MATCH_FAIL)
7796 7843 return (DTRACE_MATCH_FAIL);
7797 7844 break;
7798 7845 }
7799 7846 }
7800 7847
7801 7848 return (nmatched);
7802 7849 }
7803 7850
7804 7851 /*
7805 7852 * If we selected a hash table, iterate over each probe of the same key
7806 7853 * name and invoke the callback for every probe that matches the other
7807 7854 * attributes of our input probe key.
7808 7855 */
7809 7856 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7810 7857 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7811 7858
7812 7859 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7813 7860 continue;
7814 7861
7815 7862 nmatched++;
7816 7863
7817 7864 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
7818 7865 if (rc == DTRACE_MATCH_FAIL)
7819 7866 return (DTRACE_MATCH_FAIL);
7820 7867 break;
7821 7868 }
7822 7869 }
7823 7870
7824 7871 return (nmatched);
7825 7872 }
7826 7873
7827 7874 /*
7828 7875 * Return the function pointer dtrace_probecmp() should use to compare the
7829 7876 * specified pattern with a string. For NULL or empty patterns, we select
7830 7877 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7831 7878 * For non-empty non-glob strings, we use dtrace_match_string().
7832 7879 */
7833 7880 static dtrace_probekey_f *
7834 7881 dtrace_probekey_func(const char *p)
7835 7882 {
7836 7883 char c;
7837 7884
7838 7885 if (p == NULL || *p == '\0')
7839 7886 return (&dtrace_match_nul);
7840 7887
7841 7888 while ((c = *p++) != '\0') {
7842 7889 if (c == '[' || c == '?' || c == '*' || c == '\\')
7843 7890 return (&dtrace_match_glob);
7844 7891 }
7845 7892
7846 7893 return (&dtrace_match_string);
7847 7894 }
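
Editor's sketch of how the classifier maps probe-description fields onto matchers, assuming the three matchers and dtrace_probekey_func() above are linked into the same unit:

	#include <assert.h>

	int
	main(void)
	{
		assert(dtrace_probekey_func(NULL) == &dtrace_match_nul);
		assert(dtrace_probekey_func("") == &dtrace_match_nul);
		assert(dtrace_probekey_func("kmem_*") == &dtrace_match_glob);
		assert(dtrace_probekey_func("kmem_alloc") == &dtrace_match_string);
		return (0);
	}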
7848 7895
7849 7896 /*
7850 7897 * Build a probe comparison key for use with dtrace_match_probe() from the
7851 7898 * given probe description. By convention, a null key only matches anchored
7852 7899 * probes: if each field is the empty string, reset dtpk_fmatch to
7853 7900 * dtrace_match_nonzero().
7854 7901 */
7855 7902 static void
7856 7903 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7857 7904 {
7858 7905 pkp->dtpk_prov = pdp->dtpd_provider;
7859 7906 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7860 7907
7861 7908 pkp->dtpk_mod = pdp->dtpd_mod;
7862 7909 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7863 7910
7864 7911 pkp->dtpk_func = pdp->dtpd_func;
7865 7912 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7866 7913
7867 7914 pkp->dtpk_name = pdp->dtpd_name;
7868 7915 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7869 7916
7870 7917 pkp->dtpk_id = pdp->dtpd_id;
7871 7918
7872 7919 if (pkp->dtpk_id == DTRACE_IDNONE &&
7873 7920 pkp->dtpk_pmatch == &dtrace_match_nul &&
7874 7921 pkp->dtpk_mmatch == &dtrace_match_nul &&
7875 7922 pkp->dtpk_fmatch == &dtrace_match_nul &&
7876 7923 pkp->dtpk_nmatch == &dtrace_match_nul)
7877 7924 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7878 7925 }
7879 7926
7880 7927 /*
7881 7928 * DTrace Provider-to-Framework API Functions
7882 7929 *
7883 7930 * These functions implement much of the Provider-to-Framework API, as
7884 7931 * described in <sys/dtrace.h>. The parts of the API not in this section are
7885 7932 * the functions in the API for probe management (found below), and
7886 7933 * dtrace_probe() itself (found above).
7887 7934 */
7888 7935
7889 7936 /*
7890 7937 * Register the calling provider with the DTrace framework. This should
7891 7938 * generally be called by DTrace providers in their attach(9E) entry point.
7892 7939 */
7893 7940 int
7894 7941 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7895 7942 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7896 7943 {
7897 7944 dtrace_provider_t *provider;
7898 7945
7899 7946 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7900 7947 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7901 7948 "arguments", name ? name : "<NULL>");
7902 7949 return (EINVAL);
7903 7950 }
7904 7951
7905 7952 if (name[0] == '\0' || dtrace_badname(name)) {
7906 7953 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7907 7954 "provider name", name);
7908 7955 return (EINVAL);
7909 7956 }
7910 7957
7911 7958 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7912 7959 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7913 7960 pops->dtps_destroy == NULL ||
7914 7961 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7915 7962 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7916 7963 "provider ops", name);
7917 7964 return (EINVAL);
7918 7965 }
7919 7966
7920 7967 if (dtrace_badattr(&pap->dtpa_provider) ||
7921 7968 dtrace_badattr(&pap->dtpa_mod) ||
7922 7969 dtrace_badattr(&pap->dtpa_func) ||
7923 7970 dtrace_badattr(&pap->dtpa_name) ||
7924 7971 dtrace_badattr(&pap->dtpa_args)) {
7925 7972 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7926 7973 "provider attributes", name);
7927 7974 return (EINVAL);
7928 7975 }
7929 7976
7930 7977 if (priv & ~DTRACE_PRIV_ALL) {
7931 7978 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7932 7979 "privilege attributes", name);
7933 7980 return (EINVAL);
7934 7981 }
7935 7982
7936 7983 if ((priv & DTRACE_PRIV_KERNEL) &&
7937 7984 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7938 7985 pops->dtps_mode == NULL) {
7939 7986 cmn_err(CE_WARN, "failed to register provider '%s': need "
7940 7987 "dtps_mode() op for given privilege attributes", name);
7941 7988 return (EINVAL);
7942 7989 }
7943 7990
7944 7991 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7945 7992 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7946 7993 (void) strcpy(provider->dtpv_name, name);
7947 7994
7948 7995 provider->dtpv_attr = *pap;
7949 7996 provider->dtpv_priv.dtpp_flags = priv;
7950 7997 if (cr != NULL) {
7951 7998 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7952 7999 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7953 8000 }
7954 8001 provider->dtpv_pops = *pops;
7955 8002
7956 8003 if (pops->dtps_provide == NULL) {
7957 8004 ASSERT(pops->dtps_provide_module != NULL);
7958 8005 provider->dtpv_pops.dtps_provide =
7959 8006 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
7960 8007 }
7961 8008
7962 8009 if (pops->dtps_provide_module == NULL) {
7963 8010 ASSERT(pops->dtps_provide != NULL);
7964 8011 provider->dtpv_pops.dtps_provide_module =
7965 8012 (void (*)(void *, struct modctl *))dtrace_nullop;
7966 8013 }
7967 8014
7968 8015 if (pops->dtps_suspend == NULL) {
7969 8016 ASSERT(pops->dtps_resume == NULL);
7970 8017 provider->dtpv_pops.dtps_suspend =
7971 8018 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7972 8019 provider->dtpv_pops.dtps_resume =
7973 8020 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7974 8021 }
7975 8022
7976 8023 provider->dtpv_arg = arg;
7977 8024 *idp = (dtrace_provider_id_t)provider;
7978 8025
7979 8026 if (pops == &dtrace_provider_ops) {
7980 8027 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7981 8028 ASSERT(MUTEX_HELD(&dtrace_lock));
7982 8029 ASSERT(dtrace_anon.dta_enabling == NULL);
7983 8030
7984 8031 /*
7985 8032 * We make sure that the DTrace provider is at the head of
7986 8033 * the provider chain.
7987 8034 */
7988 8035 provider->dtpv_next = dtrace_provider;
7989 8036 dtrace_provider = provider;
7990 8037 return (0);
7991 8038 }
7992 8039
7993 8040 mutex_enter(&dtrace_provider_lock);
7994 8041 mutex_enter(&dtrace_lock);
7995 8042
7996 8043 /*
7997 8044 * If there is at least one provider registered, we'll add this
7998 8045 * provider after the first provider.
7999 8046 */
8000 8047 if (dtrace_provider != NULL) {
8001 8048 provider->dtpv_next = dtrace_provider->dtpv_next;
8002 8049 dtrace_provider->dtpv_next = provider;
8003 8050 } else {
8004 8051 dtrace_provider = provider;
8005 8052 }
8006 8053
8007 8054 if (dtrace_retained != NULL) {
8008 8055 dtrace_enabling_provide(provider);
8009 8056
8010 8057 /*
8011 8058 * Now we need to call dtrace_enabling_matchall() -- which
8012 8059 * will acquire cpu_lock and dtrace_lock. We therefore need
8013 8060 * to drop all of our locks before calling into it...
8014 8061 */
8015 8062 mutex_exit(&dtrace_lock);
8016 8063 mutex_exit(&dtrace_provider_lock);
8017 8064 dtrace_enabling_matchall();
8018 8065
8019 8066 return (0);
8020 8067 }
8021 8068
8022 8069 mutex_exit(&dtrace_lock);
8023 8070 mutex_exit(&dtrace_provider_lock);
8024 8071
8025 8072 return (0);
8026 8073 }
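
Editor's sketch of a provider registering itself from its attach(9E) entry point. The "example" name, attributes, and no-op callbacks are hypothetical; only the ops dtrace_register() requires are supplied (provide, enable, disable, destroy, with suspend/resume legitimately both NULL):

	static dtrace_provider_id_t example_id;

	static void example_provide(void *, const dtrace_probedesc_t *);
	static int example_enable(void *, dtrace_id_t, void *);
	static void example_disable(void *, dtrace_id_t, void *);
	static void example_destroy(void *, dtrace_id_t, void *);

	static dtrace_pops_t example_pops = {
		example_provide,	/* dtps_provide */
		NULL,			/* dtps_provide_module */
		example_enable,		/* dtps_enable */
		example_disable,	/* dtps_disable */
		NULL,			/* dtps_suspend */
		NULL,			/* dtps_resume */
		NULL,			/* dtps_getargdesc */
		NULL,			/* dtps_getargval */
		NULL,			/* dtps_mode */
		example_destroy		/* dtps_destroy */
	};

	static dtrace_pattr_t example_attr = {
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	};

	/* Called from the provider's attach(9E) routine. */
	static int
	example_attach(void)
	{
		if (dtrace_register("example", &example_attr, DTRACE_PRIV_KERNEL,
		    NULL, &example_pops, NULL, &example_id) != 0)
			return (DDI_FAILURE);

		return (DDI_SUCCESS);
	}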
8027 8074
8028 8075 /*
8029 8076 * Unregister the specified provider from the DTrace framework. This should
8030 8077 * generally be called by DTrace providers in their detach(9E) entry point.
8031 8078 */
8032 8079 int
8033 8080 dtrace_unregister(dtrace_provider_id_t id)
8034 8081 {
8035 8082 dtrace_provider_t *old = (dtrace_provider_t *)id;
8036 8083 dtrace_provider_t *prev = NULL;
8037 8084 int i, self = 0, noreap = 0;
8038 8085 dtrace_probe_t *probe, *first = NULL;
8039 8086
8040 8087 if (old->dtpv_pops.dtps_enable ==
8041 8088 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
8042 8089 /*
8043 8090 * If DTrace itself is the provider, we're called with locks
8044 8091 * already held.
8045 8092 */
8046 8093 ASSERT(old == dtrace_provider);
8047 8094 ASSERT(dtrace_devi != NULL);
8048 8095 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8049 8096 ASSERT(MUTEX_HELD(&dtrace_lock));
8050 8097 self = 1;
8051 8098
8052 8099 if (dtrace_provider->dtpv_next != NULL) {
8053 8100 /*
8054 8101 * There's another provider here; return failure.
8055 8102 */
8056 8103 return (EBUSY);
8057 8104 }
8058 8105 } else {
8059 8106 mutex_enter(&dtrace_provider_lock);
8060 8107 mutex_enter(&mod_lock);
8061 8108 mutex_enter(&dtrace_lock);
8062 8109 }
8063 8110
8064 8111 /*
8065 8112 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8066 8113 * probes, we refuse to let providers slither away, unless this
8067 8114 * provider has already been explicitly invalidated.
8068 8115 */
8069 8116 if (!old->dtpv_defunct &&
8070 8117 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8071 8118 dtrace_anon.dta_state->dts_necbs > 0))) {
8072 8119 if (!self) {
8073 8120 mutex_exit(&dtrace_lock);
8074 8121 mutex_exit(&mod_lock);
8075 8122 mutex_exit(&dtrace_provider_lock);
8076 8123 }
8077 8124 return (EBUSY);
8078 8125 }
8079 8126
8080 8127 /*
8081 8128 * Attempt to destroy the probes associated with this provider.
8082 8129 */
8083 8130 for (i = 0; i < dtrace_nprobes; i++) {
8084 8131 if ((probe = dtrace_probes[i]) == NULL)
8085 8132 continue;
8086 8133
8087 8134 if (probe->dtpr_provider != old)
8088 8135 continue;
8089 8136
8090 8137 if (probe->dtpr_ecb == NULL)
8091 8138 continue;
8092 8139
8093 8140 /*
8094 8141 * If we are trying to unregister a defunct provider, and the
8095 8142 * provider was made defunct within the interval dictated by
8096 8143 * dtrace_unregister_defunct_reap, we'll (asynchronously)
8097 8144 * attempt to reap our enablings. To denote that the provider
8098 8145 * should reattempt to unregister itself at some point in the
8099 8146 * future, we will return a differentiable error code (EAGAIN
8100 8147 * instead of EBUSY) in this case.
8101 8148 */
8102 8149 if (dtrace_gethrtime() - old->dtpv_defunct >
8103 8150 dtrace_unregister_defunct_reap)
8104 8151 noreap = 1;
8105 8152
8106 8153 if (!self) {
8107 8154 mutex_exit(&dtrace_lock);
8108 8155 mutex_exit(&mod_lock);
8109 8156 mutex_exit(&dtrace_provider_lock);
8110 8157 }
8111 8158
8112 8159 if (noreap)
8113 8160 return (EBUSY);
8114 8161
8115 8162 (void) taskq_dispatch(dtrace_taskq,
8116 8163 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
8117 8164
8118 8165 return (EAGAIN);
8119 8166 }
8120 8167
8121 8168 /*
8122 8169 * All of the probes for this provider are disabled; we can safely
8123 8170 * remove all of them from their hash chains and from the probe array.
8124 8171 */
8125 8172 for (i = 0; i < dtrace_nprobes; i++) {
8126 8173 if ((probe = dtrace_probes[i]) == NULL)
8127 8174 continue;
8128 8175
8129 8176 if (probe->dtpr_provider != old)
8130 8177 continue;
8131 8178
8132 8179 dtrace_probes[i] = NULL;
8133 8180
8134 8181 dtrace_hash_remove(dtrace_bymod, probe);
8135 8182 dtrace_hash_remove(dtrace_byfunc, probe);
8136 8183 dtrace_hash_remove(dtrace_byname, probe);
8137 8184
8138 8185 if (first == NULL) {
8139 8186 first = probe;
8140 8187 probe->dtpr_nextmod = NULL;
8141 8188 } else {
8142 8189 probe->dtpr_nextmod = first;
8143 8190 first = probe;
8144 8191 }
8145 8192 }
8146 8193
8147 8194 /*
8148 8195 * The provider's probes have been removed from the hash chains and
8149 8196 * from the probe array. Now issue a dtrace_sync() to be sure that
8150 8197 * everyone has cleared out from any probe array processing.
8151 8198 */
8152 8199 dtrace_sync();
8153 8200
8154 8201 for (probe = first; probe != NULL; probe = first) {
8155 8202 first = probe->dtpr_nextmod;
8156 8203
8157 8204 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8158 8205 probe->dtpr_arg);
8159 8206 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8160 8207 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8161 8208 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8162 8209 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8163 8210 kmem_free(probe, sizeof (dtrace_probe_t));
8164 8211 }
8165 8212
8166 8213 if ((prev = dtrace_provider) == old) {
8167 8214 ASSERT(self || dtrace_devi == NULL);
8168 8215 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8169 8216 dtrace_provider = old->dtpv_next;
8170 8217 } else {
8171 8218 while (prev != NULL && prev->dtpv_next != old)
8172 8219 prev = prev->dtpv_next;
8173 8220
8174 8221 if (prev == NULL) {
8175 8222 panic("attempt to unregister non-existent "
8176 8223 "dtrace provider %p\n", (void *)id);
8177 8224 }
8178 8225
8179 8226 prev->dtpv_next = old->dtpv_next;
8180 8227 }
8181 8228
8182 8229 if (!self) {
8183 8230 mutex_exit(&dtrace_lock);
8184 8231 mutex_exit(&mod_lock);
8185 8232 mutex_exit(&dtrace_provider_lock);
8186 8233 }
8187 8234
8188 8235 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
8189 8236 kmem_free(old, sizeof (dtrace_provider_t));
8190 8237
8191 8238 return (0);
8192 8239 }
8193 8240
8194 8241 /*
8195 8242 * Invalidate the specified provider. All subsequent probe lookups for the
8196 8243 * specified provider will fail, but its probes will not be removed.
8197 8244 */
8198 8245 void
8199 8246 dtrace_invalidate(dtrace_provider_id_t id)
8200 8247 {
8201 8248 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8202 8249
8203 8250 ASSERT(pvp->dtpv_pops.dtps_enable !=
8204 8251 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8205 8252
8206 8253 mutex_enter(&dtrace_provider_lock);
8207 8254 mutex_enter(&dtrace_lock);
8208 8255
8209 8256 pvp->dtpv_defunct = dtrace_gethrtime();
8210 8257
8211 8258 mutex_exit(&dtrace_lock);
8212 8259 mutex_exit(&dtrace_provider_lock);
8213 8260 }
8214 8261
8215 8262 /*
8216 8263 * Indicate whether or not DTrace has attached.
8217 8264 */
8218 8265 int
8219 8266 dtrace_attached(void)
8220 8267 {
8221 8268 /*
8222 8269 * dtrace_provider will be non-NULL iff the DTrace driver has
8223 8270 * attached. (It's non-NULL because DTrace is always itself a
8224 8271 * provider.)
8225 8272 */
8226 8273 return (dtrace_provider != NULL);
8227 8274 }
8228 8275
8229 8276 /*
8230 8277 * Remove all the unenabled probes for the given provider. This function is
8231 8278 * not unlike dtrace_unregister(), except that it doesn't remove the provider
8232 8279 * -- just as many of its associated probes as it can.
8233 8280 */
8234 8281 int
8235 8282 dtrace_condense(dtrace_provider_id_t id)
8236 8283 {
8237 8284 dtrace_provider_t *prov = (dtrace_provider_t *)id;
8238 8285 int i;
8239 8286 dtrace_probe_t *probe;
8240 8287
8241 8288 /*
8242 8289 * Make sure this isn't the dtrace provider itself.
8243 8290 */
8244 8291 ASSERT(prov->dtpv_pops.dtps_enable !=
8245 8292 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8246 8293
8247 8294 mutex_enter(&dtrace_provider_lock);
8248 8295 mutex_enter(&dtrace_lock);
8249 8296
8250 8297 /*
8251 8298 * Attempt to destroy the probes associated with this provider.
8252 8299 */
8253 8300 for (i = 0; i < dtrace_nprobes; i++) {
8254 8301 if ((probe = dtrace_probes[i]) == NULL)
8255 8302 continue;
8256 8303
8257 8304 if (probe->dtpr_provider != prov)
8258 8305 continue;
8259 8306
8260 8307 if (probe->dtpr_ecb != NULL)
8261 8308 continue;
8262 8309
8263 8310 dtrace_probes[i] = NULL;
8264 8311
8265 8312 dtrace_hash_remove(dtrace_bymod, probe);
8266 8313 dtrace_hash_remove(dtrace_byfunc, probe);
8267 8314 dtrace_hash_remove(dtrace_byname, probe);
8268 8315
8269 8316 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
8270 8317 probe->dtpr_arg);
8271 8318 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8272 8319 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8273 8320 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8274 8321 kmem_free(probe, sizeof (dtrace_probe_t));
8275 8322 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
8276 8323 }
8277 8324
8278 8325 mutex_exit(&dtrace_lock);
8279 8326 mutex_exit(&dtrace_provider_lock);
8280 8327
8281 8328 return (0);
8282 8329 }
8283 8330
8284 8331 /*
8285 8332 * DTrace Probe Management Functions
8286 8333 *
8287 8334 * The functions in this section perform the DTrace probe management,
8288 8335  * The functions in this section perform DTrace probe management,
8289 8336  * including functions to create probes, look up probes, and call into the
8290 8337 * in the Provider-to-Framework API; these functions can be identified by the
8291 8338 * fact that they are not declared "static".
8292 8339 */
8293 8340
8294 8341 /*
8295 8342 * Create a probe with the specified module name, function name, and name.
8296 8343 */
8297 8344 dtrace_id_t
8298 8345 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
8299 8346 const char *func, const char *name, int aframes, void *arg)
8300 8347 {
8301 8348 dtrace_probe_t *probe, **probes;
8302 8349 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
8303 8350 dtrace_id_t id;
8304 8351
8305 8352 if (provider == dtrace_provider) {
8306 8353 ASSERT(MUTEX_HELD(&dtrace_lock));
8307 8354 } else {
8308 8355 mutex_enter(&dtrace_lock);
8309 8356 }
8310 8357
8311 8358 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
8312 8359 VM_BESTFIT | VM_SLEEP);
8313 8360 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
8314 8361
8315 8362 probe->dtpr_id = id;
8316 8363 probe->dtpr_gen = dtrace_probegen++;
8317 8364 probe->dtpr_mod = dtrace_strdup(mod);
8318 8365 probe->dtpr_func = dtrace_strdup(func);
8319 8366 probe->dtpr_name = dtrace_strdup(name);
8320 8367 probe->dtpr_arg = arg;
8321 8368 probe->dtpr_aframes = aframes;
8322 8369 probe->dtpr_provider = provider;
8323 8370
8324 8371 dtrace_hash_add(dtrace_bymod, probe);
8325 8372 dtrace_hash_add(dtrace_byfunc, probe);
8326 8373 dtrace_hash_add(dtrace_byname, probe);
8327 8374
8328 8375 if (id - 1 >= dtrace_nprobes) {
8329 8376 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
8330 8377 size_t nsize = osize << 1;
8331 8378
8332 8379 if (nsize == 0) {
8333 8380 ASSERT(osize == 0);
8334 8381 ASSERT(dtrace_probes == NULL);
8335 8382 nsize = sizeof (dtrace_probe_t *);
8336 8383 }
8337 8384
8338 8385 probes = kmem_zalloc(nsize, KM_SLEEP);
8339 8386
8340 8387 if (dtrace_probes == NULL) {
8341 8388 ASSERT(osize == 0);
8342 8389 dtrace_probes = probes;
8343 8390 dtrace_nprobes = 1;
8344 8391 } else {
8345 8392 dtrace_probe_t **oprobes = dtrace_probes;
8346 8393
8347 8394 bcopy(oprobes, probes, osize);
8348 8395 dtrace_membar_producer();
8349 8396 dtrace_probes = probes;
8350 8397
8351 8398 dtrace_sync();
8352 8399
8353 8400 /*
8354 8401 * All CPUs are now seeing the new probes array; we can
8355 8402 * safely free the old array.
8356 8403 */
8357 8404 kmem_free(oprobes, osize);
8358 8405 dtrace_nprobes <<= 1;
8359 8406 }
8360 8407
8361 8408 ASSERT(id - 1 < dtrace_nprobes);
8362 8409 }
8363 8410
8364 8411 ASSERT(dtrace_probes[id - 1] == NULL);
8365 8412 dtrace_probes[id - 1] = probe;
8366 8413
8367 8414 if (provider != dtrace_provider)
8368 8415 mutex_exit(&dtrace_lock);
8369 8416
8370 8417 return (id);
8371 8418 }
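
Editor's note: the growth path above is a publish-then-quiesce pattern: copy into a doubled array, publish the new pointer behind a producer barrier, dtrace_sync() so no CPU can still be walking the old array from probe context, and only then free it. A user-space sketch with C11 atomics standing in for dtrace_membar_producer() and the pointer store:

	#include <stdatomic.h>
	#include <stdlib.h>
	#include <string.h>

	static _Atomic(void **) probes;		/* read lock-free by "probe context" */
	static size_t nprobes;			/* assumed seeded with a 1-slot array */

	static void
	grow_probes(void)
	{
		size_t osize = nprobes * sizeof (void *);
		void **oprobes = atomic_load(&probes);
		void **new_tab = calloc(nprobes * 2, sizeof (void *));

		memcpy(new_tab, oprobes, osize);

		/* Publish: release ordering plays dtrace_membar_producer(). */
		atomic_store_explicit(&probes, new_tab, memory_order_release);

		/*
		 * The kernel calls dtrace_sync() here to wait out every CPU
		 * that may still hold the old pointer; only then is the old
		 * array safe to free.
		 */
		free(oprobes);
		nprobes *= 2;
	}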
8372 8419
8373 8420 static dtrace_probe_t *
8374 8421 dtrace_probe_lookup_id(dtrace_id_t id)
8375 8422 {
8376 8423 ASSERT(MUTEX_HELD(&dtrace_lock));
8377 8424
8378 8425 if (id == 0 || id > dtrace_nprobes)
8379 8426 return (NULL);
8380 8427
8381 8428 return (dtrace_probes[id - 1]);
8382 8429 }
8383 8430
8384 8431 static int
8385 8432 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
8386 8433 {
8387 8434 *((dtrace_id_t *)arg) = probe->dtpr_id;
8388 8435
8389 8436 return (DTRACE_MATCH_DONE);
8390 8437 }
8391 8438
8392 8439 /*
8393 8440 * Look up a probe based on provider and one or more of module name, function
8394 8441 * name and probe name.
8395 8442 */
8396 8443 dtrace_id_t
8397 8444 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
8398 8445 const char *func, const char *name)
8399 8446 {
8400 8447 dtrace_probekey_t pkey;
8401 8448 dtrace_id_t id;
8402 8449 int match;
8403 8450
8404 8451 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
8405 8452 pkey.dtpk_pmatch = &dtrace_match_string;
8406 8453 pkey.dtpk_mod = mod;
8407 8454 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
8408 8455 pkey.dtpk_func = func;
8409 8456 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
8410 8457 pkey.dtpk_name = name;
8411 8458 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
8412 8459 pkey.dtpk_id = DTRACE_IDNONE;
8413 8460
8414 8461 mutex_enter(&dtrace_lock);
8415 8462 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
8416 8463 dtrace_probe_lookup_match, &id);
8417 8464 mutex_exit(&dtrace_lock);
8418 8465
8419 8466 ASSERT(match == 1 || match == 0);
8420 8467 return (match ? id : 0);
8421 8468 }
8422 8469
8423 8470 /*
8424 8471 * Returns the probe argument associated with the specified probe.
8425 8472 */
8426 8473 void *
8427 8474 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8428 8475 {
8429 8476 dtrace_probe_t *probe;
8430 8477 void *rval = NULL;
8431 8478
8432 8479 mutex_enter(&dtrace_lock);
8433 8480
8434 8481 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8435 8482 probe->dtpr_provider == (dtrace_provider_t *)id)
8436 8483 rval = probe->dtpr_arg;
8437 8484
8438 8485 mutex_exit(&dtrace_lock);
8439 8486
8440 8487 return (rval);
8441 8488 }
8442 8489
8443 8490 /*
8444 8491 * Copy a probe into a probe description.
8445 8492 */
8446 8493 static void
8447 8494 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8448 8495 {
8449 8496 bzero(pdp, sizeof (dtrace_probedesc_t));
8450 8497 pdp->dtpd_id = prp->dtpr_id;
8451 8498
8452 8499 (void) strncpy(pdp->dtpd_provider,
8453 8500 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8454 8501
8455 8502 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8456 8503 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8457 8504 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8458 8505 }
8459 8506
8460 8507 /*
8461 8508 * Called to indicate that a probe -- or probes -- should be provided by a
8462 8509 * specified provider. If the specified description is NULL, the provider will
8463 8510 * be told to provide all of its probes. (This is done whenever a new
8464 8511 * consumer comes along, or whenever a retained enabling is to be matched.) If
8465 8512 * the specified description is non-NULL, the provider is given the
8466 8513 * opportunity to dynamically provide the specified probe, allowing providers
8467 8514 * to support the creation of probes on-the-fly. (So-called _autocreated_
8468 8515 * probes.) If the provider is NULL, the operations will be applied to all
8469 8516 * providers; if the provider is non-NULL the operations will only be applied
8470 8517 * to the specified provider. The dtrace_provider_lock must be held, and the
8471 8518 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8472 8519 * will need to grab the dtrace_lock when it reenters the framework through
8473 8520 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8474 8521 */
8475 8522 static void
8476 8523 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8477 8524 {
8478 8525 struct modctl *ctl;
8479 8526 int all = 0;
8480 8527
8481 8528 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8482 8529
8483 8530 if (prv == NULL) {
8484 8531 all = 1;
8485 8532 prv = dtrace_provider;
8486 8533 }
8487 8534
8488 8535 do {
8489 8536 /*
8490 8537 * First, call the blanket provide operation.
8491 8538 */
8492 8539 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8493 8540
8494 8541 /*
8495 8542 * Now call the per-module provide operation. We will grab
8496 8543 * mod_lock to prevent the list from being modified. Note
8497 8544 * that this also prevents the mod_busy bits from changing.
8498 8545 * (mod_busy can only be changed with mod_lock held.)
8499 8546 */
8500 8547 mutex_enter(&mod_lock);
8501 8548
8502 8549 ctl = &modules;
8503 8550 do {
8504 8551 if (ctl->mod_busy || ctl->mod_mp == NULL)
8505 8552 continue;
8506 8553
8507 8554 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8508 8555
8509 8556 } while ((ctl = ctl->mod_next) != &modules);
8510 8557
8511 8558 mutex_exit(&mod_lock);
8512 8559 } while (all && (prv = prv->dtpv_next) != NULL);
8513 8560 }
8514 8561
8515 8562 /*
8516 8563 * Iterate over each probe, and call the Framework-to-Provider API function
8517 8564 * denoted by offs.
8518 8565 */
8519 8566 static void
8520 8567 dtrace_probe_foreach(uintptr_t offs)
8521 8568 {
8522 8569 dtrace_provider_t *prov;
8523 8570 void (*func)(void *, dtrace_id_t, void *);
8524 8571 dtrace_probe_t *probe;
8525 8572 dtrace_icookie_t cookie;
8526 8573 int i;
8527 8574
8528 8575 /*
8529 8576 * We disable interrupts to walk through the probe array. This is
8530 8577 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8531 8578 * won't see stale data.
8532 8579 */
8533 8580 cookie = dtrace_interrupt_disable();
8534 8581
8535 8582 for (i = 0; i < dtrace_nprobes; i++) {
8536 8583 if ((probe = dtrace_probes[i]) == NULL)
8537 8584 continue;
8538 8585
8539 8586 if (probe->dtpr_ecb == NULL) {
8540 8587 /*
8541 8588 * This probe isn't enabled -- don't call the function.
8542 8589 */
8543 8590 continue;
8544 8591 }
8545 8592
8546 8593 prov = probe->dtpr_provider;
8547 8594 func = *((void(**)(void *, dtrace_id_t, void *))
8548 8595 ((uintptr_t)&prov->dtpv_pops + offs));
8549 8596
8550 8597 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8551 8598 }
8552 8599
8553 8600 dtrace_interrupt_enable(cookie);
8554 8601 }
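
/*
 * A representative caller (a sketch; see dtrace_suspend() and
 * dtrace_resume() elsewhere in this file):
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
 *
 * That is, "offs" is the byte offset of the desired entry point within
 * the provider's dtrace_pops_t, allowing one walker to fan out to any
 * of the per-probe provider operations.
 */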
8555 8602
8556 8603 static int
8557 8604 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8558 8605 {
8559 8606 dtrace_probekey_t pkey;
8560 8607 uint32_t priv;
8561 8608 uid_t uid;
8562 8609 zoneid_t zoneid;
8563 8610
8564 8611 ASSERT(MUTEX_HELD(&dtrace_lock));
8565 8612 dtrace_ecb_create_cache = NULL;
8566 8613
8567 8614 if (desc == NULL) {
8568 8615 /*
8569 8616 * If we're passed a NULL description, we're being asked to
8570 8617 * create an ECB with a NULL probe.
8571 8618 */
8572 8619 (void) dtrace_ecb_create_enable(NULL, enab);
8573 8620 return (0);
8574 8621 }
8575 8622
8576 8623 dtrace_probekey(desc, &pkey);
8577 8624 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8578 8625 &priv, &uid, &zoneid);
8579 8626
8580 8627 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8581 8628 enab));
8582 8629 }
8583 8630
8584 8631 /*
8585 8632 * DTrace Helper Provider Functions
8586 8633 */
8587 8634 static void
8588 8635 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8589 8636 {
8590 8637 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8591 8638 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8592 8639 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8593 8640 }
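
/*
 * For reference (an informal sketch of the encoding in <sys/dtrace.h>):
 * a dof_attr_t packs the name, data, and class stabilities of a
 * dtrace_attribute_t into the high-order bytes of a single 32-bit word;
 * the DOF_ATTR_NAME/DATA/CLASS macros used above simply unpack them.
 */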
8594 8641
8595 8642 static void
8596 8643 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8597 8644 const dof_provider_t *dofprov, char *strtab)
8598 8645 {
8599 8646 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8600 8647 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8601 8648 dofprov->dofpv_provattr);
8602 8649 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8603 8650 dofprov->dofpv_modattr);
8604 8651 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8605 8652 dofprov->dofpv_funcattr);
8606 8653 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8607 8654 dofprov->dofpv_nameattr);
8608 8655 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8609 8656 dofprov->dofpv_argsattr);
8610 8657 }
8611 8658
8612 8659 static void
8613 8660 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8614 8661 {
8615 8662 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8616 8663 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8617 8664 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8618 8665 dof_provider_t *provider;
8619 8666 dof_probe_t *probe;
8620 8667 uint32_t *off, *enoff;
8621 8668 uint8_t *arg;
8622 8669 char *strtab;
8623 8670 uint_t i, nprobes;
8624 8671 dtrace_helper_provdesc_t dhpv;
8625 8672 dtrace_helper_probedesc_t dhpb;
8626 8673 dtrace_meta_t *meta = dtrace_meta_pid;
8627 8674 dtrace_mops_t *mops = &meta->dtm_mops;
8628 8675 void *parg;
8629 8676
8630 8677 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8631 8678 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8632 8679 provider->dofpv_strtab * dof->dofh_secsize);
8633 8680 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8634 8681 provider->dofpv_probes * dof->dofh_secsize);
8635 8682 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8636 8683 provider->dofpv_prargs * dof->dofh_secsize);
8637 8684 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8638 8685 provider->dofpv_proffs * dof->dofh_secsize);
8639 8686
8640 8687 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8641 8688 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8642 8689 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8643 8690 enoff = NULL;
8644 8691
8645 8692 /*
8646 8693 * See dtrace_helper_provider_validate().
8647 8694 */
8648 8695 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8649 8696 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8650 8697 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8651 8698 provider->dofpv_prenoffs * dof->dofh_secsize);
8652 8699 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8653 8700 }
8654 8701
8655 8702 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8656 8703
8657 8704 /*
8658 8705 * Create the provider.
8659 8706 */
8660 8707 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8661 8708
8662 8709 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8663 8710 return;
8664 8711
8665 8712 meta->dtm_count++;
8666 8713
8667 8714 /*
8668 8715 * Create the probes.
8669 8716 */
8670 8717 for (i = 0; i < nprobes; i++) {
8671 8718 probe = (dof_probe_t *)(uintptr_t)(daddr +
8672 8719 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8673 8720
8674 8721 dhpb.dthpb_mod = dhp->dofhp_mod;
8675 8722 dhpb.dthpb_func = strtab + probe->dofpr_func;
8676 8723 dhpb.dthpb_name = strtab + probe->dofpr_name;
8677 8724 dhpb.dthpb_base = probe->dofpr_addr;
8678 8725 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8679 8726 dhpb.dthpb_noffs = probe->dofpr_noffs;
8680 8727 if (enoff != NULL) {
8681 8728 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8682 8729 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8683 8730 } else {
8684 8731 dhpb.dthpb_enoffs = NULL;
8685 8732 dhpb.dthpb_nenoffs = 0;
8686 8733 }
8687 8734 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8688 8735 dhpb.dthpb_nargc = probe->dofpr_nargc;
8689 8736 dhpb.dthpb_xargc = probe->dofpr_xargc;
8690 8737 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8691 8738 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8692 8739
8693 8740 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8694 8741 }
8695 8742 }
8696 8743
8697 8744 static void
8698 8745 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8699 8746 {
8700 8747 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8701 8748 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8702 8749 int i;
8703 8750
8704 8751 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8705 8752
8706 8753 for (i = 0; i < dof->dofh_secnum; i++) {
8707 8754 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8708 8755 dof->dofh_secoff + i * dof->dofh_secsize);
8709 8756
8710 8757 if (sec->dofs_type != DOF_SECT_PROVIDER)
8711 8758 continue;
8712 8759
8713 8760 dtrace_helper_provide_one(dhp, sec, pid);
8714 8761 }
8715 8762
8716 8763 /*
8717 8764 * We may have just created probes, so we must now rematch against
8718 8765 * any retained enablings. Note that this call will acquire both
8719 8766 * cpu_lock and dtrace_lock; the fact that we are holding
8720 8767 * dtrace_meta_lock now is what defines the ordering with respect to
8721 8768 * these three locks.
8722 8769 */
8723 8770 dtrace_enabling_matchall();
8724 8771 }
8725 8772
8726 8773 static void
8727 8774 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8728 8775 {
8729 8776 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8730 8777 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8731 8778 dof_sec_t *str_sec;
8732 8779 dof_provider_t *provider;
8733 8780 char *strtab;
8734 8781 dtrace_helper_provdesc_t dhpv;
8735 8782 dtrace_meta_t *meta = dtrace_meta_pid;
8736 8783 dtrace_mops_t *mops = &meta->dtm_mops;
8737 8784
8738 8785 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8739 8786 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8740 8787 provider->dofpv_strtab * dof->dofh_secsize);
8741 8788
8742 8789 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8743 8790
8744 8791 /*
8745 8792 * Create the provider.
8746 8793 */
8747 8794 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8748 8795
8749 8796 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8750 8797
8751 8798 meta->dtm_count--;
8752 8799 }
8753 8800
8754 8801 static void
8755 8802 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8756 8803 {
8757 8804 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8758 8805 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8759 8806 int i;
8760 8807
8761 8808 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8762 8809
8763 8810 for (i = 0; i < dof->dofh_secnum; i++) {
8764 8811 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8765 8812 dof->dofh_secoff + i * dof->dofh_secsize);
8766 8813
8767 8814 if (sec->dofs_type != DOF_SECT_PROVIDER)
8768 8815 continue;
8769 8816
8770 8817 dtrace_helper_provider_remove_one(dhp, sec, pid);
8771 8818 }
8772 8819 }
8773 8820
8774 8821 /*
8775 8822 * DTrace Meta Provider-to-Framework API Functions
8776 8823 *
8777 8824 * These functions implement the Meta Provider-to-Framework API, as described
8778 8825 * in <sys/dtrace.h>.
8779 8826 */
8780 8827 int
8781 8828 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8782 8829 dtrace_meta_provider_id_t *idp)
8783 8830 {
8784 8831 dtrace_meta_t *meta;
8785 8832 dtrace_helpers_t *help, *next;
8786 8833 int i;
8787 8834
8788 8835 *idp = DTRACE_METAPROVNONE;
8789 8836
8790 8837 /*
8791 8838 * We strictly don't need the name, but we hold onto it for
8792 8839 * debuggability. All hail error queues!
8793 8840 */
8794 8841 if (name == NULL) {
8795 8842 cmn_err(CE_WARN, "failed to register meta-provider: "
8796 8843 "invalid name");
8797 8844 return (EINVAL);
8798 8845 }
8799 8846
8800 8847 if (mops == NULL ||
8801 8848 mops->dtms_create_probe == NULL ||
8802 8849 mops->dtms_provide_pid == NULL ||
8803 8850 mops->dtms_remove_pid == NULL) {
8804 8851 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8805 8852 "invalid ops", name);
8806 8853 return (EINVAL);
8807 8854 }
8808 8855
8809 8856 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8810 8857 meta->dtm_mops = *mops;
8811 8858 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8812 8859 (void) strcpy(meta->dtm_name, name);
8813 8860 meta->dtm_arg = arg;
8814 8861
8815 8862 mutex_enter(&dtrace_meta_lock);
8816 8863 mutex_enter(&dtrace_lock);
8817 8864
8818 8865 if (dtrace_meta_pid != NULL) {
8819 8866 mutex_exit(&dtrace_lock);
8820 8867 mutex_exit(&dtrace_meta_lock);
8821 8868 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8822 8869 "user-land meta-provider exists", name);
8823 8870 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8824 8871 kmem_free(meta, sizeof (dtrace_meta_t));
8825 8872 return (EINVAL);
8826 8873 }
8827 8874
8828 8875 dtrace_meta_pid = meta;
8829 8876 *idp = (dtrace_meta_provider_id_t)meta;
8830 8877
8831 8878 /*
8832 8879 * If there are providers and probes ready to go, pass them
8833 8880 * off to the new meta provider now.
8834 8881 */
8835 8882
8836 8883 help = dtrace_deferred_pid;
8837 8884 dtrace_deferred_pid = NULL;
8838 8885
8839 8886 mutex_exit(&dtrace_lock);
8840 8887
8841 8888 while (help != NULL) {
8842 8889 for (i = 0; i < help->dthps_nprovs; i++) {
8843 8890 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8844 8891 help->dthps_pid);
8845 8892 }
8846 8893
8847 8894 next = help->dthps_next;
8848 8895 help->dthps_next = NULL;
8849 8896 help->dthps_prev = NULL;
8850 8897 help->dthps_deferred = 0;
8851 8898 help = next;
8852 8899 }
8853 8900
8854 8901 mutex_exit(&dtrace_meta_lock);
8855 8902
8856 8903 return (0);
8857 8904 }
8858 8905
8859 8906 int
8860 8907 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8861 8908 {
8862 8909 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8863 8910
8864 8911 mutex_enter(&dtrace_meta_lock);
8865 8912 mutex_enter(&dtrace_lock);
8866 8913
8867 8914 if (old == dtrace_meta_pid) {
8868 8915 pp = &dtrace_meta_pid;
8869 8916 } else {
8870 8917 panic("attempt to unregister non-existent "
8871 8918 "dtrace meta-provider %p\n", (void *)old);
8872 8919 }
8873 8920
8874 8921 if (old->dtm_count != 0) {
8875 8922 mutex_exit(&dtrace_lock);
8876 8923 mutex_exit(&dtrace_meta_lock);
8877 8924 return (EBUSY);
8878 8925 }
8879 8926
8880 8927 *pp = NULL;
8881 8928
8882 8929 mutex_exit(&dtrace_lock);
8883 8930 mutex_exit(&dtrace_meta_lock);
8884 8931
8885 8932 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8886 8933 kmem_free(old, sizeof (dtrace_meta_t));
8887 8934
8888 8935 return (0);
8889 8936 }
8890 8937
8891 8938
8892 8939 /*
8893 8940 * DTrace DIF Object Functions
8894 8941 */
8895 8942 static int
8896 8943 dtrace_difo_err(uint_t pc, const char *format, ...)
8897 8944 {
8898 8945 if (dtrace_err_verbose) {
8899 8946 va_list alist;
8900 8947
8901 8948 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8902 8949 va_start(alist, format);
8903 8950 (void) vuprintf(format, alist);
8904 8951 va_end(alist);
8905 8952 }
8906 8953
8907 8954 #ifdef DTRACE_ERRDEBUG
8908 8955 dtrace_errdebug(format);
8909 8956 #endif
8910 8957 return (1);
8911 8958 }
8912 8959
8913 8960 /*
8914 8961 * Validate a DTrace DIF object by checking the IR instructions. The following
8915 8962 * rules are currently enforced by dtrace_difo_validate():
8916 8963 *
8917 8964 * 1. Each instruction must have a valid opcode
8918 8965 * 2. Each register, string, variable, or subroutine reference must be valid
8919 8966 * 3. No instruction can modify register %r0 (must be zero)
8920 8967 * 4. All instruction reserved bits must be set to zero
8921 8968 * 5. The last instruction must be a "ret" instruction
8922 8969 * 6. All branch targets must reference a valid instruction _after_ the branch
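 *
 * Rule 6 merits emphasis: since every branch must target a strictly
 * later instruction, a DIFO cannot loop, which is what bounds its
 * execution time in probe context. An illustrative (hypothetical)
 * fragment:
 *
 *	0: ba 0		! rejected: backward branch to 0
 *	1: ba 2		! accepted: target lies after the branch
 *	2: ret %r1	! satisfies rule 5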
8923 8970 */
8924 8971 static int
8925 8972 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8926 8973 cred_t *cr)
8927 8974 {
8928 8975 int err = 0, i;
8929 8976 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8930 8977 int kcheckload;
8931 8978 uint_t pc;
8932 8979
8933 8980 kcheckload = cr == NULL ||
8934 8981 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8935 8982
8936 8983 dp->dtdo_destructive = 0;
8937 8984
8938 8985 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8939 8986 dif_instr_t instr = dp->dtdo_buf[pc];
8940 8987
8941 8988 uint_t r1 = DIF_INSTR_R1(instr);
8942 8989 uint_t r2 = DIF_INSTR_R2(instr);
8943 8990 uint_t rd = DIF_INSTR_RD(instr);
8944 8991 uint_t rs = DIF_INSTR_RS(instr);
8945 8992 uint_t label = DIF_INSTR_LABEL(instr);
8946 8993 uint_t v = DIF_INSTR_VAR(instr);
8947 8994 uint_t subr = DIF_INSTR_SUBR(instr);
8948 8995 uint_t type = DIF_INSTR_TYPE(instr);
8949 8996 uint_t op = DIF_INSTR_OP(instr);
8950 8997
8951 8998 switch (op) {
8952 8999 case DIF_OP_OR:
8953 9000 case DIF_OP_XOR:
8954 9001 case DIF_OP_AND:
8955 9002 case DIF_OP_SLL:
8956 9003 case DIF_OP_SRL:
8957 9004 case DIF_OP_SRA:
8958 9005 case DIF_OP_SUB:
8959 9006 case DIF_OP_ADD:
8960 9007 case DIF_OP_MUL:
8961 9008 case DIF_OP_SDIV:
8962 9009 case DIF_OP_UDIV:
8963 9010 case DIF_OP_SREM:
8964 9011 case DIF_OP_UREM:
8965 9012 case DIF_OP_COPYS:
8966 9013 if (r1 >= nregs)
8967 9014 err += efunc(pc, "invalid register %u\n", r1);
8968 9015 if (r2 >= nregs)
8969 9016 err += efunc(pc, "invalid register %u\n", r2);
8970 9017 if (rd >= nregs)
8971 9018 err += efunc(pc, "invalid register %u\n", rd);
8972 9019 if (rd == 0)
8973 9020 err += efunc(pc, "cannot write to %r0\n");
8974 9021 break;
8975 9022 case DIF_OP_NOT:
8976 9023 case DIF_OP_MOV:
8977 9024 case DIF_OP_ALLOCS:
8978 9025 if (r1 >= nregs)
8979 9026 err += efunc(pc, "invalid register %u\n", r1);
8980 9027 if (r2 != 0)
8981 9028 err += efunc(pc, "non-zero reserved bits\n");
8982 9029 if (rd >= nregs)
8983 9030 err += efunc(pc, "invalid register %u\n", rd);
8984 9031 if (rd == 0)
8985 9032 err += efunc(pc, "cannot write to %r0\n");
8986 9033 break;
8987 9034 case DIF_OP_LDSB:
8988 9035 case DIF_OP_LDSH:
8989 9036 case DIF_OP_LDSW:
8990 9037 case DIF_OP_LDUB:
8991 9038 case DIF_OP_LDUH:
8992 9039 case DIF_OP_LDUW:
8993 9040 case DIF_OP_LDX:
8994 9041 if (r1 >= nregs)
8995 9042 err += efunc(pc, "invalid register %u\n", r1);
8996 9043 if (r2 != 0)
8997 9044 err += efunc(pc, "non-zero reserved bits\n");
8998 9045 if (rd >= nregs)
8999 9046 err += efunc(pc, "invalid register %u\n", rd);
9000 9047 if (rd == 0)
9001 9048 err += efunc(pc, "cannot write to %r0\n");
9002 9049 if (kcheckload)
9003 9050 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9004 9051 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9005 9052 break;
9006 9053 case DIF_OP_RLDSB:
9007 9054 case DIF_OP_RLDSH:
9008 9055 case DIF_OP_RLDSW:
9009 9056 case DIF_OP_RLDUB:
9010 9057 case DIF_OP_RLDUH:
9011 9058 case DIF_OP_RLDUW:
9012 9059 case DIF_OP_RLDX:
9013 9060 if (r1 >= nregs)
9014 9061 err += efunc(pc, "invalid register %u\n", r1);
9015 9062 if (r2 != 0)
9016 9063 err += efunc(pc, "non-zero reserved bits\n");
9017 9064 if (rd >= nregs)
9018 9065 err += efunc(pc, "invalid register %u\n", rd);
9019 9066 if (rd == 0)
9020 9067 err += efunc(pc, "cannot write to %r0\n");
9021 9068 break;
9022 9069 case DIF_OP_ULDSB:
9023 9070 case DIF_OP_ULDSH:
9024 9071 case DIF_OP_ULDSW:
9025 9072 case DIF_OP_ULDUB:
9026 9073 case DIF_OP_ULDUH:
9027 9074 case DIF_OP_ULDUW:
9028 9075 case DIF_OP_ULDX:
9029 9076 if (r1 >= nregs)
9030 9077 err += efunc(pc, "invalid register %u\n", r1);
9031 9078 if (r2 != 0)
9032 9079 err += efunc(pc, "non-zero reserved bits\n");
9033 9080 if (rd >= nregs)
9034 9081 err += efunc(pc, "invalid register %u\n", rd);
9035 9082 if (rd == 0)
9036 9083 err += efunc(pc, "cannot write to %r0\n");
9037 9084 break;
9038 9085 case DIF_OP_STB:
9039 9086 case DIF_OP_STH:
9040 9087 case DIF_OP_STW:
9041 9088 case DIF_OP_STX:
9042 9089 if (r1 >= nregs)
9043 9090 err += efunc(pc, "invalid register %u\n", r1);
9044 9091 if (r2 != 0)
9045 9092 err += efunc(pc, "non-zero reserved bits\n");
9046 9093 if (rd >= nregs)
9047 9094 err += efunc(pc, "invalid register %u\n", rd);
9048 9095 if (rd == 0)
9049 9096 err += efunc(pc, "cannot write to 0 address\n");
9050 9097 break;
9051 9098 case DIF_OP_CMP:
9052 9099 case DIF_OP_SCMP:
9053 9100 if (r1 >= nregs)
9054 9101 err += efunc(pc, "invalid register %u\n", r1);
9055 9102 if (r2 >= nregs)
9056 9103 err += efunc(pc, "invalid register %u\n", r2);
9057 9104 if (rd != 0)
9058 9105 err += efunc(pc, "non-zero reserved bits\n");
9059 9106 break;
9060 9107 case DIF_OP_TST:
9061 9108 if (r1 >= nregs)
9062 9109 err += efunc(pc, "invalid register %u\n", r1);
9063 9110 if (r2 != 0 || rd != 0)
9064 9111 err += efunc(pc, "non-zero reserved bits\n");
9065 9112 break;
9066 9113 case DIF_OP_BA:
9067 9114 case DIF_OP_BE:
9068 9115 case DIF_OP_BNE:
9069 9116 case DIF_OP_BG:
9070 9117 case DIF_OP_BGU:
9071 9118 case DIF_OP_BGE:
9072 9119 case DIF_OP_BGEU:
9073 9120 case DIF_OP_BL:
9074 9121 case DIF_OP_BLU:
9075 9122 case DIF_OP_BLE:
9076 9123 case DIF_OP_BLEU:
9077 9124 if (label >= dp->dtdo_len) {
9078 9125 err += efunc(pc, "invalid branch target %u\n",
9079 9126 label);
9080 9127 }
9081 9128 if (label <= pc) {
9082 9129 err += efunc(pc, "backward branch to %u\n",
9083 9130 label);
9084 9131 }
9085 9132 break;
9086 9133 case DIF_OP_RET:
9087 9134 if (r1 != 0 || r2 != 0)
9088 9135 err += efunc(pc, "non-zero reserved bits\n");
9089 9136 if (rd >= nregs)
9090 9137 err += efunc(pc, "invalid register %u\n", rd);
9091 9138 break;
9092 9139 case DIF_OP_NOP:
9093 9140 case DIF_OP_POPTS:
9094 9141 case DIF_OP_FLUSHTS:
9095 9142 if (r1 != 0 || r2 != 0 || rd != 0)
9096 9143 err += efunc(pc, "non-zero reserved bits\n");
9097 9144 break;
9098 9145 case DIF_OP_SETX:
9099 9146 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9100 9147 err += efunc(pc, "invalid integer ref %u\n",
9101 9148 DIF_INSTR_INTEGER(instr));
9102 9149 }
9103 9150 if (rd >= nregs)
9104 9151 err += efunc(pc, "invalid register %u\n", rd);
9105 9152 if (rd == 0)
9106 9153 err += efunc(pc, "cannot write to %r0\n");
9107 9154 break;
9108 9155 case DIF_OP_SETS:
9109 9156 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9110 9157 err += efunc(pc, "invalid string ref %u\n",
9111 9158 DIF_INSTR_STRING(instr));
9112 9159 }
9113 9160 if (rd >= nregs)
9114 9161 err += efunc(pc, "invalid register %u\n", rd);
9115 9162 if (rd == 0)
9116 9163 err += efunc(pc, "cannot write to %r0\n");
9117 9164 break;
9118 9165 case DIF_OP_LDGA:
9119 9166 case DIF_OP_LDTA:
9120 9167 if (r1 > DIF_VAR_ARRAY_MAX)
9121 9168 err += efunc(pc, "invalid array %u\n", r1);
9122 9169 if (r2 >= nregs)
9123 9170 err += efunc(pc, "invalid register %u\n", r2);
9124 9171 if (rd >= nregs)
9125 9172 err += efunc(pc, "invalid register %u\n", rd);
9126 9173 if (rd == 0)
9127 9174 err += efunc(pc, "cannot write to %r0\n");
9128 9175 break;
9129 9176 case DIF_OP_LDGS:
9130 9177 case DIF_OP_LDTS:
9131 9178 case DIF_OP_LDLS:
9132 9179 case DIF_OP_LDGAA:
9133 9180 case DIF_OP_LDTAA:
9134 9181 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9135 9182 err += efunc(pc, "invalid variable %u\n", v);
9136 9183 if (rd >= nregs)
9137 9184 err += efunc(pc, "invalid register %u\n", rd);
9138 9185 if (rd == 0)
9139 9186 err += efunc(pc, "cannot write to %r0\n");
9140 9187 break;
9141 9188 case DIF_OP_STGS:
9142 9189 case DIF_OP_STTS:
9143 9190 case DIF_OP_STLS:
9144 9191 case DIF_OP_STGAA:
9145 9192 case DIF_OP_STTAA:
9146 9193 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9147 9194 err += efunc(pc, "invalid variable %u\n", v);
9148 9195 if (rs >= nregs)
9149 9196 err += efunc(pc, "invalid register %u\n", rs);
9150 9197 break;
9151 9198 case DIF_OP_CALL:
9152 9199 if (subr > DIF_SUBR_MAX)
9153 9200 err += efunc(pc, "invalid subr %u\n", subr);
9154 9201 if (rd >= nregs)
9155 9202 err += efunc(pc, "invalid register %u\n", rd);
9156 9203 if (rd == 0)
9157 9204 err += efunc(pc, "cannot write to %r0\n");
9158 9205
9159 9206 if (subr == DIF_SUBR_COPYOUT ||
9160 9207 subr == DIF_SUBR_COPYOUTSTR) {
9161 9208 dp->dtdo_destructive = 1;
9162 9209 }
9163 9210
9164 9211 if (subr == DIF_SUBR_GETF) {
9165 9212 /*
9166 9213 * If we have a getf() we need to record that
9167 9214 * in our state. Note that our state can be
9168 9215 * NULL if this is a helper -- but in that
9169 9216 * case, the call to getf() is itself illegal,
9170 9217 * and will be caught (slightly later) when
9171 9218 * the helper is validated.
9172 9219 */
9173 9220 if (vstate->dtvs_state != NULL)
9174 9221 vstate->dtvs_state->dts_getf++;
9175 9222 }
9176 9223
9177 9224 break;
9178 9225 case DIF_OP_PUSHTR:
9179 9226 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9180 9227 err += efunc(pc, "invalid ref type %u\n", type);
9181 9228 if (r2 >= nregs)
9182 9229 err += efunc(pc, "invalid register %u\n", r2);
9183 9230 if (rs >= nregs)
9184 9231 err += efunc(pc, "invalid register %u\n", rs);
9185 9232 break;
9186 9233 case DIF_OP_PUSHTV:
9187 9234 if (type != DIF_TYPE_CTF)
9188 9235 err += efunc(pc, "invalid val type %u\n", type);
9189 9236 if (r2 >= nregs)
9190 9237 err += efunc(pc, "invalid register %u\n", r2);
9191 9238 if (rs >= nregs)
9192 9239 err += efunc(pc, "invalid register %u\n", rs);
9193 9240 break;
9194 9241 default:
9195 9242 err += efunc(pc, "invalid opcode %u\n",
9196 9243 DIF_INSTR_OP(instr));
9197 9244 }
9198 9245 }
9199 9246
9200 9247 if (dp->dtdo_len != 0 &&
9201 9248 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9202 9249 err += efunc(dp->dtdo_len - 1,
9203 9250 "expected 'ret' as last DIF instruction\n");
9204 9251 }
9205 9252
9206 - if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
9253 + if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
9207 9254 /*
9208 9255 * If we're not returning by reference, the size must be either
9209 9256 * 0 or the size of one of the base types.
9210 9257 */
9211 9258 switch (dp->dtdo_rtype.dtdt_size) {
9212 9259 case 0:
9213 9260 case sizeof (uint8_t):
9214 9261 case sizeof (uint16_t):
9215 9262 case sizeof (uint32_t):
9216 9263 case sizeof (uint64_t):
9217 9264 break;
9218 9265
9219 9266 default:
9220 9267 err += efunc(dp->dtdo_len - 1, "bad return size\n");
9221 9268 }
9222 9269 }
9223 9270
9224 9271 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9225 9272 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9226 9273 dtrace_diftype_t *vt, *et;
9227 9274 uint_t id, ndx;
9228 9275
9229 9276 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
9230 9277 v->dtdv_scope != DIFV_SCOPE_THREAD &&
9231 9278 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
9232 9279 err += efunc(i, "unrecognized variable scope %d\n",
9233 9280 v->dtdv_scope);
9234 9281 break;
9235 9282 }
9236 9283
9237 9284 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
9238 9285 v->dtdv_kind != DIFV_KIND_SCALAR) {
9239 9286 err += efunc(i, "unrecognized variable type %d\n",
9240 9287 v->dtdv_kind);
9241 9288 break;
9242 9289 }
9243 9290
9244 9291 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
9245 9292 err += efunc(i, "%d exceeds variable id limit\n", id);
9246 9293 break;
9247 9294 }
9248 9295
9249 9296 if (id < DIF_VAR_OTHER_UBASE)
9250 9297 continue;
9251 9298
9252 9299 /*
9253 9300 * For user-defined variables, we need to check that this
9254 9301 * definition is identical to any previous definition that we
9255 9302 * encountered.
9256 9303 */
9257 9304 ndx = id - DIF_VAR_OTHER_UBASE;
9258 9305
9259 9306 switch (v->dtdv_scope) {
9260 9307 case DIFV_SCOPE_GLOBAL:
9261 9308 if (ndx < vstate->dtvs_nglobals) {
9262 9309 dtrace_statvar_t *svar;
9263 9310
9264 9311 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
9265 9312 existing = &svar->dtsv_var;
9266 9313 }
9267 9314
9268 9315 break;
9269 9316
9270 9317 case DIFV_SCOPE_THREAD:
9271 9318 if (ndx < vstate->dtvs_ntlocals)
9272 9319 existing = &vstate->dtvs_tlocals[ndx];
9273 9320 break;
9274 9321
9275 9322 case DIFV_SCOPE_LOCAL:
9276 9323 if (ndx < vstate->dtvs_nlocals) {
9277 9324 dtrace_statvar_t *svar;
9278 9325
9279 9326 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
9280 9327 existing = &svar->dtsv_var;
9281 9328 }
9282 9329
9283 9330 break;
9284 9331 }
9285 9332
9286 9333 vt = &v->dtdv_type;
9287 9334
9288 9335 if (vt->dtdt_flags & DIF_TF_BYREF) {
9289 9336 if (vt->dtdt_size == 0) {
9290 9337 err += efunc(i, "zero-sized variable\n");
9291 9338 break;
9292 9339 }
9293 9340
9294 9341 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
9295 9342 vt->dtdt_size > dtrace_global_maxsize) {
9296 9343 err += efunc(i, "oversized by-ref global\n");
9297 9344 break;
9298 9345 }
9299 9346 }
9300 9347
9301 9348 if (existing == NULL || existing->dtdv_id == 0)
9302 9349 continue;
9303 9350
9304 9351 ASSERT(existing->dtdv_id == v->dtdv_id);
9305 9352 ASSERT(existing->dtdv_scope == v->dtdv_scope);
9306 9353
9307 9354 if (existing->dtdv_kind != v->dtdv_kind)
9308 9355 err += efunc(i, "%d changed variable kind\n", id);
9309 9356
9310 9357 et = &existing->dtdv_type;
9311 9358
9312 9359 if (vt->dtdt_flags != et->dtdt_flags) {
9313 9360 err += efunc(i, "%d changed variable type flags\n", id);
9314 9361 break;
9315 9362 }
9316 9363
9317 9364 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
9318 9365 err += efunc(i, "%d changed variable type size\n", id);
9319 9366 break;
9320 9367 }
9321 9368 }
9322 9369
9323 9370 return (err);
9324 9371 }
9325 9372
9326 9373 /*
9327 9374 * Validate a DTrace DIF object that is to be used as a helper. Helpers
9328 9375 * are much more constrained than normal DIFOs. Specifically, they may
9329 9376 * not:
9330 9377 *
9331 9378 * 1. Make calls to subroutines other than copyin(), copyinstr() or
9332 9379 * miscellaneous string routines
9333 9380 * 2. Access DTrace variables other than the args[] array, and the
9334 9381 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
9335 9382 * 3. Have thread-local variables.
9336 9383 * 4. Have dynamic variables.
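 *
 * These constraints reflect the fact that helpers are supplied by
 * arbitrary (and possibly unprivileged) user processes: a helper must
 * not be able to read kernel state or to consume framework-managed
 * dynamic variable storage.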
9337 9384 */
9338 9385 static int
9339 9386 dtrace_difo_validate_helper(dtrace_difo_t *dp)
9340 9387 {
9341 9388 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9342 9389 int err = 0;
9343 9390 uint_t pc;
9344 9391
9345 9392 for (pc = 0; pc < dp->dtdo_len; pc++) {
9346 9393 dif_instr_t instr = dp->dtdo_buf[pc];
9347 9394
9348 9395 uint_t v = DIF_INSTR_VAR(instr);
9349 9396 uint_t subr = DIF_INSTR_SUBR(instr);
9350 9397 uint_t op = DIF_INSTR_OP(instr);
9351 9398
9352 9399 switch (op) {
9353 9400 case DIF_OP_OR:
9354 9401 case DIF_OP_XOR:
9355 9402 case DIF_OP_AND:
9356 9403 case DIF_OP_SLL:
9357 9404 case DIF_OP_SRL:
9358 9405 case DIF_OP_SRA:
9359 9406 case DIF_OP_SUB:
9360 9407 case DIF_OP_ADD:
9361 9408 case DIF_OP_MUL:
9362 9409 case DIF_OP_SDIV:
9363 9410 case DIF_OP_UDIV:
9364 9411 case DIF_OP_SREM:
9365 9412 case DIF_OP_UREM:
9366 9413 case DIF_OP_COPYS:
9367 9414 case DIF_OP_NOT:
9368 9415 case DIF_OP_MOV:
9369 9416 case DIF_OP_RLDSB:
9370 9417 case DIF_OP_RLDSH:
9371 9418 case DIF_OP_RLDSW:
9372 9419 case DIF_OP_RLDUB:
9373 9420 case DIF_OP_RLDUH:
9374 9421 case DIF_OP_RLDUW:
9375 9422 case DIF_OP_RLDX:
9376 9423 case DIF_OP_ULDSB:
9377 9424 case DIF_OP_ULDSH:
9378 9425 case DIF_OP_ULDSW:
9379 9426 case DIF_OP_ULDUB:
9380 9427 case DIF_OP_ULDUH:
9381 9428 case DIF_OP_ULDUW:
9382 9429 case DIF_OP_ULDX:
9383 9430 case DIF_OP_STB:
9384 9431 case DIF_OP_STH:
9385 9432 case DIF_OP_STW:
9386 9433 case DIF_OP_STX:
9387 9434 case DIF_OP_ALLOCS:
9388 9435 case DIF_OP_CMP:
9389 9436 case DIF_OP_SCMP:
9390 9437 case DIF_OP_TST:
9391 9438 case DIF_OP_BA:
9392 9439 case DIF_OP_BE:
9393 9440 case DIF_OP_BNE:
9394 9441 case DIF_OP_BG:
9395 9442 case DIF_OP_BGU:
9396 9443 case DIF_OP_BGE:
9397 9444 case DIF_OP_BGEU:
9398 9445 case DIF_OP_BL:
9399 9446 case DIF_OP_BLU:
9400 9447 case DIF_OP_BLE:
9401 9448 case DIF_OP_BLEU:
9402 9449 case DIF_OP_RET:
9403 9450 case DIF_OP_NOP:
9404 9451 case DIF_OP_POPTS:
9405 9452 case DIF_OP_FLUSHTS:
9406 9453 case DIF_OP_SETX:
9407 9454 case DIF_OP_SETS:
9408 9455 case DIF_OP_LDGA:
9409 9456 case DIF_OP_LDLS:
9410 9457 case DIF_OP_STGS:
9411 9458 case DIF_OP_STLS:
9412 9459 case DIF_OP_PUSHTR:
9413 9460 case DIF_OP_PUSHTV:
9414 9461 break;
9415 9462
9416 9463 case DIF_OP_LDGS:
9417 9464 if (v >= DIF_VAR_OTHER_UBASE)
9418 9465 break;
9419 9466
9420 9467 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
9421 9468 break;
9422 9469
9423 9470 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
9424 9471 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
9425 9472 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
9426 9473 v == DIF_VAR_UID || v == DIF_VAR_GID)
9427 9474 break;
9428 9475
9429 9476 err += efunc(pc, "illegal variable %u\n", v);
9430 9477 break;
9431 9478
9432 9479 case DIF_OP_LDTA:
9433 9480 case DIF_OP_LDTS:
9434 9481 case DIF_OP_LDGAA:
9435 9482 case DIF_OP_LDTAA:
9436 9483 err += efunc(pc, "illegal dynamic variable load\n");
9437 9484 break;
9438 9485
9439 9486 case DIF_OP_STTS:
9440 9487 case DIF_OP_STGAA:
9441 9488 case DIF_OP_STTAA:
9442 9489 err += efunc(pc, "illegal dynamic variable store\n");
9443 9490 break;
9444 9491
9445 9492 case DIF_OP_CALL:
9446 9493 if (subr == DIF_SUBR_ALLOCA ||
9447 9494 subr == DIF_SUBR_BCOPY ||
9448 9495 subr == DIF_SUBR_COPYIN ||
9449 9496 subr == DIF_SUBR_COPYINTO ||
9450 9497 subr == DIF_SUBR_COPYINSTR ||
9451 9498 subr == DIF_SUBR_INDEX ||
9452 9499 subr == DIF_SUBR_INET_NTOA ||
9453 9500 subr == DIF_SUBR_INET_NTOA6 ||
9454 9501 subr == DIF_SUBR_INET_NTOP ||
9455 9502 subr == DIF_SUBR_JSON ||
9456 9503 subr == DIF_SUBR_LLTOSTR ||
9457 9504 subr == DIF_SUBR_STRTOLL ||
9458 9505 subr == DIF_SUBR_RINDEX ||
9459 9506 subr == DIF_SUBR_STRCHR ||
9460 9507 subr == DIF_SUBR_STRJOIN ||
9461 9508 subr == DIF_SUBR_STRRCHR ||
9462 9509 subr == DIF_SUBR_STRSTR ||
9463 9510 subr == DIF_SUBR_HTONS ||
9464 9511 subr == DIF_SUBR_HTONL ||
9465 9512 subr == DIF_SUBR_HTONLL ||
9466 9513 subr == DIF_SUBR_NTOHS ||
9467 9514 subr == DIF_SUBR_NTOHL ||
9468 9515 subr == DIF_SUBR_NTOHLL)
9469 9516 break;
9470 9517
9471 9518 err += efunc(pc, "invalid subr %u\n", subr);
9472 9519 break;
9473 9520
9474 9521 default:
9475 9522 err += efunc(pc, "invalid opcode %u\n",
9476 9523 DIF_INSTR_OP(instr));
9477 9524 }
9478 9525 }
9479 9526
9480 9527 return (err);
9481 9528 }
9482 9529
9483 9530 /*
9484 9531 * Returns 1 if the expression in the DIF object can be cached on a per-thread
9485 9532 * basis; 0 if not.
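 *
 * The motivating case is a predicate such as /pid == 1234/ or
 * /execname == "foo"/: its outcome cannot change within a thread, so it
 * may be evaluated once and remembered per-thread via the predicate
 * cache ID (see dtrace_predicate_create(), below).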
9486 9533 */
9487 9534 static int
9488 9535 dtrace_difo_cacheable(dtrace_difo_t *dp)
9489 9536 {
9490 9537 int i;
9491 9538
9492 9539 if (dp == NULL)
9493 9540 return (0);
9494 9541
9495 9542 for (i = 0; i < dp->dtdo_varlen; i++) {
9496 9543 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9497 9544
9498 9545 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9499 9546 continue;
9500 9547
9501 9548 switch (v->dtdv_id) {
9502 9549 case DIF_VAR_CURTHREAD:
9503 9550 case DIF_VAR_PID:
9504 9551 case DIF_VAR_TID:
9505 9552 case DIF_VAR_EXECNAME:
9506 9553 case DIF_VAR_ZONENAME:
9507 9554 break;
9508 9555
9509 9556 default:
9510 9557 return (0);
9511 9558 }
9512 9559 }
9513 9560
9514 9561 /*
9515 9562 * This DIF object may be cacheable. Now we need to look for any
9516 9563 * array loading instructions, any memory loading instructions, or
9517 9564 * any stores to thread-local variables.
9518 9565 */
9519 9566 for (i = 0; i < dp->dtdo_len; i++) {
9520 9567 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9521 9568
9522 9569 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9523 9570 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9524 9571 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9525 9572 op == DIF_OP_LDGA || op == DIF_OP_STTS)
9526 9573 return (0);
9527 9574 }
9528 9575
9529 9576 return (1);
9530 9577 }
9531 9578
9532 9579 static void
9533 9580 dtrace_difo_hold(dtrace_difo_t *dp)
9534 9581 {
9535 9582 int i;
9536 9583
9537 9584 ASSERT(MUTEX_HELD(&dtrace_lock));
9538 9585
9539 9586 dp->dtdo_refcnt++;
9540 9587 ASSERT(dp->dtdo_refcnt != 0);
9541 9588
9542 9589 /*
9543 9590 * We need to check this DIF object for references to the variable
9544 9591 * DIF_VAR_VTIMESTAMP.
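 * Virtual time accounting imposes a cost on every context switch, so
 * it is enabled only while at least one held DIFO references
 * vtimestamp; the matching decrement is in dtrace_difo_release().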
9545 9592 */
9546 9593 for (i = 0; i < dp->dtdo_varlen; i++) {
9547 9594 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9548 9595
9549 9596 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9550 9597 continue;
9551 9598
9552 9599 if (dtrace_vtime_references++ == 0)
9553 9600 dtrace_vtime_enable();
9554 9601 }
9555 9602 }
9556 9603
9557 9604 /*
9558 9605 * This routine calculates the dynamic variable chunksize for a given DIF
9559 9606 * object. The calculation is not fool-proof, and can probably be tricked by
9560 9607 * malicious DIF -- but it works for all compiler-generated DIF. Because this
9561 9608 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9562 9609 * if a dynamic variable size exceeds the chunksize.
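 *
 * As a rough worked example: a global associative array element such as
 * x[pid, execname] pushes two tuple keys -- the integer by value (no
 * key data) and the string by reference (dtrace_strsize_default bytes,
 * unless a preceding setx supplied an explicit size) -- so the
 * candidate chunksize becomes the dynvar header, one dtrace_key_t per
 * key beyond the first, the rounded-up key data, and the stored
 * value's size from the variable table, rounded up to a uint64_t
 * boundary.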
9563 9610 */
9564 9611 static void
9565 9612 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9566 9613 {
9567 9614 uint64_t sval;
9568 9615 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9569 9616 const dif_instr_t *text = dp->dtdo_buf;
9570 9617 uint_t pc, srd = 0;
9571 9618 uint_t ttop = 0;
9572 9619 size_t size, ksize;
9573 9620 uint_t id, i;
9574 9621
9575 9622 for (pc = 0; pc < dp->dtdo_len; pc++) {
9576 9623 dif_instr_t instr = text[pc];
9577 9624 uint_t op = DIF_INSTR_OP(instr);
9578 9625 uint_t rd = DIF_INSTR_RD(instr);
9579 9626 uint_t r1 = DIF_INSTR_R1(instr);
9580 9627 uint_t nkeys = 0;
9581 9628 uchar_t scope;
9582 9629
9583 9630 dtrace_key_t *key = tupregs;
9584 9631
9585 9632 switch (op) {
9586 9633 case DIF_OP_SETX:
9587 9634 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9588 9635 srd = rd;
9589 9636 continue;
9590 9637
9591 9638 case DIF_OP_STTS:
9592 9639 key = &tupregs[DIF_DTR_NREGS];
9593 9640 key[0].dttk_size = 0;
9594 9641 key[1].dttk_size = 0;
9595 9642 nkeys = 2;
9596 9643 scope = DIFV_SCOPE_THREAD;
9597 9644 break;
9598 9645
9599 9646 case DIF_OP_STGAA:
9600 9647 case DIF_OP_STTAA:
9601 9648 nkeys = ttop;
9602 9649
9603 9650 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9604 9651 key[nkeys++].dttk_size = 0;
9605 9652
9606 9653 key[nkeys++].dttk_size = 0;
9607 9654
9608 9655 if (op == DIF_OP_STTAA) {
9609 9656 scope = DIFV_SCOPE_THREAD;
9610 9657 } else {
9611 9658 scope = DIFV_SCOPE_GLOBAL;
9612 9659 }
9613 9660
9614 9661 break;
9615 9662
9616 9663 case DIF_OP_PUSHTR:
9617 9664 if (ttop == DIF_DTR_NREGS)
9618 9665 return;
9619 9666
9620 9667 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9621 9668 /*
9622 9669 * If the register for the size of the "pushtr"
9623 9670 * is %r0 (or the value is 0) and the type is
9624 9671 * a string, we'll use the system-wide default
9625 9672 * string size.
9626 9673 */
9627 9674 tupregs[ttop++].dttk_size =
9628 9675 dtrace_strsize_default;
9629 9676 } else {
9630 9677 if (srd == 0)
9631 9678 return;
9632 9679
9633 9680 tupregs[ttop++].dttk_size = sval;
9634 9681 }
9635 9682
9636 9683 break;
9637 9684
9638 9685 case DIF_OP_PUSHTV:
9639 9686 if (ttop == DIF_DTR_NREGS)
9640 9687 return;
9641 9688
9642 9689 tupregs[ttop++].dttk_size = 0;
9643 9690 break;
9644 9691
9645 9692 case DIF_OP_FLUSHTS:
9646 9693 ttop = 0;
9647 9694 break;
9648 9695
9649 9696 case DIF_OP_POPTS:
9650 9697 if (ttop != 0)
9651 9698 ttop--;
9652 9699 break;
9653 9700 }
9654 9701
9655 9702 sval = 0;
9656 9703 srd = 0;
9657 9704
9658 9705 if (nkeys == 0)
9659 9706 continue;
9660 9707
9661 9708 /*
9662 9709 * We have a dynamic variable allocation; calculate its size.
9663 9710 */
9664 9711 for (ksize = 0, i = 0; i < nkeys; i++)
9665 9712 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9666 9713
9667 9714 size = sizeof (dtrace_dynvar_t);
9668 9715 size += sizeof (dtrace_key_t) * (nkeys - 1);
9669 9716 size += ksize;
9670 9717
9671 9718 /*
9672 9719 * Now we need to determine the size of the stored data.
9673 9720 */
9674 9721 id = DIF_INSTR_VAR(instr);
9675 9722
9676 9723 for (i = 0; i < dp->dtdo_varlen; i++) {
9677 9724 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9678 9725
9679 9726 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9680 9727 size += v->dtdv_type.dtdt_size;
9681 9728 break;
9682 9729 }
9683 9730 }
9684 9731
9685 9732 if (i == dp->dtdo_varlen)
9686 9733 return;
9687 9734
9688 9735 /*
9689 9736 * We have the size. If this is larger than the chunk size
9690 9737 * for our dynamic variable state, reset the chunk size.
9691 9738 */
9692 9739 size = P2ROUNDUP(size, sizeof (uint64_t));
9693 9740
9694 9741 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9695 9742 vstate->dtvs_dynvars.dtds_chunksize = size;
9696 9743 }
9697 9744 }
9698 9745
9699 9746 static void
9700 9747 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9701 9748 {
9702 9749 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9703 9750 uint_t id;
9704 9751
9705 9752 ASSERT(MUTEX_HELD(&dtrace_lock));
9706 9753 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9707 9754
9708 9755 for (i = 0; i < dp->dtdo_varlen; i++) {
9709 9756 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9710 9757 dtrace_statvar_t *svar, ***svarp;
9711 9758 size_t dsize = 0;
9712 9759 uint8_t scope = v->dtdv_scope;
9713 9760 int *np;
9714 9761
9715 9762 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9716 9763 continue;
9717 9764
9718 9765 id -= DIF_VAR_OTHER_UBASE;
9719 9766
9720 9767 switch (scope) {
9721 9768 case DIFV_SCOPE_THREAD:
9722 9769 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9723 9770 dtrace_difv_t *tlocals;
9724 9771
9725 9772 if ((ntlocals = (otlocals << 1)) == 0)
9726 9773 ntlocals = 1;
9727 9774
9728 9775 osz = otlocals * sizeof (dtrace_difv_t);
9729 9776 nsz = ntlocals * sizeof (dtrace_difv_t);
9730 9777
9731 9778 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9732 9779
9733 9780 if (osz != 0) {
9734 9781 bcopy(vstate->dtvs_tlocals,
9735 9782 tlocals, osz);
9736 9783 kmem_free(vstate->dtvs_tlocals, osz);
9737 9784 }
9738 9785
9739 9786 vstate->dtvs_tlocals = tlocals;
9740 9787 vstate->dtvs_ntlocals = ntlocals;
9741 9788 }
9742 9789
9743 9790 vstate->dtvs_tlocals[id] = *v;
9744 9791 continue;
9745 9792
9746 9793 case DIFV_SCOPE_LOCAL:
9747 9794 np = &vstate->dtvs_nlocals;
9748 9795 svarp = &vstate->dtvs_locals;
9749 9796
9750 9797 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9751 9798 dsize = NCPU * (v->dtdv_type.dtdt_size +
9752 9799 sizeof (uint64_t));
9753 9800 else
9754 9801 dsize = NCPU * sizeof (uint64_t);
9755 9802
9756 9803 break;
9757 9804
9758 9805 case DIFV_SCOPE_GLOBAL:
9759 9806 np = &vstate->dtvs_nglobals;
9760 9807 svarp = &vstate->dtvs_globals;
9761 9808
9762 9809 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9763 9810 dsize = v->dtdv_type.dtdt_size +
9764 9811 sizeof (uint64_t);
9765 9812
9766 9813 break;
9767 9814
9768 9815 default:
9769 9816 ASSERT(0);
9770 9817 }
9771 9818
9772 9819 while (id >= (oldsvars = *np)) {
9773 9820 dtrace_statvar_t **statics;
9774 9821 int newsvars, oldsize, newsize;
9775 9822
9776 9823 if ((newsvars = (oldsvars << 1)) == 0)
9777 9824 newsvars = 1;
9778 9825
9779 9826 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9780 9827 newsize = newsvars * sizeof (dtrace_statvar_t *);
9781 9828
9782 9829 statics = kmem_zalloc(newsize, KM_SLEEP);
9783 9830
9784 9831 if (oldsize != 0) {
9785 9832 bcopy(*svarp, statics, oldsize);
9786 9833 kmem_free(*svarp, oldsize);
9787 9834 }
9788 9835
9789 9836 *svarp = statics;
9790 9837 *np = newsvars;
9791 9838 }
9792 9839
9793 9840 if ((svar = (*svarp)[id]) == NULL) {
9794 9841 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9795 9842 svar->dtsv_var = *v;
9796 9843
9797 9844 if ((svar->dtsv_size = dsize) != 0) {
9798 9845 svar->dtsv_data = (uint64_t)(uintptr_t)
9799 9846 kmem_zalloc(dsize, KM_SLEEP);
9800 9847 }
9801 9848
9802 9849 (*svarp)[id] = svar;
9803 9850 }
9804 9851
9805 9852 svar->dtsv_refcnt++;
9806 9853 }
9807 9854
9808 9855 dtrace_difo_chunksize(dp, vstate);
9809 9856 dtrace_difo_hold(dp);
9810 9857 }
9811 9858
9812 9859 static dtrace_difo_t *
9813 9860 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9814 9861 {
9815 9862 dtrace_difo_t *new;
9816 9863 size_t sz;
9817 9864
9818 9865 ASSERT(dp->dtdo_buf != NULL);
9819 9866 ASSERT(dp->dtdo_refcnt != 0);
9820 9867
9821 9868 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9822 9869
9823 9870 ASSERT(dp->dtdo_buf != NULL);
9824 9871 sz = dp->dtdo_len * sizeof (dif_instr_t);
9825 9872 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9826 9873 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9827 9874 new->dtdo_len = dp->dtdo_len;
9828 9875
9829 9876 if (dp->dtdo_strtab != NULL) {
9830 9877 ASSERT(dp->dtdo_strlen != 0);
9831 9878 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9832 9879 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9833 9880 new->dtdo_strlen = dp->dtdo_strlen;
9834 9881 }
9835 9882
9836 9883 if (dp->dtdo_inttab != NULL) {
9837 9884 ASSERT(dp->dtdo_intlen != 0);
9838 9885 sz = dp->dtdo_intlen * sizeof (uint64_t);
9839 9886 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9840 9887 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9841 9888 new->dtdo_intlen = dp->dtdo_intlen;
9842 9889 }
9843 9890
9844 9891 if (dp->dtdo_vartab != NULL) {
9845 9892 ASSERT(dp->dtdo_varlen != 0);
9846 9893 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9847 9894 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9848 9895 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9849 9896 new->dtdo_varlen = dp->dtdo_varlen;
9850 9897 }
9851 9898
9852 9899 dtrace_difo_init(new, vstate);
9853 9900 return (new);
9854 9901 }
9855 9902
9856 9903 static void
9857 9904 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9858 9905 {
9859 9906 int i;
9860 9907
9861 9908 ASSERT(dp->dtdo_refcnt == 0);
9862 9909
9863 9910 for (i = 0; i < dp->dtdo_varlen; i++) {
9864 9911 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9865 9912 dtrace_statvar_t *svar, **svarp;
9866 9913 uint_t id;
9867 9914 uint8_t scope = v->dtdv_scope;
9868 9915 int *np;
9869 9916
9870 9917 switch (scope) {
9871 9918 case DIFV_SCOPE_THREAD:
9872 9919 continue;
9873 9920
9874 9921 case DIFV_SCOPE_LOCAL:
9875 9922 np = &vstate->dtvs_nlocals;
9876 9923 svarp = vstate->dtvs_locals;
9877 9924 break;
9878 9925
9879 9926 case DIFV_SCOPE_GLOBAL:
9880 9927 np = &vstate->dtvs_nglobals;
9881 9928 svarp = vstate->dtvs_globals;
9882 9929 break;
9883 9930
9884 9931 default:
9885 9932 ASSERT(0);
9886 9933 }
9887 9934
9888 9935 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9889 9936 continue;
9890 9937
9891 9938 id -= DIF_VAR_OTHER_UBASE;
9892 9939 ASSERT(id < *np);
9893 9940
9894 9941 svar = svarp[id];
9895 9942 ASSERT(svar != NULL);
9896 9943 ASSERT(svar->dtsv_refcnt > 0);
9897 9944
9898 9945 if (--svar->dtsv_refcnt > 0)
9899 9946 continue;
9900 9947
9901 9948 if (svar->dtsv_size != 0) {
9902 9949 ASSERT(svar->dtsv_data != NULL);
9903 9950 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9904 9951 svar->dtsv_size);
9905 9952 }
9906 9953
9907 9954 kmem_free(svar, sizeof (dtrace_statvar_t));
9908 9955 svarp[id] = NULL;
9909 9956 }
9910 9957
9911 9958 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9912 9959 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9913 9960 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9914 9961 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9915 9962
9916 9963 kmem_free(dp, sizeof (dtrace_difo_t));
9917 9964 }
9918 9965
9919 9966 static void
9920 9967 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9921 9968 {
9922 9969 int i;
9923 9970
9924 9971 ASSERT(MUTEX_HELD(&dtrace_lock));
9925 9972 ASSERT(dp->dtdo_refcnt != 0);
9926 9973
9927 9974 for (i = 0; i < dp->dtdo_varlen; i++) {
9928 9975 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9929 9976
9930 9977 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9931 9978 continue;
9932 9979
9933 9980 ASSERT(dtrace_vtime_references > 0);
9934 9981 if (--dtrace_vtime_references == 0)
9935 9982 dtrace_vtime_disable();
9936 9983 }
9937 9984
9938 9985 if (--dp->dtdo_refcnt == 0)
9939 9986 dtrace_difo_destroy(dp, vstate);
9940 9987 }
9941 9988
9942 9989 /*
9943 9990 * DTrace Format Functions
9944 9991 */
9945 9992 static uint16_t
9946 9993 dtrace_format_add(dtrace_state_t *state, char *str)
9947 9994 {
9948 9995 char *fmt, **new;
9949 9996 uint16_t ndx, len = strlen(str) + 1;
9950 9997
9951 9998 fmt = kmem_zalloc(len, KM_SLEEP);
9952 9999 bcopy(str, fmt, len);
9953 10000
9954 10001 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9955 10002 if (state->dts_formats[ndx] == NULL) {
9956 10003 state->dts_formats[ndx] = fmt;
9957 10004 return (ndx + 1);
9958 10005 }
9959 10006 }
9960 10007
9961 10008 if (state->dts_nformats == USHRT_MAX) {
9962 10009 /*
9963 10010 * This is only likely if a denial-of-service attack is being
9964 10011 * attempted. As such, it's okay to fail silently here.
9965 10012 */
9966 10013 kmem_free(fmt, len);
9967 10014 return (0);
9968 10015 }
9969 10016
9970 10017 /*
9971 10018 * For simplicity, we always resize the formats array to be exactly the
9972 10019 * number of formats.
9973 10020 */
9974 10021 ndx = state->dts_nformats++;
9975 10022 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9976 10023
9977 10024 if (state->dts_formats != NULL) {
9978 10025 ASSERT(ndx != 0);
9979 10026 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9980 10027 kmem_free(state->dts_formats, ndx * sizeof (char *));
9981 10028 }
9982 10029
9983 10030 state->dts_formats = new;
9984 10031 state->dts_formats[ndx] = fmt;
9985 10032
9986 10033 return (ndx + 1);
9987 10034 }
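
/*
 * Note that format handles are 1-based: dtrace_format_add() returns
 * ndx + 1, zero is reserved to mean "no format" (or allocation
 * failure), and dtrace_format_remove() below accordingly indexes
 * dts_formats with format - 1.
 */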
9988 10035
9989 10036 static void
9990 10037 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9991 10038 {
9992 10039 char *fmt;
9993 10040
9994 10041 ASSERT(state->dts_formats != NULL);
9995 10042 ASSERT(format <= state->dts_nformats);
9996 10043 ASSERT(state->dts_formats[format - 1] != NULL);
9997 10044
9998 10045 fmt = state->dts_formats[format - 1];
9999 10046 kmem_free(fmt, strlen(fmt) + 1);
10000 10047 state->dts_formats[format - 1] = NULL;
10001 10048 }
10002 10049
10003 10050 static void
10004 10051 dtrace_format_destroy(dtrace_state_t *state)
10005 10052 {
10006 10053 int i;
10007 10054
10008 10055 if (state->dts_nformats == 0) {
10009 10056 ASSERT(state->dts_formats == NULL);
10010 10057 return;
10011 10058 }
10012 10059
10013 10060 ASSERT(state->dts_formats != NULL);
10014 10061
10015 10062 for (i = 0; i < state->dts_nformats; i++) {
10016 10063 char *fmt = state->dts_formats[i];
10017 10064
10018 10065 if (fmt == NULL)
10019 10066 continue;
10020 10067
10021 10068 kmem_free(fmt, strlen(fmt) + 1);
10022 10069 }
10023 10070
10024 10071 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
10025 10072 state->dts_nformats = 0;
10026 10073 state->dts_formats = NULL;
10027 10074 }
10028 10075
10029 10076 /*
10030 10077 * DTrace Predicate Functions
10031 10078 */
10032 10079 static dtrace_predicate_t *
10033 10080 dtrace_predicate_create(dtrace_difo_t *dp)
10034 10081 {
10035 10082 dtrace_predicate_t *pred;
10036 10083
10037 10084 ASSERT(MUTEX_HELD(&dtrace_lock));
10038 10085 ASSERT(dp->dtdo_refcnt != 0);
10039 10086
10040 10087 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10041 10088 pred->dtp_difo = dp;
10042 10089 pred->dtp_refcnt = 1;
10043 10090
10044 10091 if (!dtrace_difo_cacheable(dp))
10045 10092 return (pred);
10046 10093
10047 10094 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10048 10095 /*
10049 10096 * This is only theoretically possible -- we have had 2^32
10050 10097 * cacheable predicates on this machine. We cannot allow any
10051 10098 * more predicates to become cacheable: as unlikely as it is,
10052 10099 * there may be a thread caching a (now stale) predicate cache
10053 10100 * ID. (N.B.: the temptation is being successfully resisted to
10054 10101 * have this cmn_err() "Holy shit -- we executed this code!")
10055 10102 */
10056 10103 return (pred);
10057 10104 }
10058 10105
10059 10106 pred->dtp_cacheid = dtrace_predcache_id++;
10060 10107
10061 10108 return (pred);
10062 10109 }
10063 10110
10064 10111 static void
10065 10112 dtrace_predicate_hold(dtrace_predicate_t *pred)
10066 10113 {
10067 10114 ASSERT(MUTEX_HELD(&dtrace_lock));
10068 10115 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10069 10116 ASSERT(pred->dtp_refcnt > 0);
10070 10117
10071 10118 pred->dtp_refcnt++;
10072 10119 }
10073 10120
10074 10121 static void
10075 10122 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10076 10123 {
10077 10124 dtrace_difo_t *dp = pred->dtp_difo;
10078 10125
10079 10126 ASSERT(MUTEX_HELD(&dtrace_lock));
10080 10127 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10081 10128 ASSERT(pred->dtp_refcnt > 0);
10082 10129
10083 10130 if (--pred->dtp_refcnt == 0) {
10084 10131 dtrace_difo_release(pred->dtp_difo, vstate);
10085 10132 kmem_free(pred, sizeof (dtrace_predicate_t));
10086 10133 }
10087 10134 }
10088 10135
10089 10136 /*
10090 10137 * DTrace Action Description Functions
10091 10138 */
10092 10139 static dtrace_actdesc_t *
10093 10140 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10094 10141 uint64_t uarg, uint64_t arg)
10095 10142 {
10096 10143 dtrace_actdesc_t *act;
10097 10144
10098 10145 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
10099 10146 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
10100 10147
10101 10148 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10102 10149 act->dtad_kind = kind;
10103 10150 act->dtad_ntuple = ntuple;
10104 10151 act->dtad_uarg = uarg;
10105 10152 act->dtad_arg = arg;
10106 10153 act->dtad_refcnt = 1;
10107 10154
10108 10155 return (act);
10109 10156 }
10110 10157
10111 10158 static void
10112 10159 dtrace_actdesc_hold(dtrace_actdesc_t *act)
10113 10160 {
10114 10161 ASSERT(act->dtad_refcnt >= 1);
10115 10162 act->dtad_refcnt++;
10116 10163 }
10117 10164
10118 10165 static void
10119 10166 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10120 10167 {
10121 10168 dtrace_actkind_t kind = act->dtad_kind;
10122 10169 dtrace_difo_t *dp;
10123 10170
10124 10171 ASSERT(act->dtad_refcnt >= 1);
10125 10172
10126 10173 if (--act->dtad_refcnt != 0)
10127 10174 return;
10128 10175
10129 10176 if ((dp = act->dtad_difo) != NULL)
10130 10177 dtrace_difo_release(dp, vstate);
10131 10178
10132 10179 if (DTRACEACT_ISPRINTFLIKE(kind)) {
10133 10180 char *str = (char *)(uintptr_t)act->dtad_arg;
10134 10181
10135 10182 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10136 10183 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
10137 10184
10138 10185 if (str != NULL)
10139 10186 kmem_free(str, strlen(str) + 1);
10140 10187 }
10141 10188
10142 10189 kmem_free(act, sizeof (dtrace_actdesc_t));
10143 10190 }
10144 10191
10145 10192 /*
10146 10193 * DTrace ECB Functions
10147 10194 */
10148 10195 static dtrace_ecb_t *
10149 10196 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
10150 10197 {
10151 10198 dtrace_ecb_t *ecb;
10152 10199 dtrace_epid_t epid;
10153 10200
10154 10201 ASSERT(MUTEX_HELD(&dtrace_lock));
10155 10202
10156 10203 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
10157 10204 ecb->dte_predicate = NULL;
10158 10205 ecb->dte_probe = probe;
10159 10206
10160 10207 /*
10161 10208 * The default size is the size of the default action: recording
10162 10209 * the header.
10163 10210 */
10164 10211 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
10165 10212 ecb->dte_alignment = sizeof (dtrace_epid_t);
10166 10213
10167 10214 epid = state->dts_epid++;
10168 10215
10169 10216 if (epid - 1 >= state->dts_necbs) {
10170 10217 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
10171 10218 int necbs = state->dts_necbs << 1;
10172 10219
10173 10220 ASSERT(epid == state->dts_necbs + 1);
10174 10221
10175 10222 if (necbs == 0) {
10176 10223 ASSERT(oecbs == NULL);
10177 10224 necbs = 1;
10178 10225 }
10179 10226
10180 10227 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
10181 10228
10182 10229 if (oecbs != NULL)
10183 10230 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
10184 10231
10185 10232 dtrace_membar_producer();
10186 10233 state->dts_ecbs = ecbs;
10187 10234
10188 10235 if (oecbs != NULL) {
10189 10236 /*
10190 10237 * If this state is active, we must dtrace_sync()
10191 10238 * before we can free the old dts_ecbs array: we're
10192 10239 * coming in hot, and there may be active ring
10193 10240 * buffer processing (which indexes into the dts_ecbs
10194 10241 * array) on another CPU.
10195 10242 */
10196 10243 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
10197 10244 dtrace_sync();
10198 10245
10199 10246 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
10200 10247 }
10201 10248
10202 10249 dtrace_membar_producer();
10203 10250 state->dts_necbs = necbs;
10204 10251 }
10205 10252
10206 10253 ecb->dte_state = state;
10207 10254
10208 10255 ASSERT(state->dts_ecbs[epid - 1] == NULL);
10209 10256 dtrace_membar_producer();
10210 10257 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
10211 10258
10212 10259 return (ecb);
10213 10260 }
10214 10261
10215 10262 static int
10216 10263 dtrace_ecb_enable(dtrace_ecb_t *ecb)
10217 10264 {
10218 10265 dtrace_probe_t *probe = ecb->dte_probe;
10219 10266
10220 10267 ASSERT(MUTEX_HELD(&cpu_lock));
10221 10268 ASSERT(MUTEX_HELD(&dtrace_lock));
10222 10269 ASSERT(ecb->dte_next == NULL);
10223 10270
10224 10271 if (probe == NULL) {
10225 10272 /*
10226 10273 * This is the NULL probe -- there's nothing to do.
10227 10274 */
10228 10275 return (0);
10229 10276 }
10230 10277
10231 10278 if (probe->dtpr_ecb == NULL) {
10232 10279 dtrace_provider_t *prov = probe->dtpr_provider;
10233 10280
10234 10281 /*
10235 10282 * We're the first ECB on this probe.
10236 10283 */
10237 10284 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
10238 10285
10239 10286 if (ecb->dte_predicate != NULL)
10240 10287 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
10241 10288
10242 10289 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
10243 10290 probe->dtpr_id, probe->dtpr_arg));
10244 10291 } else {
10245 10292 /*
10246 10293 * This probe is already active. Swing the last pointer to
10247 10294 * point to the new ECB, and issue a dtrace_sync() to assure
10248 10295 * that all CPUs have seen the change.
10249 10296 */
10250 10297 ASSERT(probe->dtpr_ecb_last != NULL);
10251 10298 probe->dtpr_ecb_last->dte_next = ecb;
10252 10299 probe->dtpr_ecb_last = ecb;
10253 10300 probe->dtpr_predcache = 0;
10254 10301
10255 10302 dtrace_sync();
10256 10303 return (0);
10257 10304 }
10258 10305 }
10259 10306
10260 10307 static void
10261 10308 dtrace_ecb_resize(dtrace_ecb_t *ecb)
10262 10309 {
10263 10310 dtrace_action_t *act;
10264 10311 uint32_t curneeded = UINT32_MAX;
10265 10312 uint32_t aggbase = UINT32_MAX;
10266 10313
10267 10314 /*
10268 10315 * If we record anything, we always record the dtrace_rechdr_t. (And
10269 10316 * we always record it first.)
10270 10317 */
10271 10318 ecb->dte_size = sizeof (dtrace_rechdr_t);
10272 10319 ecb->dte_alignment = sizeof (dtrace_epid_t);
10273 10320
10274 10321 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10275 10322 dtrace_recdesc_t *rec = &act->dta_rec;
10276 10323 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
10277 10324
10278 10325 ecb->dte_alignment = MAX(ecb->dte_alignment,
10279 10326 rec->dtrd_alignment);
10280 10327
10281 10328 if (DTRACEACT_ISAGG(act->dta_kind)) {
10282 10329 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10283 10330
10284 10331 ASSERT(rec->dtrd_size != 0);
10285 10332 ASSERT(agg->dtag_first != NULL);
10286 10333 ASSERT(act->dta_prev->dta_intuple);
10287 10334 ASSERT(aggbase != UINT32_MAX);
10288 10335 ASSERT(curneeded != UINT32_MAX);
10289 10336
10290 10337 agg->dtag_base = aggbase;
10291 10338
10292 10339 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10293 10340 rec->dtrd_offset = curneeded;
10294 10341 curneeded += rec->dtrd_size;
10295 10342 ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
10296 10343
10297 10344 aggbase = UINT32_MAX;
10298 10345 curneeded = UINT32_MAX;
10299 10346 } else if (act->dta_intuple) {
10300 10347 if (curneeded == UINT32_MAX) {
10301 10348 /*
10302 10349 * This is the first record in a tuple. Align
10303 10350 * curneeded to be at offset 4 in an 8-byte
10304 10351 * aligned block.
10305 10352 */
10306 10353 ASSERT(act->dta_prev == NULL ||
10307 10354 !act->dta_prev->dta_intuple);
10308 10355 ASSERT3U(aggbase, ==, UINT32_MAX);
10309 10356 curneeded = P2PHASEUP(ecb->dte_size,
10310 10357 sizeof (uint64_t), sizeof (dtrace_aggid_t));
10311 10358
10312 10359 aggbase = curneeded - sizeof (dtrace_aggid_t);
10313 10360 ASSERT(IS_P2ALIGNED(aggbase,
10314 10361 sizeof (uint64_t)));
10315 10362 }
10316 10363 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10317 10364 rec->dtrd_offset = curneeded;
10318 10365 curneeded += rec->dtrd_size;
10319 10366 } else {
10320 10367 /* tuples must be followed by an aggregation */
10321 10368 ASSERT(act->dta_prev == NULL ||
10322 10369 !act->dta_prev->dta_intuple);
10323 10370
10324 10371 ecb->dte_size = P2ROUNDUP(ecb->dte_size,
10325 10372 rec->dtrd_alignment);
10326 10373 rec->dtrd_offset = ecb->dte_size;
10327 10374 ecb->dte_size += rec->dtrd_size;
10328 10375 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
10329 10376 }
10330 10377 }
10331 10378
10332 10379 if ((act = ecb->dte_action) != NULL &&
10333 10380 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
10334 10381 ecb->dte_size == sizeof (dtrace_rechdr_t)) {
10335 10382 /*
10336 10383 * If the size is still sizeof (dtrace_rechdr_t), then all
10337 10384 * actions store no data; set the size to 0.
10338 10385 */
10339 10386 ecb->dte_size = 0;
10340 10387 }
10341 10388
10342 10389 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
10343 10390 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
10344 10391 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
10345 10392 ecb->dte_needed);
10346 10393 }
10347 10394
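The tuple layout computed by dtrace_ecb_resize() above leans on the P2ROUNDUP()/P2PHASEUP() arithmetic: the first record of an aggregation tuple is placed at offset 4 of an 8-byte aligned block, so the 32-bit aggregation ID can occupy the 8-byte-aligned slot (aggbase) immediately before it and the 8-byte aggregation data stays naturally aligned. The following standalone sketch reproduces that arithmetic with plain helper functions rather than the kernel's P2* macros; the helper names and the example size are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Round x up to a multiple of align (align must be a power of two). */
static uint32_t
round_up(uint32_t x, uint32_t align)
{
	return ((x + align - 1) & ~(align - 1));
}

/* Smallest value >= x that is congruent to phase modulo align. */
static uint32_t
phase_up(uint32_t x, uint32_t align, uint32_t phase)
{
	return (x + ((phase - x) & (align - 1)));
}

int
main(void)
{
	uint32_t dte_size = 20;			/* header plus earlier records */
	uint32_t aggid_size = sizeof (uint32_t);	/* dtrace_aggid_t */

	/* First tuple record: ID slot at offset 4 of an 8-byte block. */
	uint32_t curneeded = phase_up(dte_size, sizeof (uint64_t), aggid_size);
	uint32_t aggbase = curneeded - aggid_size;

	printf("curneeded = %u, aggbase = %u (8-byte aligned: %s)\n",
	    curneeded, aggbase, (aggbase & 7) == 0 ? "yes" : "no");

	/* A subsequent 8-byte tuple record is rounded up to its alignment. */
	printf("next record offset = %u\n",
	    round_up(curneeded, (uint32_t)sizeof (uint64_t)));
	return (0);
}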
10348 10395 static dtrace_action_t *
10349 10396 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10350 10397 {
10351 10398 dtrace_aggregation_t *agg;
10352 10399 size_t size = sizeof (uint64_t);
10353 10400 int ntuple = desc->dtad_ntuple;
10354 10401 dtrace_action_t *act;
10355 10402 dtrace_recdesc_t *frec;
10356 10403 dtrace_aggid_t aggid;
10357 10404 dtrace_state_t *state = ecb->dte_state;
10358 10405
10359 10406 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
10360 10407 agg->dtag_ecb = ecb;
10361 10408
10362 10409 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
10363 10410
10364 10411 switch (desc->dtad_kind) {
10365 10412 case DTRACEAGG_MIN:
10366 10413 agg->dtag_initial = INT64_MAX;
10367 10414 agg->dtag_aggregate = dtrace_aggregate_min;
10368 10415 break;
10369 10416
10370 10417 case DTRACEAGG_MAX:
10371 10418 agg->dtag_initial = INT64_MIN;
10372 10419 agg->dtag_aggregate = dtrace_aggregate_max;
10373 10420 break;
10374 10421
10375 10422 case DTRACEAGG_COUNT:
10376 10423 agg->dtag_aggregate = dtrace_aggregate_count;
10377 10424 break;
10378 10425
10379 10426 case DTRACEAGG_QUANTIZE:
10380 10427 agg->dtag_aggregate = dtrace_aggregate_quantize;
10381 10428 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
10382 10429 sizeof (uint64_t);
10383 10430 break;
10384 10431
10385 10432 case DTRACEAGG_LQUANTIZE: {
10386 10433 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
10387 10434 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
10388 10435
10389 10436 agg->dtag_initial = desc->dtad_arg;
10390 10437 agg->dtag_aggregate = dtrace_aggregate_lquantize;
10391 10438
10392 10439 if (step == 0 || levels == 0)
10393 10440 goto err;
10394 10441
10395 10442 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
10396 10443 break;
10397 10444 }
10398 10445
10399 10446 case DTRACEAGG_LLQUANTIZE: {
10400 10447 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
10401 10448 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
10402 10449 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
10403 10450 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
10404 10451 int64_t v;
10405 10452
10406 10453 agg->dtag_initial = desc->dtad_arg;
10407 10454 agg->dtag_aggregate = dtrace_aggregate_llquantize;
10408 10455
10409 10456 if (factor < 2 || low >= high || nsteps < factor)
10410 10457 goto err;
10411 10458
10412 10459 /*
10413 10460 * Now check that the number of steps evenly divides a power
10414 10461 * of the factor. (This assures both integer bucket size and
10415 10462 * linearity within each magnitude.)
10416 10463 */
10417 10464 for (v = factor; v < nsteps; v *= factor)
10418 10465 continue;
10419 10466
10420 10467 if ((v % nsteps) || (nsteps % factor))
10421 10468 goto err;
10422 10469
10423 10470 size = (dtrace_aggregate_llquantize_bucket(factor,
10424 10471 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
10425 10472 break;
10426 10473 }
10427 10474
10428 10475 case DTRACEAGG_AVG:
10429 10476 agg->dtag_aggregate = dtrace_aggregate_avg;
10430 10477 size = sizeof (uint64_t) * 2;
10431 10478 break;
10432 10479
10433 10480 case DTRACEAGG_STDDEV:
10434 10481 agg->dtag_aggregate = dtrace_aggregate_stddev;
10435 10482 size = sizeof (uint64_t) * 4;
10436 10483 break;
10437 10484
10438 10485 case DTRACEAGG_SUM:
10439 10486 agg->dtag_aggregate = dtrace_aggregate_sum;
10440 10487 break;
10441 10488
10442 10489 default:
10443 10490 goto err;
10444 10491 }
10445 10492
10446 10493 agg->dtag_action.dta_rec.dtrd_size = size;
10447 10494
10448 10495 if (ntuple == 0)
10449 10496 goto err;
10450 10497
10451 10498 /*
10452 10499 * We must make sure that we have enough actions for the n-tuple.
10453 10500 */
10454 10501 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
10455 10502 if (DTRACEACT_ISAGG(act->dta_kind))
10456 10503 break;
10457 10504
10458 10505 if (--ntuple == 0) {
10459 10506 /*
10460 10507 * This is the action with which our n-tuple begins.
10461 10508 */
10462 10509 agg->dtag_first = act;
10463 10510 goto success;
10464 10511 }
10465 10512 }
10466 10513
10467 10514 /*
10468 10515 * This n-tuple is short by ntuple elements. Return failure.
10469 10516 */
10470 10517 ASSERT(ntuple != 0);
10471 10518 err:
10472 10519 kmem_free(agg, sizeof (dtrace_aggregation_t));
10473 10520 return (NULL);
10474 10521
10475 10522 success:
10476 10523 /*
10477 10524 * If the last action in the tuple has a size of zero, it's actually
10478 10525 * an expression argument for the aggregating action.
10479 10526 */
10480 10527 ASSERT(ecb->dte_action_last != NULL);
10481 10528 act = ecb->dte_action_last;
10482 10529
10483 10530 if (act->dta_kind == DTRACEACT_DIFEXPR) {
10484 10531 ASSERT(act->dta_difo != NULL);
10485 10532
10486 10533 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10487 10534 agg->dtag_hasarg = 1;
10488 10535 }
10489 10536
10490 10537 /*
10491 10538 * We need to allocate an id for this aggregation.
10492 10539 */
10493 10540 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
10494 10541 VM_BESTFIT | VM_SLEEP);
10495 10542
10496 10543 if (aggid - 1 >= state->dts_naggregations) {
10497 10544 dtrace_aggregation_t **oaggs = state->dts_aggregations;
10498 10545 dtrace_aggregation_t **aggs;
10499 10546 int naggs = state->dts_naggregations << 1;
10500 10547 int onaggs = state->dts_naggregations;
10501 10548
10502 10549 ASSERT(aggid == state->dts_naggregations + 1);
10503 10550
10504 10551 if (naggs == 0) {
10505 10552 ASSERT(oaggs == NULL);
10506 10553 naggs = 1;
10507 10554 }
10508 10555
10509 10556 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10510 10557
10511 10558 if (oaggs != NULL) {
10512 10559 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10513 10560 kmem_free(oaggs, onaggs * sizeof (*aggs));
10514 10561 }
10515 10562
10516 10563 state->dts_aggregations = aggs;
10517 10564 state->dts_naggregations = naggs;
10518 10565 }
10519 10566
10520 10567 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10521 10568 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10522 10569
10523 10570 frec = &agg->dtag_first->dta_rec;
10524 10571 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10525 10572 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10526 10573
10527 10574 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10528 10575 ASSERT(!act->dta_intuple);
10529 10576 act->dta_intuple = 1;
10530 10577 }
10531 10578
10532 10579 return (&agg->dtag_action);
10533 10580 }
10534 10581
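The llquantize() argument check in dtrace_ecb_aggregation_create() above enforces more than the obvious range constraints: the number of steps per order of magnitude must evenly divide some power of the factor, and must itself be a multiple of the factor, so that bucket sizes stay integral and each magnitude remains linear. A standalone sketch of the same check, with example parameters chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

static int
llquantize_args_ok(uint16_t factor, uint16_t low, uint16_t high,
    uint16_t nsteps)
{
	int64_t v;

	if (factor < 2 || low >= high || nsteps < factor)
		return (0);

	/* Find the smallest power of factor that is >= nsteps. */
	for (v = factor; v < nsteps; v *= factor)
		continue;

	/* nsteps must divide that power, and factor must divide nsteps. */
	return (!(v % nsteps) && !(nsteps % factor));
}

int
main(void)
{
	/* 100 % 20 == 0 and 20 % 10 == 0: valid. */
	printf("factor=10 nsteps=20 -> %d\n", llquantize_args_ok(10, 0, 6, 20));

	/* 100 % 30 != 0: invalid. */
	printf("factor=10 nsteps=30 -> %d\n", llquantize_args_ok(10, 0, 6, 30));
	return (0);
}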
10535 10582 static void
10536 10583 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10537 10584 {
10538 10585 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10539 10586 dtrace_state_t *state = ecb->dte_state;
10540 10587 dtrace_aggid_t aggid = agg->dtag_id;
10541 10588
10542 10589 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10543 10590 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10544 10591
10545 10592 ASSERT(state->dts_aggregations[aggid - 1] == agg);
10546 10593 state->dts_aggregations[aggid - 1] = NULL;
10547 10594
10548 10595 kmem_free(agg, sizeof (dtrace_aggregation_t));
10549 10596 }
10550 10597
10551 10598 static int
10552 10599 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10553 10600 {
10554 10601 dtrace_action_t *action, *last;
10555 10602 dtrace_difo_t *dp = desc->dtad_difo;
10556 10603 uint32_t size = 0, align = sizeof (uint8_t), mask;
10557 10604 uint16_t format = 0;
10558 10605 dtrace_recdesc_t *rec;
10559 10606 dtrace_state_t *state = ecb->dte_state;
10560 10607 dtrace_optval_t *opt = state->dts_options, nframes, strsize;
10561 10608 uint64_t arg = desc->dtad_arg;
10562 10609
10563 10610 ASSERT(MUTEX_HELD(&dtrace_lock));
10564 10611 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10565 10612
10566 10613 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10567 10614 /*
10568 10615 * If this is an aggregating action, there must be neither
10569 10616 * a speculate nor a commit on the action chain.
10570 10617 */
10571 10618 dtrace_action_t *act;
10572 10619
10573 10620 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10574 10621 if (act->dta_kind == DTRACEACT_COMMIT)
10575 10622 return (EINVAL);
10576 10623
10577 10624 if (act->dta_kind == DTRACEACT_SPECULATE)
10578 10625 return (EINVAL);
10579 10626 }
10580 10627
10581 10628 action = dtrace_ecb_aggregation_create(ecb, desc);
10582 10629
10583 10630 if (action == NULL)
10584 10631 return (EINVAL);
10585 10632 } else {
10586 10633 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10587 10634 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10588 10635 dp != NULL && dp->dtdo_destructive)) {
10589 10636 state->dts_destructive = 1;
10590 10637 }
10591 10638
10592 10639 switch (desc->dtad_kind) {
10593 10640 case DTRACEACT_PRINTF:
10594 10641 case DTRACEACT_PRINTA:
10595 10642 case DTRACEACT_SYSTEM:
10596 10643 case DTRACEACT_FREOPEN:
10597 10644 case DTRACEACT_DIFEXPR:
10598 10645 /*
10599 10646 * We know that our arg is a string -- turn it into a
10600 10647 * format.
10601 10648 */
10602 10649 if (arg == NULL) {
10603 10650 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
10604 10651 desc->dtad_kind == DTRACEACT_DIFEXPR);
10605 10652 format = 0;
10606 10653 } else {
10607 10654 ASSERT(arg != NULL);
10608 10655 ASSERT(arg > KERNELBASE);
10609 10656 format = dtrace_format_add(state,
10610 10657 (char *)(uintptr_t)arg);
10611 10658 }
10612 10659
10613 10660 /*FALLTHROUGH*/
10614 10661 case DTRACEACT_LIBACT:
10615 10662 case DTRACEACT_TRACEMEM:
10616 10663 case DTRACEACT_TRACEMEM_DYNSIZE:
10617 10664 if (dp == NULL)
10618 10665 return (EINVAL);
10619 10666
10620 10667 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10621 10668 break;
10622 10669
10623 10670 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10624 10671 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10625 10672 return (EINVAL);
10626 10673
10627 10674 size = opt[DTRACEOPT_STRSIZE];
10628 10675 }
10629 10676
10630 10677 break;
10631 10678
10632 10679 case DTRACEACT_STACK:
10633 10680 if ((nframes = arg) == 0) {
10634 10681 nframes = opt[DTRACEOPT_STACKFRAMES];
10635 10682 ASSERT(nframes > 0);
10636 10683 arg = nframes;
10637 10684 }
10638 10685
10639 10686 size = nframes * sizeof (pc_t);
10640 10687 break;
10641 10688
10642 10689 case DTRACEACT_JSTACK:
10643 10690 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10644 10691 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10645 10692
10646 10693 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10647 10694 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10648 10695
10649 10696 arg = DTRACE_USTACK_ARG(nframes, strsize);
10650 10697
10651 10698 /*FALLTHROUGH*/
10652 10699 case DTRACEACT_USTACK:
10653 10700 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10654 10701 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10655 10702 strsize = DTRACE_USTACK_STRSIZE(arg);
10656 10703 nframes = opt[DTRACEOPT_USTACKFRAMES];
10657 10704 ASSERT(nframes > 0);
10658 10705 arg = DTRACE_USTACK_ARG(nframes, strsize);
10659 10706 }
10660 10707
10661 10708 /*
10662 10709 * Save a slot for the pid.
10663 10710 */
10664 10711 size = (nframes + 1) * sizeof (uint64_t);
10665 10712 size += DTRACE_USTACK_STRSIZE(arg);
10666 10713 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10667 10714
10668 10715 break;
10669 10716
10670 10717 case DTRACEACT_SYM:
10671 10718 case DTRACEACT_MOD:
10672 10719 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10673 10720 sizeof (uint64_t)) ||
10674 10721 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10675 10722 return (EINVAL);
10676 10723 break;
10677 10724
10678 10725 case DTRACEACT_USYM:
10679 10726 case DTRACEACT_UMOD:
10680 10727 case DTRACEACT_UADDR:
10681 10728 if (dp == NULL ||
10682 10729 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10683 10730 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10684 10731 return (EINVAL);
10685 10732
10686 10733 /*
10687 10734 * We have a slot for the pid, plus a slot for the
10688 10735 * argument. To keep things simple (aligned with
10689 10736 * bitness-neutral sizing), we store each as a 64-bit
10690 10737 * quantity.
10691 10738 */
10692 10739 size = 2 * sizeof (uint64_t);
10693 10740 break;
10694 10741
10695 10742 case DTRACEACT_STOP:
10696 10743 case DTRACEACT_BREAKPOINT:
10697 10744 case DTRACEACT_PANIC:
10698 10745 break;
10699 10746
10700 10747 case DTRACEACT_CHILL:
10701 10748 case DTRACEACT_DISCARD:
10702 10749 case DTRACEACT_RAISE:
10703 10750 if (dp == NULL)
10704 10751 return (EINVAL);
10705 10752 break;
10706 10753
10707 10754 case DTRACEACT_EXIT:
10708 10755 if (dp == NULL ||
10709 10756 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10710 10757 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10711 10758 return (EINVAL);
10712 10759 break;
10713 10760
10714 10761 case DTRACEACT_SPECULATE:
10715 10762 if (ecb->dte_size > sizeof (dtrace_rechdr_t))
10716 10763 return (EINVAL);
10717 10764
10718 10765 if (dp == NULL)
10719 10766 return (EINVAL);
10720 10767
10721 10768 state->dts_speculates = 1;
10722 10769 break;
10723 10770
10724 10771 case DTRACEACT_COMMIT: {
10725 10772 dtrace_action_t *act = ecb->dte_action;
10726 10773
10727 10774 for (; act != NULL; act = act->dta_next) {
10728 10775 if (act->dta_kind == DTRACEACT_COMMIT)
10729 10776 return (EINVAL);
10730 10777 }
10731 10778
10732 10779 if (dp == NULL)
10733 10780 return (EINVAL);
10734 10781 break;
10735 10782 }
10736 10783
10737 10784 default:
10738 10785 return (EINVAL);
10739 10786 }
10740 10787
10741 10788 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10742 10789 /*
10743 10790 * If this is a data-storing action or a speculate,
10744 10791 * we must be sure that there isn't a commit on the
10745 10792 * action chain.
10746 10793 */
10747 10794 dtrace_action_t *act = ecb->dte_action;
10748 10795
10749 10796 for (; act != NULL; act = act->dta_next) {
10750 10797 if (act->dta_kind == DTRACEACT_COMMIT)
10751 10798 return (EINVAL);
10752 10799 }
10753 10800 }
10754 10801
10755 10802 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10756 10803 action->dta_rec.dtrd_size = size;
10757 10804 }
10758 10805
10759 10806 action->dta_refcnt = 1;
10760 10807 rec = &action->dta_rec;
10761 10808 size = rec->dtrd_size;
10762 10809
10763 10810 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10764 10811 if (!(size & mask)) {
10765 10812 align = mask + 1;
10766 10813 break;
10767 10814 }
10768 10815 }
10769 10816
10770 10817 action->dta_kind = desc->dtad_kind;
10771 10818
10772 10819 if ((action->dta_difo = dp) != NULL)
10773 10820 dtrace_difo_hold(dp);
10774 10821
10775 10822 rec->dtrd_action = action->dta_kind;
10776 10823 rec->dtrd_arg = arg;
10777 10824 rec->dtrd_uarg = desc->dtad_uarg;
10778 10825 rec->dtrd_alignment = (uint16_t)align;
10779 10826 rec->dtrd_format = format;
10780 10827
10781 10828 if ((last = ecb->dte_action_last) != NULL) {
10782 10829 ASSERT(ecb->dte_action != NULL);
10783 10830 action->dta_prev = last;
10784 10831 last->dta_next = action;
10785 10832 } else {
10786 10833 ASSERT(ecb->dte_action == NULL);
10787 10834 ecb->dte_action = action;
10788 10835 }
10789 10836
10790 10837 ecb->dte_action_last = action;
10791 10838
10792 10839 return (0);
10793 10840 }
10794 10841
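The mask loop near the end of dtrace_ecb_action_add() derives a record's alignment from its size: the alignment is the largest power of two, capped at sizeof (uint64_t), that evenly divides the size, falling back to byte alignment for odd or zero sizes. A standalone sketch of that computation over a few example sizes (the helper name is local to the sketch):

#include <stdio.h>
#include <stdint.h>

static uint32_t
record_alignment(uint32_t size)
{
	uint32_t align = sizeof (uint8_t), mask;

	/* Try masks 7, 3, 1; the first that divides size wins. */
	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
		if (!(size & mask)) {
			align = mask + 1;
			break;
		}
	}

	return (align);
}

int
main(void)
{
	uint32_t sizes[] = { 0, 1, 4, 6, 8, 12, 16, 24 };
	size_t i;

	for (i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++)
		printf("size %2u -> alignment %u\n", sizes[i],
		    record_alignment(sizes[i]));
	return (0);
}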
10795 10842 static void
10796 10843 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10797 10844 {
10798 10845 dtrace_action_t *act = ecb->dte_action, *next;
10799 10846 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10800 10847 dtrace_difo_t *dp;
10801 10848 uint16_t format;
10802 10849
10803 10850 if (act != NULL && act->dta_refcnt > 1) {
10804 10851 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10805 10852 act->dta_refcnt--;
10806 10853 } else {
10807 10854 for (; act != NULL; act = next) {
10808 10855 next = act->dta_next;
10809 10856 ASSERT(next != NULL || act == ecb->dte_action_last);
10810 10857 ASSERT(act->dta_refcnt == 1);
10811 10858
10812 10859 if ((format = act->dta_rec.dtrd_format) != 0)
10813 10860 dtrace_format_remove(ecb->dte_state, format);
10814 10861
10815 10862 if ((dp = act->dta_difo) != NULL)
10816 10863 dtrace_difo_release(dp, vstate);
10817 10864
10818 10865 if (DTRACEACT_ISAGG(act->dta_kind)) {
10819 10866 dtrace_ecb_aggregation_destroy(ecb, act);
10820 10867 } else {
10821 10868 kmem_free(act, sizeof (dtrace_action_t));
10822 10869 }
10823 10870 }
10824 10871 }
10825 10872
10826 10873 ecb->dte_action = NULL;
10827 10874 ecb->dte_action_last = NULL;
10828 10875 ecb->dte_size = 0;
10829 10876 }
10830 10877
10831 10878 static void
10832 10879 dtrace_ecb_disable(dtrace_ecb_t *ecb)
10833 10880 {
10834 10881 /*
10835 10882 * We disable the ECB by removing it from its probe.
10836 10883 */
10837 10884 dtrace_ecb_t *pecb, *prev = NULL;
10838 10885 dtrace_probe_t *probe = ecb->dte_probe;
10839 10886
10840 10887 ASSERT(MUTEX_HELD(&dtrace_lock));
10841 10888
10842 10889 if (probe == NULL) {
10843 10890 /*
10844 10891 * This is the NULL probe; there is nothing to disable.
10845 10892 */
10846 10893 return;
10847 10894 }
10848 10895
10849 10896 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10850 10897 if (pecb == ecb)
10851 10898 break;
10852 10899 prev = pecb;
10853 10900 }
10854 10901
10855 10902 ASSERT(pecb != NULL);
10856 10903
10857 10904 if (prev == NULL) {
10858 10905 probe->dtpr_ecb = ecb->dte_next;
10859 10906 } else {
10860 10907 prev->dte_next = ecb->dte_next;
10861 10908 }
10862 10909
10863 10910 if (ecb == probe->dtpr_ecb_last) {
10864 10911 ASSERT(ecb->dte_next == NULL);
10865 10912 probe->dtpr_ecb_last = prev;
10866 10913 }
10867 10914
10868 10915 /*
10869 10916 * The ECB has been disconnected from the probe; now sync to assure
10870 10917 * that all CPUs have seen the change before returning.
10871 10918 */
10872 10919 dtrace_sync();
10873 10920
10874 10921 if (probe->dtpr_ecb == NULL) {
10875 10922 /*
10876 10923 * That was the last ECB on the probe; clear the predicate
10877 10924 * cache ID for the probe, disable it and sync one more time
10878 10925 * to assure that we'll never hit it again.
10879 10926 */
10880 10927 dtrace_provider_t *prov = probe->dtpr_provider;
10881 10928
10882 10929 ASSERT(ecb->dte_next == NULL);
10883 10930 ASSERT(probe->dtpr_ecb_last == NULL);
10884 10931 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10885 10932 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10886 10933 probe->dtpr_id, probe->dtpr_arg);
10887 10934 dtrace_sync();
10888 10935 } else {
10889 10936 /*
10890 10937 * There is at least one ECB remaining on the probe. If there
10891 10938 * is _exactly_ one, set the probe's predicate cache ID to be
10892 10939 * the predicate cache ID of the remaining ECB.
10893 10940 */
10894 10941 ASSERT(probe->dtpr_ecb_last != NULL);
10895 10942 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10896 10943
10897 10944 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10898 10945 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10899 10946
10900 10947 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10901 10948
10902 10949 if (p != NULL)
10903 10950 probe->dtpr_predcache = p->dtp_cacheid;
10904 10951 }
10905 10952
10906 10953 ecb->dte_next = NULL;
10907 10954 }
10908 10955 }
10909 10956
10910 10957 static void
10911 10958 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10912 10959 {
10913 10960 dtrace_state_t *state = ecb->dte_state;
10914 10961 dtrace_vstate_t *vstate = &state->dts_vstate;
10915 10962 dtrace_predicate_t *pred;
10916 10963 dtrace_epid_t epid = ecb->dte_epid;
10917 10964
10918 10965 ASSERT(MUTEX_HELD(&dtrace_lock));
10919 10966 ASSERT(ecb->dte_next == NULL);
10920 10967 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10921 10968
10922 10969 if ((pred = ecb->dte_predicate) != NULL)
10923 10970 dtrace_predicate_release(pred, vstate);
10924 10971
10925 10972 dtrace_ecb_action_remove(ecb);
10926 10973
10927 10974 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10928 10975 state->dts_ecbs[epid - 1] = NULL;
10929 10976
10930 10977 kmem_free(ecb, sizeof (dtrace_ecb_t));
10931 10978 }
10932 10979
10933 10980 static dtrace_ecb_t *
10934 10981 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10935 10982 dtrace_enabling_t *enab)
10936 10983 {
10937 10984 dtrace_ecb_t *ecb;
10938 10985 dtrace_predicate_t *pred;
10939 10986 dtrace_actdesc_t *act;
10940 10987 dtrace_provider_t *prov;
10941 10988 dtrace_ecbdesc_t *desc = enab->dten_current;
10942 10989
10943 10990 ASSERT(MUTEX_HELD(&dtrace_lock));
10944 10991 ASSERT(state != NULL);
10945 10992
10946 10993 ecb = dtrace_ecb_add(state, probe);
10947 10994 ecb->dte_uarg = desc->dted_uarg;
10948 10995
10949 10996 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10950 10997 dtrace_predicate_hold(pred);
10951 10998 ecb->dte_predicate = pred;
10952 10999 }
10953 11000
10954 11001 if (probe != NULL) {
10955 11002 /*
10956 11003 * If the provider shows more leg than the consumer is old
10957 11004 * enough to see, we need to enable the appropriate implicit
10958 11005 * predicate bits to prevent the ecb from activating at
10959 11006 * revealing times.
10960 11007 *
10961 11008 * Providers specifying DTRACE_PRIV_USER at register time
10962 11009 * are stating that they need the /proc-style privilege
10963 11010 * model to be enforced, and this is what DTRACE_COND_OWNER
10964 11011 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10965 11012 */
10966 11013 prov = probe->dtpr_provider;
10967 11014 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10968 11015 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10969 11016 ecb->dte_cond |= DTRACE_COND_OWNER;
10970 11017
10971 11018 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10972 11019 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10973 11020 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10974 11021
10975 11022 /*
10976 11023 * If the provider shows us kernel innards and the user
10977 11024 * is lacking sufficient privilege, enable the
10978 11025 * DTRACE_COND_USERMODE implicit predicate.
10979 11026 */
10980 11027 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10981 11028 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10982 11029 ecb->dte_cond |= DTRACE_COND_USERMODE;
10983 11030 }
10984 11031
10985 11032 if (dtrace_ecb_create_cache != NULL) {
10986 11033 /*
10987 11034 * If we have a cached ecb, we'll use its action list instead
10988 11035 * of creating our own (saving both time and space).
10989 11036 */
10990 11037 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10991 11038 dtrace_action_t *act = cached->dte_action;
10992 11039
10993 11040 if (act != NULL) {
10994 11041 ASSERT(act->dta_refcnt > 0);
10995 11042 act->dta_refcnt++;
10996 11043 ecb->dte_action = act;
10997 11044 ecb->dte_action_last = cached->dte_action_last;
10998 11045 ecb->dte_needed = cached->dte_needed;
10999 11046 ecb->dte_size = cached->dte_size;
11000 11047 ecb->dte_alignment = cached->dte_alignment;
11001 11048 }
11002 11049
11003 11050 return (ecb);
11004 11051 }
11005 11052
11006 11053 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11007 11054 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11008 11055 dtrace_ecb_destroy(ecb);
11009 11056 return (NULL);
11010 11057 }
11011 11058 }
11012 11059
11013 11060 dtrace_ecb_resize(ecb);
11014 11061
11015 11062 return (dtrace_ecb_create_cache = ecb);
11016 11063 }
11017 11064
11018 11065 static int
11019 11066 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
11020 11067 {
11021 11068 dtrace_ecb_t *ecb;
11022 11069 dtrace_enabling_t *enab = arg;
11023 11070 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11024 11071
11025 11072 ASSERT(state != NULL);
11026 11073
11027 11074 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
11028 11075 /*
11029 11076 * This probe was created in a generation for which this
11030 11077 * enabling has previously created ECBs; we don't want to
11031 11078 * enable it again, so just kick out.
11032 11079 */
11033 11080 return (DTRACE_MATCH_NEXT);
11034 11081 }
11035 11082
11036 11083 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11037 11084 return (DTRACE_MATCH_DONE);
11038 11085
11039 11086 if (dtrace_ecb_enable(ecb) < 0)
11040 11087 return (DTRACE_MATCH_FAIL);
11041 11088
11042 11089 return (DTRACE_MATCH_NEXT);
11043 11090 }
11044 11091
11045 11092 static dtrace_ecb_t *
11046 11093 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11047 11094 {
11048 11095 dtrace_ecb_t *ecb;
11049 11096
11050 11097 ASSERT(MUTEX_HELD(&dtrace_lock));
11051 11098
11052 11099 if (id == 0 || id > state->dts_necbs)
11053 11100 return (NULL);
11054 11101
11055 11102 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11056 11103 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11057 11104
11058 11105 return (state->dts_ecbs[id - 1]);
11059 11106 }
11060 11107
11061 11108 static dtrace_aggregation_t *
11062 11109 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11063 11110 {
11064 11111 dtrace_aggregation_t *agg;
11065 11112
11066 11113 ASSERT(MUTEX_HELD(&dtrace_lock));
11067 11114
11068 11115 if (id == 0 || id > state->dts_naggregations)
11069 11116 return (NULL);
11070 11117
11071 11118 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11072 11119 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11073 11120 agg->dtag_id == id);
11074 11121
11075 11122 return (state->dts_aggregations[id - 1]);
11076 11123 }
11077 11124
11078 11125 /*
11079 11126 * DTrace Buffer Functions
11080 11127 *
11081 11128 * The following functions manipulate DTrace buffers. Most of these functions
11082 11129 * are called in the context of establishing or processing consumer state;
11083 11130 * exceptions are explicitly noted.
11084 11131 */
11085 11132
11086 11133 /*
11087 11134 * Note: called from cross call context. This function switches the two
11088 11135 * buffers on a given CPU. The atomicity of this operation is assured by
11089 11136 * disabling interrupts while the actual switch takes place; the disabling of
11090 11137 * interrupts serializes the execution with any execution of dtrace_probe() on
11091 11138 * the same CPU.
11092 11139 */
11093 11140 static void
11094 11141 dtrace_buffer_switch(dtrace_buffer_t *buf)
11095 11142 {
11096 11143 caddr_t tomax = buf->dtb_tomax;
11097 11144 caddr_t xamot = buf->dtb_xamot;
11098 11145 dtrace_icookie_t cookie;
11099 11146 hrtime_t now;
11100 11147
11101 11148 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11102 11149 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11103 11150
11104 11151 cookie = dtrace_interrupt_disable();
11105 11152 now = dtrace_gethrtime();
11106 11153 buf->dtb_tomax = xamot;
11107 11154 buf->dtb_xamot = tomax;
11108 11155 buf->dtb_xamot_drops = buf->dtb_drops;
11109 11156 buf->dtb_xamot_offset = buf->dtb_offset;
11110 11157 buf->dtb_xamot_errors = buf->dtb_errors;
11111 11158 buf->dtb_xamot_flags = buf->dtb_flags;
11112 11159 buf->dtb_offset = 0;
11113 11160 buf->dtb_drops = 0;
11114 11161 buf->dtb_errors = 0;
11115 11162 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11116 11163 buf->dtb_interval = now - buf->dtb_switched;
11117 11164 buf->dtb_switched = now;
11118 11165 dtrace_interrupt_enable(cookie);
11119 11166 }
11120 11167
11121 11168 /*
11122 11169 * Note: called from cross call context. This function activates a buffer
11123 11170 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
11124 11171 * is guaranteed by the disabling of interrupts.
11125 11172 */
11126 11173 static void
11127 11174 dtrace_buffer_activate(dtrace_state_t *state)
11128 11175 {
11129 11176 dtrace_buffer_t *buf;
11130 11177 dtrace_icookie_t cookie = dtrace_interrupt_disable();
11131 11178
11132 11179 buf = &state->dts_buffer[CPU->cpu_id];
11133 11180
11134 11181 if (buf->dtb_tomax != NULL) {
11135 11182 /*
11136 11183 * We might like to assert that the buffer is marked inactive,
11137 11184 * but this isn't necessarily true: the buffer for the CPU
11138 11185 * that processes the BEGIN probe has its buffer activated
11139 11186 * manually. In this case, we take the (harmless) action
11140 11187 * re-clearing the bit INACTIVE bit.
11141 11188 */
11142 11189 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
11143 11190 }
11144 11191
11145 11192 dtrace_interrupt_enable(cookie);
11146 11193 }
11147 11194
11148 11195 static int
11149 11196 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
11150 11197 processorid_t cpu, int *factor)
11151 11198 {
11152 11199 cpu_t *cp;
11153 11200 dtrace_buffer_t *buf;
11154 11201 int allocated = 0, desired = 0;
11155 11202
11156 11203 ASSERT(MUTEX_HELD(&cpu_lock));
11157 11204 ASSERT(MUTEX_HELD(&dtrace_lock));
11158 11205
11159 11206 *factor = 1;
11160 11207
11161 11208 if (size > dtrace_nonroot_maxsize &&
11162 11209 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
11163 11210 return (EFBIG);
11164 11211
11165 11212 cp = cpu_list;
11166 11213
11167 11214 do {
11168 11215 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11169 11216 continue;
11170 11217
11171 11218 buf = &bufs[cp->cpu_id];
11172 11219
11173 11220 /*
11174 11221 * If there is already a buffer allocated for this CPU, it
11175 11222 * is only possible that this is a DR event. In this case,
11176 11223 * the buffer size must match our specified size.
11177 11224 */
11178 11225 if (buf->dtb_tomax != NULL) {
11179 11226 ASSERT(buf->dtb_size == size);
11180 11227 continue;
11181 11228 }
11182 11229
11183 11230 ASSERT(buf->dtb_xamot == NULL);
11184 11231
11185 11232 if ((buf->dtb_tomax = kmem_zalloc(size,
11186 11233 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11187 11234 goto err;
11188 11235
11189 11236 buf->dtb_size = size;
11190 11237 buf->dtb_flags = flags;
11191 11238 buf->dtb_offset = 0;
11192 11239 buf->dtb_drops = 0;
11193 11240
11194 11241 if (flags & DTRACEBUF_NOSWITCH)
11195 11242 continue;
11196 11243
11197 11244 if ((buf->dtb_xamot = kmem_zalloc(size,
11198 11245 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11199 11246 goto err;
11200 11247 } while ((cp = cp->cpu_next) != cpu_list);
11201 11248
11202 11249 return (0);
11203 11250
11204 11251 err:
11205 11252 cp = cpu_list;
11206 11253
11207 11254 do {
11208 11255 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11209 11256 continue;
11210 11257
11211 11258 buf = &bufs[cp->cpu_id];
11212 11259 desired += 2;
11213 11260
11214 11261 if (buf->dtb_xamot != NULL) {
11215 11262 ASSERT(buf->dtb_tomax != NULL);
11216 11263 ASSERT(buf->dtb_size == size);
11217 11264 kmem_free(buf->dtb_xamot, size);
11218 11265 allocated++;
11219 11266 }
11220 11267
11221 11268 if (buf->dtb_tomax != NULL) {
11222 11269 ASSERT(buf->dtb_size == size);
11223 11270 kmem_free(buf->dtb_tomax, size);
11224 11271 allocated++;
11225 11272 }
11226 11273
11227 11274 buf->dtb_tomax = NULL;
11228 11275 buf->dtb_xamot = NULL;
11229 11276 buf->dtb_size = 0;
11230 11277 } while ((cp = cp->cpu_next) != cpu_list);
11231 11278
11232 11279 *factor = desired / (allocated > 0 ? allocated : 1);
11233 11280
11234 11281 return (ENOMEM);
11235 11282 }
11236 11283
11237 11284 /*
11238 11285 * Note: called from probe context. This function just increments the drop
11239 11286 * count on a buffer. It has been made a function to allow for the
11240 11287 * possibility of understanding the source of mysterious drop counts. (A
11241 11288 * problem for which one may be particularly disappointed that DTrace cannot
11242 11289 * be used to understand DTrace.)
11243 11290 */
11244 11291 static void
11245 11292 dtrace_buffer_drop(dtrace_buffer_t *buf)
11246 11293 {
11247 11294 buf->dtb_drops++;
11248 11295 }
11249 11296
11250 11297 /*
11251 11298 * Note: called from probe context. This function is called to reserve space
11252 11299 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
11253 11300 * mstate. Returns the new offset in the buffer, or a negative value if an
11254 11301 * error has occurred.
11255 11302 */
11256 11303 static intptr_t
11257 11304 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
11258 11305 dtrace_state_t *state, dtrace_mstate_t *mstate)
11259 11306 {
11260 11307 intptr_t offs = buf->dtb_offset, soffs;
11261 11308 intptr_t woffs;
11262 11309 caddr_t tomax;
11263 11310 size_t total;
11264 11311
11265 11312 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
11266 11313 return (-1);
11267 11314
11268 11315 if ((tomax = buf->dtb_tomax) == NULL) {
11269 11316 dtrace_buffer_drop(buf);
11270 11317 return (-1);
11271 11318 }
11272 11319
11273 11320 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
11274 11321 while (offs & (align - 1)) {
11275 11322 /*
11276 11323 * Assert that our alignment is off by a number which
11277 11324 * is itself sizeof (uint32_t) aligned.
11278 11325 */
11279 11326 ASSERT(!((align - (offs & (align - 1))) &
11280 11327 (sizeof (uint32_t) - 1)));
11281 11328 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11282 11329 offs += sizeof (uint32_t);
11283 11330 }
11284 11331
11285 11332 if ((soffs = offs + needed) > buf->dtb_size) {
11286 11333 dtrace_buffer_drop(buf);
11287 11334 return (-1);
11288 11335 }
11289 11336
11290 11337 if (mstate == NULL)
11291 11338 return (offs);
11292 11339
11293 11340 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
11294 11341 mstate->dtms_scratch_size = buf->dtb_size - soffs;
11295 11342 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11296 11343
11297 11344 return (offs);
11298 11345 }
11299 11346
11300 11347 if (buf->dtb_flags & DTRACEBUF_FILL) {
11301 11348 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
11302 11349 (buf->dtb_flags & DTRACEBUF_FULL))
11303 11350 return (-1);
11304 11351 goto out;
11305 11352 }
11306 11353
11307 11354 total = needed + (offs & (align - 1));
11308 11355
11309 11356 /*
11310 11357 * For a ring buffer, life is quite a bit more complicated. Before
11311 11358 * we can store any padding, we need to adjust our wrapping offset.
11312 11359 * (If we've never before wrapped or we're not about to, no adjustment
11313 11360 * is required.)
11314 11361 */
11315 11362 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
11316 11363 offs + total > buf->dtb_size) {
11317 11364 woffs = buf->dtb_xamot_offset;
11318 11365
11319 11366 if (offs + total > buf->dtb_size) {
11320 11367 /*
11321 11368 * We can't fit in the end of the buffer. First, a
11322 11369 * sanity check that we can fit in the buffer at all.
11323 11370 */
11324 11371 if (total > buf->dtb_size) {
11325 11372 dtrace_buffer_drop(buf);
11326 11373 return (-1);
11327 11374 }
11328 11375
11329 11376 /*
11330 11377 * We're going to be storing at the top of the buffer,
11331 11378 * so now we need to deal with the wrapped offset. We
11332 11379 * only reset our wrapped offset to 0 if it is
11333 11380 * currently greater than the current offset. If it
11334 11381 * is less than the current offset, it is because a
11335 11382 * previous allocation induced a wrap -- but the
11336 11383 * allocation didn't subsequently take the space due
11337 11384 * to an error or false predicate evaluation. In this
11338 11385 * case, we'll just leave the wrapped offset alone: if
11339 11386 * the wrapped offset hasn't been advanced far enough
11340 11387 * for this allocation, it will be adjusted in the
11341 11388 * lower loop.
11342 11389 */
11343 11390 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
11344 11391 if (woffs >= offs)
11345 11392 woffs = 0;
11346 11393 } else {
11347 11394 woffs = 0;
11348 11395 }
11349 11396
11350 11397 /*
11351 11398 * Now we know that we're going to be storing to the
11352 11399 * top of the buffer and that there is room for us
11353 11400 * there. We need to clear the buffer from the current
11354 11401 * offset to the end (there may be old gunk there).
11355 11402 */
11356 11403 while (offs < buf->dtb_size)
11357 11404 tomax[offs++] = 0;
11358 11405
11359 11406 /*
11360 11407 * We need to set our offset to zero. And because we
11361 11408 * are wrapping, we need to set the bit indicating as
11362 11409 * much. We can also adjust our needed space back
11363 11410 * down to the space required by the ECB -- we know
11364 11411 * that the top of the buffer is aligned.
11365 11412 */
11366 11413 offs = 0;
11367 11414 total = needed;
11368 11415 buf->dtb_flags |= DTRACEBUF_WRAPPED;
11369 11416 } else {
11370 11417 /*
11371 11418 * There is room for us in the buffer, so we simply
11372 11419 * need to check the wrapped offset.
11373 11420 */
11374 11421 if (woffs < offs) {
11375 11422 /*
11376 11423 * The wrapped offset is less than the offset.
11377 11424 * This can happen if we allocated buffer space
11378 11425 * that induced a wrap, but then we didn't
11379 11426 * subsequently take the space due to an error
11380 11427 * or false predicate evaluation. This is
11381 11428 * okay; we know that _this_ allocation isn't
11382 11429 * going to induce a wrap. We still can't
11383 11430 * reset the wrapped offset to be zero,
11384 11431 * however: the space may have been trashed in
11385 11432 * the previous failed probe attempt. But at
11386 11433 * least the wrapped offset doesn't need to
11387 11434 * be adjusted at all...
11388 11435 */
11389 11436 goto out;
11390 11437 }
11391 11438 }
11392 11439
11393 11440 while (offs + total > woffs) {
11394 11441 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
11395 11442 size_t size;
11396 11443
11397 11444 if (epid == DTRACE_EPIDNONE) {
11398 11445 size = sizeof (uint32_t);
11399 11446 } else {
11400 11447 ASSERT3U(epid, <=, state->dts_necbs);
11401 11448 ASSERT(state->dts_ecbs[epid - 1] != NULL);
11402 11449
11403 11450 size = state->dts_ecbs[epid - 1]->dte_size;
11404 11451 }
11405 11452
11406 11453 ASSERT(woffs + size <= buf->dtb_size);
11407 11454 ASSERT(size != 0);
11408 11455
11409 11456 if (woffs + size == buf->dtb_size) {
11410 11457 /*
11411 11458 * We've reached the end of the buffer; we want
11412 11459 * to set the wrapped offset to 0 and break
11413 11460 * out. However, if the offs is 0, then we're
11414 11461 * in a strange edge-condition: the amount of
11415 11462 * space that we want to reserve plus the size
11416 11463 * of the record that we're overwriting is
11417 11464 * greater than the size of the buffer. This
11418 11465 * is problematic because if we reserve the
11419 11466 * space but subsequently don't consume it (due
11420 11467 * to a failed predicate or error) the wrapped
11421 11468 * offset will be 0 -- yet the EPID at offset 0
11422 11469 * will not be committed. This situation is
11423 11470 * relatively easy to deal with: if we're in
11424 11471 * this case, the buffer is indistinguishable
11425 11472 * from one that hasn't wrapped; we need only
11426 11473 * finish the job by clearing the wrapped bit,
11427 11474 * explicitly setting the offset to be 0, and
11428 11475 * zero'ing out the old data in the buffer.
11429 11476 */
11430 11477 if (offs == 0) {
11431 11478 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
11432 11479 buf->dtb_offset = 0;
11433 11480 woffs = total;
11434 11481
11435 11482 while (woffs < buf->dtb_size)
11436 11483 tomax[woffs++] = 0;
11437 11484 }
11438 11485
11439 11486 woffs = 0;
11440 11487 break;
11441 11488 }
11442 11489
11443 11490 woffs += size;
11444 11491 }
11445 11492
11446 11493 /*
11447 11494 * We have a wrapped offset. It may be that the wrapped offset
11448 11495 * has become zero -- that's okay.
11449 11496 */
11450 11497 buf->dtb_xamot_offset = woffs;
11451 11498 }
11452 11499
11453 11500 out:
11454 11501 /*
11455 11502 * Now we can plow the buffer with any necessary padding.
11456 11503 */
11457 11504 while (offs & (align - 1)) {
11458 11505 /*
11459 11506 * Assert that our alignment is off by a number which
11460 11507 * is itself sizeof (uint32_t) aligned.
11461 11508 */
11462 11509 ASSERT(!((align - (offs & (align - 1))) &
11463 11510 (sizeof (uint32_t) - 1)));
11464 11511 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11465 11512 offs += sizeof (uint32_t);
11466 11513 }
11467 11514
11468 11515 if (buf->dtb_flags & DTRACEBUF_FILL) {
11469 11516 if (offs + needed > buf->dtb_size - state->dts_reserve) {
11470 11517 buf->dtb_flags |= DTRACEBUF_FULL;
11471 11518 return (-1);
11472 11519 }
11473 11520 }
11474 11521
11475 11522 if (mstate == NULL)
11476 11523 return (offs);
11477 11524
11478 11525 /*
11479 11526 * For ring buffers and fill buffers, the scratch space is always
11480 11527 * the inactive buffer.
11481 11528 */
11482 11529 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
11483 11530 mstate->dtms_scratch_size = buf->dtb_size;
11484 11531 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11485 11532
11486 11533 return (offs);
11487 11534 }
11488 11535
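For the common switching-buffer case (neither DTRACEBUF_RING nor DTRACEBUF_FILL), dtrace_buffer_reserve() above reduces to: pad the current offset up to the record alignment by storing 32-bit DTRACE_EPIDNONE markers that a consumer can skip, then fail with a drop if the record no longer fits. A minimal userland sketch of just that path follows; struct buf, EPIDNONE, and buffer_reserve() are local stand-ins, and the sketch assumes offsets and sizes are multiples of four, as the padding assertion in the kernel code above requires. Unlike the kernel, where dtrace_probe() commits the new offset only after the record has been fully written, this sketch advances the offset immediately.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define	EPIDNONE	0	/* stand-in for DTRACE_EPIDNONE */

struct buf {
	char		*tomax;		/* active buffer */
	size_t		size;		/* buffer size */
	size_t		offset;		/* current offset */
	uint64_t	drops;		/* drop count */
};

static intptr_t
buffer_reserve(struct buf *b, size_t needed, size_t align)
{
	size_t offs = b->offset;
	uint32_t none = EPIDNONE;

	/* Pad to the record alignment with 32-bit EPIDNONE markers. */
	while (offs & (align - 1)) {
		memcpy(b->tomax + offs, &none, sizeof (none));
		offs += sizeof (uint32_t);
	}

	if (offs + needed > b->size) {
		b->drops++;
		return (-1);
	}

	b->offset = offs + needed;
	return ((intptr_t)offs);
}

int
main(void)
{
	static char space[64];
	struct buf b = { space, sizeof (space), 12, 0 };
	intptr_t at = buffer_reserve(&b, 16, 8);

	/* Pads 12 up to 16, then reserves 16 bytes: prints 16 and 32. */
	printf("record at offset %ld, new offset %zu\n", (long)at, b.offset);
	return (0);
}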
11489 11536 static void
11490 11537 dtrace_buffer_polish(dtrace_buffer_t *buf)
11491 11538 {
11492 11539 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
11493 11540 ASSERT(MUTEX_HELD(&dtrace_lock));
11494 11541
11495 11542 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11496 11543 return;
11497 11544
11498 11545 /*
11499 11546 * We need to polish the ring buffer. There are three cases:
11500 11547 *
11501 11548 * - The first (and presumably most common) is that there is no gap
11502 11549 * between the buffer offset and the wrapped offset. In this case,
11503 11550 * there is nothing in the buffer that isn't valid data; we can
11504 11551 * mark the buffer as polished and return.
11505 11552 *
11506 11553 * - The second (less common than the first but still more common
11507 11554 * than the third) is that there is a gap between the buffer offset
11508 11555 * and the wrapped offset, and the wrapped offset is larger than the
11509 11556 * buffer offset. This can happen because of an alignment issue, or
11510 11557 * can happen because of a call to dtrace_buffer_reserve() that
11511 11558 * didn't subsequently consume the buffer space. In this case,
11512 11559 * we need to zero the data from the buffer offset to the wrapped
11513 11560 * offset.
11514 11561 *
11515 11562 * - The third (and least common) is that there is a gap between the
11516 11563 * buffer offset and the wrapped offset, but the wrapped offset is
11517 11564 * _less_ than the buffer offset. This can only happen because a
11518 11565 * call to dtrace_buffer_reserve() induced a wrap, but the space
11519 11566 * was not subsequently consumed. In this case, we need to zero the
11520 11567 * space from the offset to the end of the buffer _and_ from the
11521 11568 * top of the buffer to the wrapped offset.
11522 11569 */
11523 11570 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11524 11571 bzero(buf->dtb_tomax + buf->dtb_offset,
11525 11572 buf->dtb_xamot_offset - buf->dtb_offset);
11526 11573 }
11527 11574
11528 11575 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11529 11576 bzero(buf->dtb_tomax + buf->dtb_offset,
11530 11577 buf->dtb_size - buf->dtb_offset);
11531 11578 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11532 11579 }
11533 11580 }
11534 11581
11535 11582 /*
11536 11583 * This routine determines if data generated at the specified time has likely
11537 11584 * been entirely consumed at user-level. This routine is called to determine
11538 11585 * if an ECB on a defunct probe (but for an active enabling) can be safely
11539 11586 * disabled and destroyed.
11540 11587 */
11541 11588 static int
11542 11589 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
11543 11590 {
11544 11591 int i;
11545 11592
11546 11593 for (i = 0; i < NCPU; i++) {
11547 11594 dtrace_buffer_t *buf = &bufs[i];
11548 11595
11549 11596 if (buf->dtb_size == 0)
11550 11597 continue;
11551 11598
11552 11599 if (buf->dtb_flags & DTRACEBUF_RING)
11553 11600 return (0);
11554 11601
11555 11602 if (!buf->dtb_switched && buf->dtb_offset != 0)
11556 11603 return (0);
11557 11604
11558 11605 if (buf->dtb_switched - buf->dtb_interval < when)
11559 11606 return (0);
11560 11607 }
11561 11608
11562 11609 return (1);
11563 11610 }
11564 11611
11565 11612 static void
11566 11613 dtrace_buffer_free(dtrace_buffer_t *bufs)
11567 11614 {
11568 11615 int i;
11569 11616
11570 11617 for (i = 0; i < NCPU; i++) {
11571 11618 dtrace_buffer_t *buf = &bufs[i];
11572 11619
11573 11620 if (buf->dtb_tomax == NULL) {
11574 11621 ASSERT(buf->dtb_xamot == NULL);
11575 11622 ASSERT(buf->dtb_size == 0);
11576 11623 continue;
11577 11624 }
11578 11625
11579 11626 if (buf->dtb_xamot != NULL) {
11580 11627 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11581 11628 kmem_free(buf->dtb_xamot, buf->dtb_size);
11582 11629 }
11583 11630
11584 11631 kmem_free(buf->dtb_tomax, buf->dtb_size);
11585 11632 buf->dtb_size = 0;
11586 11633 buf->dtb_tomax = NULL;
11587 11634 buf->dtb_xamot = NULL;
11588 11635 }
11589 11636 }
11590 11637
11591 11638 /*
11592 11639 * DTrace Enabling Functions
11593 11640 */
11594 11641 static dtrace_enabling_t *
11595 11642 dtrace_enabling_create(dtrace_vstate_t *vstate)
11596 11643 {
11597 11644 dtrace_enabling_t *enab;
11598 11645
11599 11646 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11600 11647 enab->dten_vstate = vstate;
11601 11648
11602 11649 return (enab);
11603 11650 }
11604 11651
11605 11652 static void
11606 11653 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11607 11654 {
11608 11655 dtrace_ecbdesc_t **ndesc;
11609 11656 size_t osize, nsize;
11610 11657
11611 11658 /*
11612 11659 * We can't add to enablings after we've enabled them, or after we've
11613 11660 * retained them.
11614 11661 */
11615 11662 ASSERT(enab->dten_probegen == 0);
11616 11663 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11617 11664
11618 11665 if (enab->dten_ndesc < enab->dten_maxdesc) {
11619 11666 enab->dten_desc[enab->dten_ndesc++] = ecb;
11620 11667 return;
11621 11668 }
11622 11669
11623 11670 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11624 11671
11625 11672 if (enab->dten_maxdesc == 0) {
11626 11673 enab->dten_maxdesc = 1;
11627 11674 } else {
11628 11675 enab->dten_maxdesc <<= 1;
11629 11676 }
11630 11677
11631 11678 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11632 11679
11633 11680 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11634 11681 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11635 11682 bcopy(enab->dten_desc, ndesc, osize);
11636 11683 kmem_free(enab->dten_desc, osize);
11637 11684
11638 11685 enab->dten_desc = ndesc;
11639 11686 enab->dten_desc[enab->dten_ndesc++] = ecb;
11640 11687 }
11641 11688
11642 11689 static void
11643 11690 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11644 11691 dtrace_probedesc_t *pd)
11645 11692 {
11646 11693 dtrace_ecbdesc_t *new;
11647 11694 dtrace_predicate_t *pred;
11648 11695 dtrace_actdesc_t *act;
11649 11696
11650 11697 /*
11651 11698 * We're going to create a new ECB description that matches the
11652 11699 * specified ECB in every way, but has the specified probe description.
11653 11700 */
11654 11701 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11655 11702
11656 11703 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11657 11704 dtrace_predicate_hold(pred);
11658 11705
11659 11706 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11660 11707 dtrace_actdesc_hold(act);
11661 11708
11662 11709 new->dted_action = ecb->dted_action;
11663 11710 new->dted_pred = ecb->dted_pred;
11664 11711 new->dted_probe = *pd;
11665 11712 new->dted_uarg = ecb->dted_uarg;
11666 11713
11667 11714 dtrace_enabling_add(enab, new);
11668 11715 }
11669 11716
11670 11717 static void
11671 11718 dtrace_enabling_dump(dtrace_enabling_t *enab)
11672 11719 {
11673 11720 int i;
11674 11721
11675 11722 for (i = 0; i < enab->dten_ndesc; i++) {
11676 11723 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
11677 11724
11678 11725 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
11679 11726 desc->dtpd_provider, desc->dtpd_mod,
11680 11727 desc->dtpd_func, desc->dtpd_name);
11681 11728 }
11682 11729 }
11683 11730
11684 11731 static void
11685 11732 dtrace_enabling_destroy(dtrace_enabling_t *enab)
11686 11733 {
11687 11734 int i;
11688 11735 dtrace_ecbdesc_t *ep;
11689 11736 dtrace_vstate_t *vstate = enab->dten_vstate;
11690 11737
11691 11738 ASSERT(MUTEX_HELD(&dtrace_lock));
11692 11739
11693 11740 for (i = 0; i < enab->dten_ndesc; i++) {
11694 11741 dtrace_actdesc_t *act, *next;
11695 11742 dtrace_predicate_t *pred;
11696 11743
11697 11744 ep = enab->dten_desc[i];
11698 11745
11699 11746 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
11700 11747 dtrace_predicate_release(pred, vstate);
11701 11748
11702 11749 for (act = ep->dted_action; act != NULL; act = next) {
11703 11750 next = act->dtad_next;
11704 11751 dtrace_actdesc_release(act, vstate);
11705 11752 }
11706 11753
11707 11754 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11708 11755 }
11709 11756
11710 11757 kmem_free(enab->dten_desc,
11711 11758 	    enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *));
11712 11759
11713 11760 /*
11714 11761 * If this was a retained enabling, decrement the dts_nretained count
11715 11762 * and take it off of the dtrace_retained list.
11716 11763 */
11717 11764 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
11718 11765 dtrace_retained == enab) {
11719 11766 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11720 11767 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11721 11768 enab->dten_vstate->dtvs_state->dts_nretained--;
11722 11769 dtrace_retained_gen++;
11723 11770 }
11724 11771
11725 11772 if (enab->dten_prev == NULL) {
11726 11773 if (dtrace_retained == enab) {
11727 11774 dtrace_retained = enab->dten_next;
11728 11775
11729 11776 if (dtrace_retained != NULL)
11730 11777 dtrace_retained->dten_prev = NULL;
11731 11778 }
11732 11779 } else {
11733 11780 ASSERT(enab != dtrace_retained);
11734 11781 ASSERT(dtrace_retained != NULL);
11735 11782 enab->dten_prev->dten_next = enab->dten_next;
11736 11783 }
11737 11784
11738 11785 if (enab->dten_next != NULL) {
11739 11786 ASSERT(dtrace_retained != NULL);
11740 11787 enab->dten_next->dten_prev = enab->dten_prev;
11741 11788 }
11742 11789
11743 11790 kmem_free(enab, sizeof (dtrace_enabling_t));
11744 11791 }
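
The retained list manipulated here (and in dtrace_enabling_retain() below) is a doubly-linked list with a bare head pointer and no sentinel, so removal must special-case the head. The insert/unlink pair in isolation (hypothetical node type; the generation counter mirrors dtrace_retained_gen):

    struct node {
        struct node *prev, *next;
    };

    static struct node *head;   /* plays the role of dtrace_retained */
    static unsigned gen;        /* plays the role of dtrace_retained_gen */

    static void
    node_retain(struct node *n)         /* head insertion */
    {
        gen++;
        n->next = head;
        if (head != NULL)
            head->prev = n;
        head = n;
    }

    static void
    node_unlink(struct node *n)         /* tail of the destroy path */
    {
        gen++;
        if (n->prev == NULL) {
            if (head == n) {
                head = n->next;
                if (head != NULL)
                    head->prev = NULL;
            }
        } else {
            n->prev->next = n->next;
        }
        if (n->next != NULL)
            n->next->prev = n->prev;
    }
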
11745 11792
11746 11793 static int
11747 11794 dtrace_enabling_retain(dtrace_enabling_t *enab)
11748 11795 {
11749 11796 dtrace_state_t *state;
11750 11797
11751 11798 ASSERT(MUTEX_HELD(&dtrace_lock));
11752 11799 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11753 11800 ASSERT(enab->dten_vstate != NULL);
11754 11801
11755 11802 state = enab->dten_vstate->dtvs_state;
11756 11803 ASSERT(state != NULL);
11757 11804
11758 11805 /*
11759 11806 * We only allow each state to retain dtrace_retain_max enablings.
11760 11807 */
11761 11808 if (state->dts_nretained >= dtrace_retain_max)
11762 11809 return (ENOSPC);
11763 11810
11764 11811 state->dts_nretained++;
11765 11812 dtrace_retained_gen++;
11766 11813
11767 11814 if (dtrace_retained == NULL) {
11768 11815 dtrace_retained = enab;
11769 11816 return (0);
11770 11817 }
11771 11818
11772 11819 enab->dten_next = dtrace_retained;
11773 11820 dtrace_retained->dten_prev = enab;
11774 11821 dtrace_retained = enab;
11775 11822
11776 11823 return (0);
11777 11824 }
11778 11825
11779 11826 static int
11780 11827 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11781 11828 dtrace_probedesc_t *create)
11782 11829 {
11783 11830 dtrace_enabling_t *new, *enab;
11784 11831 int found = 0, err = ENOENT;
11785 11832
11786 11833 ASSERT(MUTEX_HELD(&dtrace_lock));
11787 11834 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11788 11835 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11789 11836 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11790 11837 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11791 11838
11792 11839 new = dtrace_enabling_create(&state->dts_vstate);
11793 11840
11794 11841 /*
11795 11842 * Iterate over all retained enablings, looking for enablings that
11796 11843 * match the specified state.
11797 11844 */
11798 11845 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11799 11846 int i;
11800 11847
11801 11848 /*
11802 11849 * dtvs_state can only be NULL for helper enablings -- and
11803 11850 * helper enablings can't be retained.
11804 11851 */
11805 11852 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11806 11853
11807 11854 if (enab->dten_vstate->dtvs_state != state)
11808 11855 continue;
11809 11856
11810 11857 /*
11811 11858 * Now iterate over each probe description; we're looking for
11812 11859 * an exact match to the specified probe description.
11813 11860 */
11814 11861 for (i = 0; i < enab->dten_ndesc; i++) {
11815 11862 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11816 11863 dtrace_probedesc_t *pd = &ep->dted_probe;
11817 11864
11818 11865 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11819 11866 continue;
11820 11867
11821 11868 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11822 11869 continue;
11823 11870
11824 11871 if (strcmp(pd->dtpd_func, match->dtpd_func))
11825 11872 continue;
11826 11873
11827 11874 if (strcmp(pd->dtpd_name, match->dtpd_name))
11828 11875 continue;
11829 11876
11830 11877 /*
11831 11878 * We have a winning probe! Add it to our growing
11832 11879 * enabling.
11833 11880 */
11834 11881 found = 1;
11835 11882 dtrace_enabling_addlike(new, ep, create);
11836 11883 }
11837 11884 }
11838 11885
11839 11886 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11840 11887 dtrace_enabling_destroy(new);
11841 11888 return (err);
11842 11889 }
11843 11890
11844 11891 return (0);
11845 11892 }
11846 11893
11847 11894 static void
11848 11895 dtrace_enabling_retract(dtrace_state_t *state)
11849 11896 {
11850 11897 dtrace_enabling_t *enab, *next;
11851 11898
11852 11899 ASSERT(MUTEX_HELD(&dtrace_lock));
11853 11900
11854 11901 /*
11855 11902 	 * Iterate over all retained enablings, destroying the enablings retained
11856 11903 * for the specified state.
11857 11904 */
11858 11905 for (enab = dtrace_retained; enab != NULL; enab = next) {
11859 11906 next = enab->dten_next;
11860 11907
11861 11908 /*
11862 11909 * dtvs_state can only be NULL for helper enablings -- and
11863 11910 * helper enablings can't be retained.
11864 11911 */
11865 11912 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11866 11913
11867 11914 if (enab->dten_vstate->dtvs_state == state) {
11868 11915 ASSERT(state->dts_nretained > 0);
11869 11916 dtrace_enabling_destroy(enab);
11870 11917 }
11871 11918 }
11872 11919
11873 11920 ASSERT(state->dts_nretained == 0);
11874 11921 }
11875 11922
11876 11923 static int
11877 11924 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11878 11925 {
11879 11926 int i = 0;
11880 11927 int total_matched = 0, matched = 0;
11881 11928
11882 11929 ASSERT(MUTEX_HELD(&cpu_lock));
11883 11930 ASSERT(MUTEX_HELD(&dtrace_lock));
11884 11931
11885 11932 for (i = 0; i < enab->dten_ndesc; i++) {
11886 11933 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11887 11934
11888 11935 enab->dten_current = ep;
11889 11936 enab->dten_error = 0;
11890 11937
11891 11938 /*
11892 11939 * If a provider failed to enable a probe then get out and
11893 11940 * let the consumer know we failed.
11894 11941 */
11895 11942 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
11896 11943 return (EBUSY);
11897 11944
11898 11945 total_matched += matched;
11899 11946
11900 11947 if (enab->dten_error != 0) {
11901 11948 /*
11902 11949 * If we get an error half-way through enabling the
11903 11950 * probes, we kick out -- perhaps with some number of
11904 11951 * them enabled. Leaving enabled probes enabled may
11905 11952 * be slightly confusing for user-level, but we expect
11906 11953 * that no one will attempt to actually drive on in
11907 11954 * the face of such errors. If this is an anonymous
11908 11955 * enabling (indicated with a NULL nmatched pointer),
11909 11956 * we cmn_err() a message. We aren't expecting to
11910 11957 			 * get such an error -- insofar as such an error can exist
11911 11958 			 * at all, it would be a result of corrupted DOF in the driver
11912 11959 * properties.
11913 11960 */
11914 11961 if (nmatched == NULL) {
11915 11962 cmn_err(CE_WARN, "dtrace_enabling_match() "
11916 11963 "error on %p: %d", (void *)ep,
11917 11964 enab->dten_error);
11918 11965 }
11919 11966
11920 11967 return (enab->dten_error);
11921 11968 }
11922 11969 }
11923 11970
11924 11971 enab->dten_probegen = dtrace_probegen;
11925 11972 if (nmatched != NULL)
11926 11973 *nmatched = total_matched;
11927 11974
11928 11975 return (0);
11929 11976 }
11930 11977
11931 11978 static void
11932 11979 dtrace_enabling_matchall(void)
11933 11980 {
11934 11981 dtrace_enabling_t *enab;
11935 11982
11936 11983 mutex_enter(&cpu_lock);
11937 11984 mutex_enter(&dtrace_lock);
11938 11985
11939 11986 /*
11940 11987 * Iterate over all retained enablings to see if any probes match
11941 11988 * against them. We only perform this operation on enablings for which
11942 11989 * we have sufficient permissions by virtue of being in the global zone
11943 11990 * or in the same zone as the DTrace client. Because we can be called
11944 11991 * after dtrace_detach() has been called, we cannot assert that there
11945 11992 * are retained enablings. We can safely load from dtrace_retained,
11946 11993 * however: the taskq_destroy() at the end of dtrace_detach() will
11947 11994 * block pending our completion.
11948 11995 */
11949 11996 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11950 11997 dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred;
11951 11998 cred_t *cr = dcr->dcr_cred;
11952 11999 zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0;
11953 12000
11954 12001 if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL &&
11955 12002 (zone == GLOBAL_ZONEID || getzoneid() == zone)))
11956 12003 (void) dtrace_enabling_match(enab, NULL);
11957 12004 }
11958 12005
11959 12006 mutex_exit(&dtrace_lock);
11960 12007 mutex_exit(&cpu_lock);
11961 12008 }
11962 12009
11963 12010 /*
11964 12011 * If an enabling is to be enabled without having matched probes (that is, if
11965 12012 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11966 12013 * enabling must be _primed_ by creating an ECB for every ECB description.
11967 12014 * This must be done to assure that we know the number of speculations, the
11968 12015 * number of aggregations, the minimum buffer size needed, etc. before we
11969 12016 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11970 12017  * enabling any probes, we create ECBs for every ECB description, but with a
11971 12018 * NULL probe -- which is exactly what this function does.
11972 12019 */
11973 12020 static void
11974 12021 dtrace_enabling_prime(dtrace_state_t *state)
11975 12022 {
11976 12023 dtrace_enabling_t *enab;
11977 12024 int i;
11978 12025
11979 12026 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11980 12027 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11981 12028
11982 12029 if (enab->dten_vstate->dtvs_state != state)
11983 12030 continue;
11984 12031
11985 12032 /*
11986 12033 * We don't want to prime an enabling more than once, lest
11987 12034 * we allow a malicious user to induce resource exhaustion.
11988 12035 * (The ECBs that result from priming an enabling aren't
11989 12036 * leaked -- but they also aren't deallocated until the
11990 12037 * consumer state is destroyed.)
11991 12038 */
11992 12039 if (enab->dten_primed)
11993 12040 continue;
11994 12041
11995 12042 for (i = 0; i < enab->dten_ndesc; i++) {
11996 12043 enab->dten_current = enab->dten_desc[i];
11997 12044 (void) dtrace_probe_enable(NULL, enab);
11998 12045 }
11999 12046
12000 12047 enab->dten_primed = 1;
12001 12048 }
12002 12049 }
12003 12050
12004 12051 /*
12005 12052 * Called to indicate that probes should be provided due to retained
12006 12053 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
12007 12054  * must take an initial lap through the enablings, calling the dtps_provide()
12008 12055 * entry point explicitly to allow for autocreated probes.
12009 12056 */
12010 12057 static void
12011 12058 dtrace_enabling_provide(dtrace_provider_t *prv)
12012 12059 {
12013 12060 int i, all = 0;
12014 12061 dtrace_probedesc_t desc;
12015 12062 dtrace_genid_t gen;
12016 12063
12017 12064 ASSERT(MUTEX_HELD(&dtrace_lock));
12018 12065 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
12019 12066
12020 12067 if (prv == NULL) {
12021 12068 all = 1;
12022 12069 prv = dtrace_provider;
12023 12070 }
12024 12071
12025 12072 do {
12026 12073 dtrace_enabling_t *enab;
12027 12074 void *parg = prv->dtpv_arg;
12028 12075
12029 12076 retry:
12030 12077 gen = dtrace_retained_gen;
12031 12078 for (enab = dtrace_retained; enab != NULL;
12032 12079 enab = enab->dten_next) {
12033 12080 for (i = 0; i < enab->dten_ndesc; i++) {
12034 12081 desc = enab->dten_desc[i]->dted_probe;
12035 12082 mutex_exit(&dtrace_lock);
12036 12083 prv->dtpv_pops.dtps_provide(parg, &desc);
12037 12084 mutex_enter(&dtrace_lock);
12038 12085 /*
12039 12086 * Process the retained enablings again if
12040 12087 * they have changed while we weren't holding
12041 12088 * dtrace_lock.
12042 12089 */
12043 12090 if (gen != dtrace_retained_gen)
12044 12091 goto retry;
12045 12092 }
12046 12093 }
12047 12094 } while (all && (prv = prv->dtpv_next) != NULL);
12048 12095
12049 12096 mutex_exit(&dtrace_lock);
12050 12097 dtrace_probe_provide(NULL, all ? NULL : prv);
12051 12098 mutex_enter(&dtrace_lock);
12052 12099 }
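
Because dtrace_lock must be dropped around the dtps_provide() callout, the retained list can change underfoot; the generation counter detects any mutation and restarts the walk rather than chasing a stale pointer. The same optimistic pattern in miniature (hypothetical lock and list names):

    #include <pthread.h>

    struct node { struct node *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned list_gen;       /* bumped on every list mutation */
    static struct node *list_head;

    static void
    walk_with_callout(void (*callout)(struct node *))
    {
        struct node *n;
        unsigned gen;

        pthread_mutex_lock(&list_lock);
    retry:
        gen = list_gen;
        for (n = list_head; n != NULL; n = n->next) {
            pthread_mutex_unlock(&list_lock);
            callout(n);             /* may block; list may mutate */
            pthread_mutex_lock(&list_lock);
            if (gen != list_gen)
                goto retry;         /* n may be stale; start over */
        }
        pthread_mutex_unlock(&list_lock);
    }
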
12053 12100
12054 12101 /*
12055 12102 * Called to reap ECBs that are attached to probes from defunct providers.
12056 12103 */
12057 12104 static void
12058 12105 dtrace_enabling_reap(void)
12059 12106 {
12060 12107 dtrace_provider_t *prov;
12061 12108 dtrace_probe_t *probe;
12062 12109 dtrace_ecb_t *ecb;
12063 12110 hrtime_t when;
12064 12111 int i;
12065 12112
12066 12113 mutex_enter(&cpu_lock);
12067 12114 mutex_enter(&dtrace_lock);
12068 12115
12069 12116 for (i = 0; i < dtrace_nprobes; i++) {
12070 12117 if ((probe = dtrace_probes[i]) == NULL)
12071 12118 continue;
12072 12119
12073 12120 if (probe->dtpr_ecb == NULL)
12074 12121 continue;
12075 12122
12076 12123 prov = probe->dtpr_provider;
12077 12124
12078 12125 if ((when = prov->dtpv_defunct) == 0)
12079 12126 continue;
12080 12127
12081 12128 /*
12082 12129 * We have ECBs on a defunct provider: we want to reap these
12083 12130 * ECBs to allow the provider to unregister. The destruction
12084 12131 * of these ECBs must be done carefully: if we destroy the ECB
12085 12132 * and the consumer later wishes to consume an EPID that
12086 12133 * corresponds to the destroyed ECB (and if the EPID metadata
12087 12134 * has not been previously consumed), the consumer will abort
12088 12135 * processing on the unknown EPID. To reduce (but not, sadly,
12089 12136 * eliminate) the possibility of this, we will only destroy an
12090 12137 * ECB for a defunct provider if, for the state that
12091 12138 * corresponds to the ECB:
12092 12139 *
12093 12140 * (a) There is no speculative tracing (which can effectively
12094 12141 * cache an EPID for an arbitrary amount of time).
12095 12142 *
12096 12143 * (b) The principal buffers have been switched twice since the
12097 12144 * provider became defunct.
12098 12145 *
12099 12146 * (c) The aggregation buffers are of zero size or have been
12100 12147 * switched twice since the provider became defunct.
12101 12148 *
12102 12149 * We use dts_speculates to determine (a) and call a function
12103 12150 * (dtrace_buffer_consumed()) to determine (b) and (c). Note
12104 12151 * that as soon as we've been unable to destroy one of the ECBs
12105 12152 * associated with the probe, we quit trying -- reaping is only
12106 12153 		 * fruitful inasmuch as we can destroy all ECBs associated
12107 12154 * with the defunct provider's probes.
12108 12155 */
12109 12156 while ((ecb = probe->dtpr_ecb) != NULL) {
12110 12157 dtrace_state_t *state = ecb->dte_state;
12111 12158 dtrace_buffer_t *buf = state->dts_buffer;
12112 12159 dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
12113 12160
12114 12161 if (state->dts_speculates)
12115 12162 break;
12116 12163
12117 12164 if (!dtrace_buffer_consumed(buf, when))
12118 12165 break;
12119 12166
12120 12167 if (!dtrace_buffer_consumed(aggbuf, when))
12121 12168 break;
12122 12169
12123 12170 dtrace_ecb_disable(ecb);
12124 12171 ASSERT(probe->dtpr_ecb != ecb);
12125 12172 dtrace_ecb_destroy(ecb);
12126 12173 }
12127 12174 }
12128 12175
12129 12176 mutex_exit(&dtrace_lock);
12130 12177 mutex_exit(&cpu_lock);
12131 12178 }
12132 12179
12133 12180 /*
12134 12181 * DTrace DOF Functions
12135 12182 */
12136 12183 /*ARGSUSED*/
12137 12184 static void
12138 12185 dtrace_dof_error(dof_hdr_t *dof, const char *str)
12139 12186 {
12140 12187 if (dtrace_err_verbose)
12141 12188 cmn_err(CE_WARN, "failed to process DOF: %s", str);
12142 12189
12143 12190 #ifdef DTRACE_ERRDEBUG
12144 12191 dtrace_errdebug(str);
12145 12192 #endif
12146 12193 }
12147 12194
12148 12195 /*
12149 12196 * Create DOF out of a currently enabled state. Right now, we only create
12150 12197 * DOF containing the run-time options -- but this could be expanded to create
12151 12198 * complete DOF representing the enabled state.
12152 12199 */
12153 12200 static dof_hdr_t *
12154 12201 dtrace_dof_create(dtrace_state_t *state)
12155 12202 {
12156 12203 dof_hdr_t *dof;
12157 12204 dof_sec_t *sec;
12158 12205 dof_optdesc_t *opt;
12159 12206 int i, len = sizeof (dof_hdr_t) +
12160 12207 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12161 12208 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12162 12209
12163 12210 ASSERT(MUTEX_HELD(&dtrace_lock));
12164 12211
12165 12212 dof = kmem_zalloc(len, KM_SLEEP);
12166 12213 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12167 12214 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12168 12215 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12169 12216 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12170 12217
12171 12218 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12172 12219 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12173 12220 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
12174 12221 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
12175 12222 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
12176 12223 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
12177 12224
12178 12225 dof->dofh_flags = 0;
12179 12226 dof->dofh_hdrsize = sizeof (dof_hdr_t);
12180 12227 dof->dofh_secsize = sizeof (dof_sec_t);
12181 12228 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
12182 12229 dof->dofh_secoff = sizeof (dof_hdr_t);
12183 12230 dof->dofh_loadsz = len;
12184 12231 dof->dofh_filesz = len;
12185 12232 dof->dofh_pad = 0;
12186 12233
12187 12234 /*
12188 12235 * Fill in the option section header...
12189 12236 */
12190 12237 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
12191 12238 sec->dofs_type = DOF_SECT_OPTDESC;
12192 12239 sec->dofs_align = sizeof (uint64_t);
12193 12240 sec->dofs_flags = DOF_SECF_LOAD;
12194 12241 sec->dofs_entsize = sizeof (dof_optdesc_t);
12195 12242
12196 12243 opt = (dof_optdesc_t *)((uintptr_t)sec +
12197 12244 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
12198 12245
12199 12246 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
12200 12247 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12201 12248
12202 12249 for (i = 0; i < DTRACEOPT_MAX; i++) {
12203 12250 opt[i].dofo_option = i;
12204 12251 opt[i].dofo_strtab = DOF_SECIDX_NONE;
12205 12252 opt[i].dofo_value = state->dts_options[i];
12206 12253 }
12207 12254
12208 12255 return (dof);
12209 12256 }
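
The DOF built here is one allocation with a fixed shape: the header, then a single section header rounded up to 8-byte alignment, then the option array that section describes. A sketch of the offset arithmetic (the structure sizes below are placeholders, not the real <sys/dtrace.h> values):

    #include <stddef.h>

    #define ROUNDUP(x, a)   (((x) + (a) - 1) & ~((size_t)(a) - 1))

    #define HDRSIZE 64      /* placeholder for sizeof (dof_hdr_t) */
    #define SECSIZE 44      /* placeholder for sizeof (dof_sec_t) */
    #define OPTSIZE 24      /* placeholder for sizeof (dof_optdesc_t) */
    #define NOPTS   32      /* placeholder for DTRACEOPT_MAX */

    size_t secoff = HDRSIZE;                        /* dofh_secoff */
    size_t optoff = HDRSIZE + ROUNDUP(SECSIZE, 8);  /* sec->dofs_offset */
    size_t len = optoff + OPTSIZE * NOPTS;          /* dofh_loadsz == dofh_filesz */
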
12210 12257
12211 12258 static dof_hdr_t *
12212 12259 dtrace_dof_copyin(uintptr_t uarg, int *errp)
12213 12260 {
12214 12261 dof_hdr_t hdr, *dof;
12215 12262
12216 12263 ASSERT(!MUTEX_HELD(&dtrace_lock));
12217 12264
12218 12265 /*
12219 12266 * First, we're going to copyin() the sizeof (dof_hdr_t).
12220 12267 */
12221 12268 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
12222 12269 dtrace_dof_error(NULL, "failed to copyin DOF header");
12223 12270 *errp = EFAULT;
12224 12271 return (NULL);
12225 12272 }
12226 12273
12227 12274 /*
12228 12275 * Now we'll allocate the entire DOF and copy it in -- provided
12229 12276 * that the length isn't outrageous.
12230 12277 */
12231 12278 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
12232 12279 dtrace_dof_error(&hdr, "load size exceeds maximum");
12233 12280 *errp = E2BIG;
12234 12281 return (NULL);
12235 12282 }
12236 12283
12237 12284 if (hdr.dofh_loadsz < sizeof (hdr)) {
12238 12285 dtrace_dof_error(&hdr, "invalid load size");
12239 12286 *errp = EINVAL;
12240 12287 return (NULL);
12241 12288 }
12242 12289
12243 12290 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
12244 12291
12245 12292 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
12246 12293 dof->dofh_loadsz != hdr.dofh_loadsz) {
12247 12294 kmem_free(dof, hdr.dofh_loadsz);
12248 12295 *errp = EFAULT;
12249 12296 return (NULL);
12250 12297 }
12251 12298
12252 12299 return (dof);
12253 12300 }
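
The two-pass copyin above is the standard defense for variable-length user data: read just the header, sanity-check the claimed size, then copy the whole object and re-check the embedded size, since the caller may have changed it between the two copies. A userland analogue (copy_from_user() is a hypothetical stand-in for copyin()):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define MAXSIZE (256 * 1024)        /* stands in for dtrace_dof_maxsize */

    struct hdr { uint64_t loadsz; };    /* stands in for dof_hdr_t */

    extern int copy_from_user(void *dst, const void *usrc, size_t len);

    static struct hdr *
    hdr_slurp(const void *uarg, int *errp)
    {
        struct hdr h, *buf;

        /* Pass 1: the header only, to learn the claimed total size. */
        if (copy_from_user(&h, uarg, sizeof (h)) != 0) {
            *errp = EFAULT;
            return (NULL);
        }
        if (h.loadsz >= MAXSIZE) {
            *errp = E2BIG;
            return (NULL);
        }
        if (h.loadsz < sizeof (h)) {
            *errp = EINVAL;
            return (NULL);
        }

        if ((buf = malloc(h.loadsz)) == NULL) {
            *errp = ENOMEM;
            return (NULL);
        }

        /* Pass 2: the whole object; re-check the size we trusted. */
        if (copy_from_user(buf, uarg, h.loadsz) != 0 ||
            buf->loadsz != h.loadsz) {
            free(buf);
            *errp = EFAULT;
            return (NULL);
        }
        return (buf);
    }
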
12254 12301
12255 12302 static dof_hdr_t *
12256 12303 dtrace_dof_property(const char *name)
12257 12304 {
12258 12305 uchar_t *buf;
12259 12306 uint64_t loadsz;
12260 12307 unsigned int len, i;
12261 12308 dof_hdr_t *dof;
12262 12309
12263 12310 /*
12264 12311 	 * Unfortunately, arrays of values in .conf files are always (and
12265 12312 * only) interpreted to be integer arrays. We must read our DOF
12266 12313 * as an integer array, and then squeeze it into a byte array.
12267 12314 */
12268 12315 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
12269 12316 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
12270 12317 return (NULL);
12271 12318
12272 12319 for (i = 0; i < len; i++)
12273 12320 buf[i] = (uchar_t)(((int *)buf)[i]);
12274 12321
12275 12322 if (len < sizeof (dof_hdr_t)) {
12276 12323 ddi_prop_free(buf);
12277 12324 dtrace_dof_error(NULL, "truncated header");
12278 12325 return (NULL);
12279 12326 }
12280 12327
12281 12328 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
12282 12329 ddi_prop_free(buf);
12283 12330 dtrace_dof_error(NULL, "truncated DOF");
12284 12331 return (NULL);
12285 12332 }
12286 12333
12287 12334 if (loadsz >= dtrace_dof_maxsize) {
12288 12335 ddi_prop_free(buf);
12289 12336 dtrace_dof_error(NULL, "oversized DOF");
12290 12337 return (NULL);
12291 12338 }
12292 12339
12293 12340 dof = kmem_alloc(loadsz, KM_SLEEP);
12294 12341 bcopy(buf, dof, loadsz);
12295 12342 ddi_prop_free(buf);
12296 12343
12297 12344 return (dof);
12298 12345 }
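
The in-place squeeze above is safe because the byte-sized write cursor can never overtake the int-sized read cursor: buf[i] is written only after ((int *)buf)[i] (bytes 4i..4i+3) has been read. A standalone demonstration of the same loop shape (hypothetical data):

    #include <stdio.h>

    int
    main(void)
    {
        /* ddi_prop_lookup_int_array() yields one int per DOF byte. */
        int raw[] = { 0x7f, 'D', 'O', 'F' };
        unsigned char *buf = (unsigned char *)raw;
        unsigned int i, len = sizeof (raw) / sizeof (raw[0]);

        for (i = 0; i < len; i++)
            buf[i] = (unsigned char)(((int *)buf)[i]);

        for (i = 0; i < len; i++)
            printf("%02x ", buf[i]);    /* prints: 7f 44 4f 46 */
        return (0);
    }
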
12299 12346
12300 12347 static void
12301 12348 dtrace_dof_destroy(dof_hdr_t *dof)
12302 12349 {
12303 12350 kmem_free(dof, dof->dofh_loadsz);
12304 12351 }
12305 12352
12306 12353 /*
12307 12354 * Return the dof_sec_t pointer corresponding to a given section index. If the
12308 12355 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
12309 12356 * a type other than DOF_SECT_NONE is specified, the header is checked against
12310 12357 * this type and NULL is returned if the types do not match.
12311 12358 */
12312 12359 static dof_sec_t *
12313 12360 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
12314 12361 {
12315 12362 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
12316 12363 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
12317 12364
12318 12365 if (i >= dof->dofh_secnum) {
12319 12366 dtrace_dof_error(dof, "referenced section index is invalid");
12320 12367 return (NULL);
12321 12368 }
12322 12369
12323 12370 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
12324 12371 dtrace_dof_error(dof, "referenced section is not loadable");
12325 12372 return (NULL);
12326 12373 }
12327 12374
12328 12375 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
12329 12376 dtrace_dof_error(dof, "referenced section is the wrong type");
12330 12377 return (NULL);
12331 12378 }
12332 12379
12333 12380 return (sec);
12334 12381 }
12335 12382
12336 12383 static dtrace_probedesc_t *
12337 12384 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
12338 12385 {
12339 12386 dof_probedesc_t *probe;
12340 12387 dof_sec_t *strtab;
12341 12388 uintptr_t daddr = (uintptr_t)dof;
12342 12389 uintptr_t str;
12343 12390 size_t size;
12344 12391
12345 12392 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
12346 12393 dtrace_dof_error(dof, "invalid probe section");
12347 12394 return (NULL);
12348 12395 }
12349 12396
12350 12397 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12351 12398 dtrace_dof_error(dof, "bad alignment in probe description");
12352 12399 return (NULL);
12353 12400 }
12354 12401
12355 12402 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
12356 12403 dtrace_dof_error(dof, "truncated probe description");
12357 12404 return (NULL);
12358 12405 }
12359 12406
12360 12407 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
12361 12408 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
12362 12409
12363 12410 if (strtab == NULL)
12364 12411 return (NULL);
12365 12412
12366 12413 str = daddr + strtab->dofs_offset;
12367 12414 size = strtab->dofs_size;
12368 12415
12369 12416 if (probe->dofp_provider >= strtab->dofs_size) {
12370 12417 dtrace_dof_error(dof, "corrupt probe provider");
12371 12418 return (NULL);
12372 12419 }
12373 12420
12374 12421 (void) strncpy(desc->dtpd_provider,
12375 12422 (char *)(str + probe->dofp_provider),
12376 12423 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
12377 12424
12378 12425 if (probe->dofp_mod >= strtab->dofs_size) {
12379 12426 dtrace_dof_error(dof, "corrupt probe module");
12380 12427 return (NULL);
12381 12428 }
12382 12429
12383 12430 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
12384 12431 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
12385 12432
12386 12433 if (probe->dofp_func >= strtab->dofs_size) {
12387 12434 dtrace_dof_error(dof, "corrupt probe function");
12388 12435 return (NULL);
12389 12436 }
12390 12437
12391 12438 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
12392 12439 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
12393 12440
12394 12441 if (probe->dofp_name >= strtab->dofs_size) {
12395 12442 dtrace_dof_error(dof, "corrupt probe name");
12396 12443 return (NULL);
12397 12444 }
12398 12445
12399 12446 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
12400 12447 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
12401 12448
12402 12449 return (desc);
12403 12450 }
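
Each of the four names is pulled from the string table with the same guard: reject an offset at or past the end of the table, then bound the copy by both the destination and the bytes remaining in the table; the destination is zeroed (kmem_zalloc() upstream), so copying at most dstlen - 1 bytes keeps it NUL-terminated. A sketch of the helper this could be factored into (hypothetical function; the code above inlines it four times):

    #include <string.h>

    #ifndef MIN
    #define MIN(a, b)   ((a) < (b) ? (a) : (b))
    #endif

    static int
    strtab_copy(char *dst, size_t dstlen, const char *tab, size_t tabsize,
        size_t offs)
    {
        if (offs >= tabsize)
            return (-1);    /* caller reports "corrupt probe ..." */
        (void) strncpy(dst, tab + offs, MIN(dstlen - 1, tabsize - offs));
        return (0);
    }
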
12404 12451
12405 12452 static dtrace_difo_t *
12406 12453 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12407 12454 cred_t *cr)
12408 12455 {
12409 12456 dtrace_difo_t *dp;
12410 12457 size_t ttl = 0;
12411 12458 dof_difohdr_t *dofd;
12412 12459 uintptr_t daddr = (uintptr_t)dof;
12413 12460 size_t max = dtrace_difo_maxsize;
12414 12461 int i, l, n;
12415 12462
12416 12463 static const struct {
12417 12464 int section;
12418 12465 int bufoffs;
12419 12466 int lenoffs;
12420 12467 int entsize;
12421 12468 int align;
12422 12469 const char *msg;
12423 12470 } difo[] = {
12424 12471 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12425 12472 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12426 12473 sizeof (dif_instr_t), "multiple DIF sections" },
12427 12474
12428 12475 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12429 12476 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12430 12477 sizeof (uint64_t), "multiple integer tables" },
12431 12478
12432 12479 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12433 12480 offsetof(dtrace_difo_t, dtdo_strlen), 0,
12434 12481 sizeof (char), "multiple string tables" },
12435 12482
12436 12483 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12437 12484 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12438 12485 sizeof (uint_t), "multiple variable tables" },
12439 12486
12440 12487 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
12441 12488 };
12442 12489
12443 12490 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12444 12491 dtrace_dof_error(dof, "invalid DIFO header section");
12445 12492 return (NULL);
12446 12493 }
12447 12494
12448 12495 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12449 12496 dtrace_dof_error(dof, "bad alignment in DIFO header");
12450 12497 return (NULL);
12451 12498 }
12452 12499
12453 12500 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12454 12501 sec->dofs_size % sizeof (dof_secidx_t)) {
12455 12502 dtrace_dof_error(dof, "bad size in DIFO header");
12456 12503 return (NULL);
12457 12504 }
12458 12505
12459 12506 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12460 12507 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
12461 12508
12462 12509 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12463 12510 dp->dtdo_rtype = dofd->dofd_rtype;
12464 12511
12465 12512 for (l = 0; l < n; l++) {
12466 12513 dof_sec_t *subsec;
12467 12514 void **bufp;
12468 12515 uint32_t *lenp;
12469 12516
12470 12517 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12471 12518 dofd->dofd_links[l])) == NULL)
12472 12519 goto err; /* invalid section link */
12473 12520
12474 12521 if (ttl + subsec->dofs_size > max) {
12475 12522 dtrace_dof_error(dof, "exceeds maximum size");
12476 12523 goto err;
12477 12524 }
12478 12525
12479 12526 ttl += subsec->dofs_size;
12480 12527
12481 12528 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12482 12529 if (subsec->dofs_type != difo[i].section)
12483 12530 continue;
12484 12531
12485 12532 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12486 12533 dtrace_dof_error(dof, "section not loaded");
12487 12534 goto err;
12488 12535 }
12489 12536
12490 12537 if (subsec->dofs_align != difo[i].align) {
12491 12538 dtrace_dof_error(dof, "bad alignment");
12492 12539 goto err;
12493 12540 }
12494 12541
12495 12542 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12496 12543 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12497 12544
12498 12545 if (*bufp != NULL) {
12499 12546 dtrace_dof_error(dof, difo[i].msg);
12500 12547 goto err;
12501 12548 }
12502 12549
12503 12550 if (difo[i].entsize != subsec->dofs_entsize) {
12504 12551 dtrace_dof_error(dof, "entry size mismatch");
12505 12552 goto err;
12506 12553 }
12507 12554
12508 12555 if (subsec->dofs_entsize != 0 &&
12509 12556 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12510 12557 dtrace_dof_error(dof, "corrupt entry size");
12511 12558 goto err;
12512 12559 }
12513 12560
12514 12561 *lenp = subsec->dofs_size;
12515 12562 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12516 12563 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12517 12564 *bufp, subsec->dofs_size);
12518 12565
12519 12566 if (subsec->dofs_entsize != 0)
12520 12567 *lenp /= subsec->dofs_entsize;
12521 12568
12522 12569 break;
12523 12570 }
12524 12571
12525 12572 /*
12526 12573 * If we encounter a loadable DIFO sub-section that is not
12527 12574 * known to us, assume this is a broken program and fail.
12528 12575 */
12529 12576 if (difo[i].section == DOF_SECT_NONE &&
12530 12577 (subsec->dofs_flags & DOF_SECF_LOAD)) {
12531 12578 dtrace_dof_error(dof, "unrecognized DIFO subsection");
12532 12579 goto err;
12533 12580 }
12534 12581 }
12535 12582
12536 12583 if (dp->dtdo_buf == NULL) {
12537 12584 /*
12538 12585 * We can't have a DIF object without DIF text.
12539 12586 */
12540 12587 dtrace_dof_error(dof, "missing DIF text");
12541 12588 goto err;
12542 12589 }
12543 12590
12544 12591 /*
12545 12592 * Before we validate the DIF object, run through the variable table
12546 12593 	 * looking for strings -- if any of them have an unspecified (zero)
12547 12594 	 * size, we'll set it to the system-wide default string size. Note that
12548 12595 * this should _not_ happen if the "strsize" option has been set --
12549 12596 * in this case, the compiler should have set the size to reflect the
12550 12597 * setting of the option.
12551 12598 */
12552 12599 for (i = 0; i < dp->dtdo_varlen; i++) {
12553 12600 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12554 12601 dtrace_diftype_t *t = &v->dtdv_type;
12555 12602
12556 12603 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12557 12604 continue;
12558 12605
12559 12606 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12560 12607 t->dtdt_size = dtrace_strsize_default;
12561 12608 }
12562 12609
12563 12610 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12564 12611 goto err;
12565 12612
12566 12613 dtrace_difo_init(dp, vstate);
12567 12614 return (dp);
12568 12615
12569 12616 err:
12570 12617 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12571 12618 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12572 12619 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12573 12620 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12574 12621
12575 12622 kmem_free(dp, sizeof (dtrace_difo_t));
12576 12623 return (NULL);
12577 12624 }
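
The difo[] table is what keeps this function short: each row pairs a section type with the offsetof() locations of its buffer pointer and length inside dtrace_difo_t, plus the expected entry size and alignment, so one loop body handles all four section kinds. The offsetof dispatch reduced to two fields (hypothetical target struct):

    #include <stddef.h>
    #include <stdint.h>

    struct target {
        void *buf;          /* like dtdo_buf */
        uint32_t len;       /* like dtdo_len */
        void *strtab;       /* like dtdo_strtab */
        uint32_t slen;      /* like dtdo_strlen */
    };

    static const struct {
        int section;        /* tag to match */
        size_t bufoffs;     /* where the buffer pointer lives */
        size_t lenoffs;     /* where the length lives */
    } rows[] = {
        { 1, offsetof(struct target, buf), offsetof(struct target, len) },
        { 2, offsetof(struct target, strtab), offsetof(struct target, slen) },
        { 0, 0, 0 }         /* terminator, like DOF_SECT_NONE */
    };

    static void
    store(struct target *t, int section, void *data, uint32_t size)
    {
        int i;

        for (i = 0; rows[i].section != 0; i++) {
            if (rows[i].section != section)
                continue;
            /* Poke through the table's offsets, not named fields. */
            *(void **)((uintptr_t)t + rows[i].bufoffs) = data;
            *(uint32_t *)((uintptr_t)t + rows[i].lenoffs) = size;
            break;
        }
    }
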
12578 12625
12579 12626 static dtrace_predicate_t *
12580 12627 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12581 12628 cred_t *cr)
12582 12629 {
12583 12630 dtrace_difo_t *dp;
12584 12631
12585 12632 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12586 12633 return (NULL);
12587 12634
12588 12635 return (dtrace_predicate_create(dp));
12589 12636 }
12590 12637
12591 12638 static dtrace_actdesc_t *
12592 12639 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12593 12640 cred_t *cr)
12594 12641 {
12595 12642 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12596 12643 dof_actdesc_t *desc;
12597 12644 dof_sec_t *difosec;
12598 12645 size_t offs;
12599 12646 uintptr_t daddr = (uintptr_t)dof;
12600 12647 uint64_t arg;
12601 12648 dtrace_actkind_t kind;
12602 12649
12603 12650 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12604 12651 dtrace_dof_error(dof, "invalid action section");
12605 12652 return (NULL);
12606 12653 }
12607 12654
12608 12655 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12609 12656 dtrace_dof_error(dof, "truncated action description");
12610 12657 return (NULL);
12611 12658 }
12612 12659
12613 12660 if (sec->dofs_align != sizeof (uint64_t)) {
12614 12661 dtrace_dof_error(dof, "bad alignment in action description");
12615 12662 return (NULL);
12616 12663 }
12617 12664
12618 12665 if (sec->dofs_size < sec->dofs_entsize) {
12619 12666 dtrace_dof_error(dof, "section entry size exceeds total size");
12620 12667 return (NULL);
12621 12668 }
12622 12669
12623 12670 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12624 12671 dtrace_dof_error(dof, "bad entry size in action description");
12625 12672 return (NULL);
12626 12673 }
12627 12674
12628 12675 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12629 12676 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12630 12677 return (NULL);
12631 12678 }
12632 12679
12633 12680 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12634 12681 desc = (dof_actdesc_t *)(daddr +
12635 12682 (uintptr_t)sec->dofs_offset + offs);
12636 12683 kind = (dtrace_actkind_t)desc->dofa_kind;
12637 12684
12638 12685 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
12639 12686 (kind != DTRACEACT_PRINTA ||
12640 12687 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
12641 12688 (kind == DTRACEACT_DIFEXPR &&
12642 12689 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12643 12690 dof_sec_t *strtab;
12644 12691 char *str, *fmt;
12645 12692 uint64_t i;
12646 12693
12647 12694 /*
12648 12695 * The argument to these actions is an index into the
12649 12696 * DOF string table. For printf()-like actions, this
12650 12697 * is the format string. For print(), this is the
12651 12698 * CTF type of the expression result.
12652 12699 */
12653 12700 if ((strtab = dtrace_dof_sect(dof,
12654 12701 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12655 12702 goto err;
12656 12703
12657 12704 str = (char *)((uintptr_t)dof +
12658 12705 (uintptr_t)strtab->dofs_offset);
12659 12706
12660 12707 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12661 12708 if (str[i] == '\0')
12662 12709 break;
12663 12710 }
12664 12711
12665 12712 if (i >= strtab->dofs_size) {
12666 12713 dtrace_dof_error(dof, "bogus format string");
12667 12714 goto err;
12668 12715 }
12669 12716
12670 12717 if (i == desc->dofa_arg) {
12671 12718 dtrace_dof_error(dof, "empty format string");
12672 12719 goto err;
12673 12720 }
12674 12721
12675 12722 i -= desc->dofa_arg;
12676 12723 fmt = kmem_alloc(i + 1, KM_SLEEP);
12677 12724 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12678 12725 arg = (uint64_t)(uintptr_t)fmt;
12679 12726 } else {
12680 12727 if (kind == DTRACEACT_PRINTA) {
12681 12728 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12682 12729 arg = 0;
12683 12730 } else {
12684 12731 arg = desc->dofa_arg;
12685 12732 }
12686 12733 }
12687 12734
12688 12735 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12689 12736 desc->dofa_uarg, arg);
12690 12737
12691 12738 if (last != NULL) {
12692 12739 last->dtad_next = act;
12693 12740 } else {
12694 12741 first = act;
12695 12742 }
12696 12743
12697 12744 last = act;
12698 12745
12699 12746 if (desc->dofa_difo == DOF_SECIDX_NONE)
12700 12747 continue;
12701 12748
12702 12749 if ((difosec = dtrace_dof_sect(dof,
12703 12750 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12704 12751 goto err;
12705 12752
12706 12753 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12707 12754
12708 12755 if (act->dtad_difo == NULL)
12709 12756 goto err;
12710 12757 }
12711 12758
12712 12759 ASSERT(first != NULL);
12713 12760 return (first);
12714 12761
12715 12762 err:
12716 12763 for (act = first; act != NULL; act = next) {
12717 12764 next = act->dtad_next;
12718 12765 dtrace_actdesc_release(act, vstate);
12719 12766 }
12720 12767
12721 12768 return (NULL);
12722 12769 }
12723 12770
12724 12771 static dtrace_ecbdesc_t *
12725 12772 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12726 12773 cred_t *cr)
12727 12774 {
12728 12775 dtrace_ecbdesc_t *ep;
12729 12776 dof_ecbdesc_t *ecb;
12730 12777 dtrace_probedesc_t *desc;
12731 12778 dtrace_predicate_t *pred = NULL;
12732 12779
12733 12780 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12734 12781 dtrace_dof_error(dof, "truncated ECB description");
12735 12782 return (NULL);
12736 12783 }
12737 12784
12738 12785 if (sec->dofs_align != sizeof (uint64_t)) {
12739 12786 dtrace_dof_error(dof, "bad alignment in ECB description");
12740 12787 return (NULL);
12741 12788 }
12742 12789
12743 12790 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12744 12791 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12745 12792
12746 12793 if (sec == NULL)
12747 12794 return (NULL);
12748 12795
12749 12796 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12750 12797 ep->dted_uarg = ecb->dofe_uarg;
12751 12798 desc = &ep->dted_probe;
12752 12799
12753 12800 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12754 12801 goto err;
12755 12802
12756 12803 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12757 12804 if ((sec = dtrace_dof_sect(dof,
12758 12805 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12759 12806 goto err;
12760 12807
12761 12808 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12762 12809 goto err;
12763 12810
12764 12811 ep->dted_pred.dtpdd_predicate = pred;
12765 12812 }
12766 12813
12767 12814 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12768 12815 if ((sec = dtrace_dof_sect(dof,
12769 12816 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12770 12817 goto err;
12771 12818
12772 12819 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12773 12820
12774 12821 if (ep->dted_action == NULL)
12775 12822 goto err;
12776 12823 }
12777 12824
12778 12825 return (ep);
12779 12826
12780 12827 err:
12781 12828 if (pred != NULL)
12782 12829 dtrace_predicate_release(pred, vstate);
12783 12830 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12784 12831 return (NULL);
12785 12832 }
12786 12833
12787 12834 /*
12788 12835 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12789 12836 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12790 12837  * site of any user SETX relocations to account for the load object's base address.
12791 12838 * In the future, if we need other relocations, this function can be extended.
12792 12839 */
12793 12840 static int
12794 12841 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12795 12842 {
12796 12843 uintptr_t daddr = (uintptr_t)dof;
12797 12844 dof_relohdr_t *dofr =
12798 12845 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12799 12846 dof_sec_t *ss, *rs, *ts;
12800 12847 dof_relodesc_t *r;
12801 12848 uint_t i, n;
12802 12849
12803 12850 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12804 12851 sec->dofs_align != sizeof (dof_secidx_t)) {
12805 12852 dtrace_dof_error(dof, "invalid relocation header");
12806 12853 return (-1);
12807 12854 }
12808 12855
12809 12856 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12810 12857 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12811 12858 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12812 12859
12813 12860 if (ss == NULL || rs == NULL || ts == NULL)
12814 12861 return (-1); /* dtrace_dof_error() has been called already */
12815 12862
12816 12863 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12817 12864 rs->dofs_align != sizeof (uint64_t)) {
12818 12865 dtrace_dof_error(dof, "invalid relocation section");
12819 12866 return (-1);
12820 12867 }
12821 12868
12822 12869 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12823 12870 n = rs->dofs_size / rs->dofs_entsize;
12824 12871
12825 12872 for (i = 0; i < n; i++) {
12826 12873 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12827 12874
12828 12875 switch (r->dofr_type) {
12829 12876 case DOF_RELO_NONE:
12830 12877 break;
12831 12878 case DOF_RELO_SETX:
12832 12879 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12833 12880 sizeof (uint64_t) > ts->dofs_size) {
12834 12881 dtrace_dof_error(dof, "bad relocation offset");
12835 12882 return (-1);
12836 12883 }
12837 12884
12838 12885 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12839 12886 dtrace_dof_error(dof, "misaligned setx relo");
12840 12887 return (-1);
12841 12888 }
12842 12889
12843 12890 *(uint64_t *)taddr += ubase;
12844 12891 break;
12845 12892 default:
12846 12893 dtrace_dof_error(dof, "invalid relocation type");
12847 12894 return (-1);
12848 12895 }
12849 12896
12850 12897 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12851 12898 }
12852 12899
12853 12900 return (0);
12854 12901 }
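
A SETX relocation is nothing more than adding the load bias to one 64-bit slot in the target section: if the slot at offset 8 holds 0x1000 and the object is mapped at ubase 0x7f0000000000, the patched value is 0x7f0000001000. In isolation (hypothetical helper; memcpy() sidesteps the alignment check the in-place kernel store requires):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static int
    relocate_setx(unsigned char *sec, size_t secsize, uint64_t offset,
        uint64_t ubase)
    {
        uint64_t v;

        /* Both checks, as above: in range, with room for 8 bytes. */
        if (offset >= secsize || offset + sizeof (uint64_t) > secsize)
            return (-1);

        memcpy(&v, sec + offset, sizeof (v));
        v += ubase;
        memcpy(sec + offset, &v, sizeof (v));
        return (0);
    }
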
12855 12902
12856 12903 /*
12857 12904 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12858 12905 * header: it should be at the front of a memory region that is at least
12859 12906 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12860 12907 * size. It need not be validated in any other way.
12861 12908 */
12862 12909 static int
12863 12910 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12864 12911 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12865 12912 {
12866 12913 uint64_t len = dof->dofh_loadsz, seclen;
12867 12914 uintptr_t daddr = (uintptr_t)dof;
12868 12915 dtrace_ecbdesc_t *ep;
12869 12916 dtrace_enabling_t *enab;
12870 12917 uint_t i;
12871 12918
12872 12919 ASSERT(MUTEX_HELD(&dtrace_lock));
12873 12920 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12874 12921
12875 12922 /*
12876 12923 * Check the DOF header identification bytes. In addition to checking
12877 12924 * valid settings, we also verify that unused bits/bytes are zeroed so
12878 12925 * we can use them later without fear of regressing existing binaries.
12879 12926 */
12880 12927 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12881 12928 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12882 12929 dtrace_dof_error(dof, "DOF magic string mismatch");
12883 12930 return (-1);
12884 12931 }
12885 12932
12886 12933 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12887 12934 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12888 12935 dtrace_dof_error(dof, "DOF has invalid data model");
12889 12936 return (-1);
12890 12937 }
12891 12938
12892 12939 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12893 12940 dtrace_dof_error(dof, "DOF encoding mismatch");
12894 12941 return (-1);
12895 12942 }
12896 12943
12897 12944 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12898 12945 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12899 12946 dtrace_dof_error(dof, "DOF version mismatch");
12900 12947 return (-1);
12901 12948 }
12902 12949
12903 12950 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12904 12951 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12905 12952 return (-1);
12906 12953 }
12907 12954
12908 12955 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12909 12956 dtrace_dof_error(dof, "DOF uses too many integer registers");
12910 12957 return (-1);
12911 12958 }
12912 12959
12913 12960 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12914 12961 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12915 12962 return (-1);
12916 12963 }
12917 12964
12918 12965 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12919 12966 if (dof->dofh_ident[i] != 0) {
12920 12967 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12921 12968 return (-1);
12922 12969 }
12923 12970 }
12924 12971
12925 12972 if (dof->dofh_flags & ~DOF_FL_VALID) {
12926 12973 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12927 12974 return (-1);
12928 12975 }
12929 12976
12930 12977 if (dof->dofh_secsize == 0) {
12931 12978 dtrace_dof_error(dof, "zero section header size");
12932 12979 return (-1);
12933 12980 }
12934 12981
12935 12982 /*
12936 12983 * Check that the section headers don't exceed the amount of DOF
12937 12984 * data. Note that we cast the section size and number of sections
12938 12985 * to uint64_t's to prevent possible overflow in the multiplication.
12939 12986 */
12940 12987 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12941 12988
12942 12989 if (dof->dofh_secoff > len || seclen > len ||
12943 12990 dof->dofh_secoff + seclen > len) {
12944 12991 dtrace_dof_error(dof, "truncated section headers");
12945 12992 return (-1);
12946 12993 }
12947 12994
12948 12995 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12949 12996 dtrace_dof_error(dof, "misaligned section headers");
12950 12997 return (-1);
12951 12998 }
12952 12999
12953 13000 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12954 13001 dtrace_dof_error(dof, "misaligned section size");
12955 13002 return (-1);
12956 13003 }
12957 13004
12958 13005 /*
12959 13006 * Take an initial pass through the section headers to be sure that
12960 13007 * the headers don't have stray offsets. If the 'noprobes' flag is
12961 13008 * set, do not permit sections relating to providers, probes, or args.
12962 13009 */
12963 13010 for (i = 0; i < dof->dofh_secnum; i++) {
12964 13011 dof_sec_t *sec = (dof_sec_t *)(daddr +
12965 13012 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12966 13013
12967 13014 if (noprobes) {
12968 13015 switch (sec->dofs_type) {
12969 13016 case DOF_SECT_PROVIDER:
12970 13017 case DOF_SECT_PROBES:
12971 13018 case DOF_SECT_PRARGS:
12972 13019 case DOF_SECT_PROFFS:
12973 13020 dtrace_dof_error(dof, "illegal sections "
12974 13021 "for enabling");
12975 13022 return (-1);
12976 13023 }
12977 13024 }
12978 13025
12979 13026 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
12980 13027 !(sec->dofs_flags & DOF_SECF_LOAD)) {
12981 13028 dtrace_dof_error(dof, "loadable section with load "
12982 13029 "flag unset");
12983 13030 return (-1);
12984 13031 }
12985 13032
12986 13033 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12987 13034 continue; /* just ignore non-loadable sections */
12988 13035
12989 13036 if (sec->dofs_align & (sec->dofs_align - 1)) {
12990 13037 dtrace_dof_error(dof, "bad section alignment");
12991 13038 return (-1);
12992 13039 }
12993 13040
12994 13041 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12995 13042 dtrace_dof_error(dof, "misaligned section");
12996 13043 return (-1);
12997 13044 }
12998 13045
12999 13046 if (sec->dofs_offset > len || sec->dofs_size > len ||
13000 13047 sec->dofs_offset + sec->dofs_size > len) {
13001 13048 dtrace_dof_error(dof, "corrupt section header");
13002 13049 return (-1);
13003 13050 }
13004 13051
13005 13052 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
13006 13053 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
13007 13054 dtrace_dof_error(dof, "non-terminating string table");
13008 13055 return (-1);
13009 13056 }
13010 13057 }
13011 13058
13012 13059 /*
13013 13060 * Take a second pass through the sections and locate and perform any
13014 13061 * relocations that are present. We do this after the first pass to
13015 13062 * be sure that all sections have had their headers validated.
13016 13063 */
13017 13064 for (i = 0; i < dof->dofh_secnum; i++) {
13018 13065 dof_sec_t *sec = (dof_sec_t *)(daddr +
13019 13066 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13020 13067
13021 13068 if (!(sec->dofs_flags & DOF_SECF_LOAD))
13022 13069 continue; /* skip sections that are not loadable */
13023 13070
13024 13071 switch (sec->dofs_type) {
13025 13072 case DOF_SECT_URELHDR:
13026 13073 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
13027 13074 return (-1);
13028 13075 break;
13029 13076 }
13030 13077 }
13031 13078
13032 13079 if ((enab = *enabp) == NULL)
13033 13080 enab = *enabp = dtrace_enabling_create(vstate);
13034 13081
13035 13082 for (i = 0; i < dof->dofh_secnum; i++) {
13036 13083 dof_sec_t *sec = (dof_sec_t *)(daddr +
13037 13084 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13038 13085
13039 13086 if (sec->dofs_type != DOF_SECT_ECBDESC)
13040 13087 continue;
13041 13088
13042 13089 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
13043 13090 dtrace_enabling_destroy(enab);
13044 13091 *enabp = NULL;
13045 13092 return (-1);
13046 13093 }
13047 13094
13048 13095 dtrace_enabling_add(enab, ep);
13049 13096 }
13050 13097
13051 13098 return (0);
13052 13099 }
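
The widening to uint64_t in the seclen computation above is load-bearing: in 32-bit arithmetic, dofh_secnum = 0x10000000 and dofh_secsize = 0x20 multiply to 2^33, which truncates to 0 and would sail past the truncation check. The check in self-contained form (hypothetical field spellings):

    #include <stdint.h>

    /*
     * Return 0 iff [secoff, secoff + secnum * secsize) lies inside a
     * DOF of 'len' bytes, with no overflow in the multiplication.
     */
    static int
    sections_fit(uint64_t len, uint32_t secoff, uint32_t secnum,
        uint32_t secsize)
    {
        uint64_t seclen = (uint64_t)secnum * (uint64_t)secsize;

        if (secoff > len || seclen > len || secoff + seclen > len)
            return (-1);    /* "truncated section headers" */
        return (0);
    }
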
13053 13100
13054 13101 /*
13055 13102 * Process DOF for any options. This routine assumes that the DOF has been
13056 13103 * at least processed by dtrace_dof_slurp().
13057 13104 */
13058 13105 static int
13059 13106 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
13060 13107 {
13061 13108 int i, rval;
13062 13109 uint32_t entsize;
13063 13110 size_t offs;
13064 13111 dof_optdesc_t *desc;
13065 13112
13066 13113 for (i = 0; i < dof->dofh_secnum; i++) {
13067 13114 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
13068 13115 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13069 13116
13070 13117 if (sec->dofs_type != DOF_SECT_OPTDESC)
13071 13118 continue;
13072 13119
13073 13120 if (sec->dofs_align != sizeof (uint64_t)) {
13074 13121 dtrace_dof_error(dof, "bad alignment in "
13075 13122 "option description");
13076 13123 return (EINVAL);
13077 13124 }
13078 13125
13079 13126 if ((entsize = sec->dofs_entsize) == 0) {
13080 13127 dtrace_dof_error(dof, "zeroed option entry size");
13081 13128 return (EINVAL);
13082 13129 }
13083 13130
13084 13131 if (entsize < sizeof (dof_optdesc_t)) {
13085 13132 dtrace_dof_error(dof, "bad option entry size");
13086 13133 return (EINVAL);
13087 13134 }
13088 13135
13089 13136 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
13090 13137 desc = (dof_optdesc_t *)((uintptr_t)dof +
13091 13138 (uintptr_t)sec->dofs_offset + offs);
13092 13139
13093 13140 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
13094 13141 dtrace_dof_error(dof, "non-zero option string");
13095 13142 return (EINVAL);
13096 13143 }
13097 13144
13098 13145 if (desc->dofo_value == DTRACEOPT_UNSET) {
13099 13146 dtrace_dof_error(dof, "unset option");
13100 13147 return (EINVAL);
13101 13148 }
13102 13149
13103 13150 if ((rval = dtrace_state_option(state,
13104 13151 desc->dofo_option, desc->dofo_value)) != 0) {
13105 13152 dtrace_dof_error(dof, "rejected option");
13106 13153 return (rval);
13107 13154 }
13108 13155 }
13109 13156 }
13110 13157
13111 13158 return (0);
13112 13159 }
13113 13160
13114 13161 /*
13115 13162 * DTrace Consumer State Functions
13116 13163 */
13117 13164 int
13118 13165 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
13119 13166 {
13120 13167 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13121 13168 void *base;
13122 13169 uintptr_t limit;
13123 13170 dtrace_dynvar_t *dvar, *next, *start;
13124 13171 int i;
13125 13172
13126 13173 ASSERT(MUTEX_HELD(&dtrace_lock));
13127 13174 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13128 13175
13129 13176 bzero(dstate, sizeof (dtrace_dstate_t));
13130 13177
13131 13178 if ((dstate->dtds_chunksize = chunksize) == 0)
13132 13179 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13133 13180
13134 13181 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13135 13182 size = min;
13136 13183
13137 13184 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
13138 13185 return (ENOMEM);
13139 13186
13140 13187 dstate->dtds_size = size;
13141 13188 dstate->dtds_base = base;
13142 13189 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13143 13190 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13144 13191
13145 13192 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13146 13193
13147 13194 if (hashsize != 1 && (hashsize & 1))
13148 13195 hashsize--;
13149 13196
13150 13197 dstate->dtds_hashsize = hashsize;
13151 13198 dstate->dtds_hash = dstate->dtds_base;
13152 13199
13153 13200 /*
13154 13201 * Set all of our hash buckets to point to the single sink, and (if
13155 13202 * it hasn't already been set), set the sink's hash value to be the
13156 13203 * sink sentinel value. The sink is needed for dynamic variable
13157 13204 * lookups to know that they have iterated over an entire, valid hash
13158 13205 * chain.
13159 13206 */
13160 13207 for (i = 0; i < hashsize; i++)
13161 13208 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
13162 13209
13163 13210 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
13164 13211 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
13165 13212
13166 13213 /*
13167 13214 * Determine number of active CPUs. Divide free list evenly among
13168 13215 * active CPUs.
13169 13216 */
13170 13217 start = (dtrace_dynvar_t *)
13171 13218 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
13172 13219 limit = (uintptr_t)base + size;
13173 13220
13174 13221 maxper = (limit - (uintptr_t)start) / NCPU;
13175 13222 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
13176 13223
13177 13224 for (i = 0; i < NCPU; i++) {
13178 13225 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
13179 13226
13180 13227 /*
13181 13228 		 * If we don't even have enough chunks to give at least one to
13182 13229 		 * each of the NCPU CPUs, we're just going to allocate everything to the first
13183 13230 * CPU. And if we're on the last CPU, we're going to allocate
13184 13231 * whatever is left over. In either case, we set the limit to
13185 13232 * be the limit of the dynamic variable space.
13186 13233 */
13187 13234 if (maxper == 0 || i == NCPU - 1) {
13188 13235 limit = (uintptr_t)base + size;
13189 13236 start = NULL;
13190 13237 } else {
13191 13238 limit = (uintptr_t)start + maxper;
13192 13239 start = (dtrace_dynvar_t *)limit;
13193 13240 }
13194 13241
13195 13242 ASSERT(limit <= (uintptr_t)base + size);
13196 13243
13197 13244 for (;;) {
13198 13245 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
13199 13246 dstate->dtds_chunksize);
13200 13247
13201 13248 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
13202 13249 break;
13203 13250
13204 13251 dvar->dtdv_next = next;
13205 13252 dvar = next;
13206 13253 }
13207 13254
13208 13255 if (maxper == 0)
13209 13256 break;
13210 13257 }
13211 13258
13212 13259 return (0);
13213 13260 }
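
With illustrative numbers -- size = 64 KB, chunksize = 256, NCPU = 4, and assuming sizeof (dtrace_dynhash_t) == 16 -- the carve-up above works out to:

    hashsize = 65536 / (256 + 16)       = 240 buckets (3840 bytes)
    free     = 65536 - 3840             = 61696 bytes
    maxper   = (61696 / 4 / 256) * 256  = 15360 bytes (60 chunks)

CPUs 0 through 2 each receive 15360 bytes of free list; the last CPU's limit is set to the end of the region, so it absorbs the remaining 15616 bytes. When maxper computes to 0 -- fewer than NCPU chunks in total -- everything goes to CPU 0 and the loop breaks early.
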
13214 13261
13215 13262 void
13216 13263 dtrace_dstate_fini(dtrace_dstate_t *dstate)
13217 13264 {
13218 13265 ASSERT(MUTEX_HELD(&cpu_lock));
13219 13266
13220 13267 if (dstate->dtds_base == NULL)
13221 13268 return;
13222 13269
13223 13270 kmem_free(dstate->dtds_base, dstate->dtds_size);
13224 13271 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
13225 13272 }
13226 13273
13227 13274 static void
13228 13275 dtrace_vstate_fini(dtrace_vstate_t *vstate)
13229 13276 {
13230 13277 /*
13231 13278 * Logical XOR, where are you?
13232 13279 */
13233 13280 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
13234 13281
13235 13282 if (vstate->dtvs_nglobals > 0) {
13236 13283 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
13237 13284 sizeof (dtrace_statvar_t *));
13238 13285 }
13239 13286
13240 13287 if (vstate->dtvs_ntlocals > 0) {
13241 13288 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
13242 13289 sizeof (dtrace_difv_t));
13243 13290 }
13244 13291
13245 13292 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
13246 13293
13247 13294 if (vstate->dtvs_nlocals > 0) {
13248 13295 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
13249 13296 sizeof (dtrace_statvar_t *));
13250 13297 }
13251 13298 }
13252 13299
13253 13300 static void
13254 13301 dtrace_state_clean(dtrace_state_t *state)
13255 13302 {
13256 13303 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
13257 13304 return;
13258 13305
13259 13306 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
13260 13307 dtrace_speculation_clean(state);
13261 13308 }
13262 13309
13263 13310 static void
13264 13311 dtrace_state_deadman(dtrace_state_t *state)
13265 13312 {
13266 13313 hrtime_t now;
13267 13314
13268 13315 dtrace_sync();
13269 13316
13270 13317 now = dtrace_gethrtime();
13271 13318
13272 13319 if (state != dtrace_anon.dta_state &&
13273 13320 now - state->dts_laststatus >= dtrace_deadman_user)
13274 13321 return;
13275 13322
13276 13323 /*
13277 13324 * We must be sure that dts_alive never appears to be less than the
13278 13325 * value upon entry to dtrace_state_deadman(), and because we lack a
13279 13326 * dtrace_cas64(), we cannot store to it atomically. We thus instead
13280 13327 * store INT64_MAX to it, followed by a memory barrier, followed by
13281 13328 * the new value. This assures that dts_alive never appears to be
13282 13329 * less than its true value, regardless of the order in which the
13283 13330 * stores to the underlying storage are issued.
13284 13331 */
13285 13332 state->dts_alive = INT64_MAX;
13286 13333 dtrace_membar_producer();
13287 13334 state->dts_alive = now;
13288 13335 }
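
/*
 * Illustrative sketch (not part of this change; names hypothetical):
 * without a 64-bit compare-and-swap, the two-store sequence above
 * keeps a racing reader from ever observing dts_alive moving
 * backwards -- it sees the old value, INT64_MAX, or the new value.
 */
#if 0
static volatile int64_t alive;

static void
update_alive(int64_t now)
{
	alive = INT64_MAX;		/* never less than the old value */
	dtrace_membar_producer();	/* order the two stores */
	alive = now;			/* publish the new value */
}
#endif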
13289 13336
13290 13337 dtrace_state_t *
13291 13338 dtrace_state_create(dev_t *devp, cred_t *cr)
13292 13339 {
13293 13340 minor_t minor;
13294 13341 major_t major;
13295 13342 char c[30];
13296 13343 dtrace_state_t *state;
13297 13344 dtrace_optval_t *opt;
13298 13345 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
13299 13346
13300 13347 ASSERT(MUTEX_HELD(&dtrace_lock));
13301 13348 ASSERT(MUTEX_HELD(&cpu_lock));
13302 13349
13303 13350 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
13304 13351 VM_BESTFIT | VM_SLEEP);
13305 13352
13306 13353 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
13307 13354 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13308 13355 return (NULL);
13309 13356 }
13310 13357
13311 13358 state = ddi_get_soft_state(dtrace_softstate, minor);
13312 13359 state->dts_epid = DTRACE_EPIDNONE + 1;
13313 13360
13314 13361 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
13315 13362 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
13316 13363 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
13317 13364
13318 13365 if (devp != NULL) {
13319 13366 major = getemajor(*devp);
13320 13367 } else {
13321 13368 major = ddi_driver_major(dtrace_devi);
13322 13369 }
13323 13370
13324 13371 state->dts_dev = makedevice(major, minor);
13325 13372
13326 13373 if (devp != NULL)
13327 13374 *devp = state->dts_dev;
13328 13375
13329 13376 /*
13330 13377 * We allocate NCPU buffers. On the one hand, this can be quite
13331 13378 * a bit of memory per instance (nearly 36K on a Starcat). On the
13332 13379 * other hand, it saves an additional memory reference in the probe
13333 13380 * path.
13334 13381 */
13335 13382 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
13336 13383 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
13337 13384 state->dts_cleaner = CYCLIC_NONE;
13338 13385 state->dts_deadman = CYCLIC_NONE;
13339 13386 state->dts_vstate.dtvs_state = state;
13340 13387
13341 13388 for (i = 0; i < DTRACEOPT_MAX; i++)
13342 13389 state->dts_options[i] = DTRACEOPT_UNSET;
13343 13390
13344 13391 /*
13345 13392 * Set the default options.
13346 13393 */
13347 13394 opt = state->dts_options;
13348 13395 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
13349 13396 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
13350 13397 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
13351 13398 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
13352 13399 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
13353 13400 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
13354 13401 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
13355 13402 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
13356 13403 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
13357 13404 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
13358 13405 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
13359 13406 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
13360 13407 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
13361 13408 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
13362 13409
13363 13410 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
13364 13411
13365 13412 /*
13366 13413 * Depending on the user credentials, we set flag bits which alter probe
13367 13414 * visibility or the amount of destructiveness allowed. In the case of
13368 13415 * actual anonymous tracing, or the possession of all privileges, all of
13369 13416 * the normal checks are bypassed.
13370 13417 */
13371 13418 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
13372 13419 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
13373 13420 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
13374 13421 } else {
13375 13422 /*
13376 13423 * Set up the credentials for this instantiation. We take a
13377 13424 * hold on the credential to prevent it from disappearing on
13378 13425 * us; this in turn prevents the zone_t referenced by this
13379 13426 * credential from disappearing. This means that we can
13380 13427 * examine the credential and the zone from probe context.
13381 13428 */
13382 13429 crhold(cr);
13383 13430 state->dts_cred.dcr_cred = cr;
13384 13431
13385 13432 /*
13386 13433 * CRA_PROC means "we have *some* privilege for dtrace" and
13387 13434 * unlocks the use of variables like pid, zonename, etc.
13388 13435 */
13389 13436 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13390 13437 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13391 13438 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13392 13439 }
13393 13440
13394 13441 /*
13395 13442 * dtrace_user allows use of syscall and profile providers.
13396 13443 * If the user also has proc_owner and/or proc_zone, we
13397 13444 * extend the scope to include additional visibility and
13398 13445 * destructive power.
13399 13446 */
13400 13447 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13401 13448 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13402 13449 state->dts_cred.dcr_visible |=
13403 13450 DTRACE_CRV_ALLPROC;
13404 13451
13405 13452 state->dts_cred.dcr_action |=
13406 13453 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13407 13454 }
13408 13455
13409 13456 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13410 13457 state->dts_cred.dcr_visible |=
13411 13458 DTRACE_CRV_ALLZONE;
13412 13459
13413 13460 state->dts_cred.dcr_action |=
13414 13461 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13415 13462 }
13416 13463
13417 13464 /*
13418 13465 * If we have all privs in whatever zone this is,
13419 13466 * we can do destructive things to processes which
13420 13467 * have altered credentials.
13421 13468 */
13422 13469 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13423 13470 cr->cr_zone->zone_privset)) {
13424 13471 state->dts_cred.dcr_action |=
13425 13472 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13426 13473 }
13427 13474 }
13428 13475
13429 13476 /*
13430 13477 * Holding the dtrace_kernel privilege also implies that
13431 13478 * the user has the dtrace_user privilege from a visibility
13432 13479 * perspective. But without further privileges, some
13433 13480 * destructive actions are not available.
13434 13481 */
13435 13482 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13436 13483 /*
13437 13484 * Make all probes in all zones visible. However,
13438 13485 * this doesn't mean that all actions become available
13439 13486 * to all zones.
13440 13487 */
13441 13488 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13442 13489 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13443 13490
13444 13491 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13445 13492 DTRACE_CRA_PROC;
13446 13493 /*
13447 13494 * Holding proc_owner means that destructive actions
13448 13495 * for *this* zone are allowed.
13449 13496 */
13450 13497 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13451 13498 state->dts_cred.dcr_action |=
13452 13499 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13453 13500
13454 13501 /*
13455 13502 * Holding proc_zone means that destructive actions
13456 13503 * for this user/group ID in all zones is allowed.
13457 13504 		 * for this user/group ID in all zones are allowed.
13458 13505 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13459 13506 state->dts_cred.dcr_action |=
13460 13507 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13461 13508
13462 13509 /*
13463 13510 * If we have all privs in whatever zone this is,
13464 13511 * we can do destructive things to processes which
13465 13512 * have altered credentials.
13466 13513 */
13467 13514 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13468 13515 cr->cr_zone->zone_privset)) {
13469 13516 state->dts_cred.dcr_action |=
13470 13517 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13471 13518 }
13472 13519 }
13473 13520
13474 13521 /*
13475 13522 * Holding the dtrace_proc privilege gives control over fasttrap
13476 13523 * and pid providers. We need to grant wider destructive
13477 13524 * privileges in the event that the user has proc_owner and/or
13478 13525 * proc_zone.
13479 13526 */
13480 13527 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13481 13528 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13482 13529 state->dts_cred.dcr_action |=
13483 13530 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13484 13531
13485 13532 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13486 13533 state->dts_cred.dcr_action |=
13487 13534 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13488 13535 }
13489 13536 }
13490 13537
13491 13538 return (state);
13492 13539 }
13493 13540
13494 13541 static int
13495 13542 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13496 13543 {
13497 13544 dtrace_optval_t *opt = state->dts_options, size;
13498 13545 processorid_t cpu;
13499 13546 int flags = 0, rval, factor, divisor = 1;
13500 13547
13501 13548 ASSERT(MUTEX_HELD(&dtrace_lock));
13502 13549 ASSERT(MUTEX_HELD(&cpu_lock));
13503 13550 ASSERT(which < DTRACEOPT_MAX);
13504 13551 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13505 13552 (state == dtrace_anon.dta_state &&
13506 13553 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13507 13554
13508 13555 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13509 13556 return (0);
13510 13557
13511 13558 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13512 13559 cpu = opt[DTRACEOPT_CPU];
13513 13560
13514 13561 if (which == DTRACEOPT_SPECSIZE)
13515 13562 flags |= DTRACEBUF_NOSWITCH;
13516 13563
13517 13564 if (which == DTRACEOPT_BUFSIZE) {
13518 13565 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13519 13566 flags |= DTRACEBUF_RING;
13520 13567
13521 13568 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13522 13569 flags |= DTRACEBUF_FILL;
13523 13570
13524 13571 if (state != dtrace_anon.dta_state ||
13525 13572 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13526 13573 flags |= DTRACEBUF_INACTIVE;
13527 13574 }
13528 13575
13529 13576 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
13530 13577 /*
13531 13578 * The size must be 8-byte aligned. If the size is not 8-byte
13532 13579 * aligned, drop it down by the difference.
13533 13580 */
13534 13581 if (size & (sizeof (uint64_t) - 1))
13535 13582 size -= size & (sizeof (uint64_t) - 1);
13536 13583
13537 13584 if (size < state->dts_reserve) {
13538 13585 /*
13539 13586 * Buffers always must be large enough to accommodate
13540 13587 * their prereserved space. We return E2BIG instead
13541 13588 * of ENOMEM in this case to allow for user-level
13542 13589 * software to differentiate the cases.
13543 13590 */
13544 13591 return (E2BIG);
13545 13592 }
13546 13593
13547 13594 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
13548 13595
13549 13596 if (rval != ENOMEM) {
13550 13597 opt[which] = size;
13551 13598 return (rval);
13552 13599 }
13553 13600
13554 13601 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13555 13602 return (rval);
13556 13603
13557 13604 for (divisor = 2; divisor < factor; divisor <<= 1)
13558 13605 continue;
13559 13606 }
13560 13607
13561 13608 return (ENOMEM);
13562 13609 }
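
/*
 * Illustrative sketch (not part of this change; try_alloc() is
 * hypothetical): the loop above is a shrink-on-failure allocator --
 * round the request down to an 8-byte multiple, fail with E2BIG if
 * the reserve can't be met, and on ENOMEM retry at a smaller size.
 */
#if 0
static int
alloc_halving(size_t want, size_t reserve, void **bufp)
{
	size_t size;
	int divisor = 1;

	for (size = want; size >= sizeof (uint64_t); size /= divisor) {
		size &= ~(sizeof (uint64_t) - 1);	/* 8-byte align */

		if (size < reserve)
			return (E2BIG);	/* can't fit prereserved space */

		if ((*bufp = try_alloc(size)) != NULL)
			return (0);

		divisor = 2;		/* shrink and try again */
	}

	return (ENOMEM);
}
#endif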
13563 13610
13564 13611 static int
13565 13612 dtrace_state_buffers(dtrace_state_t *state)
13566 13613 {
13567 13614 dtrace_speculation_t *spec = state->dts_speculations;
13568 13615 int rval, i;
13569 13616
13570 13617 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13571 13618 DTRACEOPT_BUFSIZE)) != 0)
13572 13619 return (rval);
13573 13620
13574 13621 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13575 13622 DTRACEOPT_AGGSIZE)) != 0)
13576 13623 return (rval);
13577 13624
13578 13625 for (i = 0; i < state->dts_nspeculations; i++) {
13579 13626 if ((rval = dtrace_state_buffer(state,
13580 13627 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13581 13628 return (rval);
13582 13629 }
13583 13630
13584 13631 return (0);
13585 13632 }
13586 13633
13587 13634 static void
13588 13635 dtrace_state_prereserve(dtrace_state_t *state)
13589 13636 {
13590 13637 dtrace_ecb_t *ecb;
13591 13638 dtrace_probe_t *probe;
13592 13639
13593 13640 state->dts_reserve = 0;
13594 13641
13595 13642 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13596 13643 return;
13597 13644
13598 13645 /*
13599 13646 * If our buffer policy is a "fill" buffer policy, we need to set the
13600 13647 * prereserved space to be the space required by the END probes.
13601 13648 */
13602 13649 probe = dtrace_probes[dtrace_probeid_end - 1];
13603 13650 ASSERT(probe != NULL);
13604 13651
13605 13652 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13606 13653 if (ecb->dte_state != state)
13607 13654 continue;
13608 13655
13609 13656 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13610 13657 }
13611 13658 }
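
/*
 * Illustrative arithmetic (not part of this change): with a "fill"
 * policy and, say, two END-probe ECBs for this state needing 64 and
 * 32 bytes with 8-byte alignment each, the prereserve becomes
 * (64 + 8) + (32 + 8) = 112 bytes that ordinary records may not
 * consume, guaranteeing that the END records still fit.
 */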
13612 13659
13613 13660 static int
13614 13661 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13615 13662 {
13616 13663 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13617 13664 dtrace_speculation_t *spec;
13618 13665 dtrace_buffer_t *buf;
13619 13666 cyc_handler_t hdlr;
13620 13667 cyc_time_t when;
13621 13668 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13622 13669 dtrace_icookie_t cookie;
13623 13670
13624 13671 mutex_enter(&cpu_lock);
13625 13672 mutex_enter(&dtrace_lock);
13626 13673
13627 13674 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13628 13675 rval = EBUSY;
13629 13676 goto out;
13630 13677 }
13631 13678
13632 13679 /*
13633 13680 * Before we can perform any checks, we must prime all of the
13634 13681 * retained enablings that correspond to this state.
13635 13682 */
13636 13683 dtrace_enabling_prime(state);
13637 13684
13638 13685 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13639 13686 rval = EACCES;
13640 13687 goto out;
13641 13688 }
13642 13689
13643 13690 dtrace_state_prereserve(state);
13644 13691
13645 13692 /*
13646 13693 	 * Now what we want to do is try to allocate our speculations.
13647 13694 * We do not automatically resize the number of speculations; if
13648 13695 * this fails, we will fail the operation.
13649 13696 */
13650 13697 nspec = opt[DTRACEOPT_NSPEC];
13651 13698 ASSERT(nspec != DTRACEOPT_UNSET);
13652 13699
13653 13700 if (nspec > INT_MAX) {
13654 13701 rval = ENOMEM;
13655 13702 goto out;
13656 13703 }
13657 13704
13658 13705 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
13659 13706 KM_NOSLEEP | KM_NORMALPRI);
13660 13707
13661 13708 if (spec == NULL) {
13662 13709 rval = ENOMEM;
13663 13710 goto out;
13664 13711 }
13665 13712
13666 13713 state->dts_speculations = spec;
13667 13714 state->dts_nspeculations = (int)nspec;
13668 13715
13669 13716 for (i = 0; i < nspec; i++) {
13670 13717 if ((buf = kmem_zalloc(bufsize,
13671 13718 KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
13672 13719 rval = ENOMEM;
13673 13720 goto err;
13674 13721 }
13675 13722
13676 13723 spec[i].dtsp_buffer = buf;
13677 13724 }
13678 13725
13679 13726 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13680 13727 if (dtrace_anon.dta_state == NULL) {
13681 13728 rval = ENOENT;
13682 13729 goto out;
13683 13730 }
13684 13731
13685 13732 if (state->dts_necbs != 0) {
13686 13733 rval = EALREADY;
13687 13734 goto out;
13688 13735 }
13689 13736
13690 13737 state->dts_anon = dtrace_anon_grab();
13691 13738 ASSERT(state->dts_anon != NULL);
13692 13739 state = state->dts_anon;
13693 13740
13694 13741 /*
13695 13742 * We want "grabanon" to be set in the grabbed state, so we'll
13696 13743 * copy that option value from the grabbing state into the
13697 13744 * grabbed state.
13698 13745 */
13699 13746 state->dts_options[DTRACEOPT_GRABANON] =
13700 13747 opt[DTRACEOPT_GRABANON];
13701 13748
13702 13749 *cpu = dtrace_anon.dta_beganon;
13703 13750
13704 13751 /*
13705 13752 * If the anonymous state is active (as it almost certainly
13706 13753 * is if the anonymous enabling ultimately matched anything),
13707 13754 * we don't allow any further option processing -- but we
13708 13755 * don't return failure.
13709 13756 */
13710 13757 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13711 13758 goto out;
13712 13759 }
13713 13760
13714 13761 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13715 13762 opt[DTRACEOPT_AGGSIZE] != 0) {
13716 13763 if (state->dts_aggregations == NULL) {
13717 13764 /*
13718 13765 * We're not going to create an aggregation buffer
13719 13766 * because we don't have any ECBs that contain
13720 13767 * aggregations -- set this option to 0.
13721 13768 */
13722 13769 opt[DTRACEOPT_AGGSIZE] = 0;
13723 13770 } else {
13724 13771 /*
13725 13772 * If we have an aggregation buffer, we must also have
13726 13773 * a buffer to use as scratch.
13727 13774 */
13728 13775 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13729 13776 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13730 13777 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13731 13778 }
13732 13779 }
13733 13780 }
13734 13781
13735 13782 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13736 13783 opt[DTRACEOPT_SPECSIZE] != 0) {
13737 13784 if (!state->dts_speculates) {
13738 13785 /*
13739 13786 * We're not going to create speculation buffers
13740 13787 * because we don't have any ECBs that actually
13741 13788 * speculate -- set the speculation size to 0.
13742 13789 */
13743 13790 opt[DTRACEOPT_SPECSIZE] = 0;
13744 13791 }
13745 13792 }
13746 13793
13747 13794 /*
13748 13795 * The bare minimum size for any buffer that we're actually going to
13749 13796 * do anything to is sizeof (uint64_t).
13750 13797 */
13751 13798 sz = sizeof (uint64_t);
13752 13799
13753 13800 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13754 13801 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13755 13802 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13756 13803 /*
13757 13804 * A buffer size has been explicitly set to 0 (or to a size
13758 13805 * that will be adjusted to 0) and we need the space -- we
13759 13806 * need to return failure. We return ENOSPC to differentiate
13760 13807 * it from failing to allocate a buffer due to failure to meet
13761 13808 * the reserve (for which we return E2BIG).
13762 13809 */
13763 13810 rval = ENOSPC;
13764 13811 goto out;
13765 13812 }
13766 13813
13767 13814 if ((rval = dtrace_state_buffers(state)) != 0)
13768 13815 goto err;
13769 13816
13770 13817 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13771 13818 sz = dtrace_dstate_defsize;
13772 13819
13773 13820 do {
13774 13821 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13775 13822
13776 13823 if (rval == 0)
13777 13824 break;
13778 13825
13779 13826 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13780 13827 goto err;
13781 13828 } while (sz >>= 1);
13782 13829
13783 13830 opt[DTRACEOPT_DYNVARSIZE] = sz;
13784 13831
13785 13832 if (rval != 0)
13786 13833 goto err;
13787 13834
13788 13835 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13789 13836 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13790 13837
13791 13838 if (opt[DTRACEOPT_CLEANRATE] == 0)
13792 13839 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13793 13840
13794 13841 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13795 13842 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13796 13843
13797 13844 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13798 13845 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13799 13846
13800 13847 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13801 13848 hdlr.cyh_arg = state;
13802 13849 hdlr.cyh_level = CY_LOW_LEVEL;
13803 13850
13804 13851 when.cyt_when = 0;
13805 13852 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13806 13853
13807 13854 state->dts_cleaner = cyclic_add(&hdlr, &when);
13808 13855
13809 13856 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13810 13857 hdlr.cyh_arg = state;
13811 13858 hdlr.cyh_level = CY_LOW_LEVEL;
13812 13859
13813 13860 when.cyt_when = 0;
13814 13861 when.cyt_interval = dtrace_deadman_interval;
13815 13862
13816 13863 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13817 13864 state->dts_deadman = cyclic_add(&hdlr, &when);
13818 13865
13819 13866 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13820 13867
13821 13868 if (state->dts_getf != 0 &&
13822 13869 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
13823 13870 /*
13824 13871 * We don't have kernel privs but we have at least one call
13825 13872 * to getf(); we need to bump our zone's count, and (if
13826 13873 * this is the first enabling to have an unprivileged call
13827 13874 * to getf()) we need to hook into closef().
13828 13875 */
13829 13876 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
13830 13877
13831 13878 if (dtrace_getf++ == 0) {
13832 13879 ASSERT(dtrace_closef == NULL);
13833 13880 dtrace_closef = dtrace_getf_barrier;
13834 13881 }
13835 13882 }
13836 13883
13837 13884 /*
13838 13885 * Now it's time to actually fire the BEGIN probe. We need to disable
13839 13886 * interrupts here both to record the CPU on which we fired the BEGIN
13840 13887 * probe (the data from this CPU will be processed first at user
13841 13888 * level) and to manually activate the buffer for this CPU.
13842 13889 */
13843 13890 cookie = dtrace_interrupt_disable();
13844 13891 *cpu = CPU->cpu_id;
13845 13892 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13846 13893 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13847 13894
13848 13895 dtrace_probe(dtrace_probeid_begin,
13849 13896 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13850 13897 dtrace_interrupt_enable(cookie);
13851 13898 /*
13852 13899 * We may have had an exit action from a BEGIN probe; only change our
13853 13900 * state to ACTIVE if we're still in WARMUP.
13854 13901 */
13855 13902 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13856 13903 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13857 13904
13858 13905 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13859 13906 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13860 13907
13861 13908 /*
13862 13909 	 * Regardless of whether we're now in ACTIVE or DRAINING, we
13863 13910 * want each CPU to transition its principal buffer out of the
13864 13911 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13865 13912 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13866 13913 * atomically transition from processing none of a state's ECBs to
13867 13914 * processing all of them.
13868 13915 */
13869 13916 dtrace_xcall(DTRACE_CPUALL,
13870 13917 (dtrace_xcall_t)dtrace_buffer_activate, state);
13871 13918 goto out;
13872 13919
13873 13920 err:
13874 13921 dtrace_buffer_free(state->dts_buffer);
13875 13922 dtrace_buffer_free(state->dts_aggbuffer);
13876 13923
13877 13924 if ((nspec = state->dts_nspeculations) == 0) {
13878 13925 ASSERT(state->dts_speculations == NULL);
13879 13926 goto out;
13880 13927 }
13881 13928
13882 13929 spec = state->dts_speculations;
13883 13930 ASSERT(spec != NULL);
13884 13931
13885 13932 for (i = 0; i < state->dts_nspeculations; i++) {
13886 13933 if ((buf = spec[i].dtsp_buffer) == NULL)
13887 13934 break;
13888 13935
13889 13936 dtrace_buffer_free(buf);
13890 13937 kmem_free(buf, bufsize);
13891 13938 }
13892 13939
13893 13940 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13894 13941 state->dts_nspeculations = 0;
13895 13942 state->dts_speculations = NULL;
13896 13943
13897 13944 out:
13898 13945 mutex_exit(&dtrace_lock);
13899 13946 mutex_exit(&cpu_lock);
13900 13947
13901 13948 return (rval);
13902 13949 }
13903 13950
13904 13951 static int
13905 13952 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13906 13953 {
13907 13954 dtrace_icookie_t cookie;
13908 13955
13909 13956 ASSERT(MUTEX_HELD(&dtrace_lock));
13910 13957
13911 13958 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13912 13959 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13913 13960 return (EINVAL);
13914 13961
13915 13962 /*
13916 13963 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13917 13964 * to be sure that every CPU has seen it. See below for the details
13918 13965 * on why this is done.
13919 13966 */
13920 13967 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13921 13968 dtrace_sync();
13922 13969
13923 13970 /*
13924 13971 * By this point, it is impossible for any CPU to be still processing
13925 13972 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13926 13973 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13927 13974 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13928 13975 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13929 13976 * iff we're in the END probe.
13930 13977 */
13931 13978 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13932 13979 dtrace_sync();
13933 13980 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13934 13981
13935 13982 /*
13936 13983 * Finally, we can release the reserve and call the END probe. We
13937 13984 * disable interrupts across calling the END probe to allow us to
13938 13985 * return the CPU on which we actually called the END probe. This
13939 13986 * allows user-land to be sure that this CPU's principal buffer is
13940 13987 * processed last.
13941 13988 */
13942 13989 state->dts_reserve = 0;
13943 13990
13944 13991 cookie = dtrace_interrupt_disable();
13945 13992 *cpu = CPU->cpu_id;
13946 13993 dtrace_probe(dtrace_probeid_end,
13947 13994 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13948 13995 dtrace_interrupt_enable(cookie);
13949 13996
13950 13997 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13951 13998 dtrace_sync();
13952 13999
13953 14000 if (state->dts_getf != 0 &&
13954 14001 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
13955 14002 /*
13956 14003 * We don't have kernel privs but we have at least one call
13957 14004 * to getf(); we need to lower our zone's count, and (if
13958 14005 * this is the last enabling to have an unprivileged call
13959 14006 * to getf()) we need to clear the closef() hook.
13960 14007 */
13961 14008 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
13962 14009 ASSERT(dtrace_closef == dtrace_getf_barrier);
13963 14010 ASSERT(dtrace_getf > 0);
13964 14011
13965 14012 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
13966 14013
13967 14014 if (--dtrace_getf == 0)
13968 14015 dtrace_closef = NULL;
13969 14016 }
13970 14017
13971 14018 return (0);
13972 14019 }
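
/*
 * Illustrative sketch (not part of this change): the stop path above
 * is a publish-then-sync state machine -- after each transition,
 * dtrace_sync() guarantees that no CPU is still in probe context
 * under the previous state:
 *
 *	ACTIVE -> DRAINING -> COOLDOWN (END fires) -> STOPPED
 */
#if 0
static void
transition(dtrace_state_t *state, dtrace_activity_t next)
{
	state->dts_activity = next;	/* publish the new state */
	dtrace_sync();			/* wait out probe context */
}
#endif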
13973 14020
13974 14021 static int
13975 14022 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13976 14023 dtrace_optval_t val)
13977 14024 {
13978 14025 ASSERT(MUTEX_HELD(&dtrace_lock));
13979 14026
13980 14027 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13981 14028 return (EBUSY);
13982 14029
13983 14030 if (option >= DTRACEOPT_MAX)
13984 14031 return (EINVAL);
13985 14032
13986 14033 if (option != DTRACEOPT_CPU && val < 0)
13987 14034 return (EINVAL);
13988 14035
13989 14036 switch (option) {
13990 14037 case DTRACEOPT_DESTRUCTIVE:
13991 14038 if (dtrace_destructive_disallow)
13992 14039 return (EACCES);
13993 14040
13994 14041 state->dts_cred.dcr_destructive = 1;
13995 14042 break;
13996 14043
13997 14044 case DTRACEOPT_BUFSIZE:
13998 14045 case DTRACEOPT_DYNVARSIZE:
13999 14046 case DTRACEOPT_AGGSIZE:
14000 14047 case DTRACEOPT_SPECSIZE:
14001 14048 case DTRACEOPT_STRSIZE:
14002 14049 if (val < 0)
14003 14050 return (EINVAL);
14004 14051
14005 14052 if (val >= LONG_MAX) {
14006 14053 /*
14007 14054 * If this is an otherwise negative value, set it to
14008 14055 * the highest multiple of 128m less than LONG_MAX.
14009 14056 * Technically, we're adjusting the size without
14010 14057 * regard to the buffer resizing policy, but in fact,
14011 14058 * this has no effect -- if we set the buffer size to
14012 14059 * ~LONG_MAX and the buffer policy is ultimately set to
14013 14060 * be "manual", the buffer allocation is guaranteed to
14014 14061 * fail, if only because the allocation requires two
14015 14062 		 * buffers.  (We set the size to the highest
14016 14063 * multiple of 128m because it ensures that the size
14017 14064 * will remain a multiple of a megabyte when
14018 14065 * repeatedly halved -- all the way down to 15m.)
14019 14066 */
14020 14067 val = LONG_MAX - (1 << 27) + 1;
14021 14068 }
14022 14069 }
14023 14070
14024 14071 state->dts_options[option] = val;
14025 14072
14026 14073 return (0);
14027 14074 }
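
/*
 * Illustrative arithmetic (not part of this change; assumes 64-bit
 * longs): LONG_MAX - (1 << 27) + 1 is 2^63 - 2^27, the largest
 * multiple of 128m not exceeding LONG_MAX, which is why it survives
 * repeated halving as a whole number of megabytes.
 */
#if 0
#include <limits.h>
_Static_assert((LONG_MAX - (1L << 27) + 1) % (1L << 27) == 0,
    "cap is a multiple of 128m");
#endif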
14028 14075
14029 14076 static void
14030 14077 dtrace_state_destroy(dtrace_state_t *state)
14031 14078 {
14032 14079 dtrace_ecb_t *ecb;
14033 14080 dtrace_vstate_t *vstate = &state->dts_vstate;
14034 14081 minor_t minor = getminor(state->dts_dev);
14035 14082 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14036 14083 dtrace_speculation_t *spec = state->dts_speculations;
14037 14084 int nspec = state->dts_nspeculations;
14038 14085 uint32_t match;
14039 14086
14040 14087 ASSERT(MUTEX_HELD(&dtrace_lock));
14041 14088 ASSERT(MUTEX_HELD(&cpu_lock));
14042 14089
14043 14090 /*
14044 14091 * First, retract any retained enablings for this state.
14045 14092 */
14046 14093 dtrace_enabling_retract(state);
14047 14094 ASSERT(state->dts_nretained == 0);
14048 14095
14049 14096 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14050 14097 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14051 14098 /*
14052 14099 * We have managed to come into dtrace_state_destroy() on a
14053 14100 * hot enabling -- almost certainly because of a disorderly
14054 14101 * shutdown of a consumer. (That is, a consumer that is
14055 14102 * exiting without having called dtrace_stop().) In this case,
14056 14103 * we're going to set our activity to be KILLED, and then
14057 14104 * issue a sync to be sure that everyone is out of probe
14058 14105 * context before we start blowing away ECBs.
14059 14106 */
14060 14107 state->dts_activity = DTRACE_ACTIVITY_KILLED;
14061 14108 dtrace_sync();
14062 14109 }
14063 14110
14064 14111 /*
14065 14112 * Release the credential hold we took in dtrace_state_create().
14066 14113 */
14067 14114 if (state->dts_cred.dcr_cred != NULL)
14068 14115 crfree(state->dts_cred.dcr_cred);
14069 14116
14070 14117 /*
14071 14118 * Now we can safely disable and destroy any enabled probes. Because
14072 14119 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14073 14120 * (especially if they're all enabled), we take two passes through the
14074 14121 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14075 14122 * in the second we disable whatever is left over.
14076 14123 */
14077 14124 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14078 14125 for (i = 0; i < state->dts_necbs; i++) {
14079 14126 if ((ecb = state->dts_ecbs[i]) == NULL)
14080 14127 continue;
14081 14128
14082 14129 if (match && ecb->dte_probe != NULL) {
14083 14130 dtrace_probe_t *probe = ecb->dte_probe;
14084 14131 dtrace_provider_t *prov = probe->dtpr_provider;
14085 14132
14086 14133 if (!(prov->dtpv_priv.dtpp_flags & match))
14087 14134 continue;
14088 14135 }
14089 14136
14090 14137 dtrace_ecb_disable(ecb);
14091 14138 dtrace_ecb_destroy(ecb);
14092 14139 }
14093 14140
14094 14141 if (!match)
14095 14142 break;
14096 14143 }
14097 14144
14098 14145 /*
14099 14146 * Before we free the buffers, perform one more sync to assure that
14100 14147 * every CPU is out of probe context.
14101 14148 */
14102 14149 dtrace_sync();
14103 14150
14104 14151 dtrace_buffer_free(state->dts_buffer);
14105 14152 dtrace_buffer_free(state->dts_aggbuffer);
14106 14153
14107 14154 for (i = 0; i < nspec; i++)
14108 14155 dtrace_buffer_free(spec[i].dtsp_buffer);
14109 14156
14110 14157 if (state->dts_cleaner != CYCLIC_NONE)
14111 14158 cyclic_remove(state->dts_cleaner);
14112 14159
14113 14160 if (state->dts_deadman != CYCLIC_NONE)
14114 14161 cyclic_remove(state->dts_deadman);
14115 14162
14116 14163 dtrace_dstate_fini(&vstate->dtvs_dynvars);
14117 14164 dtrace_vstate_fini(vstate);
14118 14165 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
14119 14166
14120 14167 if (state->dts_aggregations != NULL) {
14121 14168 #ifdef DEBUG
14122 14169 for (i = 0; i < state->dts_naggregations; i++)
14123 14170 ASSERT(state->dts_aggregations[i] == NULL);
14124 14171 #endif
14125 14172 ASSERT(state->dts_naggregations > 0);
14126 14173 kmem_free(state->dts_aggregations,
14127 14174 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
14128 14175 }
14129 14176
14130 14177 kmem_free(state->dts_buffer, bufsize);
14131 14178 kmem_free(state->dts_aggbuffer, bufsize);
14132 14179
14133 14180 for (i = 0; i < nspec; i++)
14134 14181 kmem_free(spec[i].dtsp_buffer, bufsize);
14135 14182
14136 14183 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14137 14184
14138 14185 dtrace_format_destroy(state);
14139 14186
14140 14187 vmem_destroy(state->dts_aggid_arena);
14141 14188 ddi_soft_state_free(dtrace_softstate, minor);
14142 14189 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14143 14190 }
14144 14191
14145 14192 /*
14146 14193 * DTrace Anonymous Enabling Functions
14147 14194 */
14148 14195 static dtrace_state_t *
14149 14196 dtrace_anon_grab(void)
14150 14197 {
14151 14198 dtrace_state_t *state;
14152 14199
14153 14200 ASSERT(MUTEX_HELD(&dtrace_lock));
14154 14201
14155 14202 if ((state = dtrace_anon.dta_state) == NULL) {
14156 14203 ASSERT(dtrace_anon.dta_enabling == NULL);
14157 14204 return (NULL);
14158 14205 }
14159 14206
14160 14207 ASSERT(dtrace_anon.dta_enabling != NULL);
14161 14208 ASSERT(dtrace_retained != NULL);
14162 14209
14163 14210 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
14164 14211 dtrace_anon.dta_enabling = NULL;
14165 14212 dtrace_anon.dta_state = NULL;
14166 14213
14167 14214 return (state);
14168 14215 }
14169 14216
14170 14217 static void
14171 14218 dtrace_anon_property(void)
14172 14219 {
14173 14220 int i, rv;
14174 14221 dtrace_state_t *state;
14175 14222 dof_hdr_t *dof;
14176 14223 char c[32]; /* enough for "dof-data-" + digits */
14177 14224
14178 14225 ASSERT(MUTEX_HELD(&dtrace_lock));
14179 14226 ASSERT(MUTEX_HELD(&cpu_lock));
14180 14227
14181 14228 for (i = 0; ; i++) {
14182 14229 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
14183 14230
14184 14231 dtrace_err_verbose = 1;
14185 14232
14186 14233 if ((dof = dtrace_dof_property(c)) == NULL) {
14187 14234 dtrace_err_verbose = 0;
14188 14235 break;
14189 14236 }
14190 14237
14191 14238 /*
14192 14239 * We want to create anonymous state, so we need to transition
14193 14240 * the kernel debugger to indicate that DTrace is active. If
14194 14241 * this fails (e.g. because the debugger has modified text in
14195 14242 * some way), we won't continue with the processing.
14196 14243 */
14197 14244 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14198 14245 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
14199 14246 "enabling ignored.");
14200 14247 dtrace_dof_destroy(dof);
14201 14248 break;
14202 14249 }
14203 14250
14204 14251 /*
14205 14252 * If we haven't allocated an anonymous state, we'll do so now.
14206 14253 */
14207 14254 if ((state = dtrace_anon.dta_state) == NULL) {
14208 14255 state = dtrace_state_create(NULL, NULL);
14209 14256 dtrace_anon.dta_state = state;
14210 14257
14211 14258 if (state == NULL) {
14212 14259 /*
14213 14260 * This basically shouldn't happen: the only
14214 14261 * failure mode from dtrace_state_create() is a
14215 14262 * failure of ddi_soft_state_zalloc() that
14216 14263 * itself should never happen. Still, the
14217 14264 * interface allows for a failure mode, and
14218 14265 * we want to fail as gracefully as possible:
14219 14266 * we'll emit an error message and cease
14220 14267 * processing anonymous state in this case.
14221 14268 */
14222 14269 cmn_err(CE_WARN, "failed to create "
14223 14270 "anonymous state");
14224 14271 dtrace_dof_destroy(dof);
14225 14272 break;
14226 14273 }
14227 14274 }
14228 14275
14229 14276 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
14230 14277 &dtrace_anon.dta_enabling, 0, B_TRUE);
14231 14278
14232 14279 if (rv == 0)
14233 14280 rv = dtrace_dof_options(dof, state);
14234 14281
14235 14282 dtrace_err_verbose = 0;
14236 14283 dtrace_dof_destroy(dof);
14237 14284
14238 14285 if (rv != 0) {
14239 14286 /*
14240 14287 * This is malformed DOF; chuck any anonymous state
14241 14288 * that we created.
14242 14289 */
14243 14290 ASSERT(dtrace_anon.dta_enabling == NULL);
14244 14291 dtrace_state_destroy(state);
14245 14292 dtrace_anon.dta_state = NULL;
14246 14293 break;
14247 14294 }
14248 14295
14249 14296 ASSERT(dtrace_anon.dta_enabling != NULL);
14250 14297 }
14251 14298
14252 14299 if (dtrace_anon.dta_enabling != NULL) {
14253 14300 int rval;
14254 14301
14255 14302 /*
14256 14303 * dtrace_enabling_retain() can only fail because we are
14257 14304 * trying to retain more enablings than are allowed -- but
14258 14305 * we only have one anonymous enabling, and we are guaranteed
14259 14306 * to be allowed at least one retained enabling; we assert
14260 14307 * that dtrace_enabling_retain() returns success.
14261 14308 */
14262 14309 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14263 14310 ASSERT(rval == 0);
14264 14311
14265 14312 dtrace_enabling_dump(dtrace_anon.dta_enabling);
14266 14313 }
14267 14314 }
14268 14315
14269 14316 /*
14270 14317 * DTrace Helper Functions
14271 14318 */
14272 14319 static void
14273 14320 dtrace_helper_trace(dtrace_helper_action_t *helper,
14274 14321 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14275 14322 {
14276 14323 uint32_t size, next, nnext, i;
14277 14324 dtrace_helptrace_t *ent;
14278 14325 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14279 14326
14280 14327 if (!dtrace_helptrace_enabled)
14281 14328 return;
14282 14329
14283 14330 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14284 14331
14285 14332 /*
14286 14333 * What would a tracing framework be without its own tracing
14287 14334 * framework? (Well, a hell of a lot simpler, for starters...)
14288 14335 */
14289 14336 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14290 14337 sizeof (uint64_t) - sizeof (uint64_t);
14291 14338
14292 14339 /*
14293 14340 * Iterate until we can allocate a slot in the trace buffer.
14294 14341 */
14295 14342 do {
14296 14343 next = dtrace_helptrace_next;
14297 14344
14298 14345 if (next + size < dtrace_helptrace_bufsize) {
14299 14346 nnext = next + size;
14300 14347 } else {
14301 14348 nnext = size;
14302 14349 }
14303 14350 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14304 14351
14305 14352 /*
14306 14353 * We have our slot; fill it in.
14307 14354 */
14308 14355 if (nnext == size)
14309 14356 next = 0;
14310 14357
14311 14358 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
14312 14359 ent->dtht_helper = helper;
14313 14360 ent->dtht_where = where;
14314 14361 ent->dtht_nlocals = vstate->dtvs_nlocals;
14315 14362
14316 14363 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14317 14364 mstate->dtms_fltoffs : -1;
14318 14365 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14319 14366 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
14320 14367
14321 14368 for (i = 0; i < vstate->dtvs_nlocals; i++) {
14322 14369 dtrace_statvar_t *svar;
14323 14370
14324 14371 if ((svar = vstate->dtvs_locals[i]) == NULL)
14325 14372 continue;
14326 14373
14327 14374 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14328 14375 ent->dtht_locals[i] =
14329 14376 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
14330 14377 }
14331 14378 }
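
/*
 * Illustrative sketch (not part of this change; names hypothetical):
 * the dtrace_cas32() loop above reserves a variable-sized slot in a
 * ring buffer without locks -- compute the would-be cursor, wrap to
 * the start when the record doesn't fit, and retry whenever another
 * CPU won the race to move the cursor.
 */
#if 0
static uint32_t cursor;		/* shared ring-buffer cursor */

static uint32_t
reserve_slot(uint32_t size, uint32_t bufsize)
{
	uint32_t next, nnext;

	do {
		next = cursor;
		nnext = (next + size < bufsize) ? next + size : size;
	} while (dtrace_cas32(&cursor, next, nnext) != next);

	/* nnext == size means we wrapped; the slot starts at 0. */
	return (nnext == size ? 0 : next);
}
#endif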
14332 14379
14333 14380 static uint64_t
14334 14381 dtrace_helper(int which, dtrace_mstate_t *mstate,
14335 14382 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14336 14383 {
14337 14384 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14338 14385 uint64_t sarg0 = mstate->dtms_arg[0];
14339 14386 uint64_t sarg1 = mstate->dtms_arg[1];
14340 14387 uint64_t rval;
14341 14388 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14342 14389 dtrace_helper_action_t *helper;
14343 14390 dtrace_vstate_t *vstate;
14344 14391 dtrace_difo_t *pred;
14345 14392 int i, trace = dtrace_helptrace_enabled;
14346 14393
14347 14394 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14348 14395
14349 14396 if (helpers == NULL)
14350 14397 return (0);
14351 14398
14352 14399 if ((helper = helpers->dthps_actions[which]) == NULL)
14353 14400 return (0);
14354 14401
14355 14402 vstate = &helpers->dthps_vstate;
14356 14403 mstate->dtms_arg[0] = arg0;
14357 14404 mstate->dtms_arg[1] = arg1;
14358 14405
14359 14406 /*
14360 14407 * Now iterate over each helper. If its predicate evaluates to 'true',
14361 14408 * we'll call the corresponding actions. Note that the below calls
14362 14409 * to dtrace_dif_emulate() may set faults in machine state. This is
14363 14410 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
14364 14411 * the stored DIF offset with its own (which is the desired behavior).
14365 14412 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
14366 14413 * from machine state; this is okay, too.
14367 14414 */
14368 14415 for (; helper != NULL; helper = helper->dtha_next) {
14369 14416 if ((pred = helper->dtha_predicate) != NULL) {
14370 14417 if (trace)
14371 14418 dtrace_helper_trace(helper, mstate, vstate, 0);
14372 14419
14373 14420 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14374 14421 goto next;
14375 14422
14376 14423 if (*flags & CPU_DTRACE_FAULT)
14377 14424 goto err;
14378 14425 }
14379 14426
14380 14427 for (i = 0; i < helper->dtha_nactions; i++) {
14381 14428 if (trace)
14382 14429 dtrace_helper_trace(helper,
14383 14430 mstate, vstate, i + 1);
14384 14431
14385 14432 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14386 14433 mstate, vstate, state);
14387 14434
14388 14435 if (*flags & CPU_DTRACE_FAULT)
14389 14436 goto err;
14390 14437 }
14391 14438
14392 14439 next:
14393 14440 if (trace)
14394 14441 dtrace_helper_trace(helper, mstate, vstate,
14395 14442 DTRACE_HELPTRACE_NEXT);
14396 14443 }
14397 14444
14398 14445 if (trace)
14399 14446 dtrace_helper_trace(helper, mstate, vstate,
14400 14447 DTRACE_HELPTRACE_DONE);
14401 14448
14402 14449 /*
14403 14450 * Restore the arg0 that we saved upon entry.
14404 14451 */
14405 14452 mstate->dtms_arg[0] = sarg0;
14406 14453 mstate->dtms_arg[1] = sarg1;
14407 14454
14408 14455 return (rval);
14409 14456
14410 14457 err:
14411 14458 if (trace)
14412 14459 dtrace_helper_trace(helper, mstate, vstate,
14413 14460 DTRACE_HELPTRACE_ERR);
14414 14461
14415 14462 /*
14416 14463 * Restore the arg0 that we saved upon entry.
14417 14464 */
14418 14465 mstate->dtms_arg[0] = sarg0;
14419 14466 mstate->dtms_arg[1] = sarg1;
14420 14467
14421 14468 	return (0);
14422 14469 }
14423 14470
14424 14471 static void
14425 14472 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14426 14473 dtrace_vstate_t *vstate)
14427 14474 {
14428 14475 int i;
14429 14476
14430 14477 if (helper->dtha_predicate != NULL)
14431 14478 dtrace_difo_release(helper->dtha_predicate, vstate);
14432 14479
14433 14480 for (i = 0; i < helper->dtha_nactions; i++) {
14434 14481 ASSERT(helper->dtha_actions[i] != NULL);
14435 14482 dtrace_difo_release(helper->dtha_actions[i], vstate);
14436 14483 }
14437 14484
14438 14485 kmem_free(helper->dtha_actions,
14439 14486 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14440 14487 kmem_free(helper, sizeof (dtrace_helper_action_t));
14441 14488 }
14442 14489
14443 14490 static int
14444 14491 dtrace_helper_destroygen(int gen)
14445 14492 {
14446 14493 proc_t *p = curproc;
14447 14494 dtrace_helpers_t *help = p->p_dtrace_helpers;
14448 14495 dtrace_vstate_t *vstate;
14449 14496 int i;
14450 14497
14451 14498 ASSERT(MUTEX_HELD(&dtrace_lock));
14452 14499
14453 14500 if (help == NULL || gen > help->dthps_generation)
14454 14501 return (EINVAL);
14455 14502
14456 14503 vstate = &help->dthps_vstate;
14457 14504
14458 14505 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14459 14506 dtrace_helper_action_t *last = NULL, *h, *next;
14460 14507
14461 14508 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14462 14509 next = h->dtha_next;
14463 14510
14464 14511 if (h->dtha_generation == gen) {
14465 14512 if (last != NULL) {
14466 14513 last->dtha_next = next;
14467 14514 } else {
14468 14515 help->dthps_actions[i] = next;
14469 14516 }
14470 14517
14471 14518 dtrace_helper_action_destroy(h, vstate);
14472 14519 } else {
14473 14520 last = h;
14474 14521 }
14475 14522 }
14476 14523 }
14477 14524
14478 14525 /*
14479 14526 	 * Iterate until we've cleared out all helper providers with the
14480 14527 * given generation number.
14481 14528 */
14482 14529 for (;;) {
14483 14530 dtrace_helper_provider_t *prov;
14484 14531
14485 14532 /*
14486 14533 * Look for a helper provider with the right generation. We
14487 14534 * have to start back at the beginning of the list each time
14488 14535 * because we drop dtrace_lock. It's unlikely that we'll make
14489 14536 * more than two passes.
14490 14537 */
14491 14538 for (i = 0; i < help->dthps_nprovs; i++) {
14492 14539 prov = help->dthps_provs[i];
14493 14540
14494 14541 if (prov->dthp_generation == gen)
14495 14542 break;
14496 14543 }
14497 14544
14498 14545 /*
14499 14546 * If there were no matches, we're done.
14500 14547 */
14501 14548 if (i == help->dthps_nprovs)
14502 14549 break;
14503 14550
14504 14551 /*
14505 14552 * Move the last helper provider into this slot.
14506 14553 */
14507 14554 help->dthps_nprovs--;
14508 14555 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14509 14556 help->dthps_provs[help->dthps_nprovs] = NULL;
14510 14557
14511 14558 mutex_exit(&dtrace_lock);
14512 14559
14513 14560 /*
14514 14561 * If we have a meta provider, remove this helper provider.
14515 14562 */
14516 14563 mutex_enter(&dtrace_meta_lock);
14517 14564 if (dtrace_meta_pid != NULL) {
14518 14565 ASSERT(dtrace_deferred_pid == NULL);
14519 14566 dtrace_helper_provider_remove(&prov->dthp_prov,
14520 14567 p->p_pid);
14521 14568 }
14522 14569 mutex_exit(&dtrace_meta_lock);
14523 14570
14524 14571 dtrace_helper_provider_destroy(prov);
14525 14572
14526 14573 mutex_enter(&dtrace_lock);
14527 14574 }
14528 14575
14529 14576 return (0);
14530 14577 }
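
/*
 * Illustrative sketch (not part of this change; names hypothetical):
 * because the lock must be dropped to tear down each matching
 * provider, the loop above rescans from the head of the table on
 * every pass instead of trusting a saved position.
 */
#if 0
static void
remove_generation(table_t *table, int gen)
{
	item_t *item;

	for (;;) {
		mutex_enter(&lock);
		if ((item = find_matching(table, gen)) == NULL) {
			mutex_exit(&lock);
			break;		/* nothing left to remove */
		}
		unlink(table, item);
		mutex_exit(&lock);	/* drop the lock to destroy */
		destroy(item);
	}
}
#endif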
14531 14578
14532 14579 static int
14533 14580 dtrace_helper_validate(dtrace_helper_action_t *helper)
14534 14581 {
14535 14582 int err = 0, i;
14536 14583 dtrace_difo_t *dp;
14537 14584
14538 14585 if ((dp = helper->dtha_predicate) != NULL)
14539 14586 err += dtrace_difo_validate_helper(dp);
14540 14587
14541 14588 for (i = 0; i < helper->dtha_nactions; i++)
14542 14589 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14543 14590
14544 14591 return (err == 0);
14545 14592 }
14546 14593
14547 14594 static int
14548 14595 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14549 14596 {
14550 14597 dtrace_helpers_t *help;
14551 14598 dtrace_helper_action_t *helper, *last;
14552 14599 dtrace_actdesc_t *act;
14553 14600 dtrace_vstate_t *vstate;
14554 14601 dtrace_predicate_t *pred;
14555 14602 int count = 0, nactions = 0, i;
14556 14603
14557 14604 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14558 14605 return (EINVAL);
14559 14606
14560 14607 help = curproc->p_dtrace_helpers;
14561 14608 last = help->dthps_actions[which];
14562 14609 vstate = &help->dthps_vstate;
14563 14610
14564 14611 for (count = 0; last != NULL; last = last->dtha_next) {
14565 14612 count++;
14566 14613 if (last->dtha_next == NULL)
14567 14614 break;
14568 14615 }
14569 14616
14570 14617 /*
14571 14618 * If we already have dtrace_helper_actions_max helper actions for this
14572 14619 * helper action type, we'll refuse to add a new one.
14573 14620 */
14574 14621 if (count >= dtrace_helper_actions_max)
14575 14622 return (ENOSPC);
14576 14623
14577 14624 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14578 14625 helper->dtha_generation = help->dthps_generation;
14579 14626
14580 14627 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14581 14628 ASSERT(pred->dtp_difo != NULL);
14582 14629 dtrace_difo_hold(pred->dtp_difo);
14583 14630 helper->dtha_predicate = pred->dtp_difo;
14584 14631 }
14585 14632
14586 14633 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14587 14634 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14588 14635 goto err;
14589 14636
14590 14637 if (act->dtad_difo == NULL)
14591 14638 goto err;
14592 14639
14593 14640 nactions++;
14594 14641 }
14595 14642
14596 14643 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14597 14644 (helper->dtha_nactions = nactions), KM_SLEEP);
14598 14645
14599 14646 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14600 14647 dtrace_difo_hold(act->dtad_difo);
14601 14648 helper->dtha_actions[i++] = act->dtad_difo;
14602 14649 }
14603 14650
14604 14651 if (!dtrace_helper_validate(helper))
14605 14652 goto err;
14606 14653
14607 14654 if (last == NULL) {
14608 14655 help->dthps_actions[which] = helper;
14609 14656 } else {
14610 14657 last->dtha_next = helper;
14611 14658 }
14612 14659
14613 14660 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14614 14661 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14615 14662 dtrace_helptrace_next = 0;
14616 14663 }
14617 14664
14618 14665 return (0);
14619 14666 err:
14620 14667 dtrace_helper_action_destroy(helper, vstate);
14621 14668 return (EINVAL);
14622 14669 }
14623 14670
14624 14671 static void
14625 14672 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14626 14673 dof_helper_t *dofhp)
14627 14674 {
14628 14675 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14629 14676
14630 14677 mutex_enter(&dtrace_meta_lock);
14631 14678 mutex_enter(&dtrace_lock);
14632 14679
14633 14680 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14634 14681 /*
14635 14682 * If the dtrace module is loaded but not attached, or if
14636 14683 		 * there isn't a meta provider registered to deal with
14637 14684 * these provider descriptions, we need to postpone creating
14638 14685 * the actual providers until later.
14639 14686 */
14640 14687
14641 14688 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14642 14689 dtrace_deferred_pid != help) {
14643 14690 help->dthps_deferred = 1;
14644 14691 help->dthps_pid = p->p_pid;
14645 14692 help->dthps_next = dtrace_deferred_pid;
14646 14693 help->dthps_prev = NULL;
14647 14694 if (dtrace_deferred_pid != NULL)
14648 14695 dtrace_deferred_pid->dthps_prev = help;
14649 14696 dtrace_deferred_pid = help;
14650 14697 }
14651 14698
14652 14699 mutex_exit(&dtrace_lock);
14653 14700
14654 14701 } else if (dofhp != NULL) {
14655 14702 /*
14656 14703 * If the dtrace module is loaded and we have a particular
14657 14704 * helper provider description, pass that off to the
14658 14705 * meta provider.
14659 14706 */
14660 14707
14661 14708 mutex_exit(&dtrace_lock);
14662 14709
14663 14710 dtrace_helper_provide(dofhp, p->p_pid);
14664 14711
14665 14712 } else {
14666 14713 /*
14667 14714 * Otherwise, just pass all the helper provider descriptions
14668 14715 * off to the meta provider.
14669 14716 */
14670 14717
14671 14718 int i;
14672 14719 mutex_exit(&dtrace_lock);
14673 14720
14674 14721 for (i = 0; i < help->dthps_nprovs; i++) {
14675 14722 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14676 14723 p->p_pid);
14677 14724 }
14678 14725 }
14679 14726
14680 14727 mutex_exit(&dtrace_meta_lock);
14681 14728 }
14682 14729
14683 14730 static int
14684 14731 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14685 14732 {
14686 14733 dtrace_helpers_t *help;
14687 14734 dtrace_helper_provider_t *hprov, **tmp_provs;
14688 14735 uint_t tmp_maxprovs, i;
14689 14736
14690 14737 ASSERT(MUTEX_HELD(&dtrace_lock));
14691 14738
14692 14739 help = curproc->p_dtrace_helpers;
14693 14740 ASSERT(help != NULL);
14694 14741
14695 14742 /*
14696 14743 * If we already have dtrace_helper_providers_max helper providers,
14697 14744 	 * we'll refuse to add a new one.
14698 14745 */
14699 14746 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14700 14747 return (ENOSPC);
14701 14748
14702 14749 /*
14703 14750 * Check to make sure this isn't a duplicate.
14704 14751 */
14705 14752 for (i = 0; i < help->dthps_nprovs; i++) {
14706 14753 if (dofhp->dofhp_dof ==
14707 14754 help->dthps_provs[i]->dthp_prov.dofhp_dof)
14708 14755 return (EALREADY);
14709 14756 }
14710 14757
14711 14758 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14712 14759 hprov->dthp_prov = *dofhp;
14713 14760 hprov->dthp_ref = 1;
14714 14761 hprov->dthp_generation = gen;
14715 14762
14716 14763 /*
14717 14764 * Allocate a bigger table for helper providers if it's already full.
14718 14765 */
14719 14766 if (help->dthps_maxprovs == help->dthps_nprovs) {
14720 14767 tmp_maxprovs = help->dthps_maxprovs;
14721 14768 tmp_provs = help->dthps_provs;
14722 14769
14723 14770 if (help->dthps_maxprovs == 0)
14724 14771 help->dthps_maxprovs = 2;
14725 14772 else
14726 14773 help->dthps_maxprovs *= 2;
14727 14774 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14728 14775 help->dthps_maxprovs = dtrace_helper_providers_max;
14729 14776
14730 14777 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14731 14778
14732 14779 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14733 14780 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14734 14781
14735 14782 if (tmp_provs != NULL) {
14736 14783 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14737 14784 sizeof (dtrace_helper_provider_t *));
14738 14785 kmem_free(tmp_provs, tmp_maxprovs *
14739 14786 sizeof (dtrace_helper_provider_t *));
14740 14787 }
14741 14788 }
14742 14789
14743 14790 help->dthps_provs[help->dthps_nprovs] = hprov;
14744 14791 help->dthps_nprovs++;
14745 14792
14746 14793 return (0);
14747 14794 }
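
/*
 * Illustrative sketch (not part of this change; names hypothetical):
 * the table growth above is classic amortized doubling -- allocate
 * twice the capacity (clamped to a hard maximum), copy the old
 * entries, and free the old array.
 */
#if 0
static int
table_grow(void ***tabp, uint_t *maxp, uint_t n, uint_t hardmax)
{
	uint_t newmax = (*maxp == 0) ? 2 : *maxp * 2;
	void **newtab;

	if (newmax > hardmax)
		newmax = hardmax;

	newtab = kmem_zalloc(newmax * sizeof (void *), KM_SLEEP);

	if (*tabp != NULL) {
		bcopy(*tabp, newtab, n * sizeof (void *));
		kmem_free(*tabp, *maxp * sizeof (void *));
	}

	*tabp = newtab;
	*maxp = newmax;
	return (0);
}
#endif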
14748 14795
14749 14796 static void
14750 14797 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14751 14798 {
14752 14799 mutex_enter(&dtrace_lock);
14753 14800
14754 14801 if (--hprov->dthp_ref == 0) {
14755 14802 dof_hdr_t *dof;
14756 14803 mutex_exit(&dtrace_lock);
14757 14804 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14758 14805 dtrace_dof_destroy(dof);
14759 14806 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14760 14807 } else {
14761 14808 mutex_exit(&dtrace_lock);
14762 14809 }
14763 14810 }
14764 14811
14765 14812 static int
14766 14813 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14767 14814 {
14768 14815 uintptr_t daddr = (uintptr_t)dof;
14769 14816 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14770 14817 dof_provider_t *provider;
14771 14818 dof_probe_t *probe;
14772 14819 uint8_t *arg;
14773 14820 char *strtab, *typestr;
14774 14821 dof_stridx_t typeidx;
14775 14822 size_t typesz;
14776 14823 uint_t nprobes, j, k;
14777 14824
14778 14825 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14779 14826
14780 14827 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14781 14828 dtrace_dof_error(dof, "misaligned section offset");
14782 14829 return (-1);
14783 14830 }
14784 14831
14785 14832 /*
14786 14833 * The section needs to be large enough to contain the DOF provider
14787 14834 * structure appropriate for the given version.
14788 14835 */
14789 14836 if (sec->dofs_size <
14790 14837 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14791 14838 offsetof(dof_provider_t, dofpv_prenoffs) :
14792 14839 sizeof (dof_provider_t))) {
14793 14840 dtrace_dof_error(dof, "provider section too small");
14794 14841 return (-1);
14795 14842 }
14796 14843
14797 14844 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14798 14845 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14799 14846 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14800 14847 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14801 14848 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14802 14849
14803 14850 if (str_sec == NULL || prb_sec == NULL ||
14804 14851 arg_sec == NULL || off_sec == NULL)
14805 14852 return (-1);
14806 14853
14807 14854 enoff_sec = NULL;
14808 14855
14809 14856 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14810 14857 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14811 14858 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14812 14859 provider->dofpv_prenoffs)) == NULL)
14813 14860 return (-1);
14814 14861
14815 14862 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14816 14863
14817 14864 if (provider->dofpv_name >= str_sec->dofs_size ||
14818 14865 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14819 14866 dtrace_dof_error(dof, "invalid provider name");
14820 14867 return (-1);
14821 14868 }
14822 14869
14823 14870 if (prb_sec->dofs_entsize == 0 ||
14824 14871 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14825 14872 dtrace_dof_error(dof, "invalid entry size");
14826 14873 return (-1);
14827 14874 }
14828 14875
14829 14876 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14830 14877 dtrace_dof_error(dof, "misaligned entry size");
14831 14878 return (-1);
14832 14879 }
14833 14880
14834 14881 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14835 14882 dtrace_dof_error(dof, "invalid entry size");
14836 14883 return (-1);
14837 14884 }
14838 14885
14839 14886 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14840 14887 dtrace_dof_error(dof, "misaligned section offset");
14841 14888 return (-1);
14842 14889 }
14843 14890
14844 14891 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14845 14892 dtrace_dof_error(dof, "invalid entry size");
14846 14893 return (-1);
14847 14894 }
14848 14895
14849 14896 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14850 14897
14851 14898 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14852 14899
14853 14900 /*
14854 14901 * Take a pass through the probes to check for errors.
14855 14902 */
14856 14903 for (j = 0; j < nprobes; j++) {
14857 14904 probe = (dof_probe_t *)(uintptr_t)(daddr +
14858 14905 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14859 14906
14860 14907 if (probe->dofpr_func >= str_sec->dofs_size) {
14861 14908 dtrace_dof_error(dof, "invalid function name");
14862 14909 return (-1);
14863 14910 }
14864 14911
14865 14912 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14866 14913 dtrace_dof_error(dof, "function name too long");
14867 14914 return (-1);
14868 14915 }
14869 14916
14870 14917 if (probe->dofpr_name >= str_sec->dofs_size ||
14871 14918 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14872 14919 dtrace_dof_error(dof, "invalid probe name");
14873 14920 return (-1);
14874 14921 }
14875 14922
14876 14923 /*
14877 14924 * The offset count must not wrap the index, and the offsets
14878 14925 * must also not overflow the section's data.
14879 14926 */
14880 14927 if (probe->dofpr_offidx + probe->dofpr_noffs <
14881 14928 probe->dofpr_offidx ||
14882 14929 (probe->dofpr_offidx + probe->dofpr_noffs) *
14883 14930 off_sec->dofs_entsize > off_sec->dofs_size) {
14884 14931 dtrace_dof_error(dof, "invalid probe offset");
14885 14932 return (-1);
14886 14933 }
14887 14934
14888 14935 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14889 14936 /*
14890 14937 * If there's no is-enabled offset section, make sure
14891 14938 * there aren't any is-enabled offsets. Otherwise
14892 14939 * perform the same checks as for probe offsets
14893 14940 * (immediately above).
14894 14941 */
14895 14942 if (enoff_sec == NULL) {
14896 14943 if (probe->dofpr_enoffidx != 0 ||
14897 14944 probe->dofpr_nenoffs != 0) {
14898 14945 dtrace_dof_error(dof, "is-enabled "
14899 14946 "offsets with null section");
14900 14947 return (-1);
14901 14948 }
14902 14949 } else if (probe->dofpr_enoffidx +
14903 14950 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14904 14951 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14905 14952 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14906 14953 dtrace_dof_error(dof, "invalid is-enabled "
14907 14954 "offset");
14908 14955 return (-1);
14909 14956 }
14910 14957
14911 14958 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14912 14959 dtrace_dof_error(dof, "zero probe and "
14913 14960 "is-enabled offsets");
14914 14961 return (-1);
14915 14962 }
14916 14963 } else if (probe->dofpr_noffs == 0) {
14917 14964 dtrace_dof_error(dof, "zero probe offsets");
14918 14965 return (-1);
14919 14966 }
14920 14967
14921 14968 if (probe->dofpr_argidx + probe->dofpr_xargc <
14922 14969 probe->dofpr_argidx ||
14923 14970 (probe->dofpr_argidx + probe->dofpr_xargc) *
14924 14971 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14925 14972 dtrace_dof_error(dof, "invalid args");
14926 14973 return (-1);
14927 14974 }
14928 14975
14929 14976 typeidx = probe->dofpr_nargv;
14930 14977 typestr = strtab + probe->dofpr_nargv;
14931 14978 for (k = 0; k < probe->dofpr_nargc; k++) {
14932 14979 if (typeidx >= str_sec->dofs_size) {
14933 14980 dtrace_dof_error(dof, "bad "
14934 14981 "native argument type");
14935 14982 return (-1);
14936 14983 }
14937 14984
14938 14985 typesz = strlen(typestr) + 1;
14939 14986 if (typesz > DTRACE_ARGTYPELEN) {
14940 14987 dtrace_dof_error(dof, "native "
14941 14988 "argument type too long");
14942 14989 return (-1);
14943 14990 }
14944 14991 typeidx += typesz;
14945 14992 typestr += typesz;
14946 14993 }
14947 14994
14948 14995 typeidx = probe->dofpr_xargv;
14949 14996 typestr = strtab + probe->dofpr_xargv;
14950 14997 for (k = 0; k < probe->dofpr_xargc; k++) {
14951 14998 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14952 14999 dtrace_dof_error(dof, "bad "
14953 15000 "native argument index");
14954 15001 return (-1);
14955 15002 }
14956 15003
14957 15004 if (typeidx >= str_sec->dofs_size) {
14958 15005 dtrace_dof_error(dof, "bad "
14959 15006 "translated argument type");
14960 15007 return (-1);
14961 15008 }
14962 15009
14963 15010 typesz = strlen(typestr) + 1;
14964 15011 if (typesz > DTRACE_ARGTYPELEN) {
14965 15012 dtrace_dof_error(dof, "translated argument "
14966 15013 "type too long");
14967 15014 return (-1);
14968 15015 }
14969 15016
14970 15017 typeidx += typesz;
14971 15018 typestr += typesz;
14972 15019 }
14973 15020 }
14974 15021
14975 15022 return (0);
14976 15023 }
14977 15024
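/*
 * Editorial note: the validation above repeatedly uses the idiom
 * "idx + n < idx" to catch unsigned wraparound before performing the
 * scaled bounds check against the section size.  Below is a
 * self-contained sketch of the idiom; range_ok and its parameters
 * are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static int
range_ok(uint32_t idx, uint32_t n, uint32_t entsize, uint64_t secsize)
{
	/* Reject index arithmetic that wraps past zero. */
	if (idx + n < idx)
		return (0);

	/* Widen before scaling so the multiply itself cannot wrap. */
	if ((uint64_t)(idx + n) * entsize > secsize)
		return (0);

	return (1);
}
#endif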
14978 15025 static int
14979 15026 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14980 15027 {
14981 15028 dtrace_helpers_t *help;
14982 15029 dtrace_vstate_t *vstate;
14983 15030 dtrace_enabling_t *enab = NULL;
14984 15031 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14985 15032 uintptr_t daddr = (uintptr_t)dof;
14986 15033
14987 15034 ASSERT(MUTEX_HELD(&dtrace_lock));
14988 15035
14989 15036 if ((help = curproc->p_dtrace_helpers) == NULL)
14990 15037 help = dtrace_helpers_create(curproc);
14991 15038
14992 15039 vstate = &help->dthps_vstate;
14993 15040
14994 15041 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14995 15042 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14996 15043 dtrace_dof_destroy(dof);
14997 15044 return (rv);
14998 15045 }
14999 15046
15000 15047 /*
15001 15048 * Look for helper providers and validate their descriptions.
15002 15049 */
15003 15050 if (dhp != NULL) {
15004 15051 for (i = 0; i < dof->dofh_secnum; i++) {
15005 15052 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15006 15053 dof->dofh_secoff + i * dof->dofh_secsize);
15007 15054
15008 15055 if (sec->dofs_type != DOF_SECT_PROVIDER)
15009 15056 continue;
15010 15057
15011 15058 if (dtrace_helper_provider_validate(dof, sec) != 0) {
15012 15059 dtrace_enabling_destroy(enab);
15013 15060 dtrace_dof_destroy(dof);
15014 15061 return (-1);
15015 15062 }
15016 15063
15017 15064 nprovs++;
15018 15065 }
15019 15066 }
15020 15067
15021 15068 /*
15022 15069 * Now we need to walk through the ECB descriptions in the enabling.
15023 15070 */
15024 15071 for (i = 0; i < enab->dten_ndesc; i++) {
15025 15072 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15026 15073 dtrace_probedesc_t *desc = &ep->dted_probe;
15027 15074
15028 15075 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
15029 15076 continue;
15030 15077
15031 15078 if (strcmp(desc->dtpd_mod, "helper") != 0)
15032 15079 continue;
15033 15080
15034 15081 if (strcmp(desc->dtpd_func, "ustack") != 0)
15035 15082 continue;
15036 15083
15037 15084 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
15038 15085 ep)) != 0) {
15039 15086 /*
15040 15087 * Adding this helper action failed -- we are now going
15041 15088 * to rip out the entire generation and return failure.
15042 15089 */
15043 15090 (void) dtrace_helper_destroygen(help->dthps_generation);
15044 15091 dtrace_enabling_destroy(enab);
15045 15092 dtrace_dof_destroy(dof);
15046 15093 return (-1);
15047 15094 }
15048 15095
15049 15096 nhelpers++;
15050 15097 }
15051 15098
15052 15099 if (nhelpers < enab->dten_ndesc)
15053 15100 dtrace_dof_error(dof, "unmatched helpers");
15054 15101
15055 15102 gen = help->dthps_generation++;
15056 15103 dtrace_enabling_destroy(enab);
15057 15104
15058 15105 if (dhp != NULL && nprovs > 0) {
15059 15106 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
15060 15107 if (dtrace_helper_provider_add(dhp, gen) == 0) {
15061 15108 mutex_exit(&dtrace_lock);
15062 15109 dtrace_helper_provider_register(curproc, help, dhp);
15063 15110 mutex_enter(&dtrace_lock);
15064 15111
15065 15112 destroy = 0;
15066 15113 }
15067 15114 }
15068 15115
15069 15116 if (destroy)
15070 15117 dtrace_dof_destroy(dof);
15071 15118
15072 15119 return (gen);
15073 15120 }
15074 15121
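/*
 * Editorial note: dtrace_helper_slurp() above is all-or-nothing --
 * every helper it adds carries the same generation, and any failure
 * destroys that entire generation.  Below is a sketch of the
 * rollback pattern on a singly linked list; every name here (toy_*)
 * is hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdlib.h>

typedef struct toy_item {
	int ti_gen;
	struct toy_item *ti_next;
} toy_item_t;

/* Unlink and free every item carrying the failed generation. */
static void
toy_destroy_gen(toy_item_t **headp, int gen)
{
	toy_item_t *it, **prevp = headp;

	while ((it = *prevp) != NULL) {
		if (it->ti_gen == gen) {
			*prevp = it->ti_next;
			free(it);
		} else {
			prevp = &it->ti_next;
		}
	}
}

/* Add n items under one generation; on any failure, undo them all. */
static int
toy_add_batch(toy_item_t **headp, int n, int gen)
{
	int i;

	for (i = 0; i < n; i++) {
		toy_item_t *it = malloc(sizeof (toy_item_t));

		if (it == NULL) {
			toy_destroy_gen(headp, gen);
			return (-1);
		}
		it->ti_gen = gen;
		it->ti_next = *headp;
		*headp = it;
	}
	return (0);
}
#endif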
15075 15122 static dtrace_helpers_t *
15076 15123 dtrace_helpers_create(proc_t *p)
15077 15124 {
15078 15125 dtrace_helpers_t *help;
15079 15126
15080 15127 ASSERT(MUTEX_HELD(&dtrace_lock));
15081 15128 ASSERT(p->p_dtrace_helpers == NULL);
15082 15129
15083 15130 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
15084 15131 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
15085 15132 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
15086 15133
15087 15134 p->p_dtrace_helpers = help;
15088 15135 dtrace_helpers++;
15089 15136
15090 15137 return (help);
15091 15138 }
15092 15139
15093 15140 static void
15094 15141 dtrace_helpers_destroy(void)
15095 15142 {
15096 15143 dtrace_helpers_t *help;
15097 15144 dtrace_vstate_t *vstate;
15098 15145 proc_t *p = curproc;
15099 15146 int i;
15100 15147
15101 15148 mutex_enter(&dtrace_lock);
15102 15149
15103 15150 ASSERT(p->p_dtrace_helpers != NULL);
15104 15151 ASSERT(dtrace_helpers > 0);
15105 15152
15106 15153 help = p->p_dtrace_helpers;
15107 15154 vstate = &help->dthps_vstate;
15108 15155
15109 15156 /*
15110 15157 * We're now going to lose the help from this process.
15111 15158 */
15112 15159 p->p_dtrace_helpers = NULL;
15113 15160 dtrace_sync();
15114 15161
15115 15162 /*
15116 15163 	 * Destroy the helper actions.
15117 15164 */
15118 15165 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15119 15166 dtrace_helper_action_t *h, *next;
15120 15167
15121 15168 for (h = help->dthps_actions[i]; h != NULL; h = next) {
15122 15169 next = h->dtha_next;
15123 15170 dtrace_helper_action_destroy(h, vstate);
15125 15172 }
15126 15173 }
15127 15174
15128 15175 mutex_exit(&dtrace_lock);
15129 15176
15130 15177 /*
15131 15178 * Destroy the helper providers.
15132 15179 */
15133 15180 if (help->dthps_maxprovs > 0) {
15134 15181 mutex_enter(&dtrace_meta_lock);
15135 15182 if (dtrace_meta_pid != NULL) {
15136 15183 ASSERT(dtrace_deferred_pid == NULL);
15137 15184
15138 15185 for (i = 0; i < help->dthps_nprovs; i++) {
15139 15186 dtrace_helper_provider_remove(
15140 15187 &help->dthps_provs[i]->dthp_prov, p->p_pid);
15141 15188 }
15142 15189 } else {
15143 15190 mutex_enter(&dtrace_lock);
15144 15191 ASSERT(help->dthps_deferred == 0 ||
15145 15192 help->dthps_next != NULL ||
15146 15193 help->dthps_prev != NULL ||
15147 15194 help == dtrace_deferred_pid);
15148 15195
15149 15196 /*
15150 15197 * Remove the helper from the deferred list.
15151 15198 */
15152 15199 if (help->dthps_next != NULL)
15153 15200 help->dthps_next->dthps_prev = help->dthps_prev;
15154 15201 if (help->dthps_prev != NULL)
15155 15202 help->dthps_prev->dthps_next = help->dthps_next;
15156 15203 if (dtrace_deferred_pid == help) {
15157 15204 dtrace_deferred_pid = help->dthps_next;
15158 15205 ASSERT(help->dthps_prev == NULL);
15159 15206 }
15160 15207
15161 15208 mutex_exit(&dtrace_lock);
15162 15209 }
15163 15210
15164 15211 mutex_exit(&dtrace_meta_lock);
15165 15212
15166 15213 for (i = 0; i < help->dthps_nprovs; i++) {
15167 15214 dtrace_helper_provider_destroy(help->dthps_provs[i]);
15168 15215 }
15169 15216
15170 15217 kmem_free(help->dthps_provs, help->dthps_maxprovs *
15171 15218 sizeof (dtrace_helper_provider_t *));
15172 15219 }
15173 15220
15174 15221 mutex_enter(&dtrace_lock);
15175 15222
15176 15223 dtrace_vstate_fini(&help->dthps_vstate);
15177 15224 kmem_free(help->dthps_actions,
15178 15225 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
15179 15226 kmem_free(help, sizeof (dtrace_helpers_t));
15180 15227
15181 15228 --dtrace_helpers;
15182 15229 mutex_exit(&dtrace_lock);
15183 15230 }
15184 15231
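/*
 * Editorial note: the teardown above first clears p_dtrace_helpers
 * and then calls dtrace_sync(), guaranteeing that no CPU is still
 * dereferencing the helpers from probe context before anything is
 * freed.  Below is a sketch of that unpublish-then-synchronize
 * discipline, with C11 atomics standing in for the kernel's
 * mechanism; the toy_* names are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdatomic.h>
#include <stdlib.h>

static void
toy_wait_for_readers(void)
{
	/*
	 * Stand-in for dtrace_sync(); a real implementation would
	 * wait for an RCU-style grace period here.
	 */
}

static void
toy_retire(_Atomic(void *) *slotp)
{
	void *old = atomic_exchange(slotp, NULL);	/* unpublish */

	if (old == NULL)
		return;

	toy_wait_for_readers();		/* drain concurrent readers */
	free(old);			/* now safe to reclaim */
}
#endif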
15185 15232 static void
15186 15233 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
15187 15234 {
15188 15235 dtrace_helpers_t *help, *newhelp;
15189 15236 dtrace_helper_action_t *helper, *new, *last;
15190 15237 dtrace_difo_t *dp;
15191 15238 dtrace_vstate_t *vstate;
15192 15239 int i, j, sz, hasprovs = 0;
15193 15240
15194 15241 mutex_enter(&dtrace_lock);
15195 15242 ASSERT(from->p_dtrace_helpers != NULL);
15196 15243 ASSERT(dtrace_helpers > 0);
15197 15244
15198 15245 help = from->p_dtrace_helpers;
15199 15246 newhelp = dtrace_helpers_create(to);
15200 15247 ASSERT(to->p_dtrace_helpers != NULL);
15201 15248
15202 15249 newhelp->dthps_generation = help->dthps_generation;
15203 15250 vstate = &newhelp->dthps_vstate;
15204 15251
15205 15252 /*
15206 15253 * Duplicate the helper actions.
15207 15254 */
15208 15255 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15209 15256 if ((helper = help->dthps_actions[i]) == NULL)
15210 15257 continue;
15211 15258
15212 15259 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
15213 15260 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
15214 15261 KM_SLEEP);
15215 15262 new->dtha_generation = helper->dtha_generation;
15216 15263
15217 15264 if ((dp = helper->dtha_predicate) != NULL) {
15218 15265 dp = dtrace_difo_duplicate(dp, vstate);
15219 15266 new->dtha_predicate = dp;
15220 15267 }
15221 15268
15222 15269 new->dtha_nactions = helper->dtha_nactions;
15223 15270 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
15224 15271 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
15225 15272
15226 15273 for (j = 0; j < new->dtha_nactions; j++) {
15227 15274 dtrace_difo_t *dp = helper->dtha_actions[j];
15228 15275
15229 15276 ASSERT(dp != NULL);
15230 15277 dp = dtrace_difo_duplicate(dp, vstate);
15231 15278 new->dtha_actions[j] = dp;
15232 15279 }
15233 15280
15234 15281 if (last != NULL) {
15235 15282 last->dtha_next = new;
15236 15283 } else {
15237 15284 newhelp->dthps_actions[i] = new;
15238 15285 }
15239 15286
15240 15287 last = new;
15241 15288 }
15242 15289 }
15243 15290
15244 15291 /*
15245 15292 * Duplicate the helper providers and register them with the
15246 15293 * DTrace framework.
15247 15294 */
15248 15295 if (help->dthps_nprovs > 0) {
15249 15296 newhelp->dthps_nprovs = help->dthps_nprovs;
15250 15297 newhelp->dthps_maxprovs = help->dthps_nprovs;
15251 15298 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
15252 15299 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15253 15300 for (i = 0; i < newhelp->dthps_nprovs; i++) {
15254 15301 newhelp->dthps_provs[i] = help->dthps_provs[i];
15255 15302 newhelp->dthps_provs[i]->dthp_ref++;
15256 15303 }
15257 15304
15258 15305 hasprovs = 1;
15259 15306 }
15260 15307
15261 15308 mutex_exit(&dtrace_lock);
15262 15309
15263 15310 if (hasprovs)
15264 15311 dtrace_helper_provider_register(to, newhelp, NULL);
15265 15312 }
15266 15313
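/*
 * Editorial note: on fork, the duplication above deep-copies the
 * mutable helper actions but merely bumps a reference count on each
 * helper provider, which is immutable once created.  Below is a
 * sketch of the copy-versus-share split; the toy_* names are
 * hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdlib.h>
#include <string.h>

typedef struct toy_prov {
	int tp_ref;			/* shared and refcounted */
} toy_prov_t;

typedef struct toy_helpers {
	char *th_actions;		/* mutable: must be deep-copied */
	size_t th_actsz;
	toy_prov_t *th_prov;		/* immutable: safe to share */
} toy_helpers_t;

static int
toy_dup(const toy_helpers_t *from, toy_helpers_t *to)
{
	if ((to->th_actions = malloc(from->th_actsz)) == NULL)
		return (-1);
	memcpy(to->th_actions, from->th_actions, from->th_actsz);
	to->th_actsz = from->th_actsz;

	to->th_prov = from->th_prov;
	to->th_prov->tp_ref++;		/* share, don't copy */

	return (0);
}
#endif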
15267 15314 /*
15268 15315 * DTrace Hook Functions
15269 15316 */
15270 15317 static void
15271 15318 dtrace_module_loaded(struct modctl *ctl)
15272 15319 {
15273 15320 dtrace_provider_t *prv;
15274 15321
15275 15322 mutex_enter(&dtrace_provider_lock);
15276 15323 mutex_enter(&mod_lock);
15277 15324
15278 15325 ASSERT(ctl->mod_busy);
15279 15326
15280 15327 /*
15281 15328 	 * We're going to call each provider's per-module provide operation
15282 15329 * specifying only this module.
15283 15330 */
15284 15331 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15285 15332 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15286 15333
15287 15334 mutex_exit(&mod_lock);
15288 15335 mutex_exit(&dtrace_provider_lock);
15289 15336
15290 15337 /*
15291 15338 * If we have any retained enablings, we need to match against them.
15292 15339 * Enabling probes requires that cpu_lock be held, and we cannot hold
15293 15340 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15294 15341 * module. (In particular, this happens when loading scheduling
15295 15342 * classes.) So if we have any retained enablings, we need to dispatch
15296 15343 * our task queue to do the match for us.
15297 15344 */
15298 15345 mutex_enter(&dtrace_lock);
15299 15346
15300 15347 if (dtrace_retained == NULL) {
15301 15348 mutex_exit(&dtrace_lock);
15302 15349 return;
15303 15350 }
15304 15351
15305 15352 (void) taskq_dispatch(dtrace_taskq,
15306 15353 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15307 15354
15308 15355 mutex_exit(&dtrace_lock);
15309 15356
15310 15357 /*
15311 15358 * And now, for a little heuristic sleaze: in general, we want to
15312 15359 * match modules as soon as they load. However, we cannot guarantee
15313 15360 * this, because it would lead us to the lock ordering violation
15314 15361 * outlined above. The common case, of course, is that cpu_lock is
15315 15362 * _not_ held -- so we delay here for a clock tick, hoping that that's
15316 15363 * long enough for the task queue to do its work. If it's not, it's
15317 15364 * not a serious problem -- it just means that the module that we
15318 15365 * just loaded may not be immediately instrumentable.
15319 15366 */
15320 15367 delay(1);
15321 15368 }
15322 15369
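/*
 * Editorial note: dtrace_module_loaded() above cannot call
 * dtrace_enabling_matchall() directly, because matching needs
 * cpu_lock and cpu_lock may already be held on the module-load path;
 * it hands the work to a task queue instead.  Below is a sketch of
 * such deferral, with a detached pthread standing in for
 * taskq_dispatch(); the toy_* names are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>

static void *
toy_matchall(void *arg)
{
	/* Runs later, in a context free to take the contended lock. */
	return (arg);
}

static int
toy_defer_match(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, toy_matchall, NULL) != 0)
		return (-1);
	return (pthread_detach(tid));	/* fire and forget */
}
#endif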
15323 15370 static void
15324 15371 dtrace_module_unloaded(struct modctl *ctl)
15325 15372 {
15326 15373 dtrace_probe_t template, *probe, *first, *next;
15327 15374 dtrace_provider_t *prov;
15328 15375
15329 15376 template.dtpr_mod = ctl->mod_modname;
15330 15377
15331 15378 mutex_enter(&dtrace_provider_lock);
15332 15379 mutex_enter(&mod_lock);
15333 15380 mutex_enter(&dtrace_lock);
15334 15381
15335 15382 if (dtrace_bymod == NULL) {
15336 15383 /*
15337 15384 * The DTrace module is loaded (obviously) but not attached;
15338 15385 * we don't have any work to do.
15339 15386 */
15340 15387 mutex_exit(&dtrace_provider_lock);
15341 15388 mutex_exit(&mod_lock);
15342 15389 mutex_exit(&dtrace_lock);
15343 15390 return;
15344 15391 }
15345 15392
15346 15393 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15347 15394 probe != NULL; probe = probe->dtpr_nextmod) {
15348 15395 if (probe->dtpr_ecb != NULL) {
15349 15396 mutex_exit(&dtrace_provider_lock);
15350 15397 mutex_exit(&mod_lock);
15351 15398 mutex_exit(&dtrace_lock);
15352 15399
15353 15400 /*
15354 15401 * This shouldn't _actually_ be possible -- we're
15355 15402 * unloading a module that has an enabled probe in it.
15356 15403 * (It's normally up to the provider to make sure that
15357 15404 * this can't happen.) However, because dtps_enable()
15358 15405 * doesn't have a failure mode, there can be an
15359 15406 * enable/unload race. Upshot: we don't want to
15360 15407 * assert, but we're not going to disable the
15361 15408 * probe, either.
15362 15409 */
15363 15410 if (dtrace_err_verbose) {
15364 15411 cmn_err(CE_WARN, "unloaded module '%s' had "
15365 15412 "enabled probes", ctl->mod_modname);
15366 15413 }
15367 15414
15368 15415 return;
15369 15416 }
15370 15417 }
15371 15418
15372 15419 probe = first;
15373 15420
15374 15421 for (first = NULL; probe != NULL; probe = next) {
15375 15422 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
15376 15423
15377 15424 dtrace_probes[probe->dtpr_id - 1] = NULL;
15378 15425
15379 15426 next = probe->dtpr_nextmod;
15380 15427 dtrace_hash_remove(dtrace_bymod, probe);
15381 15428 dtrace_hash_remove(dtrace_byfunc, probe);
15382 15429 dtrace_hash_remove(dtrace_byname, probe);
15383 15430
15384 15431 if (first == NULL) {
15385 15432 first = probe;
15386 15433 probe->dtpr_nextmod = NULL;
15387 15434 } else {
15388 15435 probe->dtpr_nextmod = first;
15389 15436 first = probe;
15390 15437 }
15391 15438 }
15392 15439
15393 15440 /*
15394 15441 * We've removed all of the module's probes from the hash chains and
15395 15442 * from the probe array. Now issue a dtrace_sync() to be sure that
15396 15443 * everyone has cleared out from any probe array processing.
15397 15444 */
15398 15445 dtrace_sync();
15399 15446
15400 15447 for (probe = first; probe != NULL; probe = first) {
15401 15448 first = probe->dtpr_nextmod;
15402 15449 prov = probe->dtpr_provider;
15403 15450 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15404 15451 probe->dtpr_arg);
15405 15452 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15406 15453 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15407 15454 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15408 15455 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15409 15456 kmem_free(probe, sizeof (dtrace_probe_t));
15410 15457 }
15411 15458
15412 15459 mutex_exit(&dtrace_lock);
15413 15460 mutex_exit(&mod_lock);
15414 15461 mutex_exit(&dtrace_provider_lock);
15415 15462 }
15416 15463
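/*
 * Editorial note: the unload path above is a two-phase teardown --
 * phase one unlinks every probe from the shared lookup structures,
 * one dtrace_sync() lets in-flight probe processing drain, and only
 * then does phase two free memory.  Below is a condensed sketch on a
 * single list; the toy_* names are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdlib.h>

typedef struct toy_node {
	struct toy_node *tn_next;
} toy_node_t;

static void
toy_sync(void)
{
	/* Stand-in for dtrace_sync(). */
}

static void
toy_unload(toy_node_t **headp)
{
	toy_node_t *doomed = *headp, *n;

	*headp = NULL;		/* phase 1: unpublish the whole chain */
	toy_sync();		/* wait out concurrent traversals */

	for (n = doomed; n != NULL; n = doomed) {	/* phase 2 */
		doomed = n->tn_next;
		free(n);
	}
}
#endif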
15417 15464 void
15418 15465 dtrace_suspend(void)
15419 15466 {
15420 15467 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15421 15468 }
15422 15469
15423 15470 void
15424 15471 dtrace_resume(void)
15425 15472 {
15426 15473 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15427 15474 }
15428 15475
15429 15476 static int
15430 15477 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15431 15478 {
15432 15479 ASSERT(MUTEX_HELD(&cpu_lock));
15433 15480 mutex_enter(&dtrace_lock);
15434 15481
15435 15482 switch (what) {
15436 15483 case CPU_CONFIG: {
15437 15484 dtrace_state_t *state;
15438 15485 dtrace_optval_t *opt, rs, c;
15439 15486
15440 15487 /*
15441 15488 * For now, we only allocate a new buffer for anonymous state.
15442 15489 */
15443 15490 if ((state = dtrace_anon.dta_state) == NULL)
15444 15491 break;
15445 15492
15446 15493 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15447 15494 break;
15448 15495
15449 15496 opt = state->dts_options;
15450 15497 c = opt[DTRACEOPT_CPU];
15451 15498
15452 15499 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15453 15500 break;
15454 15501
15455 15502 /*
15456 15503 * Regardless of what the actual policy is, we're going to
15457 15504 * temporarily set our resize policy to be manual. We're
15458 15505 * also going to temporarily set our CPU option to denote
15459 15506 * the newly configured CPU.
15460 15507 */
15461 15508 rs = opt[DTRACEOPT_BUFRESIZE];
15462 15509 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15463 15510 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15464 15511
15465 15512 (void) dtrace_state_buffers(state);
15466 15513
15467 15514 opt[DTRACEOPT_BUFRESIZE] = rs;
15468 15515 opt[DTRACEOPT_CPU] = c;
15469 15516
15470 15517 break;
15471 15518 }
15472 15519
15473 15520 case CPU_UNCONFIG:
15474 15521 /*
15475 15522 * We don't free the buffer in the CPU_UNCONFIG case. (The
15476 15523 * buffer will be freed when the consumer exits.)
15477 15524 */
15478 15525 break;
15479 15526
15480 15527 default:
15481 15528 break;
15482 15529 }
15483 15530
15484 15531 mutex_exit(&dtrace_lock);
15485 15532 return (0);
15486 15533 }
15487 15534
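/*
 * Editorial note: the CPU_CONFIG case above uses a save/override/
 * restore dance on the option vector so the existing
 * dtrace_state_buffers() helper can be aimed at a single CPU without
 * growing a new parameter.  Below is a sketch of that pattern; the
 * toy_* names are hypothetical.
 */
#if 0	/* illustrative sketch only */
typedef long toy_optval_t;

static int
toy_apply(toy_optval_t *opts)
{
	return (opts[0] >= 0 ? 0 : -1);	/* consults opts[0] */
}

static int
toy_with_option(toy_optval_t *opts, toy_optval_t tmpval)
{
	toy_optval_t saved = opts[0];
	int rv;

	opts[0] = tmpval;		/* temporary override */
	rv = toy_apply(opts);
	opts[0] = saved;		/* always restore */

	return (rv);
}
#endif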
15488 15535 static void
15489 15536 dtrace_cpu_setup_initial(processorid_t cpu)
15490 15537 {
15491 15538 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15492 15539 }
15493 15540
15494 15541 static void
15495 15542 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15496 15543 {
15497 15544 if (dtrace_toxranges >= dtrace_toxranges_max) {
15498 15545 int osize, nsize;
15499 15546 dtrace_toxrange_t *range;
15500 15547
15501 15548 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15502 15549
15503 15550 if (osize == 0) {
15504 15551 ASSERT(dtrace_toxrange == NULL);
15505 15552 ASSERT(dtrace_toxranges_max == 0);
15506 15553 dtrace_toxranges_max = 1;
15507 15554 } else {
15508 15555 dtrace_toxranges_max <<= 1;
15509 15556 }
15510 15557
15511 15558 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15512 15559 range = kmem_zalloc(nsize, KM_SLEEP);
15513 15560
15514 15561 if (dtrace_toxrange != NULL) {
15515 15562 ASSERT(osize != 0);
15516 15563 bcopy(dtrace_toxrange, range, osize);
15517 15564 kmem_free(dtrace_toxrange, osize);
15518 15565 }
15519 15566
15520 15567 dtrace_toxrange = range;
15521 15568 }
15522 15569
15523 15570 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
15524 15571 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
15525 15572
15526 15573 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15527 15574 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15528 15575 dtrace_toxranges++;
15529 15576 }
15530 15577
15531 15578 static void
15532 15579 dtrace_getf_barrier(void)
15533 15580 {
15534 15581 /*
15535 15582 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
15536 15583 * that contain calls to getf(), this routine will be called on every
15537 15584 * closef() before either the underlying vnode is released or the
15538 15585 * file_t itself is freed. By the time we are here, it is essential
15539 15586 * that the file_t can no longer be accessed from a call to getf()
15540 15587 * in probe context -- that assures that a dtrace_sync() can be used
15541 15588 * to clear out any enablings referring to the old structures.
15542 15589 */
15543 15590 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
15544 15591 kcred->cr_zone->zone_dtrace_getf != 0)
15545 15592 dtrace_sync();
15546 15593 }
15547 15594
15548 15595 /*
15549 15596 * DTrace Driver Cookbook Functions
15550 15597 */
15551 15598 /*ARGSUSED*/
15552 15599 static int
15553 15600 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15554 15601 {
15555 15602 dtrace_provider_id_t id;
15556 15603 dtrace_state_t *state = NULL;
15557 15604 dtrace_enabling_t *enab;
15558 15605
15559 15606 mutex_enter(&cpu_lock);
15560 15607 mutex_enter(&dtrace_provider_lock);
15561 15608 mutex_enter(&dtrace_lock);
15562 15609
15563 15610 if (ddi_soft_state_init(&dtrace_softstate,
15564 15611 sizeof (dtrace_state_t), 0) != 0) {
15565 15612 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15566 15613 mutex_exit(&cpu_lock);
15567 15614 mutex_exit(&dtrace_provider_lock);
15568 15615 mutex_exit(&dtrace_lock);
15569 15616 return (DDI_FAILURE);
15570 15617 }
15571 15618
15572 15619 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15573 15620 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15574 15621 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15575 15622 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15576 15623 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15577 15624 ddi_remove_minor_node(devi, NULL);
15578 15625 ddi_soft_state_fini(&dtrace_softstate);
15579 15626 mutex_exit(&cpu_lock);
15580 15627 mutex_exit(&dtrace_provider_lock);
15581 15628 mutex_exit(&dtrace_lock);
15582 15629 return (DDI_FAILURE);
15583 15630 }
15584 15631
15585 15632 ddi_report_dev(devi);
15586 15633 dtrace_devi = devi;
15587 15634
15588 15635 dtrace_modload = dtrace_module_loaded;
15589 15636 dtrace_modunload = dtrace_module_unloaded;
15590 15637 dtrace_cpu_init = dtrace_cpu_setup_initial;
15591 15638 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15592 15639 dtrace_helpers_fork = dtrace_helpers_duplicate;
15593 15640 dtrace_cpustart_init = dtrace_suspend;
15594 15641 dtrace_cpustart_fini = dtrace_resume;
15595 15642 dtrace_debugger_init = dtrace_suspend;
15596 15643 dtrace_debugger_fini = dtrace_resume;
15597 15644
15598 15645 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15599 15646
15600 15647 ASSERT(MUTEX_HELD(&cpu_lock));
15601 15648
15602 15649 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15603 15650 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15604 15651 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15605 15652 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15606 15653 VM_SLEEP | VMC_IDENTIFIER);
15607 15654 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15608 15655 1, INT_MAX, 0);
15609 15656
15610 15657 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15611 15658 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15612 15659 NULL, NULL, NULL, NULL, NULL, 0);
15613 15660
15614 15661 ASSERT(MUTEX_HELD(&cpu_lock));
15615 15662 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15616 15663 offsetof(dtrace_probe_t, dtpr_nextmod),
15617 15664 offsetof(dtrace_probe_t, dtpr_prevmod));
15618 15665
15619 15666 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15620 15667 offsetof(dtrace_probe_t, dtpr_nextfunc),
15621 15668 offsetof(dtrace_probe_t, dtpr_prevfunc));
15622 15669
15623 15670 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15624 15671 offsetof(dtrace_probe_t, dtpr_nextname),
15625 15672 offsetof(dtrace_probe_t, dtpr_prevname));
15626 15673
15627 15674 if (dtrace_retain_max < 1) {
15628 15675 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15629 15676 "setting to 1", dtrace_retain_max);
15630 15677 dtrace_retain_max = 1;
15631 15678 }
15632 15679
15633 15680 /*
15634 15681 * Now discover our toxic ranges.
15635 15682 */
15636 15683 dtrace_toxic_ranges(dtrace_toxrange_add);
15637 15684
15638 15685 /*
15639 15686 * Before we register ourselves as a provider to our own framework,
15640 15687 * we would like to assert that dtrace_provider is NULL -- but that's
15641 15688 * not true if we were loaded as a dependency of a DTrace provider.
15642 15689 * Once we've registered, we can assert that dtrace_provider is our
15643 15690 * pseudo provider.
15644 15691 */
15645 15692 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15646 15693 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15647 15694
15648 15695 ASSERT(dtrace_provider != NULL);
15649 15696 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15650 15697
15651 15698 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15652 15699 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15653 15700 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15654 15701 dtrace_provider, NULL, NULL, "END", 0, NULL);
15655 15702 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15656 15703 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15657 15704
15658 15705 dtrace_anon_property();
15659 15706 mutex_exit(&cpu_lock);
15660 15707
15661 15708 /*
15662 15709 * If DTrace helper tracing is enabled, we need to allocate the
15663 15710 * trace buffer and initialize the values.
15664 15711 */
15665 15712 if (dtrace_helptrace_enabled) {
15666 15713 ASSERT(dtrace_helptrace_buffer == NULL);
15667 15714 dtrace_helptrace_buffer =
15668 15715 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15669 15716 dtrace_helptrace_next = 0;
15670 15717 }
15671 15718
15672 15719 /*
15673 15720 * If there are already providers, we must ask them to provide their
15674 15721 * probes, and then match any anonymous enabling against them. Note
15675 15722 	 * that there should be no other retained enablings at this time;
15676 15723 	 * the only retained enabling should be the anonymous
15677 15724 	 * enabling.
15678 15725 */
15679 15726 if (dtrace_anon.dta_enabling != NULL) {
15680 15727 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15681 15728
15682 15729 dtrace_enabling_provide(NULL);
15683 15730 state = dtrace_anon.dta_state;
15684 15731
15685 15732 /*
15686 15733 * We couldn't hold cpu_lock across the above call to
15687 15734 * dtrace_enabling_provide(), but we must hold it to actually
15688 15735 * enable the probes. We have to drop all of our locks, pick
15689 15736 * up cpu_lock, and regain our locks before matching the
15690 15737 * retained anonymous enabling.
15691 15738 */
15692 15739 mutex_exit(&dtrace_lock);
15693 15740 mutex_exit(&dtrace_provider_lock);
15694 15741
15695 15742 mutex_enter(&cpu_lock);
15696 15743 mutex_enter(&dtrace_provider_lock);
15697 15744 mutex_enter(&dtrace_lock);
15698 15745
15699 15746 if ((enab = dtrace_anon.dta_enabling) != NULL)
15700 15747 (void) dtrace_enabling_match(enab, NULL);
15701 15748
15702 15749 mutex_exit(&cpu_lock);
15703 15750 }
15704 15751
15705 15752 mutex_exit(&dtrace_lock);
15706 15753 mutex_exit(&dtrace_provider_lock);
15707 15754
15708 15755 if (state != NULL) {
15709 15756 /*
15710 15757 * If we created any anonymous state, set it going now.
15711 15758 */
15712 15759 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15713 15760 }
15714 15761
15715 15762 return (DDI_SUCCESS);
15716 15763 }
15717 15764
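/*
 * Editorial note: dtrace_hash_create() above is passed three
 * offsetof() values so that one hash implementation can key and
 * chain off fields embedded directly in dtrace_probe_t -- an
 * intrusive hash.  Below is a sketch of recovering an embedded
 * member from an object given only its byte offset; the toy_* names
 * are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>

typedef struct toy_probe {
	const char *tp_mod;
	struct toy_probe *tp_nextmod;
} toy_probe_t;

/* Fetch the string pointer stored at byte offset `off' in `obj'. */
static const char *
toy_key_at(const void *obj, size_t off)
{
	return (*(const char *const *)((const char *)obj + off));
}

/*
 * Usage: toy_key_at(p, offsetof(toy_probe_t, tp_mod)) yields
 * p->tp_mod, letting the container stay generic over key fields.
 */
#endif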
15718 15765 /*ARGSUSED*/
15719 15766 static int
15720 15767 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15721 15768 {
15722 15769 dtrace_state_t *state;
15723 15770 uint32_t priv;
15724 15771 uid_t uid;
15725 15772 zoneid_t zoneid;
15726 15773
15727 15774 if (getminor(*devp) == DTRACEMNRN_HELPER)
15728 15775 return (0);
15729 15776
15730 15777 /*
15731 15778 * If this wasn't an open with the "helper" minor, then it must be
15732 15779 * the "dtrace" minor.
15733 15780 */
15734 15781 if (getminor(*devp) != DTRACEMNRN_DTRACE)
15735 15782 return (ENXIO);
15736 15783
15737 15784 /*
15738 15785 * If no DTRACE_PRIV_* bits are set in the credential, then the
15739 15786 * caller lacks sufficient permission to do anything with DTrace.
15740 15787 */
15741 15788 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15742 15789 if (priv == DTRACE_PRIV_NONE)
15743 15790 return (EACCES);
15744 15791
15745 15792 /*
15746 15793 * Ask all providers to provide all their probes.
15747 15794 */
15748 15795 mutex_enter(&dtrace_provider_lock);
15749 15796 dtrace_probe_provide(NULL, NULL);
15750 15797 mutex_exit(&dtrace_provider_lock);
15751 15798
15752 15799 mutex_enter(&cpu_lock);
15753 15800 mutex_enter(&dtrace_lock);
15754 15801 dtrace_opens++;
15755 15802 dtrace_membar_producer();
15756 15803
15757 15804 /*
15758 15805 * If the kernel debugger is active (that is, if the kernel debugger
15759 15806 * modified text in some way), we won't allow the open.
15760 15807 */
15761 15808 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15762 15809 dtrace_opens--;
15763 15810 mutex_exit(&cpu_lock);
15764 15811 mutex_exit(&dtrace_lock);
15765 15812 return (EBUSY);
15766 15813 }
15767 15814
15768 15815 state = dtrace_state_create(devp, cred_p);
15769 15816 mutex_exit(&cpu_lock);
15770 15817
15771 15818 if (state == NULL) {
15772 15819 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15773 15820 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15774 15821 mutex_exit(&dtrace_lock);
15775 15822 return (EAGAIN);
15776 15823 }
15777 15824
15778 15825 mutex_exit(&dtrace_lock);
15779 15826
15780 15827 return (0);
15781 15828 }
15782 15829
15783 15830 /*ARGSUSED*/
15784 15831 static int
15785 15832 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15786 15833 {
15787 15834 minor_t minor = getminor(dev);
15788 15835 dtrace_state_t *state;
15789 15836
15790 15837 if (minor == DTRACEMNRN_HELPER)
15791 15838 return (0);
15792 15839
15793 15840 state = ddi_get_soft_state(dtrace_softstate, minor);
15794 15841
15795 15842 mutex_enter(&cpu_lock);
15796 15843 mutex_enter(&dtrace_lock);
15797 15844
15798 15845 if (state->dts_anon) {
15799 15846 /*
15800 15847 * There is anonymous state. Destroy that first.
15801 15848 */
15802 15849 ASSERT(dtrace_anon.dta_state == NULL);
15803 15850 dtrace_state_destroy(state->dts_anon);
15804 15851 }
15805 15852
15806 15853 dtrace_state_destroy(state);
15807 15854 ASSERT(dtrace_opens > 0);
15808 15855
15809 15856 /*
15810 15857 * Only relinquish control of the kernel debugger interface when there
15811 15858 * are no consumers and no anonymous enablings.
15812 15859 */
15813 15860 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15814 15861 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15815 15862
15816 15863 mutex_exit(&dtrace_lock);
15817 15864 mutex_exit(&cpu_lock);
15818 15865
15819 15866 return (0);
15820 15867 }
15821 15868
15822 15869 /*ARGSUSED*/
15823 15870 static int
15824 15871 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15825 15872 {
15826 15873 int rval;
15827 15874 dof_helper_t help, *dhp = NULL;
15828 15875
15829 15876 switch (cmd) {
15830 15877 case DTRACEHIOC_ADDDOF:
15831 15878 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15832 15879 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15833 15880 return (EFAULT);
15834 15881 }
15835 15882
15836 15883 dhp = &help;
15837 15884 arg = (intptr_t)help.dofhp_dof;
15838 15885 /*FALLTHROUGH*/
15839 15886
15840 15887 case DTRACEHIOC_ADD: {
15841 15888 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15842 15889
15843 15890 if (dof == NULL)
15844 15891 return (rval);
15845 15892
15846 15893 mutex_enter(&dtrace_lock);
15847 15894
15848 15895 /*
15849 15896 * dtrace_helper_slurp() takes responsibility for the dof --
15850 15897 * it may free it now or it may save it and free it later.
15851 15898 */
15852 15899 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15853 15900 *rv = rval;
15854 15901 rval = 0;
15855 15902 } else {
15856 15903 rval = EINVAL;
15857 15904 }
15858 15905
15859 15906 mutex_exit(&dtrace_lock);
15860 15907 return (rval);
15861 15908 }
15862 15909
15863 15910 case DTRACEHIOC_REMOVE: {
15864 15911 mutex_enter(&dtrace_lock);
15865 15912 rval = dtrace_helper_destroygen(arg);
15866 15913 mutex_exit(&dtrace_lock);
15867 15914
15868 15915 return (rval);
15869 15916 }
15870 15917
15871 15918 default:
15872 15919 break;
15873 15920 }
15874 15921
15875 15922 return (ENOTTY);
15876 15923 }
15877 15924
15878 15925 /*ARGSUSED*/
15879 15926 static int
15880 15927 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15881 15928 {
15882 15929 minor_t minor = getminor(dev);
15883 15930 dtrace_state_t *state;
15884 15931 int rval;
15885 15932
15886 15933 if (minor == DTRACEMNRN_HELPER)
15887 15934 return (dtrace_ioctl_helper(cmd, arg, rv));
15888 15935
15889 15936 state = ddi_get_soft_state(dtrace_softstate, minor);
15890 15937
15891 15938 if (state->dts_anon) {
15892 15939 ASSERT(dtrace_anon.dta_state == NULL);
15893 15940 state = state->dts_anon;
15894 15941 }
15895 15942
15896 15943 switch (cmd) {
15897 15944 case DTRACEIOC_PROVIDER: {
15898 15945 dtrace_providerdesc_t pvd;
15899 15946 dtrace_provider_t *pvp;
15900 15947
15901 15948 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15902 15949 return (EFAULT);
15903 15950
15904 15951 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15905 15952 mutex_enter(&dtrace_provider_lock);
15906 15953
15907 15954 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15908 15955 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15909 15956 break;
15910 15957 }
15911 15958
15912 15959 mutex_exit(&dtrace_provider_lock);
15913 15960
15914 15961 if (pvp == NULL)
15915 15962 return (ESRCH);
15916 15963
15917 15964 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15918 15965 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15919 15966 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15920 15967 return (EFAULT);
15921 15968
15922 15969 return (0);
15923 15970 }
15924 15971
15925 15972 case DTRACEIOC_EPROBE: {
15926 15973 dtrace_eprobedesc_t epdesc;
15927 15974 dtrace_ecb_t *ecb;
15928 15975 dtrace_action_t *act;
15929 15976 void *buf;
15930 15977 size_t size;
15931 15978 uintptr_t dest;
15932 15979 int nrecs;
15933 15980
15934 15981 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15935 15982 return (EFAULT);
15936 15983
15937 15984 mutex_enter(&dtrace_lock);
15938 15985
15939 15986 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15940 15987 mutex_exit(&dtrace_lock);
15941 15988 return (EINVAL);
15942 15989 }
15943 15990
15944 15991 if (ecb->dte_probe == NULL) {
15945 15992 mutex_exit(&dtrace_lock);
15946 15993 return (EINVAL);
15947 15994 }
15948 15995
15949 15996 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15950 15997 epdesc.dtepd_uarg = ecb->dte_uarg;
15951 15998 epdesc.dtepd_size = ecb->dte_size;
15952 15999
15953 16000 nrecs = epdesc.dtepd_nrecs;
15954 16001 epdesc.dtepd_nrecs = 0;
15955 16002 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15956 16003 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15957 16004 continue;
15958 16005
15959 16006 epdesc.dtepd_nrecs++;
15960 16007 }
15961 16008
15962 16009 /*
15963 16010 * Now that we have the size, we need to allocate a temporary
15964 16011 * buffer in which to store the complete description. We need
15965 16012 * the temporary buffer to be able to drop dtrace_lock()
15966 16013 * across the copyout(), below.
15967 16014 */
15968 16015 size = sizeof (dtrace_eprobedesc_t) +
15969 16016 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15970 16017
15971 16018 buf = kmem_alloc(size, KM_SLEEP);
15972 16019 dest = (uintptr_t)buf;
15973 16020
15974 16021 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15975 16022 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15976 16023
15977 16024 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15978 16025 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15979 16026 continue;
15980 16027
15981 16028 if (nrecs-- == 0)
15982 16029 break;
15983 16030
15984 16031 bcopy(&act->dta_rec, (void *)dest,
15985 16032 sizeof (dtrace_recdesc_t));
15986 16033 dest += sizeof (dtrace_recdesc_t);
15987 16034 }
15988 16035
15989 16036 mutex_exit(&dtrace_lock);
15990 16037
15991 16038 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15992 16039 kmem_free(buf, size);
15993 16040 return (EFAULT);
15994 16041 }
15995 16042
15996 16043 kmem_free(buf, size);
15997 16044 return (0);
15998 16045 }
15999 16046
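/*
 * Editorial note: DTRACEIOC_EPROBE above and DTRACEIOC_AGGDESC below
 * share a shape -- size the result under dtrace_lock, stage it into
 * a temporary buffer, drop the lock, then copyout().  Below is a
 * userland sketch of that staging pattern, with memcpy standing in
 * for both bcopy and copyout; toy_staged_copy is hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdlib.h>
#include <string.h>

static int
toy_staged_copy(const int *src, int n, int *dst)
{
	size_t size = n * sizeof (int);
	int *buf = malloc(size);	/* staging buffer */

	if (buf == NULL)
		return (-1);

	/* "Under the lock": capture a consistent snapshot. */
	memcpy(buf, src, size);

	/* "Lock dropped": the slow transfer touches only the copy. */
	memcpy(dst, buf, size);

	free(buf);
	return (0);
}
#endif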
16000 16047 case DTRACEIOC_AGGDESC: {
16001 16048 dtrace_aggdesc_t aggdesc;
16002 16049 dtrace_action_t *act;
16003 16050 dtrace_aggregation_t *agg;
16004 16051 int nrecs;
16005 16052 uint32_t offs;
16006 16053 dtrace_recdesc_t *lrec;
16007 16054 void *buf;
16008 16055 size_t size;
16009 16056 uintptr_t dest;
16010 16057
16011 16058 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
16012 16059 return (EFAULT);
16013 16060
16014 16061 mutex_enter(&dtrace_lock);
16015 16062
16016 16063 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
16017 16064 mutex_exit(&dtrace_lock);
16018 16065 return (EINVAL);
16019 16066 }
16020 16067
16021 16068 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
16022 16069
16023 16070 nrecs = aggdesc.dtagd_nrecs;
16024 16071 aggdesc.dtagd_nrecs = 0;
16025 16072
16026 16073 offs = agg->dtag_base;
16027 16074 lrec = &agg->dtag_action.dta_rec;
16028 16075 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
16029 16076
16030 16077 for (act = agg->dtag_first; ; act = act->dta_next) {
16031 16078 ASSERT(act->dta_intuple ||
16032 16079 DTRACEACT_ISAGG(act->dta_kind));
16033 16080
16034 16081 /*
16035 16082 * If this action has a record size of zero, it
16036 16083 * denotes an argument to the aggregating action.
16037 16084 * Because the presence of this record doesn't (or
16038 16085 * shouldn't) affect the way the data is interpreted,
16039 16086 * we don't copy it out to save user-level the
16040 16087 * confusion of dealing with a zero-length record.
16041 16088 */
16042 16089 if (act->dta_rec.dtrd_size == 0) {
16043 16090 ASSERT(agg->dtag_hasarg);
16044 16091 continue;
16045 16092 }
16046 16093
16047 16094 aggdesc.dtagd_nrecs++;
16048 16095
16049 16096 if (act == &agg->dtag_action)
16050 16097 break;
16051 16098 }
16052 16099
16053 16100 /*
16054 16101 * Now that we have the size, we need to allocate a temporary
16055 16102 * buffer in which to store the complete description. We need
16056 16103 * the temporary buffer to be able to drop dtrace_lock()
16057 16104 * across the copyout(), below.
16058 16105 */
16059 16106 size = sizeof (dtrace_aggdesc_t) +
16060 16107 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
16061 16108
16062 16109 buf = kmem_alloc(size, KM_SLEEP);
16063 16110 dest = (uintptr_t)buf;
16064 16111
16065 16112 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
16066 16113 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
16067 16114
16068 16115 for (act = agg->dtag_first; ; act = act->dta_next) {
16069 16116 dtrace_recdesc_t rec = act->dta_rec;
16070 16117
16071 16118 /*
16072 16119 * See the comment in the above loop for why we pass
16073 16120 * over zero-length records.
16074 16121 */
16075 16122 if (rec.dtrd_size == 0) {
16076 16123 ASSERT(agg->dtag_hasarg);
16077 16124 continue;
16078 16125 }
16079 16126
16080 16127 if (nrecs-- == 0)
16081 16128 break;
16082 16129
16083 16130 rec.dtrd_offset -= offs;
16084 16131 bcopy(&rec, (void *)dest, sizeof (rec));
16085 16132 dest += sizeof (dtrace_recdesc_t);
16086 16133
16087 16134 if (act == &agg->dtag_action)
16088 16135 break;
16089 16136 }
16090 16137
16091 16138 mutex_exit(&dtrace_lock);
16092 16139
16093 16140 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16094 16141 kmem_free(buf, size);
16095 16142 return (EFAULT);
16096 16143 }
16097 16144
16098 16145 kmem_free(buf, size);
16099 16146 return (0);
16100 16147 }
16101 16148
16102 16149 case DTRACEIOC_ENABLE: {
16103 16150 dof_hdr_t *dof;
16104 16151 dtrace_enabling_t *enab = NULL;
16105 16152 dtrace_vstate_t *vstate;
16106 16153 int err = 0;
16107 16154
16108 16155 *rv = 0;
16109 16156
16110 16157 /*
16111 16158 * If a NULL argument has been passed, we take this as our
16112 16159 * cue to reevaluate our enablings.
16113 16160 */
16114 16161 if (arg == NULL) {
16115 16162 dtrace_enabling_matchall();
16116 16163
16117 16164 return (0);
16118 16165 }
16119 16166
16120 16167 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
16121 16168 return (rval);
16122 16169
16123 16170 mutex_enter(&cpu_lock);
16124 16171 mutex_enter(&dtrace_lock);
16125 16172 vstate = &state->dts_vstate;
16126 16173
16127 16174 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
16128 16175 mutex_exit(&dtrace_lock);
16129 16176 mutex_exit(&cpu_lock);
16130 16177 dtrace_dof_destroy(dof);
16131 16178 return (EBUSY);
16132 16179 }
16133 16180
16134 16181 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
16135 16182 mutex_exit(&dtrace_lock);
16136 16183 mutex_exit(&cpu_lock);
16137 16184 dtrace_dof_destroy(dof);
16138 16185 return (EINVAL);
16139 16186 }
16140 16187
16141 16188 if ((rval = dtrace_dof_options(dof, state)) != 0) {
16142 16189 dtrace_enabling_destroy(enab);
16143 16190 mutex_exit(&dtrace_lock);
16144 16191 mutex_exit(&cpu_lock);
16145 16192 dtrace_dof_destroy(dof);
16146 16193 return (rval);
16147 16194 }
16148 16195
16149 16196 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
16150 16197 err = dtrace_enabling_retain(enab);
16151 16198 } else {
16152 16199 dtrace_enabling_destroy(enab);
16153 16200 }
16154 16201
16155 16202 mutex_exit(&cpu_lock);
16156 16203 mutex_exit(&dtrace_lock);
16157 16204 dtrace_dof_destroy(dof);
16158 16205
16159 16206 return (err);
16160 16207 }
16161 16208
16162 16209 case DTRACEIOC_REPLICATE: {
16163 16210 dtrace_repldesc_t desc;
16164 16211 dtrace_probedesc_t *match = &desc.dtrpd_match;
16165 16212 dtrace_probedesc_t *create = &desc.dtrpd_create;
16166 16213 int err;
16167 16214
16168 16215 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16169 16216 return (EFAULT);
16170 16217
16171 16218 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16172 16219 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16173 16220 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16174 16221 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16175 16222
16176 16223 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16177 16224 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16178 16225 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16179 16226 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16180 16227
16181 16228 mutex_enter(&dtrace_lock);
16182 16229 err = dtrace_enabling_replicate(state, match, create);
16183 16230 mutex_exit(&dtrace_lock);
16184 16231
16185 16232 return (err);
16186 16233 }
16187 16234
16188 16235 case DTRACEIOC_PROBEMATCH:
16189 16236 case DTRACEIOC_PROBES: {
16190 16237 dtrace_probe_t *probe = NULL;
16191 16238 dtrace_probedesc_t desc;
16192 16239 dtrace_probekey_t pkey;
16193 16240 dtrace_id_t i;
16194 16241 int m = 0;
16195 16242 uint32_t priv;
16196 16243 uid_t uid;
16197 16244 zoneid_t zoneid;
16198 16245
16199 16246 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16200 16247 return (EFAULT);
16201 16248
16202 16249 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16203 16250 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16204 16251 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16205 16252 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16206 16253
16207 16254 /*
16208 16255 * Before we attempt to match this probe, we want to give
16209 16256 * all providers the opportunity to provide it.
16210 16257 */
16211 16258 if (desc.dtpd_id == DTRACE_IDNONE) {
16212 16259 mutex_enter(&dtrace_provider_lock);
16213 16260 dtrace_probe_provide(&desc, NULL);
16214 16261 mutex_exit(&dtrace_provider_lock);
16215 16262 desc.dtpd_id++;
16216 16263 }
16217 16264
16218 16265 if (cmd == DTRACEIOC_PROBEMATCH) {
16219 16266 dtrace_probekey(&desc, &pkey);
16220 16267 pkey.dtpk_id = DTRACE_IDNONE;
16221 16268 }
16222 16269
16223 16270 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
16224 16271
16225 16272 mutex_enter(&dtrace_lock);
16226 16273
16227 16274 if (cmd == DTRACEIOC_PROBEMATCH) {
16228 16275 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16229 16276 if ((probe = dtrace_probes[i - 1]) != NULL &&
16230 16277 (m = dtrace_match_probe(probe, &pkey,
16231 16278 priv, uid, zoneid)) != 0)
16232 16279 break;
16233 16280 }
16234 16281
16235 16282 if (m < 0) {
16236 16283 mutex_exit(&dtrace_lock);
16237 16284 return (EINVAL);
16238 16285 }
16239 16286
16240 16287 } else {
16241 16288 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16242 16289 if ((probe = dtrace_probes[i - 1]) != NULL &&
16243 16290 dtrace_match_priv(probe, priv, uid, zoneid))
16244 16291 break;
16245 16292 }
16246 16293 }
16247 16294
16248 16295 if (probe == NULL) {
16249 16296 mutex_exit(&dtrace_lock);
16250 16297 return (ESRCH);
16251 16298 }
16252 16299
16253 16300 dtrace_probe_description(probe, &desc);
16254 16301 mutex_exit(&dtrace_lock);
16255 16302
16256 16303 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16257 16304 return (EFAULT);
16258 16305
16259 16306 return (0);
16260 16307 }
16261 16308
16262 16309 case DTRACEIOC_PROBEARG: {
16263 16310 dtrace_argdesc_t desc;
16264 16311 dtrace_probe_t *probe;
16265 16312 dtrace_provider_t *prov;
16266 16313
16267 16314 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16268 16315 return (EFAULT);
16269 16316
16270 16317 if (desc.dtargd_id == DTRACE_IDNONE)
16271 16318 return (EINVAL);
16272 16319
16273 16320 if (desc.dtargd_ndx == DTRACE_ARGNONE)
16274 16321 return (EINVAL);
16275 16322
16276 16323 mutex_enter(&dtrace_provider_lock);
16277 16324 mutex_enter(&mod_lock);
16278 16325 mutex_enter(&dtrace_lock);
16279 16326
16280 16327 if (desc.dtargd_id > dtrace_nprobes) {
16281 16328 mutex_exit(&dtrace_lock);
16282 16329 mutex_exit(&mod_lock);
16283 16330 mutex_exit(&dtrace_provider_lock);
16284 16331 return (EINVAL);
16285 16332 }
16286 16333
16287 16334 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
16288 16335 mutex_exit(&dtrace_lock);
16289 16336 mutex_exit(&mod_lock);
16290 16337 mutex_exit(&dtrace_provider_lock);
16291 16338 return (EINVAL);
16292 16339 }
16293 16340
16294 16341 mutex_exit(&dtrace_lock);
16295 16342
16296 16343 prov = probe->dtpr_provider;
16297 16344
16298 16345 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
16299 16346 /*
16300 16347 * There isn't any typed information for this probe.
16301 16348 * Set the argument number to DTRACE_ARGNONE.
16302 16349 */
16303 16350 desc.dtargd_ndx = DTRACE_ARGNONE;
16304 16351 } else {
16305 16352 desc.dtargd_native[0] = '\0';
16306 16353 desc.dtargd_xlate[0] = '\0';
16307 16354 desc.dtargd_mapping = desc.dtargd_ndx;
16308 16355
16309 16356 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
16310 16357 probe->dtpr_id, probe->dtpr_arg, &desc);
16311 16358 }
16312 16359
16313 16360 mutex_exit(&mod_lock);
16314 16361 mutex_exit(&dtrace_provider_lock);
16315 16362
16316 16363 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16317 16364 return (EFAULT);
16318 16365
16319 16366 return (0);
16320 16367 }
16321 16368
16322 16369 case DTRACEIOC_GO: {
16323 16370 processorid_t cpuid;
16324 16371 rval = dtrace_state_go(state, &cpuid);
16325 16372
16326 16373 if (rval != 0)
16327 16374 return (rval);
16328 16375
16329 16376 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16330 16377 return (EFAULT);
16331 16378
16332 16379 return (0);
16333 16380 }
16334 16381
16335 16382 case DTRACEIOC_STOP: {
16336 16383 processorid_t cpuid;
16337 16384
16338 16385 mutex_enter(&dtrace_lock);
16339 16386 rval = dtrace_state_stop(state, &cpuid);
16340 16387 mutex_exit(&dtrace_lock);
16341 16388
16342 16389 if (rval != 0)
16343 16390 return (rval);
16344 16391
16345 16392 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16346 16393 return (EFAULT);
16347 16394
16348 16395 return (0);
16349 16396 }
16350 16397
16351 16398 case DTRACEIOC_DOFGET: {
16352 16399 dof_hdr_t hdr, *dof;
16353 16400 uint64_t len;
16354 16401
16355 16402 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
16356 16403 return (EFAULT);
16357 16404
16358 16405 mutex_enter(&dtrace_lock);
16359 16406 dof = dtrace_dof_create(state);
16360 16407 mutex_exit(&dtrace_lock);
16361 16408
16362 16409 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
16363 16410 rval = copyout(dof, (void *)arg, len);
16364 16411 dtrace_dof_destroy(dof);
16365 16412
16366 16413 return (rval == 0 ? 0 : EFAULT);
16367 16414 }
16368 16415
16369 16416 case DTRACEIOC_AGGSNAP:
16370 16417 case DTRACEIOC_BUFSNAP: {
16371 16418 dtrace_bufdesc_t desc;
16372 16419 caddr_t cached;
16373 16420 dtrace_buffer_t *buf;
16374 16421
16375 16422 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16376 16423 return (EFAULT);
16377 16424
16378 16425 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
16379 16426 return (EINVAL);
16380 16427
16381 16428 mutex_enter(&dtrace_lock);
16382 16429
16383 16430 if (cmd == DTRACEIOC_BUFSNAP) {
16384 16431 buf = &state->dts_buffer[desc.dtbd_cpu];
16385 16432 } else {
16386 16433 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16387 16434 }
16388 16435
16389 16436 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16390 16437 size_t sz = buf->dtb_offset;
16391 16438
16392 16439 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16393 16440 mutex_exit(&dtrace_lock);
16394 16441 return (EBUSY);
16395 16442 }
16396 16443
16397 16444 /*
16398 16445 * If this buffer has already been consumed, we're
16399 16446 * going to indicate that there's nothing left here
16400 16447 * to consume.
16401 16448 */
16402 16449 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16403 16450 mutex_exit(&dtrace_lock);
16404 16451
16405 16452 desc.dtbd_size = 0;
16406 16453 desc.dtbd_drops = 0;
16407 16454 desc.dtbd_errors = 0;
16408 16455 desc.dtbd_oldest = 0;
16409 16456 sz = sizeof (desc);
16410 16457
16411 16458 if (copyout(&desc, (void *)arg, sz) != 0)
16412 16459 return (EFAULT);
16413 16460
16414 16461 return (0);
16415 16462 }
16416 16463
16417 16464 /*
16418 16465 * If this is a ring buffer that has wrapped, we want
16419 16466 * to copy the whole thing out.
16420 16467 */
16421 16468 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16422 16469 dtrace_buffer_polish(buf);
16423 16470 sz = buf->dtb_size;
16424 16471 }
16425 16472
16426 16473 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16427 16474 mutex_exit(&dtrace_lock);
16428 16475 return (EFAULT);
16429 16476 }
16430 16477
16431 16478 desc.dtbd_size = sz;
16432 16479 desc.dtbd_drops = buf->dtb_drops;
16433 16480 desc.dtbd_errors = buf->dtb_errors;
16434 16481 desc.dtbd_oldest = buf->dtb_xamot_offset;
16435 16482 desc.dtbd_timestamp = dtrace_gethrtime();
16436 16483
16437 16484 mutex_exit(&dtrace_lock);
16438 16485
16439 16486 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16440 16487 return (EFAULT);
16441 16488
16442 16489 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16443 16490
16444 16491 return (0);
16445 16492 }
16446 16493
16447 16494 if (buf->dtb_tomax == NULL) {
16448 16495 ASSERT(buf->dtb_xamot == NULL);
16449 16496 mutex_exit(&dtrace_lock);
16450 16497 return (ENOENT);
16451 16498 }
16452 16499
16453 16500 cached = buf->dtb_tomax;
16454 16501 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16455 16502
16456 16503 dtrace_xcall(desc.dtbd_cpu,
16457 16504 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16458 16505
16459 16506 state->dts_errors += buf->dtb_xamot_errors;
16460 16507
16461 16508 /*
16462 16509 * If the buffers did not actually switch, then the cross call
16463 16510 * did not take place -- presumably because the given CPU is
16464 16511 * not in the ready set. If this is the case, we'll return
16465 16512 * ENOENT.
16466 16513 */
16467 16514 if (buf->dtb_tomax == cached) {
16468 16515 ASSERT(buf->dtb_xamot != cached);
16469 16516 mutex_exit(&dtrace_lock);
16470 16517 return (ENOENT);
16471 16518 }
16472 16519
16473 16520 ASSERT(cached == buf->dtb_xamot);
16474 16521
16475 16522 /*
16476 16523 * We have our snapshot; now copy it out.
16477 16524 */
16478 16525 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16479 16526 buf->dtb_xamot_offset) != 0) {
16480 16527 mutex_exit(&dtrace_lock);
16481 16528 return (EFAULT);
16482 16529 }
16483 16530
16484 16531 desc.dtbd_size = buf->dtb_xamot_offset;
16485 16532 desc.dtbd_drops = buf->dtb_xamot_drops;
16486 16533 desc.dtbd_errors = buf->dtb_xamot_errors;
16487 16534 desc.dtbd_oldest = 0;
16488 16535 desc.dtbd_timestamp = buf->dtb_switched;
16489 16536
16490 16537 mutex_exit(&dtrace_lock);
16491 16538
16492 16539 /*
16493 16540 * Finally, copy out the buffer description.
16494 16541 */
16495 16542 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16496 16543 return (EFAULT);
16497 16544
16498 16545 return (0);
16499 16546 }
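Editor's note: for the common switching buffers, the handler above cross-calls dtrace_buffer_switch() on the target CPU and copies out the now-inactive half; ENOENT means the CPU is not in the ready set (or the buffer was never allocated), and ring/fill buffers return EBUSY until tracing has stopped. A minimal per-CPU snapshot sketch from userland follows; buf_snapshot is a hypothetical helper, and the data buffer must be at least as large as the in-kernel per-CPU buffer.

#include <sys/dtrace.h>
#include <strings.h>
#include <unistd.h>

/*
 * Snapshot the principal buffer for one CPU into 'data'.  On success
 * the descriptor reports the number of valid bytes (dtbd_size), drops,
 * errors, and the snapshot timestamp.  ENOENT indicates an offline CPU
 * or an unallocated buffer; EBUSY indicates a ring/fill buffer that
 * cannot be snapped while tracing is still active.
 */
static int
buf_snapshot(int fd, int cpu, char *data, dtrace_bufdesc_t *desc)
{
	bzero(desc, sizeof (*desc));
	desc->dtbd_cpu = cpu;
	desc->dtbd_data = data;

	return (ioctl(fd, DTRACEIOC_BUFSNAP, desc));
}

A consumer would typically loop this over all CPUs, tolerating ENOENT for offline CPUs, and then walk the returned records up to dtbd_size.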
16500 16547
16501 16548 case DTRACEIOC_CONF: {
16502 16549 dtrace_conf_t conf;
16503 16550
16504 16551 bzero(&conf, sizeof (conf));
16505 16552 conf.dtc_difversion = DIF_VERSION;
16506 16553 conf.dtc_difintregs = DIF_DIR_NREGS;
16507 16554 conf.dtc_diftupregs = DIF_DTR_NREGS;
16508 16555 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16509 16556
16510 16557 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16511 16558 return (EFAULT);
16512 16559
16513 16560 return (0);
16514 16561 }
16515 16562
16516 16563 case DTRACEIOC_STATUS: {
16517 16564 dtrace_status_t stat;
16518 16565 dtrace_dstate_t *dstate;
16519 16566 int i, j;
16520 16567 uint64_t nerrs;
16521 16568
16522 16569 /*
16523 16570 * See the comment in dtrace_state_deadman() for the reason
16524 16571 * for setting dts_laststatus to INT64_MAX before setting
16525 16572 * it to the correct value.
16526 16573 */
16527 16574 state->dts_laststatus = INT64_MAX;
16528 16575 dtrace_membar_producer();
16529 16576 state->dts_laststatus = dtrace_gethrtime();
16530 16577
16531 16578 bzero(&stat, sizeof (stat));
16532 16579
16533 16580 mutex_enter(&dtrace_lock);
16534 16581
16535 16582 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16536 16583 mutex_exit(&dtrace_lock);
16537 16584 return (ENOENT);
16538 16585 }
16539 16586
16540 16587 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16541 16588 stat.dtst_exiting = 1;
16542 16589
16543 16590 nerrs = state->dts_errors;
16544 16591 dstate = &state->dts_vstate.dtvs_dynvars;
16545 16592
16546 16593 for (i = 0; i < NCPU; i++) {
16547 16594 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16548 16595
16549 16596 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16550 16597 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16551 16598 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16552 16599
16553 16600 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16554 16601 stat.dtst_filled++;
16555 16602
16556 16603 nerrs += state->dts_buffer[i].dtb_errors;
16557 16604
16558 16605 for (j = 0; j < state->dts_nspeculations; j++) {
16559 16606 dtrace_speculation_t *spec;
16560 16607 dtrace_buffer_t *buf;
16561 16608
16562 16609 spec = &state->dts_speculations[j];
16563 16610 buf = &spec->dtsp_buffer[i];
16564 16611 stat.dtst_specdrops += buf->dtb_xamot_drops;
16565 16612 }
16566 16613 }
16567 16614
16568 16615 stat.dtst_specdrops_busy = state->dts_speculations_busy;
16569 16616 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
16570 16617 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
16571 16618 stat.dtst_dblerrors = state->dts_dblerrors;
16572 16619 stat.dtst_killed =
16573 16620 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
16574 16621 stat.dtst_errors = nerrs;
16575 16622
16576 16623 mutex_exit(&dtrace_lock);
16577 16624
16578 16625 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
16579 16626 return (EFAULT);
16580 16627
16581 16628 return (0);
16582 16629 }
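Editor's note: the STATUS handler totals dynamic-variable drops, filled principal buffers, and speculation drops across CPUs, and its write to dts_laststatus doubles as the consumer's heartbeat for the deadman described in dtrace_state_deadman(). A minimal polling sketch follows (status_poll is a hypothetical helper; libdtrace wraps this ioctl in dtrace_status()).

#include <sys/dtrace.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Poll tracing status once.  ENOENT means tracing has not been started
 * on this handle.  Regular polling also serves as the keep-alive the
 * deadman timer expects from a live consumer.
 */
static int
status_poll(int fd)
{
	dtrace_status_t st;

	if (ioctl(fd, DTRACEIOC_STATUS, &st) == -1)
		return (-1);

	(void) printf("dyndrops=%llu errors=%llu filled=%llu%s%s\n",
	    (unsigned long long)st.dtst_dyndrops,
	    (unsigned long long)st.dtst_errors,
	    (unsigned long long)st.dtst_filled,
	    st.dtst_exiting ? " exiting" : "",
	    st.dtst_killed ? " killed" : "");

	return (0);
}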
16583 16630
16584 16631 case DTRACEIOC_FORMAT: {
16585 16632 dtrace_fmtdesc_t fmt;
16586 16633 char *str;
16587 16634 int len;
16588 16635
16589 16636 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
16590 16637 return (EFAULT);
16591 16638
16592 16639 mutex_enter(&dtrace_lock);
16593 16640
16594 16641 if (fmt.dtfd_format == 0 ||
16595 16642 fmt.dtfd_format > state->dts_nformats) {
16596 16643 mutex_exit(&dtrace_lock);
16597 16644 return (EINVAL);
16598 16645 }
16599 16646
16600 16647 /*
16601 16648 * Format strings are allocated contiguously and they are
16602 16649 * never freed; if a format index is less than the number
16603 16650 * of formats, we can assert that the format map is non-NULL
16604 16651 * and that the format for the specified index is non-NULL.
16605 16652 */
16606 16653 ASSERT(state->dts_formats != NULL);
16607 16654 str = state->dts_formats[fmt.dtfd_format - 1];
16608 16655 ASSERT(str != NULL);
16609 16656
16610 16657 len = strlen(str) + 1;
16611 16658
16612 16659 if (len > fmt.dtfd_length) {
16613 16660 fmt.dtfd_length = len;
16614 16661
16615 16662 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16616 16663 mutex_exit(&dtrace_lock);
16617 16664 return (EINVAL);
16618 16665 }
16619 16666 } else {
16620 16667 if (copyout(str, fmt.dtfd_string, len) != 0) {
16621 16668 mutex_exit(&dtrace_lock);
16622 16669 return (EINVAL);
16623 16670 }
16624 16671 }
16625 16672
16626 16673 mutex_exit(&dtrace_lock);
16627 16674 return (0);
16628 16675 }
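Editor's note: the FORMAT handler implements a simple length-negotiation protocol: if the caller's buffer is too small, it returns success but copies back only the descriptor, with dtfd_length raised to the required size; otherwise it copies the string itself. The hypothetical helper below illustrates the resulting two-pass retrieval (format indices are the 1-based identifiers recorded with printf()-family records in the trace data).

#include <sys/dtrace.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>

/*
 * Fetch the format string for the given (1-based) format identifier.
 * Returns a malloc()ed string (caller frees), or NULL on failure.
 */
static char *
format_get(int fd, uint16_t format)
{
	dtrace_fmtdesc_t fmt;
	char *str;

	bzero(&fmt, sizeof (fmt));
	fmt.dtfd_format = format;

	/*
	 * First pass: with dtfd_length of zero the kernel will not copy
	 * the string; it instead updates dtfd_length to the required
	 * size (strlen + 1) and returns success.
	 */
	if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) == -1)
		return (NULL);

	if ((str = malloc(fmt.dtfd_length)) == NULL)
		return (NULL);

	fmt.dtfd_string = str;

	/* Second pass: the buffer is now large enough; copy the string. */
	if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) == -1) {
		free(str);
		return (NULL);
	}

	return (str);
}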
16629 16676
16630 16677 default:
16631 16678 break;
16632 16679 }
16633 16680
16634 16681 return (ENOTTY);
16635 16682 }
16636 16683
16637 16684 /*ARGSUSED*/
16638 16685 static int
16639 16686 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16640 16687 {
16641 16688 dtrace_state_t *state;
16642 16689
16643 16690 switch (cmd) {
16644 16691 case DDI_DETACH:
16645 16692 break;
16646 16693
16647 16694 case DDI_SUSPEND:
16648 16695 return (DDI_SUCCESS);
16649 16696
16650 16697 default:
16651 16698 return (DDI_FAILURE);
16652 16699 }
16653 16700
16654 16701 mutex_enter(&cpu_lock);
16655 16702 mutex_enter(&dtrace_provider_lock);
16656 16703 mutex_enter(&dtrace_lock);
16657 16704
16658 16705 ASSERT(dtrace_opens == 0);
16659 16706
16660 16707 if (dtrace_helpers > 0) {
16661 16708 mutex_exit(&dtrace_provider_lock);
16662 16709 mutex_exit(&dtrace_lock);
16663 16710 mutex_exit(&cpu_lock);
16664 16711 return (DDI_FAILURE);
16665 16712 }
16666 16713
16667 16714 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16668 16715 mutex_exit(&dtrace_provider_lock);
16669 16716 mutex_exit(&dtrace_lock);
16670 16717 mutex_exit(&cpu_lock);
16671 16718 return (DDI_FAILURE);
16672 16719 }
16673 16720
16674 16721 dtrace_provider = NULL;
16675 16722
16676 16723 if ((state = dtrace_anon_grab()) != NULL) {
16677 16724 /*
16678 16725 * If there were ECBs on this state, the provider should
16679 16726 * not have been allowed to detach; assert that there
16680 16727 * are none.
16681 16728 */
16682 16729 ASSERT(state->dts_necbs == 0);
16683 16730 dtrace_state_destroy(state);
16684 16731
16685 16732 /*
16686 16733 * If we're being detached with anonymous state, we need to
16687 16734 * indicate to the kernel debugger that DTrace is now inactive.
16688 16735 */
16689 16736 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16690 16737 }
16691 16738
16692 16739 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16693 16740 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16694 16741 dtrace_cpu_init = NULL;
16695 16742 dtrace_helpers_cleanup = NULL;
16696 16743 dtrace_helpers_fork = NULL;
16697 16744 dtrace_cpustart_init = NULL;
16698 16745 dtrace_cpustart_fini = NULL;
16699 16746 dtrace_debugger_init = NULL;
16700 16747 dtrace_debugger_fini = NULL;
16701 16748 dtrace_modload = NULL;
16702 16749 dtrace_modunload = NULL;
16703 16750
16704 16751 ASSERT(dtrace_getf == 0);
16705 16752 ASSERT(dtrace_closef == NULL);
16706 16753
16707 16754 mutex_exit(&cpu_lock);
16708 16755
16709 16756 if (dtrace_helptrace_enabled) {
16710 16757 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
16711 16758 dtrace_helptrace_buffer = NULL;
16712 16759 }
16713 16760
16714 16761 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16715 16762 dtrace_probes = NULL;
16716 16763 dtrace_nprobes = 0;
16717 16764
16718 16765 dtrace_hash_destroy(dtrace_bymod);
16719 16766 dtrace_hash_destroy(dtrace_byfunc);
16720 16767 dtrace_hash_destroy(dtrace_byname);
16721 16768 dtrace_bymod = NULL;
16722 16769 dtrace_byfunc = NULL;
16723 16770 dtrace_byname = NULL;
16724 16771
16725 16772 kmem_cache_destroy(dtrace_state_cache);
16726 16773 vmem_destroy(dtrace_minor);
16727 16774 vmem_destroy(dtrace_arena);
16728 16775
16729 16776 if (dtrace_toxrange != NULL) {
16730 16777 kmem_free(dtrace_toxrange,
16731 16778 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16732 16779 dtrace_toxrange = NULL;
16733 16780 dtrace_toxranges = 0;
16734 16781 dtrace_toxranges_max = 0;
16735 16782 }
16736 16783
16737 16784 ddi_remove_minor_node(dtrace_devi, NULL);
16738 16785 dtrace_devi = NULL;
16739 16786
16740 16787 ddi_soft_state_fini(&dtrace_softstate);
16741 16788
16742 16789 ASSERT(dtrace_vtime_references == 0);
16743 16790 ASSERT(dtrace_opens == 0);
16744 16791 ASSERT(dtrace_retained == NULL);
16745 16792
16746 16793 mutex_exit(&dtrace_lock);
16747 16794 mutex_exit(&dtrace_provider_lock);
16748 16795
16749 16796 /*
16750 16797 * We don't destroy the task queue until after we have dropped our
16751 16798 * locks (taskq_destroy() may block on running tasks). To prevent
16752 16799 * attempting to do work after we have effectively detached but before
16753 16800 * the task queue has been destroyed, all tasks dispatched via the
16754 16801 * task queue must check that DTrace is still attached before
16755 16802 * performing any operation.
16756 16803 */
16757 16804 taskq_destroy(dtrace_taskq);
16758 16805 dtrace_taskq = NULL;
16759 16806
16760 16807 return (DDI_SUCCESS);
16761 16808 }
16762 16809
16763 16810 /*ARGSUSED*/
16764 16811 static int
16765 16812 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
16766 16813 {
16767 16814 int error;
16768 16815
16769 16816 switch (infocmd) {
16770 16817 case DDI_INFO_DEVT2DEVINFO:
16771 16818 *result = (void *)dtrace_devi;
16772 16819 error = DDI_SUCCESS;
16773 16820 break;
16774 16821 case DDI_INFO_DEVT2INSTANCE:
16775 16822 *result = (void *)0;
16776 16823 error = DDI_SUCCESS;
16777 16824 break;
16778 16825 default:
16779 16826 error = DDI_FAILURE;
16780 16827 }
16781 16828 return (error);
16782 16829 }
16783 16830
16784 16831 static struct cb_ops dtrace_cb_ops = {
16785 16832 dtrace_open, /* open */
16786 16833 dtrace_close, /* close */
16787 16834 nulldev, /* strategy */
16788 16835 nulldev, /* print */
16789 16836 nodev, /* dump */
16790 16837 nodev, /* read */
16791 16838 nodev, /* write */
16792 16839 dtrace_ioctl, /* ioctl */
16793 16840 nodev, /* devmap */
16794 16841 nodev, /* mmap */
16795 16842 nodev, /* segmap */
16796 16843 nochpoll, /* poll */
16797 16844 ddi_prop_op, /* cb_prop_op */
16798 16845 0, /* streamtab */
16799 16846 D_NEW | D_MP /* Driver compatibility flag */
16800 16847 };
16801 16848
16802 16849 static struct dev_ops dtrace_ops = {
16803 16850 DEVO_REV, /* devo_rev */
16804 16851 0, /* refcnt */
16805 16852 dtrace_info, /* get_dev_info */
16806 16853 nulldev, /* identify */
16807 16854 nulldev, /* probe */
16808 16855 dtrace_attach, /* attach */
16809 16856 dtrace_detach, /* detach */
16810 16857 nodev, /* reset */
16811 16858 &dtrace_cb_ops, /* driver operations */
16812 16859 NULL, /* bus operations */
16813 16860 nodev, /* dev power */
16814 16861 ddi_quiesce_not_needed, /* quiesce */
16815 16862 };
16816 16863
16817 16864 static struct modldrv modldrv = {
16818 16865 &mod_driverops, /* module type (this is a pseudo driver) */
16819 16866 "Dynamic Tracing", /* name of module */
16820 16867 &dtrace_ops, /* driver ops */
16821 16868 };
16822 16869
16823 16870 static struct modlinkage modlinkage = {
16824 16871 MODREV_1,
16825 16872 (void *)&modldrv,
16826 16873 NULL
16827 16874 };
16828 16875
16829 16876 int
16830 16877 _init(void)
16831 16878 {
16832 16879 return (mod_install(&modlinkage));
16833 16880 }
16834 16881
16835 16882 int
16836 16883 _info(struct modinfo *modinfop)
16837 16884 {
16838 16885 return (mod_info(&modlinkage, modinfop));
16839 16886 }
16840 16887
16841 16888 int
16842 16889 _fini(void)
16843 16890 {
16844 16891 return (mod_remove(&modlinkage));
16845 16892 }