1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2012, Joyent, Inc. All rights reserved. 25 */ 26 27 /* 28 * DTrace - Dynamic Tracing for Solaris 29 * 30 * This is the implementation of the Solaris Dynamic Tracing framework 31 * (DTrace). The user-visible interface to DTrace is described at length in 32 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 33 * library, the in-kernel DTrace framework, and the DTrace providers are 34 * described in the block comments in the <sys/dtrace.h> header file. The 35 * internal architecture of DTrace is described in the block comments in the 36 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 37 * implementation very much assume mastery of all of these sources; if one has 38 * an unanswered question about the implementation, one should consult them 39 * first. 40 * 41 * The functions here are ordered roughly as follows: 42 * 43 * - Probe context functions 44 * - Probe hashing functions 45 * - Non-probe context utility functions 46 * - Matching functions 47 * - Provider-to-Framework API functions 48 * - Probe management functions 49 * - DIF object functions 50 * - Format functions 51 * - Predicate functions 52 * - ECB functions 53 * - Buffer functions 54 * - Enabling functions 55 * - DOF functions 56 * - Anonymous enabling functions 57 * - Consumer state functions 58 * - Helper functions 59 * - Hook functions 60 * - Driver cookbook functions 61 * 62 * Each group of functions begins with a block comment labelled the "DTrace 63 * [Group] Functions", allowing one to find each block by searching forward 64 * on capital-f functions. 65 */ 66 #include <sys/errno.h> 67 #include <sys/stat.h> 68 #include <sys/modctl.h> 69 #include <sys/conf.h> 70 #include <sys/systm.h> 71 #include <sys/ddi.h> 72 #include <sys/sunddi.h> 73 #include <sys/cpuvar.h> 74 #include <sys/kmem.h> 75 #include <sys/strsubr.h> 76 #include <sys/sysmacros.h> 77 #include <sys/dtrace_impl.h> 78 #include <sys/atomic.h> 79 #include <sys/cmn_err.h> 80 #include <sys/mutex_impl.h> 81 #include <sys/rwlock_impl.h> 82 #include <sys/ctf_api.h> 83 #include <sys/panic.h> 84 #include <sys/priv_impl.h> 85 #include <sys/policy.h> 86 #include <sys/cred_impl.h> 87 #include <sys/procfs_isa.h> 88 #include <sys/taskq.h> 89 #include <sys/mkdev.h> 90 #include <sys/kdi.h> 91 #include <sys/zone.h> 92 #include <sys/socket.h> 93 #include <netinet/in.h> 94 95 /* 96 * DTrace Tunable Variables 97 * 98 * The following variables may be tuned by adding a line to /etc/system that 99 * includes both the name of the DTrace module ("dtrace") and the name of the 100 * variable. For example: 101 * 102 * set dtrace:dtrace_destructive_disallow = 1 103 * 104 * In general, the only variables that one should be tuning this way are those 105 * that affect system-wide DTrace behavior, and for which the default behavior 106 * is undesirable. Most of these variables are tunable on a per-consumer 107 * basis using DTrace options, and need not be tuned on a system-wide basis. 108 * When tuning these variables, avoid pathological values; while some attempt 109 * is made to verify the integrity of these variables, they are not considered 110 * part of the supported interface to DTrace, and they are therefore not 111 * checked comprehensively. Further, these variables should not be tuned 112 * dynamically via "mdb -kw" or other means; they should only be tuned via 113 * /etc/system. 114 */ 115 int dtrace_destructive_disallow = 0; 116 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 117 size_t dtrace_difo_maxsize = (256 * 1024); 118 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024); 119 size_t dtrace_global_maxsize = (16 * 1024); 120 size_t dtrace_actions_max = (16 * 1024); 121 size_t dtrace_retain_max = 1024; 122 dtrace_optval_t dtrace_helper_actions_max = 1024; 123 dtrace_optval_t dtrace_helper_providers_max = 32; 124 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 125 size_t dtrace_strsize_default = 256; 126 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 127 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 128 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 129 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 130 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 131 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 132 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 133 dtrace_optval_t dtrace_nspec_default = 1; 134 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 135 dtrace_optval_t dtrace_stackframes_default = 20; 136 dtrace_optval_t dtrace_ustackframes_default = 20; 137 dtrace_optval_t dtrace_jstackframes_default = 50; 138 dtrace_optval_t dtrace_jstackstrsize_default = 512; 139 int dtrace_msgdsize_max = 128; 140 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ 141 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 142 int dtrace_devdepth_max = 32; 143 int dtrace_err_verbose; 144 hrtime_t dtrace_deadman_interval = NANOSEC; 145 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 146 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 147 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC; 148 149 /* 150 * DTrace External Variables 151 * 152 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 153 * available to DTrace consumers via the backtick (`) syntax. One of these, 154 * dtrace_zero, is made deliberately so: it is provided as a source of 155 * well-known, zero-filled memory. While this variable is not documented, 156 * it is used by some translators as an implementation detail. 157 */ 158 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 159 160 /* 161 * DTrace Internal Variables 162 */ 163 static dev_info_t *dtrace_devi; /* device info */ 164 static vmem_t *dtrace_arena; /* probe ID arena */ 165 static vmem_t *dtrace_minor; /* minor number arena */ 166 static taskq_t *dtrace_taskq; /* task queue */ 167 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 168 static int dtrace_nprobes; /* number of probes */ 169 static dtrace_provider_t *dtrace_provider; /* provider list */ 170 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 171 static int dtrace_opens; /* number of opens */ 172 static int dtrace_helpers; /* number of helpers */ 173 static int dtrace_getf; /* number of unpriv getf()s */ 174 static void *dtrace_softstate; /* softstate pointer */ 175 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 176 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 177 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 178 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 179 static int dtrace_toxranges; /* number of toxic ranges */ 180 static int dtrace_toxranges_max; /* size of toxic range array */ 181 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 182 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 183 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 184 static kthread_t *dtrace_panicked; /* panicking thread */ 185 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 186 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 187 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 188 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 189 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ 190 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 191 static int dtrace_dynvar_failclean; /* dynvars failed to clean */ 192 193 /* 194 * DTrace Locking 195 * DTrace is protected by three (relatively coarse-grained) locks: 196 * 197 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 198 * including enabling state, probes, ECBs, consumer state, helper state, 199 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 200 * probe context is lock-free -- synchronization is handled via the 201 * dtrace_sync() cross call mechanism. 202 * 203 * (2) dtrace_provider_lock is required when manipulating provider state, or 204 * when provider state must be held constant. 205 * 206 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 207 * when meta provider state must be held constant. 208 * 209 * The lock ordering between these three locks is dtrace_meta_lock before 210 * dtrace_provider_lock before dtrace_lock. (In particular, there are 211 * several places where dtrace_provider_lock is held by the framework as it 212 * calls into the providers -- which then call back into the framework, 213 * grabbing dtrace_lock.) 214 * 215 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 216 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 217 * role as a coarse-grained lock; it is acquired before both of these locks. 218 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 219 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 220 * mod_lock is similar with respect to dtrace_provider_lock in that it must be 221 * acquired _between_ dtrace_provider_lock and dtrace_lock. 222 */ 223 static kmutex_t dtrace_lock; /* probe state lock */ 224 static kmutex_t dtrace_provider_lock; /* provider state lock */ 225 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ 226 227 /* 228 * DTrace Provider Variables 229 * 230 * These are the variables relating to DTrace as a provider (that is, the 231 * provider of the BEGIN, END, and ERROR probes). 232 */ 233 static dtrace_pattr_t dtrace_provider_attr = { 234 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 235 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 236 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 237 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 238 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 239 }; 240 241 static void 242 dtrace_nullop(void) 243 {} 244 245 static int 246 dtrace_enable_nullop(void) 247 { 248 return (0); 249 } 250 251 static dtrace_pops_t dtrace_provider_ops = { 252 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop, 253 (void (*)(void *, struct modctl *))dtrace_nullop, 254 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop, 255 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 256 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 257 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 258 NULL, 259 NULL, 260 NULL, 261 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop 262 }; 263 264 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */ 265 static dtrace_id_t dtrace_probeid_end; /* special END probe */ 266 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ 267 268 /* 269 * DTrace Helper Tracing Variables 270 */ 271 uint32_t dtrace_helptrace_next = 0; 272 uint32_t dtrace_helptrace_nlocals; 273 char *dtrace_helptrace_buffer; 274 int dtrace_helptrace_bufsize = 512 * 1024; 275 276 #ifdef DEBUG 277 int dtrace_helptrace_enabled = 1; 278 #else 279 int dtrace_helptrace_enabled = 0; 280 #endif 281 282 /* 283 * DTrace Error Hashing 284 * 285 * On DEBUG kernels, DTrace will track the errors that has seen in a hash 286 * table. This is very useful for checking coverage of tests that are 287 * expected to induce DIF or DOF processing errors, and may be useful for 288 * debugging problems in the DIF code generator or in DOF generation . The 289 * error hash may be examined with the ::dtrace_errhash MDB dcmd. 290 */ 291 #ifdef DEBUG 292 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; 293 static const char *dtrace_errlast; 294 static kthread_t *dtrace_errthread; 295 static kmutex_t dtrace_errlock; 296 #endif 297 298 /* 299 * DTrace Macros and Constants 300 * 301 * These are various macros that are useful in various spots in the 302 * implementation, along with a few random constants that have no meaning 303 * outside of the implementation. There is no real structure to this cpp 304 * mishmash -- but is there ever? 305 */ 306 #define DTRACE_HASHSTR(hash, probe) \ 307 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 308 309 #define DTRACE_HASHNEXT(hash, probe) \ 310 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 311 312 #define DTRACE_HASHPREV(hash, probe) \ 313 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 314 315 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 316 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 317 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 318 319 #define DTRACE_AGGHASHSIZE_SLEW 17 320 321 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 322 323 /* 324 * The key for a thread-local variable consists of the lower 61 bits of the 325 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 326 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 327 * equal to a variable identifier. This is necessary (but not sufficient) to 328 * assure that global associative arrays never collide with thread-local 329 * variables. To guarantee that they cannot collide, we must also define the 330 * order for keying dynamic variables. That order is: 331 * 332 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 333 * 334 * Because the variable-key and the tls-key are in orthogonal spaces, there is 335 * no way for a global variable key signature to match a thread-local key 336 * signature. 337 */ 338 #define DTRACE_TLS_THRKEY(where) { \ 339 uint_t intr = 0; \ 340 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 341 for (; actv; actv >>= 1) \ 342 intr++; \ 343 ASSERT(intr < (1 << 3)); \ 344 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 345 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 346 } 347 348 #define DT_BSWAP_8(x) ((x) & 0xff) 349 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 350 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 351 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 352 353 #define DT_MASK_LO 0x00000000FFFFFFFFULL 354 355 #define DTRACE_STORE(type, tomax, offset, what) \ 356 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 357 358 #ifndef __i386 359 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 360 if (addr & (size - 1)) { \ 361 *flags |= CPU_DTRACE_BADALIGN; \ 362 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 363 return (0); \ 364 } 365 #else 366 #define DTRACE_ALIGNCHECK(addr, size, flags) 367 #endif 368 369 /* 370 * Test whether a range of memory starting at testaddr of size testsz falls 371 * within the range of memory described by addr, sz. We take care to avoid 372 * problems with overflow and underflow of the unsigned quantities, and 373 * disallow all negative sizes. Ranges of size 0 are allowed. 374 */ 375 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 376 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \ 377 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \ 378 (testaddr) + (testsz) >= (testaddr)) 379 380 /* 381 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 382 * alloc_sz on the righthand side of the comparison in order to avoid overflow 383 * or underflow in the comparison with it. This is simpler than the INRANGE 384 * check above, because we know that the dtms_scratch_ptr is valid in the 385 * range. Allocations of size zero are allowed. 386 */ 387 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 388 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 389 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 390 391 #define DTRACE_LOADFUNC(bits) \ 392 /*CSTYLED*/ \ 393 uint##bits##_t \ 394 dtrace_load##bits(uintptr_t addr) \ 395 { \ 396 size_t size = bits / NBBY; \ 397 /*CSTYLED*/ \ 398 uint##bits##_t rval; \ 399 int i; \ 400 volatile uint16_t *flags = (volatile uint16_t *) \ 401 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \ 402 \ 403 DTRACE_ALIGNCHECK(addr, size, flags); \ 404 \ 405 for (i = 0; i < dtrace_toxranges; i++) { \ 406 if (addr >= dtrace_toxrange[i].dtt_limit) \ 407 continue; \ 408 \ 409 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 410 continue; \ 411 \ 412 /* \ 413 * This address falls within a toxic region; return 0. \ 414 */ \ 415 *flags |= CPU_DTRACE_BADADDR; \ 416 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 417 return (0); \ 418 } \ 419 \ 420 *flags |= CPU_DTRACE_NOFAULT; \ 421 /*CSTYLED*/ \ 422 rval = *((volatile uint##bits##_t *)addr); \ 423 *flags &= ~CPU_DTRACE_NOFAULT; \ 424 \ 425 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 426 } 427 428 #ifdef _LP64 429 #define dtrace_loadptr dtrace_load64 430 #else 431 #define dtrace_loadptr dtrace_load32 432 #endif 433 434 #define DTRACE_DYNHASH_FREE 0 435 #define DTRACE_DYNHASH_SINK 1 436 #define DTRACE_DYNHASH_VALID 2 437 438 #define DTRACE_MATCH_FAIL -1 439 #define DTRACE_MATCH_NEXT 0 440 #define DTRACE_MATCH_DONE 1 441 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 442 #define DTRACE_STATE_ALIGN 64 443 444 #define DTRACE_FLAGS2FLT(flags) \ 445 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 446 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 447 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 448 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 449 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 450 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 451 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 452 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 453 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \ 454 DTRACEFLT_UNKNOWN) 455 456 #define DTRACEACT_ISSTRING(act) \ 457 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 458 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 459 460 static size_t dtrace_strlen(const char *, size_t); 461 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 462 static void dtrace_enabling_provide(dtrace_provider_t *); 463 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 464 static void dtrace_enabling_matchall(void); 465 static void dtrace_enabling_reap(void); 466 static dtrace_state_t *dtrace_anon_grab(void); 467 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 468 dtrace_state_t *, uint64_t, uint64_t); 469 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 470 static void dtrace_buffer_drop(dtrace_buffer_t *); 471 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when); 472 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 473 dtrace_state_t *, dtrace_mstate_t *); 474 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 475 dtrace_optval_t); 476 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 477 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 478 static int dtrace_priv_proc(dtrace_state_t *, dtrace_mstate_t *); 479 static void dtrace_getf_barrier(void); 480 481 /* 482 * DTrace Probe Context Functions 483 * 484 * These functions are called from probe context. Because probe context is 485 * any context in which C may be called, arbitrarily locks may be held, 486 * interrupts may be disabled, we may be in arbitrary dispatched state, etc. 487 * As a result, functions called from probe context may only call other DTrace 488 * support functions -- they may not interact at all with the system at large. 489 * (Note that the ASSERT macro is made probe-context safe by redefining it in 490 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 491 * loads are to be performed from probe context, they _must_ be in terms of 492 * the safe dtrace_load*() variants. 493 * 494 * Some functions in this block are not actually called from probe context; 495 * for these functions, there will be a comment above the function reading 496 * "Note: not called from probe context." 497 */ 498 void 499 dtrace_panic(const char *format, ...) 500 { 501 va_list alist; 502 503 va_start(alist, format); 504 dtrace_vpanic(format, alist); 505 va_end(alist); 506 } 507 508 int 509 dtrace_assfail(const char *a, const char *f, int l) 510 { 511 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 512 513 /* 514 * We just need something here that even the most clever compiler 515 * cannot optimize away. 516 */ 517 return (a[(uintptr_t)f]); 518 } 519 520 /* 521 * Atomically increment a specified error counter from probe context. 522 */ 523 static void 524 dtrace_error(uint32_t *counter) 525 { 526 /* 527 * Most counters stored to in probe context are per-CPU counters. 528 * However, there are some error conditions that are sufficiently 529 * arcane that they don't merit per-CPU storage. If these counters 530 * are incremented concurrently on different CPUs, scalability will be 531 * adversely affected -- but we don't expect them to be white-hot in a 532 * correctly constructed enabling... 533 */ 534 uint32_t oval, nval; 535 536 do { 537 oval = *counter; 538 539 if ((nval = oval + 1) == 0) { 540 /* 541 * If the counter would wrap, set it to 1 -- assuring 542 * that the counter is never zero when we have seen 543 * errors. (The counter must be 32-bits because we 544 * aren't guaranteed a 64-bit compare&swap operation.) 545 * To save this code both the infamy of being fingered 546 * by a priggish news story and the indignity of being 547 * the target of a neo-puritan witch trial, we're 548 * carefully avoiding any colorful description of the 549 * likelihood of this condition -- but suffice it to 550 * say that it is only slightly more likely than the 551 * overflow of predicate cache IDs, as discussed in 552 * dtrace_predicate_create(). 553 */ 554 nval = 1; 555 } 556 } while (dtrace_cas32(counter, oval, nval) != oval); 557 } 558 559 /* 560 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 561 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 562 */ 563 DTRACE_LOADFUNC(8) 564 DTRACE_LOADFUNC(16) 565 DTRACE_LOADFUNC(32) 566 DTRACE_LOADFUNC(64) 567 568 static int 569 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 570 { 571 if (dest < mstate->dtms_scratch_base) 572 return (0); 573 574 if (dest + size < dest) 575 return (0); 576 577 if (dest + size > mstate->dtms_scratch_ptr) 578 return (0); 579 580 return (1); 581 } 582 583 static int 584 dtrace_canstore_statvar(uint64_t addr, size_t sz, 585 dtrace_statvar_t **svars, int nsvars) 586 { 587 int i; 588 589 for (i = 0; i < nsvars; i++) { 590 dtrace_statvar_t *svar = svars[i]; 591 592 if (svar == NULL || svar->dtsv_size == 0) 593 continue; 594 595 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 596 return (1); 597 } 598 599 return (0); 600 } 601 602 /* 603 * Check to see if the address is within a memory region to which a store may 604 * be issued. This includes the DTrace scratch areas, and any DTrace variable 605 * region. The caller of dtrace_canstore() is responsible for performing any 606 * alignment checks that are needed before stores are actually executed. 607 */ 608 static int 609 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 610 dtrace_vstate_t *vstate) 611 { 612 /* 613 * First, check to see if the address is in scratch space... 614 */ 615 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 616 mstate->dtms_scratch_size)) 617 return (1); 618 619 /* 620 * Now check to see if it's a dynamic variable. This check will pick 621 * up both thread-local variables and any global dynamically-allocated 622 * variables. 623 */ 624 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base, 625 vstate->dtvs_dynvars.dtds_size)) { 626 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 627 uintptr_t base = (uintptr_t)dstate->dtds_base + 628 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 629 uintptr_t chunkoffs; 630 631 /* 632 * Before we assume that we can store here, we need to make 633 * sure that it isn't in our metadata -- storing to our 634 * dynamic variable metadata would corrupt our state. For 635 * the range to not include any dynamic variable metadata, 636 * it must: 637 * 638 * (1) Start above the hash table that is at the base of 639 * the dynamic variable space 640 * 641 * (2) Have a starting chunk offset that is beyond the 642 * dtrace_dynvar_t that is at the base of every chunk 643 * 644 * (3) Not span a chunk boundary 645 * 646 */ 647 if (addr < base) 648 return (0); 649 650 chunkoffs = (addr - base) % dstate->dtds_chunksize; 651 652 if (chunkoffs < sizeof (dtrace_dynvar_t)) 653 return (0); 654 655 if (chunkoffs + sz > dstate->dtds_chunksize) 656 return (0); 657 658 return (1); 659 } 660 661 /* 662 * Finally, check the static local and global variables. These checks 663 * take the longest, so we perform them last. 664 */ 665 if (dtrace_canstore_statvar(addr, sz, 666 vstate->dtvs_locals, vstate->dtvs_nlocals)) 667 return (1); 668 669 if (dtrace_canstore_statvar(addr, sz, 670 vstate->dtvs_globals, vstate->dtvs_nglobals)) 671 return (1); 672 673 return (0); 674 } 675 676 677 /* 678 * Convenience routine to check to see if the address is within a memory 679 * region in which a load may be issued given the user's privilege level; 680 * if not, it sets the appropriate error flags and loads 'addr' into the 681 * illegal value slot. 682 * 683 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 684 * appropriate memory access protection. 685 */ 686 static int 687 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 688 dtrace_vstate_t *vstate) 689 { 690 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 691 file_t *fp; 692 693 /* 694 * If we hold the privilege to read from kernel memory, then 695 * everything is readable. 696 */ 697 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 698 return (1); 699 700 /* 701 * You can obviously read that which you can store. 702 */ 703 if (dtrace_canstore(addr, sz, mstate, vstate)) 704 return (1); 705 706 /* 707 * We're allowed to read from our own string table. 708 */ 709 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab, 710 mstate->dtms_difo->dtdo_strlen)) 711 return (1); 712 713 if (vstate->dtvs_state != NULL && 714 dtrace_priv_proc(vstate->dtvs_state, mstate)) { 715 proc_t *p; 716 717 /* 718 * When we have privileges to the current process, there are 719 * several context-related kernel structures that are safe to 720 * read, even absent the privilege to read from kernel memory. 721 * These reads are safe because these structures contain only 722 * state that (1) we're permitted to read, (2) is harmless or 723 * (3) contains pointers to additional kernel state that we're 724 * not permitted to read (and as such, do not present an 725 * opportunity for privilege escalation). Finally (and 726 * critically), because of the nature of their relation with 727 * the current thread context, the memory associated with these 728 * structures cannot change over the duration of probe context, 729 * and it is therefore impossible for this memory to be 730 * deallocated and reallocated as something else while it's 731 * being operated upon. 732 */ 733 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) 734 return (1); 735 736 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr, 737 sz, curthread->t_procp, sizeof (proc_t))) { 738 return (1); 739 } 740 741 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz, 742 curthread->t_cred, sizeof (cred_t))) { 743 return (1); 744 } 745 746 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz, 747 &(p->p_pidp->pid_id), sizeof (pid_t))) { 748 return (1); 749 } 750 751 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz, 752 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) { 753 return (1); 754 } 755 } 756 757 if ((fp = mstate->dtms_getf) != NULL) { 758 uintptr_t psz = sizeof (void *); 759 vnode_t *vp; 760 vnodeops_t *op; 761 762 /* 763 * When getf() returns a file_t, the enabling is implicitly 764 * granted the (transient) right to read the returned file_t 765 * as well as the v_path and v_op->vnop_name of the underlying 766 * vnode. These accesses are allowed after a successful 767 * getf() because the members that they refer to cannot change 768 * once set -- and the barrier logic in the kernel's closef() 769 * path assures that the file_t and its referenced vode_t 770 * cannot themselves be stale (that is, it impossible for 771 * either dtms_getf itself or its f_vnode member to reference 772 * freed memory). 773 */ 774 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) 775 return (1); 776 777 if ((vp = fp->f_vnode) != NULL) { 778 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) 779 return (1); 780 781 if (vp->v_path != NULL && DTRACE_INRANGE(addr, sz, 782 vp->v_path, strlen(vp->v_path) + 1)) { 783 return (1); 784 } 785 786 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) 787 return (1); 788 789 if ((op = vp->v_op) != NULL && 790 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) { 791 return (1); 792 } 793 794 if (op != NULL && op->vnop_name != NULL && 795 DTRACE_INRANGE(addr, sz, op->vnop_name, 796 strlen(op->vnop_name) + 1)) { 797 return (1); 798 } 799 } 800 } 801 802 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 803 *illval = addr; 804 return (0); 805 } 806 807 /* 808 * Convenience routine to check to see if a given string is within a memory 809 * region in which a load may be issued given the user's privilege level; 810 * this exists so that we don't need to issue unnecessary dtrace_strlen() 811 * calls in the event that the user has all privileges. 812 */ 813 static int 814 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 815 dtrace_vstate_t *vstate) 816 { 817 size_t strsz; 818 819 /* 820 * If we hold the privilege to read from kernel memory, then 821 * everything is readable. 822 */ 823 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 824 return (1); 825 826 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 827 if (dtrace_canload(addr, strsz, mstate, vstate)) 828 return (1); 829 830 return (0); 831 } 832 833 /* 834 * Convenience routine to check to see if a given variable is within a memory 835 * region in which a load may be issued given the user's privilege level. 836 */ 837 static int 838 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 839 dtrace_vstate_t *vstate) 840 { 841 size_t sz; 842 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 843 844 /* 845 * If we hold the privilege to read from kernel memory, then 846 * everything is readable. 847 */ 848 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 849 return (1); 850 851 if (type->dtdt_kind == DIF_TYPE_STRING) 852 sz = dtrace_strlen(src, 853 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 854 else 855 sz = type->dtdt_size; 856 857 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 858 } 859 860 /* 861 * Compare two strings using safe loads. 862 */ 863 static int 864 dtrace_strncmp(char *s1, char *s2, size_t limit) 865 { 866 uint8_t c1, c2; 867 volatile uint16_t *flags; 868 869 if (s1 == s2 || limit == 0) 870 return (0); 871 872 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 873 874 do { 875 if (s1 == NULL) { 876 c1 = '\0'; 877 } else { 878 c1 = dtrace_load8((uintptr_t)s1++); 879 } 880 881 if (s2 == NULL) { 882 c2 = '\0'; 883 } else { 884 c2 = dtrace_load8((uintptr_t)s2++); 885 } 886 887 if (c1 != c2) 888 return (c1 - c2); 889 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 890 891 return (0); 892 } 893 894 /* 895 * Compute strlen(s) for a string using safe memory accesses. The additional 896 * len parameter is used to specify a maximum length to ensure completion. 897 */ 898 static size_t 899 dtrace_strlen(const char *s, size_t lim) 900 { 901 uint_t len; 902 903 for (len = 0; len != lim; len++) { 904 if (dtrace_load8((uintptr_t)s++) == '\0') 905 break; 906 } 907 908 return (len); 909 } 910 911 /* 912 * Check if an address falls within a toxic region. 913 */ 914 static int 915 dtrace_istoxic(uintptr_t kaddr, size_t size) 916 { 917 uintptr_t taddr, tsize; 918 int i; 919 920 for (i = 0; i < dtrace_toxranges; i++) { 921 taddr = dtrace_toxrange[i].dtt_base; 922 tsize = dtrace_toxrange[i].dtt_limit - taddr; 923 924 if (kaddr - taddr < tsize) { 925 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 926 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr; 927 return (1); 928 } 929 930 if (taddr - kaddr < size) { 931 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 932 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr; 933 return (1); 934 } 935 } 936 937 return (0); 938 } 939 940 /* 941 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 942 * memory specified by the DIF program. The dst is assumed to be safe memory 943 * that we can store to directly because it is managed by DTrace. As with 944 * standard bcopy, overlapping copies are handled properly. 945 */ 946 static void 947 dtrace_bcopy(const void *src, void *dst, size_t len) 948 { 949 if (len != 0) { 950 uint8_t *s1 = dst; 951 const uint8_t *s2 = src; 952 953 if (s1 <= s2) { 954 do { 955 *s1++ = dtrace_load8((uintptr_t)s2++); 956 } while (--len != 0); 957 } else { 958 s2 += len; 959 s1 += len; 960 961 do { 962 *--s1 = dtrace_load8((uintptr_t)--s2); 963 } while (--len != 0); 964 } 965 } 966 } 967 968 /* 969 * Copy src to dst using safe memory accesses, up to either the specified 970 * length, or the point that a nul byte is encountered. The src is assumed to 971 * be unsafe memory specified by the DIF program. The dst is assumed to be 972 * safe memory that we can store to directly because it is managed by DTrace. 973 * Unlike dtrace_bcopy(), overlapping regions are not handled. 974 */ 975 static void 976 dtrace_strcpy(const void *src, void *dst, size_t len) 977 { 978 if (len != 0) { 979 uint8_t *s1 = dst, c; 980 const uint8_t *s2 = src; 981 982 do { 983 *s1++ = c = dtrace_load8((uintptr_t)s2++); 984 } while (--len != 0 && c != '\0'); 985 } 986 } 987 988 /* 989 * Copy src to dst, deriving the size and type from the specified (BYREF) 990 * variable type. The src is assumed to be unsafe memory specified by the DIF 991 * program. The dst is assumed to be DTrace variable memory that is of the 992 * specified type; we assume that we can store to directly. 993 */ 994 static void 995 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 996 { 997 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 998 999 if (type->dtdt_kind == DIF_TYPE_STRING) { 1000 dtrace_strcpy(src, dst, type->dtdt_size); 1001 } else { 1002 dtrace_bcopy(src, dst, type->dtdt_size); 1003 } 1004 } 1005 1006 /* 1007 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1008 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1009 * safe memory that we can access directly because it is managed by DTrace. 1010 */ 1011 static int 1012 dtrace_bcmp(const void *s1, const void *s2, size_t len) 1013 { 1014 volatile uint16_t *flags; 1015 1016 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 1017 1018 if (s1 == s2) 1019 return (0); 1020 1021 if (s1 == NULL || s2 == NULL) 1022 return (1); 1023 1024 if (s1 != s2 && len != 0) { 1025 const uint8_t *ps1 = s1; 1026 const uint8_t *ps2 = s2; 1027 1028 do { 1029 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1030 return (1); 1031 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1032 } 1033 return (0); 1034 } 1035 1036 /* 1037 * Zero the specified region using a simple byte-by-byte loop. Note that this 1038 * is for safe DTrace-managed memory only. 1039 */ 1040 static void 1041 dtrace_bzero(void *dst, size_t len) 1042 { 1043 uchar_t *cp; 1044 1045 for (cp = dst; len != 0; len--) 1046 *cp++ = 0; 1047 } 1048 1049 static void 1050 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1051 { 1052 uint64_t result[2]; 1053 1054 result[0] = addend1[0] + addend2[0]; 1055 result[1] = addend1[1] + addend2[1] + 1056 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1057 1058 sum[0] = result[0]; 1059 sum[1] = result[1]; 1060 } 1061 1062 /* 1063 * Shift the 128-bit value in a by b. If b is positive, shift left. 1064 * If b is negative, shift right. 1065 */ 1066 static void 1067 dtrace_shift_128(uint64_t *a, int b) 1068 { 1069 uint64_t mask; 1070 1071 if (b == 0) 1072 return; 1073 1074 if (b < 0) { 1075 b = -b; 1076 if (b >= 64) { 1077 a[0] = a[1] >> (b - 64); 1078 a[1] = 0; 1079 } else { 1080 a[0] >>= b; 1081 mask = 1LL << (64 - b); 1082 mask -= 1; 1083 a[0] |= ((a[1] & mask) << (64 - b)); 1084 a[1] >>= b; 1085 } 1086 } else { 1087 if (b >= 64) { 1088 a[1] = a[0] << (b - 64); 1089 a[0] = 0; 1090 } else { 1091 a[1] <<= b; 1092 mask = a[0] >> (64 - b); 1093 a[1] |= mask; 1094 a[0] <<= b; 1095 } 1096 } 1097 } 1098 1099 /* 1100 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1101 * use native multiplication on those, and then re-combine into the 1102 * resulting 128-bit value. 1103 * 1104 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1105 * hi1 * hi2 << 64 + 1106 * hi1 * lo2 << 32 + 1107 * hi2 * lo1 << 32 + 1108 * lo1 * lo2 1109 */ 1110 static void 1111 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1112 { 1113 uint64_t hi1, hi2, lo1, lo2; 1114 uint64_t tmp[2]; 1115 1116 hi1 = factor1 >> 32; 1117 hi2 = factor2 >> 32; 1118 1119 lo1 = factor1 & DT_MASK_LO; 1120 lo2 = factor2 & DT_MASK_LO; 1121 1122 product[0] = lo1 * lo2; 1123 product[1] = hi1 * hi2; 1124 1125 tmp[0] = hi1 * lo2; 1126 tmp[1] = 0; 1127 dtrace_shift_128(tmp, 32); 1128 dtrace_add_128(product, tmp, product); 1129 1130 tmp[0] = hi2 * lo1; 1131 tmp[1] = 0; 1132 dtrace_shift_128(tmp, 32); 1133 dtrace_add_128(product, tmp, product); 1134 } 1135 1136 /* 1137 * This privilege check should be used by actions and subroutines to 1138 * verify that the user credentials of the process that enabled the 1139 * invoking ECB match the target credentials 1140 */ 1141 static int 1142 dtrace_priv_proc_common_user(dtrace_state_t *state) 1143 { 1144 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1145 1146 /* 1147 * We should always have a non-NULL state cred here, since if cred 1148 * is null (anonymous tracing), we fast-path bypass this routine. 1149 */ 1150 ASSERT(s_cr != NULL); 1151 1152 if ((cr = CRED()) != NULL && 1153 s_cr->cr_uid == cr->cr_uid && 1154 s_cr->cr_uid == cr->cr_ruid && 1155 s_cr->cr_uid == cr->cr_suid && 1156 s_cr->cr_gid == cr->cr_gid && 1157 s_cr->cr_gid == cr->cr_rgid && 1158 s_cr->cr_gid == cr->cr_sgid) 1159 return (1); 1160 1161 return (0); 1162 } 1163 1164 /* 1165 * This privilege check should be used by actions and subroutines to 1166 * verify that the zone of the process that enabled the invoking ECB 1167 * matches the target credentials 1168 */ 1169 static int 1170 dtrace_priv_proc_common_zone(dtrace_state_t *state) 1171 { 1172 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1173 1174 /* 1175 * We should always have a non-NULL state cred here, since if cred 1176 * is null (anonymous tracing), we fast-path bypass this routine. 1177 */ 1178 ASSERT(s_cr != NULL); 1179 1180 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone) 1181 return (1); 1182 1183 return (0); 1184 } 1185 1186 /* 1187 * This privilege check should be used by actions and subroutines to 1188 * verify that the process has not setuid or changed credentials. 1189 */ 1190 static int 1191 dtrace_priv_proc_common_nocd() 1192 { 1193 proc_t *proc; 1194 1195 if ((proc = ttoproc(curthread)) != NULL && 1196 !(proc->p_flag & SNOCD)) 1197 return (1); 1198 1199 return (0); 1200 } 1201 1202 static int 1203 dtrace_priv_proc_destructive(dtrace_state_t *state, dtrace_mstate_t *mstate) 1204 { 1205 int action = state->dts_cred.dcr_action; 1206 1207 if (!(mstate->dtms_access & DTRACE_ACCESS_PROC)) 1208 goto bad; 1209 1210 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1211 dtrace_priv_proc_common_zone(state) == 0) 1212 goto bad; 1213 1214 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1215 dtrace_priv_proc_common_user(state) == 0) 1216 goto bad; 1217 1218 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1219 dtrace_priv_proc_common_nocd() == 0) 1220 goto bad; 1221 1222 return (1); 1223 1224 bad: 1225 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1226 1227 return (0); 1228 } 1229 1230 static int 1231 dtrace_priv_proc_control(dtrace_state_t *state, dtrace_mstate_t *mstate) 1232 { 1233 if (mstate->dtms_access & DTRACE_ACCESS_PROC) { 1234 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1235 return (1); 1236 1237 if (dtrace_priv_proc_common_zone(state) && 1238 dtrace_priv_proc_common_user(state) && 1239 dtrace_priv_proc_common_nocd()) 1240 return (1); 1241 } 1242 1243 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1244 1245 return (0); 1246 } 1247 1248 static int 1249 dtrace_priv_proc(dtrace_state_t *state, dtrace_mstate_t *mstate) 1250 { 1251 if ((mstate->dtms_access & DTRACE_ACCESS_PROC) && 1252 (state->dts_cred.dcr_action & DTRACE_CRA_PROC)) 1253 return (1); 1254 1255 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1256 1257 return (0); 1258 } 1259 1260 static int 1261 dtrace_priv_kernel(dtrace_state_t *state) 1262 { 1263 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1264 return (1); 1265 1266 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1267 1268 return (0); 1269 } 1270 1271 static int 1272 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1273 { 1274 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1275 return (1); 1276 1277 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1278 1279 return (0); 1280 } 1281 1282 /* 1283 * Determine if the dte_cond of the specified ECB allows for processing of 1284 * the current probe to continue. Note that this routine may allow continued 1285 * processing, but with access(es) stripped from the mstate's dtms_access 1286 * field. 1287 */ 1288 static int 1289 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate, 1290 dtrace_ecb_t *ecb) 1291 { 1292 dtrace_probe_t *probe = ecb->dte_probe; 1293 dtrace_provider_t *prov = probe->dtpr_provider; 1294 dtrace_pops_t *pops = &prov->dtpv_pops; 1295 int mode = DTRACE_MODE_NOPRIV_DROP; 1296 1297 ASSERT(ecb->dte_cond); 1298 1299 if (pops->dtps_mode != NULL) { 1300 mode = pops->dtps_mode(prov->dtpv_arg, 1301 probe->dtpr_id, probe->dtpr_arg); 1302 1303 ASSERT(mode & (DTRACE_MODE_USER | DTRACE_MODE_KERNEL)); 1304 ASSERT(mode & (DTRACE_MODE_NOPRIV_RESTRICT | 1305 DTRACE_MODE_NOPRIV_DROP)); 1306 } 1307 1308 /* 1309 * If the dte_cond bits indicate that this consumer is only allowed to 1310 * see user-mode firings of this probe, check that the probe was fired 1311 * while in a user context. If that's not the case, use the policy 1312 * specified by the provider to determine if we drop the probe or 1313 * merely restrict operation. 1314 */ 1315 if (ecb->dte_cond & DTRACE_COND_USERMODE) { 1316 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP); 1317 1318 if (!(mode & DTRACE_MODE_USER)) { 1319 if (mode & DTRACE_MODE_NOPRIV_DROP) 1320 return (0); 1321 1322 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS; 1323 } 1324 } 1325 1326 /* 1327 * This is more subtle than it looks. We have to be absolutely certain 1328 * that CRED() isn't going to change out from under us so it's only 1329 * legit to examine that structure if we're in constrained situations. 1330 * Currently, the only times we'll this check is if a non-super-user 1331 * has enabled the profile or syscall providers -- providers that 1332 * allow visibility of all processes. For the profile case, the check 1333 * above will ensure that we're examining a user context. 1334 */ 1335 if (ecb->dte_cond & DTRACE_COND_OWNER) { 1336 cred_t *cr; 1337 cred_t *s_cr = state->dts_cred.dcr_cred; 1338 proc_t *proc; 1339 1340 ASSERT(s_cr != NULL); 1341 1342 if ((cr = CRED()) == NULL || 1343 s_cr->cr_uid != cr->cr_uid || 1344 s_cr->cr_uid != cr->cr_ruid || 1345 s_cr->cr_uid != cr->cr_suid || 1346 s_cr->cr_gid != cr->cr_gid || 1347 s_cr->cr_gid != cr->cr_rgid || 1348 s_cr->cr_gid != cr->cr_sgid || 1349 (proc = ttoproc(curthread)) == NULL || 1350 (proc->p_flag & SNOCD)) { 1351 if (mode & DTRACE_MODE_NOPRIV_DROP) 1352 return (0); 1353 1354 mstate->dtms_access &= ~DTRACE_ACCESS_PROC; 1355 } 1356 } 1357 1358 /* 1359 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not 1360 * in our zone, check to see if our mode policy is to restrict rather 1361 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC 1362 * and DTRACE_ACCESS_ARGS 1363 */ 1364 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 1365 cred_t *cr; 1366 cred_t *s_cr = state->dts_cred.dcr_cred; 1367 1368 ASSERT(s_cr != NULL); 1369 1370 if ((cr = CRED()) == NULL || 1371 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) { 1372 if (mode & DTRACE_MODE_NOPRIV_DROP) 1373 return (0); 1374 1375 mstate->dtms_access &= 1376 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS); 1377 } 1378 } 1379 1380 /* 1381 * By merits of being in this code path at all, we have limited 1382 * privileges. If the provider has indicated that limited privileges 1383 * are to denote restricted operation, strip off the ability to access 1384 * arguments. 1385 */ 1386 if (mode & DTRACE_MODE_LIMITEDPRIV_RESTRICT) 1387 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS; 1388 1389 return (1); 1390 } 1391 1392 /* 1393 * Note: not called from probe context. This function is called 1394 * asynchronously (and at a regular interval) from outside of probe context to 1395 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1396 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1397 */ 1398 void 1399 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1400 { 1401 dtrace_dynvar_t *dirty; 1402 dtrace_dstate_percpu_t *dcpu; 1403 dtrace_dynvar_t **rinsep; 1404 int i, j, work = 0; 1405 1406 for (i = 0; i < NCPU; i++) { 1407 dcpu = &dstate->dtds_percpu[i]; 1408 rinsep = &dcpu->dtdsc_rinsing; 1409 1410 /* 1411 * If the dirty list is NULL, there is no dirty work to do. 1412 */ 1413 if (dcpu->dtdsc_dirty == NULL) 1414 continue; 1415 1416 if (dcpu->dtdsc_rinsing != NULL) { 1417 /* 1418 * If the rinsing list is non-NULL, then it is because 1419 * this CPU was selected to accept another CPU's 1420 * dirty list -- and since that time, dirty buffers 1421 * have accumulated. This is a highly unlikely 1422 * condition, but we choose to ignore the dirty 1423 * buffers -- they'll be picked up a future cleanse. 1424 */ 1425 continue; 1426 } 1427 1428 if (dcpu->dtdsc_clean != NULL) { 1429 /* 1430 * If the clean list is non-NULL, then we're in a 1431 * situation where a CPU has done deallocations (we 1432 * have a non-NULL dirty list) but no allocations (we 1433 * also have a non-NULL clean list). We can't simply 1434 * move the dirty list into the clean list on this 1435 * CPU, yet we also don't want to allow this condition 1436 * to persist, lest a short clean list prevent a 1437 * massive dirty list from being cleaned (which in 1438 * turn could lead to otherwise avoidable dynamic 1439 * drops). To deal with this, we look for some CPU 1440 * with a NULL clean list, NULL dirty list, and NULL 1441 * rinsing list -- and then we borrow this CPU to 1442 * rinse our dirty list. 1443 */ 1444 for (j = 0; j < NCPU; j++) { 1445 dtrace_dstate_percpu_t *rinser; 1446 1447 rinser = &dstate->dtds_percpu[j]; 1448 1449 if (rinser->dtdsc_rinsing != NULL) 1450 continue; 1451 1452 if (rinser->dtdsc_dirty != NULL) 1453 continue; 1454 1455 if (rinser->dtdsc_clean != NULL) 1456 continue; 1457 1458 rinsep = &rinser->dtdsc_rinsing; 1459 break; 1460 } 1461 1462 if (j == NCPU) { 1463 /* 1464 * We were unable to find another CPU that 1465 * could accept this dirty list -- we are 1466 * therefore unable to clean it now. 1467 */ 1468 dtrace_dynvar_failclean++; 1469 continue; 1470 } 1471 } 1472 1473 work = 1; 1474 1475 /* 1476 * Atomically move the dirty list aside. 1477 */ 1478 do { 1479 dirty = dcpu->dtdsc_dirty; 1480 1481 /* 1482 * Before we zap the dirty list, set the rinsing list. 1483 * (This allows for a potential assertion in 1484 * dtrace_dynvar(): if a free dynamic variable appears 1485 * on a hash chain, either the dirty list or the 1486 * rinsing list for some CPU must be non-NULL.) 1487 */ 1488 *rinsep = dirty; 1489 dtrace_membar_producer(); 1490 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1491 dirty, NULL) != dirty); 1492 } 1493 1494 if (!work) { 1495 /* 1496 * We have no work to do; we can simply return. 1497 */ 1498 return; 1499 } 1500 1501 dtrace_sync(); 1502 1503 for (i = 0; i < NCPU; i++) { 1504 dcpu = &dstate->dtds_percpu[i]; 1505 1506 if (dcpu->dtdsc_rinsing == NULL) 1507 continue; 1508 1509 /* 1510 * We are now guaranteed that no hash chain contains a pointer 1511 * into this dirty list; we can make it clean. 1512 */ 1513 ASSERT(dcpu->dtdsc_clean == NULL); 1514 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1515 dcpu->dtdsc_rinsing = NULL; 1516 } 1517 1518 /* 1519 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1520 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1521 * This prevents a race whereby a CPU incorrectly decides that 1522 * the state should be something other than DTRACE_DSTATE_CLEAN 1523 * after dtrace_dynvar_clean() has completed. 1524 */ 1525 dtrace_sync(); 1526 1527 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1528 } 1529 1530 /* 1531 * Depending on the value of the op parameter, this function looks-up, 1532 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1533 * allocation is requested, this function will return a pointer to a 1534 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1535 * variable can be allocated. If NULL is returned, the appropriate counter 1536 * will be incremented. 1537 */ 1538 dtrace_dynvar_t * 1539 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1540 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1541 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1542 { 1543 uint64_t hashval = DTRACE_DYNHASH_VALID; 1544 dtrace_dynhash_t *hash = dstate->dtds_hash; 1545 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1546 processorid_t me = CPU->cpu_id, cpu = me; 1547 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1548 size_t bucket, ksize; 1549 size_t chunksize = dstate->dtds_chunksize; 1550 uintptr_t kdata, lock, nstate; 1551 uint_t i; 1552 1553 ASSERT(nkeys != 0); 1554 1555 /* 1556 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1557 * algorithm. For the by-value portions, we perform the algorithm in 1558 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1559 * bit, and seems to have only a minute effect on distribution. For 1560 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1561 * over each referenced byte. It's painful to do this, but it's much 1562 * better than pathological hash distribution. The efficacy of the 1563 * hashing algorithm (and a comparison with other algorithms) may be 1564 * found by running the ::dtrace_dynstat MDB dcmd. 1565 */ 1566 for (i = 0; i < nkeys; i++) { 1567 if (key[i].dttk_size == 0) { 1568 uint64_t val = key[i].dttk_value; 1569 1570 hashval += (val >> 48) & 0xffff; 1571 hashval += (hashval << 10); 1572 hashval ^= (hashval >> 6); 1573 1574 hashval += (val >> 32) & 0xffff; 1575 hashval += (hashval << 10); 1576 hashval ^= (hashval >> 6); 1577 1578 hashval += (val >> 16) & 0xffff; 1579 hashval += (hashval << 10); 1580 hashval ^= (hashval >> 6); 1581 1582 hashval += val & 0xffff; 1583 hashval += (hashval << 10); 1584 hashval ^= (hashval >> 6); 1585 } else { 1586 /* 1587 * This is incredibly painful, but it beats the hell 1588 * out of the alternative. 1589 */ 1590 uint64_t j, size = key[i].dttk_size; 1591 uintptr_t base = (uintptr_t)key[i].dttk_value; 1592 1593 if (!dtrace_canload(base, size, mstate, vstate)) 1594 break; 1595 1596 for (j = 0; j < size; j++) { 1597 hashval += dtrace_load8(base + j); 1598 hashval += (hashval << 10); 1599 hashval ^= (hashval >> 6); 1600 } 1601 } 1602 } 1603 1604 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1605 return (NULL); 1606 1607 hashval += (hashval << 3); 1608 hashval ^= (hashval >> 11); 1609 hashval += (hashval << 15); 1610 1611 /* 1612 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1613 * comes out to be one of our two sentinel hash values. If this 1614 * actually happens, we set the hashval to be a value known to be a 1615 * non-sentinel value. 1616 */ 1617 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1618 hashval = DTRACE_DYNHASH_VALID; 1619 1620 /* 1621 * Yes, it's painful to do a divide here. If the cycle count becomes 1622 * important here, tricks can be pulled to reduce it. (However, it's 1623 * critical that hash collisions be kept to an absolute minimum; 1624 * they're much more painful than a divide.) It's better to have a 1625 * solution that generates few collisions and still keeps things 1626 * relatively simple. 1627 */ 1628 bucket = hashval % dstate->dtds_hashsize; 1629 1630 if (op == DTRACE_DYNVAR_DEALLOC) { 1631 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1632 1633 for (;;) { 1634 while ((lock = *lockp) & 1) 1635 continue; 1636 1637 if (dtrace_casptr((void *)lockp, 1638 (void *)lock, (void *)(lock + 1)) == (void *)lock) 1639 break; 1640 } 1641 1642 dtrace_membar_producer(); 1643 } 1644 1645 top: 1646 prev = NULL; 1647 lock = hash[bucket].dtdh_lock; 1648 1649 dtrace_membar_consumer(); 1650 1651 start = hash[bucket].dtdh_chain; 1652 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1653 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1654 op != DTRACE_DYNVAR_DEALLOC)); 1655 1656 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1657 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1658 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1659 1660 if (dvar->dtdv_hashval != hashval) { 1661 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1662 /* 1663 * We've reached the sink, and therefore the 1664 * end of the hash chain; we can kick out of 1665 * the loop knowing that we have seen a valid 1666 * snapshot of state. 1667 */ 1668 ASSERT(dvar->dtdv_next == NULL); 1669 ASSERT(dvar == &dtrace_dynhash_sink); 1670 break; 1671 } 1672 1673 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1674 /* 1675 * We've gone off the rails: somewhere along 1676 * the line, one of the members of this hash 1677 * chain was deleted. Note that we could also 1678 * detect this by simply letting this loop run 1679 * to completion, as we would eventually hit 1680 * the end of the dirty list. However, we 1681 * want to avoid running the length of the 1682 * dirty list unnecessarily (it might be quite 1683 * long), so we catch this as early as 1684 * possible by detecting the hash marker. In 1685 * this case, we simply set dvar to NULL and 1686 * break; the conditional after the loop will 1687 * send us back to top. 1688 */ 1689 dvar = NULL; 1690 break; 1691 } 1692 1693 goto next; 1694 } 1695 1696 if (dtuple->dtt_nkeys != nkeys) 1697 goto next; 1698 1699 for (i = 0; i < nkeys; i++, dkey++) { 1700 if (dkey->dttk_size != key[i].dttk_size) 1701 goto next; /* size or type mismatch */ 1702 1703 if (dkey->dttk_size != 0) { 1704 if (dtrace_bcmp( 1705 (void *)(uintptr_t)key[i].dttk_value, 1706 (void *)(uintptr_t)dkey->dttk_value, 1707 dkey->dttk_size)) 1708 goto next; 1709 } else { 1710 if (dkey->dttk_value != key[i].dttk_value) 1711 goto next; 1712 } 1713 } 1714 1715 if (op != DTRACE_DYNVAR_DEALLOC) 1716 return (dvar); 1717 1718 ASSERT(dvar->dtdv_next == NULL || 1719 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1720 1721 if (prev != NULL) { 1722 ASSERT(hash[bucket].dtdh_chain != dvar); 1723 ASSERT(start != dvar); 1724 ASSERT(prev->dtdv_next == dvar); 1725 prev->dtdv_next = dvar->dtdv_next; 1726 } else { 1727 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1728 start, dvar->dtdv_next) != start) { 1729 /* 1730 * We have failed to atomically swing the 1731 * hash table head pointer, presumably because 1732 * of a conflicting allocation on another CPU. 1733 * We need to reread the hash chain and try 1734 * again. 1735 */ 1736 goto top; 1737 } 1738 } 1739 1740 dtrace_membar_producer(); 1741 1742 /* 1743 * Now set the hash value to indicate that it's free. 1744 */ 1745 ASSERT(hash[bucket].dtdh_chain != dvar); 1746 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1747 1748 dtrace_membar_producer(); 1749 1750 /* 1751 * Set the next pointer to point at the dirty list, and 1752 * atomically swing the dirty pointer to the newly freed dvar. 1753 */ 1754 do { 1755 next = dcpu->dtdsc_dirty; 1756 dvar->dtdv_next = next; 1757 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1758 1759 /* 1760 * Finally, unlock this hash bucket. 1761 */ 1762 ASSERT(hash[bucket].dtdh_lock == lock); 1763 ASSERT(lock & 1); 1764 hash[bucket].dtdh_lock++; 1765 1766 return (NULL); 1767 next: 1768 prev = dvar; 1769 continue; 1770 } 1771 1772 if (dvar == NULL) { 1773 /* 1774 * If dvar is NULL, it is because we went off the rails: 1775 * one of the elements that we traversed in the hash chain 1776 * was deleted while we were traversing it. In this case, 1777 * we assert that we aren't doing a dealloc (deallocs lock 1778 * the hash bucket to prevent themselves from racing with 1779 * one another), and retry the hash chain traversal. 1780 */ 1781 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1782 goto top; 1783 } 1784 1785 if (op != DTRACE_DYNVAR_ALLOC) { 1786 /* 1787 * If we are not to allocate a new variable, we want to 1788 * return NULL now. Before we return, check that the value 1789 * of the lock word hasn't changed. If it has, we may have 1790 * seen an inconsistent snapshot. 1791 */ 1792 if (op == DTRACE_DYNVAR_NOALLOC) { 1793 if (hash[bucket].dtdh_lock != lock) 1794 goto top; 1795 } else { 1796 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1797 ASSERT(hash[bucket].dtdh_lock == lock); 1798 ASSERT(lock & 1); 1799 hash[bucket].dtdh_lock++; 1800 } 1801 1802 return (NULL); 1803 } 1804 1805 /* 1806 * We need to allocate a new dynamic variable. The size we need is the 1807 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1808 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1809 * the size of any referred-to data (dsize). We then round the final 1810 * size up to the chunksize for allocation. 1811 */ 1812 for (ksize = 0, i = 0; i < nkeys; i++) 1813 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1814 1815 /* 1816 * This should be pretty much impossible, but could happen if, say, 1817 * strange DIF specified the tuple. Ideally, this should be an 1818 * assertion and not an error condition -- but that requires that the 1819 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1820 * bullet-proof. (That is, it must not be able to be fooled by 1821 * malicious DIF.) Given the lack of backwards branches in DIF, 1822 * solving this would presumably not amount to solving the Halting 1823 * Problem -- but it still seems awfully hard. 1824 */ 1825 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1826 ksize + dsize > chunksize) { 1827 dcpu->dtdsc_drops++; 1828 return (NULL); 1829 } 1830 1831 nstate = DTRACE_DSTATE_EMPTY; 1832 1833 do { 1834 retry: 1835 free = dcpu->dtdsc_free; 1836 1837 if (free == NULL) { 1838 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1839 void *rval; 1840 1841 if (clean == NULL) { 1842 /* 1843 * We're out of dynamic variable space on 1844 * this CPU. Unless we have tried all CPUs, 1845 * we'll try to allocate from a different 1846 * CPU. 1847 */ 1848 switch (dstate->dtds_state) { 1849 case DTRACE_DSTATE_CLEAN: { 1850 void *sp = &dstate->dtds_state; 1851 1852 if (++cpu >= NCPU) 1853 cpu = 0; 1854 1855 if (dcpu->dtdsc_dirty != NULL && 1856 nstate == DTRACE_DSTATE_EMPTY) 1857 nstate = DTRACE_DSTATE_DIRTY; 1858 1859 if (dcpu->dtdsc_rinsing != NULL) 1860 nstate = DTRACE_DSTATE_RINSING; 1861 1862 dcpu = &dstate->dtds_percpu[cpu]; 1863 1864 if (cpu != me) 1865 goto retry; 1866 1867 (void) dtrace_cas32(sp, 1868 DTRACE_DSTATE_CLEAN, nstate); 1869 1870 /* 1871 * To increment the correct bean 1872 * counter, take another lap. 1873 */ 1874 goto retry; 1875 } 1876 1877 case DTRACE_DSTATE_DIRTY: 1878 dcpu->dtdsc_dirty_drops++; 1879 break; 1880 1881 case DTRACE_DSTATE_RINSING: 1882 dcpu->dtdsc_rinsing_drops++; 1883 break; 1884 1885 case DTRACE_DSTATE_EMPTY: 1886 dcpu->dtdsc_drops++; 1887 break; 1888 } 1889 1890 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1891 return (NULL); 1892 } 1893 1894 /* 1895 * The clean list appears to be non-empty. We want to 1896 * move the clean list to the free list; we start by 1897 * moving the clean pointer aside. 1898 */ 1899 if (dtrace_casptr(&dcpu->dtdsc_clean, 1900 clean, NULL) != clean) { 1901 /* 1902 * We are in one of two situations: 1903 * 1904 * (a) The clean list was switched to the 1905 * free list by another CPU. 1906 * 1907 * (b) The clean list was added to by the 1908 * cleansing cyclic. 1909 * 1910 * In either of these situations, we can 1911 * just reattempt the free list allocation. 1912 */ 1913 goto retry; 1914 } 1915 1916 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1917 1918 /* 1919 * Now we'll move the clean list to our free list. 1920 * It's impossible for this to fail: the only way 1921 * the free list can be updated is through this 1922 * code path, and only one CPU can own the clean list. 1923 * Thus, it would only be possible for this to fail if 1924 * this code were racing with dtrace_dynvar_clean(). 1925 * (That is, if dtrace_dynvar_clean() updated the clean 1926 * list, and we ended up racing to update the free 1927 * list.) This race is prevented by the dtrace_sync() 1928 * in dtrace_dynvar_clean() -- which flushes the 1929 * owners of the clean lists out before resetting 1930 * the clean lists. 1931 */ 1932 dcpu = &dstate->dtds_percpu[me]; 1933 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1934 ASSERT(rval == NULL); 1935 goto retry; 1936 } 1937 1938 dvar = free; 1939 new_free = dvar->dtdv_next; 1940 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1941 1942 /* 1943 * We have now allocated a new chunk. We copy the tuple keys into the 1944 * tuple array and copy any referenced key data into the data space 1945 * following the tuple array. As we do this, we relocate dttk_value 1946 * in the final tuple to point to the key data address in the chunk. 1947 */ 1948 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1949 dvar->dtdv_data = (void *)(kdata + ksize); 1950 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1951 1952 for (i = 0; i < nkeys; i++) { 1953 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1954 size_t kesize = key[i].dttk_size; 1955 1956 if (kesize != 0) { 1957 dtrace_bcopy( 1958 (const void *)(uintptr_t)key[i].dttk_value, 1959 (void *)kdata, kesize); 1960 dkey->dttk_value = kdata; 1961 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1962 } else { 1963 dkey->dttk_value = key[i].dttk_value; 1964 } 1965 1966 dkey->dttk_size = kesize; 1967 } 1968 1969 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1970 dvar->dtdv_hashval = hashval; 1971 dvar->dtdv_next = start; 1972 1973 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1974 return (dvar); 1975 1976 /* 1977 * The cas has failed. Either another CPU is adding an element to 1978 * this hash chain, or another CPU is deleting an element from this 1979 * hash chain. The simplest way to deal with both of these cases 1980 * (though not necessarily the most efficient) is to free our 1981 * allocated block and tail-call ourselves. Note that the free is 1982 * to the dirty list and _not_ to the free list. This is to prevent 1983 * races with allocators, above. 1984 */ 1985 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1986 1987 dtrace_membar_producer(); 1988 1989 do { 1990 free = dcpu->dtdsc_dirty; 1991 dvar->dtdv_next = free; 1992 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1993 1994 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1995 } 1996 1997 /*ARGSUSED*/ 1998 static void 1999 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 2000 { 2001 if ((int64_t)nval < (int64_t)*oval) 2002 *oval = nval; 2003 } 2004 2005 /*ARGSUSED*/ 2006 static void 2007 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 2008 { 2009 if ((int64_t)nval > (int64_t)*oval) 2010 *oval = nval; 2011 } 2012 2013 static void 2014 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 2015 { 2016 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 2017 int64_t val = (int64_t)nval; 2018 2019 if (val < 0) { 2020 for (i = 0; i < zero; i++) { 2021 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 2022 quanta[i] += incr; 2023 return; 2024 } 2025 } 2026 } else { 2027 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 2028 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 2029 quanta[i - 1] += incr; 2030 return; 2031 } 2032 } 2033 2034 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 2035 return; 2036 } 2037 2038 ASSERT(0); 2039 } 2040 2041 static void 2042 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 2043 { 2044 uint64_t arg = *lquanta++; 2045 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 2046 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 2047 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 2048 int32_t val = (int32_t)nval, level; 2049 2050 ASSERT(step != 0); 2051 ASSERT(levels != 0); 2052 2053 if (val < base) { 2054 /* 2055 * This is an underflow. 2056 */ 2057 lquanta[0] += incr; 2058 return; 2059 } 2060 2061 level = (val - base) / step; 2062 2063 if (level < levels) { 2064 lquanta[level + 1] += incr; 2065 return; 2066 } 2067 2068 /* 2069 * This is an overflow. 2070 */ 2071 lquanta[levels + 1] += incr; 2072 } 2073 2074 static int 2075 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 2076 uint16_t high, uint16_t nsteps, int64_t value) 2077 { 2078 int64_t this = 1, last, next; 2079 int base = 1, order; 2080 2081 ASSERT(factor <= nsteps); 2082 ASSERT(nsteps % factor == 0); 2083 2084 for (order = 0; order < low; order++) 2085 this *= factor; 2086 2087 /* 2088 * If our value is less than our factor taken to the power of the 2089 * low order of magnitude, it goes into the zeroth bucket. 2090 */ 2091 if (value < (last = this)) 2092 return (0); 2093 2094 for (this *= factor; order <= high; order++) { 2095 int nbuckets = this > nsteps ? nsteps : this; 2096 2097 if ((next = this * factor) < this) { 2098 /* 2099 * We should not generally get log/linear quantizations 2100 * with a high magnitude that allows 64-bits to 2101 * overflow, but we nonetheless protect against this 2102 * by explicitly checking for overflow, and clamping 2103 * our value accordingly. 2104 */ 2105 value = this - 1; 2106 } 2107 2108 if (value < this) { 2109 /* 2110 * If our value lies within this order of magnitude, 2111 * determine its position by taking the offset within 2112 * the order of magnitude, dividing by the bucket 2113 * width, and adding to our (accumulated) base. 2114 */ 2115 return (base + (value - last) / (this / nbuckets)); 2116 } 2117 2118 base += nbuckets - (nbuckets / factor); 2119 last = this; 2120 this = next; 2121 } 2122 2123 /* 2124 * Our value is greater than or equal to our factor taken to the 2125 * power of one plus the high magnitude -- return the top bucket. 2126 */ 2127 return (base); 2128 } 2129 2130 static void 2131 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 2132 { 2133 uint64_t arg = *llquanta++; 2134 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 2135 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 2136 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 2137 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 2138 2139 llquanta[dtrace_aggregate_llquantize_bucket(factor, 2140 low, high, nsteps, nval)] += incr; 2141 } 2142 2143 /*ARGSUSED*/ 2144 static void 2145 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 2146 { 2147 data[0]++; 2148 data[1] += nval; 2149 } 2150 2151 /*ARGSUSED*/ 2152 static void 2153 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2154 { 2155 int64_t snval = (int64_t)nval; 2156 uint64_t tmp[2]; 2157 2158 data[0]++; 2159 data[1] += nval; 2160 2161 /* 2162 * What we want to say here is: 2163 * 2164 * data[2] += nval * nval; 2165 * 2166 * But given that nval is 64-bit, we could easily overflow, so 2167 * we do this as 128-bit arithmetic. 2168 */ 2169 if (snval < 0) 2170 snval = -snval; 2171 2172 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2173 dtrace_add_128(data + 2, tmp, data + 2); 2174 } 2175 2176 /*ARGSUSED*/ 2177 static void 2178 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2179 { 2180 *oval = *oval + 1; 2181 } 2182 2183 /*ARGSUSED*/ 2184 static void 2185 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2186 { 2187 *oval += nval; 2188 } 2189 2190 /* 2191 * Aggregate given the tuple in the principal data buffer, and the aggregating 2192 * action denoted by the specified dtrace_aggregation_t. The aggregation 2193 * buffer is specified as the buf parameter. This routine does not return 2194 * failure; if there is no space in the aggregation buffer, the data will be 2195 * dropped, and a corresponding counter incremented. 2196 */ 2197 static void 2198 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2199 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2200 { 2201 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2202 uint32_t i, ndx, size, fsize; 2203 uint32_t align = sizeof (uint64_t) - 1; 2204 dtrace_aggbuffer_t *agb; 2205 dtrace_aggkey_t *key; 2206 uint32_t hashval = 0, limit, isstr; 2207 caddr_t tomax, data, kdata; 2208 dtrace_actkind_t action; 2209 dtrace_action_t *act; 2210 uintptr_t offs; 2211 2212 if (buf == NULL) 2213 return; 2214 2215 if (!agg->dtag_hasarg) { 2216 /* 2217 * Currently, only quantize() and lquantize() take additional 2218 * arguments, and they have the same semantics: an increment 2219 * value that defaults to 1 when not present. If additional 2220 * aggregating actions take arguments, the setting of the 2221 * default argument value will presumably have to become more 2222 * sophisticated... 2223 */ 2224 arg = 1; 2225 } 2226 2227 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2228 size = rec->dtrd_offset - agg->dtag_base; 2229 fsize = size + rec->dtrd_size; 2230 2231 ASSERT(dbuf->dtb_tomax != NULL); 2232 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2233 2234 if ((tomax = buf->dtb_tomax) == NULL) { 2235 dtrace_buffer_drop(buf); 2236 return; 2237 } 2238 2239 /* 2240 * The metastructure is always at the bottom of the buffer. 2241 */ 2242 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2243 sizeof (dtrace_aggbuffer_t)); 2244 2245 if (buf->dtb_offset == 0) { 2246 /* 2247 * We just kludge up approximately 1/8th of the size to be 2248 * buckets. If this guess ends up being routinely 2249 * off-the-mark, we may need to dynamically readjust this 2250 * based on past performance. 2251 */ 2252 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2253 2254 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2255 (uintptr_t)tomax || hashsize == 0) { 2256 /* 2257 * We've been given a ludicrously small buffer; 2258 * increment our drop count and leave. 2259 */ 2260 dtrace_buffer_drop(buf); 2261 return; 2262 } 2263 2264 /* 2265 * And now, a pathetic attempt to try to get a an odd (or 2266 * perchance, a prime) hash size for better hash distribution. 2267 */ 2268 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2269 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2270 2271 agb->dtagb_hashsize = hashsize; 2272 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2273 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2274 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2275 2276 for (i = 0; i < agb->dtagb_hashsize; i++) 2277 agb->dtagb_hash[i] = NULL; 2278 } 2279 2280 ASSERT(agg->dtag_first != NULL); 2281 ASSERT(agg->dtag_first->dta_intuple); 2282 2283 /* 2284 * Calculate the hash value based on the key. Note that we _don't_ 2285 * include the aggid in the hashing (but we will store it as part of 2286 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2287 * algorithm: a simple, quick algorithm that has no known funnels, and 2288 * gets good distribution in practice. The efficacy of the hashing 2289 * algorithm (and a comparison with other algorithms) may be found by 2290 * running the ::dtrace_aggstat MDB dcmd. 2291 */ 2292 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2293 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2294 limit = i + act->dta_rec.dtrd_size; 2295 ASSERT(limit <= size); 2296 isstr = DTRACEACT_ISSTRING(act); 2297 2298 for (; i < limit; i++) { 2299 hashval += data[i]; 2300 hashval += (hashval << 10); 2301 hashval ^= (hashval >> 6); 2302 2303 if (isstr && data[i] == '\0') 2304 break; 2305 } 2306 } 2307 2308 hashval += (hashval << 3); 2309 hashval ^= (hashval >> 11); 2310 hashval += (hashval << 15); 2311 2312 /* 2313 * Yes, the divide here is expensive -- but it's generally the least 2314 * of the performance issues given the amount of data that we iterate 2315 * over to compute hash values, compare data, etc. 2316 */ 2317 ndx = hashval % agb->dtagb_hashsize; 2318 2319 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2320 ASSERT((caddr_t)key >= tomax); 2321 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2322 2323 if (hashval != key->dtak_hashval || key->dtak_size != size) 2324 continue; 2325 2326 kdata = key->dtak_data; 2327 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2328 2329 for (act = agg->dtag_first; act->dta_intuple; 2330 act = act->dta_next) { 2331 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2332 limit = i + act->dta_rec.dtrd_size; 2333 ASSERT(limit <= size); 2334 isstr = DTRACEACT_ISSTRING(act); 2335 2336 for (; i < limit; i++) { 2337 if (kdata[i] != data[i]) 2338 goto next; 2339 2340 if (isstr && data[i] == '\0') 2341 break; 2342 } 2343 } 2344 2345 if (action != key->dtak_action) { 2346 /* 2347 * We are aggregating on the same value in the same 2348 * aggregation with two different aggregating actions. 2349 * (This should have been picked up in the compiler, 2350 * so we may be dealing with errant or devious DIF.) 2351 * This is an error condition; we indicate as much, 2352 * and return. 2353 */ 2354 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2355 return; 2356 } 2357 2358 /* 2359 * This is a hit: we need to apply the aggregator to 2360 * the value at this key. 2361 */ 2362 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2363 return; 2364 next: 2365 continue; 2366 } 2367 2368 /* 2369 * We didn't find it. We need to allocate some zero-filled space, 2370 * link it into the hash table appropriately, and apply the aggregator 2371 * to the (zero-filled) value. 2372 */ 2373 offs = buf->dtb_offset; 2374 while (offs & (align - 1)) 2375 offs += sizeof (uint32_t); 2376 2377 /* 2378 * If we don't have enough room to both allocate a new key _and_ 2379 * its associated data, increment the drop count and return. 2380 */ 2381 if ((uintptr_t)tomax + offs + fsize > 2382 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2383 dtrace_buffer_drop(buf); 2384 return; 2385 } 2386 2387 /*CONSTCOND*/ 2388 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2389 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2390 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2391 2392 key->dtak_data = kdata = tomax + offs; 2393 buf->dtb_offset = offs + fsize; 2394 2395 /* 2396 * Now copy the data across. 2397 */ 2398 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2399 2400 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2401 kdata[i] = data[i]; 2402 2403 /* 2404 * Because strings are not zeroed out by default, we need to iterate 2405 * looking for actions that store strings, and we need to explicitly 2406 * pad these strings out with zeroes. 2407 */ 2408 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2409 int nul; 2410 2411 if (!DTRACEACT_ISSTRING(act)) 2412 continue; 2413 2414 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2415 limit = i + act->dta_rec.dtrd_size; 2416 ASSERT(limit <= size); 2417 2418 for (nul = 0; i < limit; i++) { 2419 if (nul) { 2420 kdata[i] = '\0'; 2421 continue; 2422 } 2423 2424 if (data[i] != '\0') 2425 continue; 2426 2427 nul = 1; 2428 } 2429 } 2430 2431 for (i = size; i < fsize; i++) 2432 kdata[i] = 0; 2433 2434 key->dtak_hashval = hashval; 2435 key->dtak_size = size; 2436 key->dtak_action = action; 2437 key->dtak_next = agb->dtagb_hash[ndx]; 2438 agb->dtagb_hash[ndx] = key; 2439 2440 /* 2441 * Finally, apply the aggregator. 2442 */ 2443 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2444 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2445 } 2446 2447 /* 2448 * Given consumer state, this routine finds a speculation in the INACTIVE 2449 * state and transitions it into the ACTIVE state. If there is no speculation 2450 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2451 * incremented -- it is up to the caller to take appropriate action. 2452 */ 2453 static int 2454 dtrace_speculation(dtrace_state_t *state) 2455 { 2456 int i = 0; 2457 dtrace_speculation_state_t current; 2458 uint32_t *stat = &state->dts_speculations_unavail, count; 2459 2460 while (i < state->dts_nspeculations) { 2461 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2462 2463 current = spec->dtsp_state; 2464 2465 if (current != DTRACESPEC_INACTIVE) { 2466 if (current == DTRACESPEC_COMMITTINGMANY || 2467 current == DTRACESPEC_COMMITTING || 2468 current == DTRACESPEC_DISCARDING) 2469 stat = &state->dts_speculations_busy; 2470 i++; 2471 continue; 2472 } 2473 2474 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2475 current, DTRACESPEC_ACTIVE) == current) 2476 return (i + 1); 2477 } 2478 2479 /* 2480 * We couldn't find a speculation. If we found as much as a single 2481 * busy speculation buffer, we'll attribute this failure as "busy" 2482 * instead of "unavail". 2483 */ 2484 do { 2485 count = *stat; 2486 } while (dtrace_cas32(stat, count, count + 1) != count); 2487 2488 return (0); 2489 } 2490 2491 /* 2492 * This routine commits an active speculation. If the specified speculation 2493 * is not in a valid state to perform a commit(), this routine will silently do 2494 * nothing. The state of the specified speculation is transitioned according 2495 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2496 */ 2497 static void 2498 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2499 dtrace_specid_t which) 2500 { 2501 dtrace_speculation_t *spec; 2502 dtrace_buffer_t *src, *dest; 2503 uintptr_t daddr, saddr, dlimit; 2504 dtrace_speculation_state_t current, new; 2505 intptr_t offs; 2506 2507 if (which == 0) 2508 return; 2509 2510 if (which > state->dts_nspeculations) { 2511 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2512 return; 2513 } 2514 2515 spec = &state->dts_speculations[which - 1]; 2516 src = &spec->dtsp_buffer[cpu]; 2517 dest = &state->dts_buffer[cpu]; 2518 2519 do { 2520 current = spec->dtsp_state; 2521 2522 if (current == DTRACESPEC_COMMITTINGMANY) 2523 break; 2524 2525 switch (current) { 2526 case DTRACESPEC_INACTIVE: 2527 case DTRACESPEC_DISCARDING: 2528 return; 2529 2530 case DTRACESPEC_COMMITTING: 2531 /* 2532 * This is only possible if we are (a) commit()'ing 2533 * without having done a prior speculate() on this CPU 2534 * and (b) racing with another commit() on a different 2535 * CPU. There's nothing to do -- we just assert that 2536 * our offset is 0. 2537 */ 2538 ASSERT(src->dtb_offset == 0); 2539 return; 2540 2541 case DTRACESPEC_ACTIVE: 2542 new = DTRACESPEC_COMMITTING; 2543 break; 2544 2545 case DTRACESPEC_ACTIVEONE: 2546 /* 2547 * This speculation is active on one CPU. If our 2548 * buffer offset is non-zero, we know that the one CPU 2549 * must be us. Otherwise, we are committing on a 2550 * different CPU from the speculate(), and we must 2551 * rely on being asynchronously cleaned. 2552 */ 2553 if (src->dtb_offset != 0) { 2554 new = DTRACESPEC_COMMITTING; 2555 break; 2556 } 2557 /*FALLTHROUGH*/ 2558 2559 case DTRACESPEC_ACTIVEMANY: 2560 new = DTRACESPEC_COMMITTINGMANY; 2561 break; 2562 2563 default: 2564 ASSERT(0); 2565 } 2566 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2567 current, new) != current); 2568 2569 /* 2570 * We have set the state to indicate that we are committing this 2571 * speculation. Now reserve the necessary space in the destination 2572 * buffer. 2573 */ 2574 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2575 sizeof (uint64_t), state, NULL)) < 0) { 2576 dtrace_buffer_drop(dest); 2577 goto out; 2578 } 2579 2580 /* 2581 * We have the space; copy the buffer across. (Note that this is a 2582 * highly subobtimal bcopy(); in the unlikely event that this becomes 2583 * a serious performance issue, a high-performance DTrace-specific 2584 * bcopy() should obviously be invented.) 2585 */ 2586 daddr = (uintptr_t)dest->dtb_tomax + offs; 2587 dlimit = daddr + src->dtb_offset; 2588 saddr = (uintptr_t)src->dtb_tomax; 2589 2590 /* 2591 * First, the aligned portion. 2592 */ 2593 while (dlimit - daddr >= sizeof (uint64_t)) { 2594 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2595 2596 daddr += sizeof (uint64_t); 2597 saddr += sizeof (uint64_t); 2598 } 2599 2600 /* 2601 * Now any left-over bit... 2602 */ 2603 while (dlimit - daddr) 2604 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2605 2606 /* 2607 * Finally, commit the reserved space in the destination buffer. 2608 */ 2609 dest->dtb_offset = offs + src->dtb_offset; 2610 2611 out: 2612 /* 2613 * If we're lucky enough to be the only active CPU on this speculation 2614 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2615 */ 2616 if (current == DTRACESPEC_ACTIVE || 2617 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2618 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2619 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2620 2621 ASSERT(rval == DTRACESPEC_COMMITTING); 2622 } 2623 2624 src->dtb_offset = 0; 2625 src->dtb_xamot_drops += src->dtb_drops; 2626 src->dtb_drops = 0; 2627 } 2628 2629 /* 2630 * This routine discards an active speculation. If the specified speculation 2631 * is not in a valid state to perform a discard(), this routine will silently 2632 * do nothing. The state of the specified speculation is transitioned 2633 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2634 */ 2635 static void 2636 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2637 dtrace_specid_t which) 2638 { 2639 dtrace_speculation_t *spec; 2640 dtrace_speculation_state_t current, new; 2641 dtrace_buffer_t *buf; 2642 2643 if (which == 0) 2644 return; 2645 2646 if (which > state->dts_nspeculations) { 2647 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2648 return; 2649 } 2650 2651 spec = &state->dts_speculations[which - 1]; 2652 buf = &spec->dtsp_buffer[cpu]; 2653 2654 do { 2655 current = spec->dtsp_state; 2656 2657 switch (current) { 2658 case DTRACESPEC_INACTIVE: 2659 case DTRACESPEC_COMMITTINGMANY: 2660 case DTRACESPEC_COMMITTING: 2661 case DTRACESPEC_DISCARDING: 2662 return; 2663 2664 case DTRACESPEC_ACTIVE: 2665 case DTRACESPEC_ACTIVEMANY: 2666 new = DTRACESPEC_DISCARDING; 2667 break; 2668 2669 case DTRACESPEC_ACTIVEONE: 2670 if (buf->dtb_offset != 0) { 2671 new = DTRACESPEC_INACTIVE; 2672 } else { 2673 new = DTRACESPEC_DISCARDING; 2674 } 2675 break; 2676 2677 default: 2678 ASSERT(0); 2679 } 2680 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2681 current, new) != current); 2682 2683 buf->dtb_offset = 0; 2684 buf->dtb_drops = 0; 2685 } 2686 2687 /* 2688 * Note: not called from probe context. This function is called 2689 * asynchronously from cross call context to clean any speculations that are 2690 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2691 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2692 * speculation. 2693 */ 2694 static void 2695 dtrace_speculation_clean_here(dtrace_state_t *state) 2696 { 2697 dtrace_icookie_t cookie; 2698 processorid_t cpu = CPU->cpu_id; 2699 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2700 dtrace_specid_t i; 2701 2702 cookie = dtrace_interrupt_disable(); 2703 2704 if (dest->dtb_tomax == NULL) { 2705 dtrace_interrupt_enable(cookie); 2706 return; 2707 } 2708 2709 for (i = 0; i < state->dts_nspeculations; i++) { 2710 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2711 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2712 2713 if (src->dtb_tomax == NULL) 2714 continue; 2715 2716 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2717 src->dtb_offset = 0; 2718 continue; 2719 } 2720 2721 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2722 continue; 2723 2724 if (src->dtb_offset == 0) 2725 continue; 2726 2727 dtrace_speculation_commit(state, cpu, i + 1); 2728 } 2729 2730 dtrace_interrupt_enable(cookie); 2731 } 2732 2733 /* 2734 * Note: not called from probe context. This function is called 2735 * asynchronously (and at a regular interval) to clean any speculations that 2736 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2737 * is work to be done, it cross calls all CPUs to perform that work; 2738 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2739 * INACTIVE state until they have been cleaned by all CPUs. 2740 */ 2741 static void 2742 dtrace_speculation_clean(dtrace_state_t *state) 2743 { 2744 int work = 0, rv; 2745 dtrace_specid_t i; 2746 2747 for (i = 0; i < state->dts_nspeculations; i++) { 2748 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2749 2750 ASSERT(!spec->dtsp_cleaning); 2751 2752 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2753 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2754 continue; 2755 2756 work++; 2757 spec->dtsp_cleaning = 1; 2758 } 2759 2760 if (!work) 2761 return; 2762 2763 dtrace_xcall(DTRACE_CPUALL, 2764 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2765 2766 /* 2767 * We now know that all CPUs have committed or discarded their 2768 * speculation buffers, as appropriate. We can now set the state 2769 * to inactive. 2770 */ 2771 for (i = 0; i < state->dts_nspeculations; i++) { 2772 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2773 dtrace_speculation_state_t current, new; 2774 2775 if (!spec->dtsp_cleaning) 2776 continue; 2777 2778 current = spec->dtsp_state; 2779 ASSERT(current == DTRACESPEC_DISCARDING || 2780 current == DTRACESPEC_COMMITTINGMANY); 2781 2782 new = DTRACESPEC_INACTIVE; 2783 2784 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2785 ASSERT(rv == current); 2786 spec->dtsp_cleaning = 0; 2787 } 2788 } 2789 2790 /* 2791 * Called as part of a speculate() to get the speculative buffer associated 2792 * with a given speculation. Returns NULL if the specified speculation is not 2793 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2794 * the active CPU is not the specified CPU -- the speculation will be 2795 * atomically transitioned into the ACTIVEMANY state. 2796 */ 2797 static dtrace_buffer_t * 2798 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2799 dtrace_specid_t which) 2800 { 2801 dtrace_speculation_t *spec; 2802 dtrace_speculation_state_t current, new; 2803 dtrace_buffer_t *buf; 2804 2805 if (which == 0) 2806 return (NULL); 2807 2808 if (which > state->dts_nspeculations) { 2809 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2810 return (NULL); 2811 } 2812 2813 spec = &state->dts_speculations[which - 1]; 2814 buf = &spec->dtsp_buffer[cpuid]; 2815 2816 do { 2817 current = spec->dtsp_state; 2818 2819 switch (current) { 2820 case DTRACESPEC_INACTIVE: 2821 case DTRACESPEC_COMMITTINGMANY: 2822 case DTRACESPEC_DISCARDING: 2823 return (NULL); 2824 2825 case DTRACESPEC_COMMITTING: 2826 ASSERT(buf->dtb_offset == 0); 2827 return (NULL); 2828 2829 case DTRACESPEC_ACTIVEONE: 2830 /* 2831 * This speculation is currently active on one CPU. 2832 * Check the offset in the buffer; if it's non-zero, 2833 * that CPU must be us (and we leave the state alone). 2834 * If it's zero, assume that we're starting on a new 2835 * CPU -- and change the state to indicate that the 2836 * speculation is active on more than one CPU. 2837 */ 2838 if (buf->dtb_offset != 0) 2839 return (buf); 2840 2841 new = DTRACESPEC_ACTIVEMANY; 2842 break; 2843 2844 case DTRACESPEC_ACTIVEMANY: 2845 return (buf); 2846 2847 case DTRACESPEC_ACTIVE: 2848 new = DTRACESPEC_ACTIVEONE; 2849 break; 2850 2851 default: 2852 ASSERT(0); 2853 } 2854 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2855 current, new) != current); 2856 2857 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2858 return (buf); 2859 } 2860 2861 /* 2862 * Return a string. In the event that the user lacks the privilege to access 2863 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2864 * don't fail access checking. 2865 * 2866 * dtrace_dif_variable() uses this routine as a helper for various 2867 * builtin values such as 'execname' and 'probefunc.' 2868 */ 2869 uintptr_t 2870 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2871 dtrace_mstate_t *mstate) 2872 { 2873 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2874 uintptr_t ret; 2875 size_t strsz; 2876 2877 /* 2878 * The easy case: this probe is allowed to read all of memory, so 2879 * we can just return this as a vanilla pointer. 2880 */ 2881 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2882 return (addr); 2883 2884 /* 2885 * This is the tougher case: we copy the string in question from 2886 * kernel memory into scratch memory and return it that way: this 2887 * ensures that we won't trip up when access checking tests the 2888 * BYREF return value. 2889 */ 2890 strsz = dtrace_strlen((char *)addr, size) + 1; 2891 2892 if (mstate->dtms_scratch_ptr + strsz > 2893 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2894 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2895 return (NULL); 2896 } 2897 2898 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2899 strsz); 2900 ret = mstate->dtms_scratch_ptr; 2901 mstate->dtms_scratch_ptr += strsz; 2902 return (ret); 2903 } 2904 2905 /* 2906 * This function implements the DIF emulator's variable lookups. The emulator 2907 * passes a reserved variable identifier and optional built-in array index. 2908 */ 2909 static uint64_t 2910 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2911 uint64_t ndx) 2912 { 2913 /* 2914 * If we're accessing one of the uncached arguments, we'll turn this 2915 * into a reference in the args array. 2916 */ 2917 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2918 ndx = v - DIF_VAR_ARG0; 2919 v = DIF_VAR_ARGS; 2920 } 2921 2922 switch (v) { 2923 case DIF_VAR_ARGS: 2924 if (!(mstate->dtms_access & DTRACE_ACCESS_ARGS)) { 2925 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= 2926 CPU_DTRACE_KPRIV; 2927 return (0); 2928 } 2929 2930 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2931 if (ndx >= sizeof (mstate->dtms_arg) / 2932 sizeof (mstate->dtms_arg[0])) { 2933 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2934 dtrace_provider_t *pv; 2935 uint64_t val; 2936 2937 pv = mstate->dtms_probe->dtpr_provider; 2938 if (pv->dtpv_pops.dtps_getargval != NULL) 2939 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2940 mstate->dtms_probe->dtpr_id, 2941 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2942 else 2943 val = dtrace_getarg(ndx, aframes); 2944 2945 /* 2946 * This is regrettably required to keep the compiler 2947 * from tail-optimizing the call to dtrace_getarg(). 2948 * The condition always evaluates to true, but the 2949 * compiler has no way of figuring that out a priori. 2950 * (None of this would be necessary if the compiler 2951 * could be relied upon to _always_ tail-optimize 2952 * the call to dtrace_getarg() -- but it can't.) 2953 */ 2954 if (mstate->dtms_probe != NULL) 2955 return (val); 2956 2957 ASSERT(0); 2958 } 2959 2960 return (mstate->dtms_arg[ndx]); 2961 2962 case DIF_VAR_UREGS: { 2963 klwp_t *lwp; 2964 2965 if (!dtrace_priv_proc(state, mstate)) 2966 return (0); 2967 2968 if ((lwp = curthread->t_lwp) == NULL) { 2969 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2970 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2971 return (0); 2972 } 2973 2974 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2975 } 2976 2977 case DIF_VAR_VMREGS: { 2978 uint64_t rval; 2979 2980 if (!dtrace_priv_kernel(state)) 2981 return (0); 2982 2983 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2984 2985 rval = dtrace_getvmreg(ndx, 2986 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags); 2987 2988 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2989 2990 return (rval); 2991 } 2992 2993 case DIF_VAR_CURTHREAD: 2994 if (!dtrace_priv_proc(state, mstate)) 2995 return (0); 2996 return ((uint64_t)(uintptr_t)curthread); 2997 2998 case DIF_VAR_TIMESTAMP: 2999 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 3000 mstate->dtms_timestamp = dtrace_gethrtime(); 3001 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 3002 } 3003 return (mstate->dtms_timestamp); 3004 3005 case DIF_VAR_VTIMESTAMP: 3006 ASSERT(dtrace_vtime_references != 0); 3007 return (curthread->t_dtrace_vtime); 3008 3009 case DIF_VAR_WALLTIMESTAMP: 3010 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 3011 mstate->dtms_walltimestamp = dtrace_gethrestime(); 3012 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 3013 } 3014 return (mstate->dtms_walltimestamp); 3015 3016 case DIF_VAR_IPL: 3017 if (!dtrace_priv_kernel(state)) 3018 return (0); 3019 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 3020 mstate->dtms_ipl = dtrace_getipl(); 3021 mstate->dtms_present |= DTRACE_MSTATE_IPL; 3022 } 3023 return (mstate->dtms_ipl); 3024 3025 case DIF_VAR_EPID: 3026 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 3027 return (mstate->dtms_epid); 3028 3029 case DIF_VAR_ID: 3030 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3031 return (mstate->dtms_probe->dtpr_id); 3032 3033 case DIF_VAR_STACKDEPTH: 3034 if (!dtrace_priv_kernel(state)) 3035 return (0); 3036 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 3037 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3038 3039 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 3040 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 3041 } 3042 return (mstate->dtms_stackdepth); 3043 3044 case DIF_VAR_USTACKDEPTH: 3045 if (!dtrace_priv_proc(state, mstate)) 3046 return (0); 3047 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 3048 /* 3049 * See comment in DIF_VAR_PID. 3050 */ 3051 if (DTRACE_ANCHORED(mstate->dtms_probe) && 3052 CPU_ON_INTR(CPU)) { 3053 mstate->dtms_ustackdepth = 0; 3054 } else { 3055 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3056 mstate->dtms_ustackdepth = 3057 dtrace_getustackdepth(); 3058 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3059 } 3060 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 3061 } 3062 return (mstate->dtms_ustackdepth); 3063 3064 case DIF_VAR_CALLER: 3065 if (!dtrace_priv_kernel(state)) 3066 return (0); 3067 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 3068 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3069 3070 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 3071 /* 3072 * If this is an unanchored probe, we are 3073 * required to go through the slow path: 3074 * dtrace_caller() only guarantees correct 3075 * results for anchored probes. 3076 */ 3077 pc_t caller[2]; 3078 3079 dtrace_getpcstack(caller, 2, aframes, 3080 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 3081 mstate->dtms_caller = caller[1]; 3082 } else if ((mstate->dtms_caller = 3083 dtrace_caller(aframes)) == -1) { 3084 /* 3085 * We have failed to do this the quick way; 3086 * we must resort to the slower approach of 3087 * calling dtrace_getpcstack(). 3088 */ 3089 pc_t caller; 3090 3091 dtrace_getpcstack(&caller, 1, aframes, NULL); 3092 mstate->dtms_caller = caller; 3093 } 3094 3095 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3096 } 3097 return (mstate->dtms_caller); 3098 3099 case DIF_VAR_UCALLER: 3100 if (!dtrace_priv_proc(state, mstate)) 3101 return (0); 3102 3103 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3104 uint64_t ustack[3]; 3105 3106 /* 3107 * dtrace_getupcstack() fills in the first uint64_t 3108 * with the current PID. The second uint64_t will 3109 * be the program counter at user-level. The third 3110 * uint64_t will contain the caller, which is what 3111 * we're after. 3112 */ 3113 ustack[2] = NULL; 3114 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3115 dtrace_getupcstack(ustack, 3); 3116 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3117 mstate->dtms_ucaller = ustack[2]; 3118 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3119 } 3120 3121 return (mstate->dtms_ucaller); 3122 3123 case DIF_VAR_PROBEPROV: 3124 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3125 return (dtrace_dif_varstr( 3126 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3127 state, mstate)); 3128 3129 case DIF_VAR_PROBEMOD: 3130 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3131 return (dtrace_dif_varstr( 3132 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3133 state, mstate)); 3134 3135 case DIF_VAR_PROBEFUNC: 3136 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3137 return (dtrace_dif_varstr( 3138 (uintptr_t)mstate->dtms_probe->dtpr_func, 3139 state, mstate)); 3140 3141 case DIF_VAR_PROBENAME: 3142 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3143 return (dtrace_dif_varstr( 3144 (uintptr_t)mstate->dtms_probe->dtpr_name, 3145 state, mstate)); 3146 3147 case DIF_VAR_PID: 3148 if (!dtrace_priv_proc(state, mstate)) 3149 return (0); 3150 3151 /* 3152 * Note that we are assuming that an unanchored probe is 3153 * always due to a high-level interrupt. (And we're assuming 3154 * that there is only a single high level interrupt.) 3155 */ 3156 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3157 return (pid0.pid_id); 3158 3159 /* 3160 * It is always safe to dereference one's own t_procp pointer: 3161 * it always points to a valid, allocated proc structure. 3162 * Further, it is always safe to dereference the p_pidp member 3163 * of one's own proc structure. (These are truisms becuase 3164 * threads and processes don't clean up their own state -- 3165 * they leave that task to whomever reaps them.) 3166 */ 3167 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3168 3169 case DIF_VAR_PPID: 3170 if (!dtrace_priv_proc(state, mstate)) 3171 return (0); 3172 3173 /* 3174 * See comment in DIF_VAR_PID. 3175 */ 3176 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3177 return (pid0.pid_id); 3178 3179 /* 3180 * It is always safe to dereference one's own t_procp pointer: 3181 * it always points to a valid, allocated proc structure. 3182 * (This is true because threads don't clean up their own 3183 * state -- they leave that task to whomever reaps them.) 3184 */ 3185 return ((uint64_t)curthread->t_procp->p_ppid); 3186 3187 case DIF_VAR_TID: 3188 /* 3189 * See comment in DIF_VAR_PID. 3190 */ 3191 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3192 return (0); 3193 3194 return ((uint64_t)curthread->t_tid); 3195 3196 case DIF_VAR_EXECNAME: 3197 if (!dtrace_priv_proc(state, mstate)) 3198 return (0); 3199 3200 /* 3201 * See comment in DIF_VAR_PID. 3202 */ 3203 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3204 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3205 3206 /* 3207 * It is always safe to dereference one's own t_procp pointer: 3208 * it always points to a valid, allocated proc structure. 3209 * (This is true because threads don't clean up their own 3210 * state -- they leave that task to whomever reaps them.) 3211 */ 3212 return (dtrace_dif_varstr( 3213 (uintptr_t)curthread->t_procp->p_user.u_comm, 3214 state, mstate)); 3215 3216 case DIF_VAR_ZONENAME: 3217 if (!dtrace_priv_proc(state, mstate)) 3218 return (0); 3219 3220 /* 3221 * See comment in DIF_VAR_PID. 3222 */ 3223 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3224 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3225 3226 /* 3227 * It is always safe to dereference one's own t_procp pointer: 3228 * it always points to a valid, allocated proc structure. 3229 * (This is true because threads don't clean up their own 3230 * state -- they leave that task to whomever reaps them.) 3231 */ 3232 return (dtrace_dif_varstr( 3233 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3234 state, mstate)); 3235 3236 case DIF_VAR_UID: 3237 if (!dtrace_priv_proc(state, mstate)) 3238 return (0); 3239 3240 /* 3241 * See comment in DIF_VAR_PID. 3242 */ 3243 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3244 return ((uint64_t)p0.p_cred->cr_uid); 3245 3246 /* 3247 * It is always safe to dereference one's own t_procp pointer: 3248 * it always points to a valid, allocated proc structure. 3249 * (This is true because threads don't clean up their own 3250 * state -- they leave that task to whomever reaps them.) 3251 * 3252 * Additionally, it is safe to dereference one's own process 3253 * credential, since this is never NULL after process birth. 3254 */ 3255 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3256 3257 case DIF_VAR_GID: 3258 if (!dtrace_priv_proc(state, mstate)) 3259 return (0); 3260 3261 /* 3262 * See comment in DIF_VAR_PID. 3263 */ 3264 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3265 return ((uint64_t)p0.p_cred->cr_gid); 3266 3267 /* 3268 * It is always safe to dereference one's own t_procp pointer: 3269 * it always points to a valid, allocated proc structure. 3270 * (This is true because threads don't clean up their own 3271 * state -- they leave that task to whomever reaps them.) 3272 * 3273 * Additionally, it is safe to dereference one's own process 3274 * credential, since this is never NULL after process birth. 3275 */ 3276 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3277 3278 case DIF_VAR_ERRNO: { 3279 klwp_t *lwp; 3280 if (!dtrace_priv_proc(state, mstate)) 3281 return (0); 3282 3283 /* 3284 * See comment in DIF_VAR_PID. 3285 */ 3286 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3287 return (0); 3288 3289 /* 3290 * It is always safe to dereference one's own t_lwp pointer in 3291 * the event that this pointer is non-NULL. (This is true 3292 * because threads and lwps don't clean up their own state -- 3293 * they leave that task to whomever reaps them.) 3294 */ 3295 if ((lwp = curthread->t_lwp) == NULL) 3296 return (0); 3297 3298 return ((uint64_t)lwp->lwp_errno); 3299 } 3300 default: 3301 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3302 return (0); 3303 } 3304 } 3305 3306 /* 3307 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3308 * Notice that we don't bother validating the proper number of arguments or 3309 * their types in the tuple stack. This isn't needed because all argument 3310 * interpretation is safe because of our load safety -- the worst that can 3311 * happen is that a bogus program can obtain bogus results. 3312 */ 3313 static void 3314 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3315 dtrace_key_t *tupregs, int nargs, 3316 dtrace_mstate_t *mstate, dtrace_state_t *state) 3317 { 3318 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3319 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3320 dtrace_vstate_t *vstate = &state->dts_vstate; 3321 3322 union { 3323 mutex_impl_t mi; 3324 uint64_t mx; 3325 } m; 3326 3327 union { 3328 krwlock_t ri; 3329 uintptr_t rw; 3330 } r; 3331 3332 switch (subr) { 3333 case DIF_SUBR_RAND: 3334 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3335 break; 3336 3337 case DIF_SUBR_MUTEX_OWNED: 3338 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3339 mstate, vstate)) { 3340 regs[rd] = NULL; 3341 break; 3342 } 3343 3344 m.mx = dtrace_load64(tupregs[0].dttk_value); 3345 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3346 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3347 else 3348 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3349 break; 3350 3351 case DIF_SUBR_MUTEX_OWNER: 3352 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3353 mstate, vstate)) { 3354 regs[rd] = NULL; 3355 break; 3356 } 3357 3358 m.mx = dtrace_load64(tupregs[0].dttk_value); 3359 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3360 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3361 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3362 else 3363 regs[rd] = 0; 3364 break; 3365 3366 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3367 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3368 mstate, vstate)) { 3369 regs[rd] = NULL; 3370 break; 3371 } 3372 3373 m.mx = dtrace_load64(tupregs[0].dttk_value); 3374 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3375 break; 3376 3377 case DIF_SUBR_MUTEX_TYPE_SPIN: 3378 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3379 mstate, vstate)) { 3380 regs[rd] = NULL; 3381 break; 3382 } 3383 3384 m.mx = dtrace_load64(tupregs[0].dttk_value); 3385 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3386 break; 3387 3388 case DIF_SUBR_RW_READ_HELD: { 3389 uintptr_t tmp; 3390 3391 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3392 mstate, vstate)) { 3393 regs[rd] = NULL; 3394 break; 3395 } 3396 3397 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3398 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3399 break; 3400 } 3401 3402 case DIF_SUBR_RW_WRITE_HELD: 3403 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3404 mstate, vstate)) { 3405 regs[rd] = NULL; 3406 break; 3407 } 3408 3409 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3410 regs[rd] = _RW_WRITE_HELD(&r.ri); 3411 break; 3412 3413 case DIF_SUBR_RW_ISWRITER: 3414 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3415 mstate, vstate)) { 3416 regs[rd] = NULL; 3417 break; 3418 } 3419 3420 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3421 regs[rd] = _RW_ISWRITER(&r.ri); 3422 break; 3423 3424 case DIF_SUBR_BCOPY: { 3425 /* 3426 * We need to be sure that the destination is in the scratch 3427 * region -- no other region is allowed. 3428 */ 3429 uintptr_t src = tupregs[0].dttk_value; 3430 uintptr_t dest = tupregs[1].dttk_value; 3431 size_t size = tupregs[2].dttk_value; 3432 3433 if (!dtrace_inscratch(dest, size, mstate)) { 3434 *flags |= CPU_DTRACE_BADADDR; 3435 *illval = regs[rd]; 3436 break; 3437 } 3438 3439 if (!dtrace_canload(src, size, mstate, vstate)) { 3440 regs[rd] = NULL; 3441 break; 3442 } 3443 3444 dtrace_bcopy((void *)src, (void *)dest, size); 3445 break; 3446 } 3447 3448 case DIF_SUBR_ALLOCA: 3449 case DIF_SUBR_COPYIN: { 3450 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3451 uint64_t size = 3452 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3453 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3454 3455 /* 3456 * This action doesn't require any credential checks since 3457 * probes will not activate in user contexts to which the 3458 * enabling user does not have permissions. 3459 */ 3460 3461 /* 3462 * Rounding up the user allocation size could have overflowed 3463 * a large, bogus allocation (like -1ULL) to 0. 3464 */ 3465 if (scratch_size < size || 3466 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3467 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3468 regs[rd] = NULL; 3469 break; 3470 } 3471 3472 if (subr == DIF_SUBR_COPYIN) { 3473 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3474 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3475 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3476 } 3477 3478 mstate->dtms_scratch_ptr += scratch_size; 3479 regs[rd] = dest; 3480 break; 3481 } 3482 3483 case DIF_SUBR_COPYINTO: { 3484 uint64_t size = tupregs[1].dttk_value; 3485 uintptr_t dest = tupregs[2].dttk_value; 3486 3487 /* 3488 * This action doesn't require any credential checks since 3489 * probes will not activate in user contexts to which the 3490 * enabling user does not have permissions. 3491 */ 3492 if (!dtrace_inscratch(dest, size, mstate)) { 3493 *flags |= CPU_DTRACE_BADADDR; 3494 *illval = regs[rd]; 3495 break; 3496 } 3497 3498 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3499 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3500 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3501 break; 3502 } 3503 3504 case DIF_SUBR_COPYINSTR: { 3505 uintptr_t dest = mstate->dtms_scratch_ptr; 3506 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3507 3508 if (nargs > 1 && tupregs[1].dttk_value < size) 3509 size = tupregs[1].dttk_value + 1; 3510 3511 /* 3512 * This action doesn't require any credential checks since 3513 * probes will not activate in user contexts to which the 3514 * enabling user does not have permissions. 3515 */ 3516 if (!DTRACE_INSCRATCH(mstate, size)) { 3517 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3518 regs[rd] = NULL; 3519 break; 3520 } 3521 3522 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3523 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3524 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3525 3526 ((char *)dest)[size - 1] = '\0'; 3527 mstate->dtms_scratch_ptr += size; 3528 regs[rd] = dest; 3529 break; 3530 } 3531 3532 case DIF_SUBR_MSGSIZE: 3533 case DIF_SUBR_MSGDSIZE: { 3534 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3535 uintptr_t wptr, rptr; 3536 size_t count = 0; 3537 int cont = 0; 3538 3539 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3540 3541 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3542 vstate)) { 3543 regs[rd] = NULL; 3544 break; 3545 } 3546 3547 wptr = dtrace_loadptr(baddr + 3548 offsetof(mblk_t, b_wptr)); 3549 3550 rptr = dtrace_loadptr(baddr + 3551 offsetof(mblk_t, b_rptr)); 3552 3553 if (wptr < rptr) { 3554 *flags |= CPU_DTRACE_BADADDR; 3555 *illval = tupregs[0].dttk_value; 3556 break; 3557 } 3558 3559 daddr = dtrace_loadptr(baddr + 3560 offsetof(mblk_t, b_datap)); 3561 3562 baddr = dtrace_loadptr(baddr + 3563 offsetof(mblk_t, b_cont)); 3564 3565 /* 3566 * We want to prevent against denial-of-service here, 3567 * so we're only going to search the list for 3568 * dtrace_msgdsize_max mblks. 3569 */ 3570 if (cont++ > dtrace_msgdsize_max) { 3571 *flags |= CPU_DTRACE_ILLOP; 3572 break; 3573 } 3574 3575 if (subr == DIF_SUBR_MSGDSIZE) { 3576 if (dtrace_load8(daddr + 3577 offsetof(dblk_t, db_type)) != M_DATA) 3578 continue; 3579 } 3580 3581 count += wptr - rptr; 3582 } 3583 3584 if (!(*flags & CPU_DTRACE_FAULT)) 3585 regs[rd] = count; 3586 3587 break; 3588 } 3589 3590 case DIF_SUBR_PROGENYOF: { 3591 pid_t pid = tupregs[0].dttk_value; 3592 proc_t *p; 3593 int rval = 0; 3594 3595 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3596 3597 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3598 if (p->p_pidp->pid_id == pid) { 3599 rval = 1; 3600 break; 3601 } 3602 } 3603 3604 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3605 3606 regs[rd] = rval; 3607 break; 3608 } 3609 3610 case DIF_SUBR_SPECULATION: 3611 regs[rd] = dtrace_speculation(state); 3612 break; 3613 3614 case DIF_SUBR_COPYOUT: { 3615 uintptr_t kaddr = tupregs[0].dttk_value; 3616 uintptr_t uaddr = tupregs[1].dttk_value; 3617 uint64_t size = tupregs[2].dttk_value; 3618 3619 if (!dtrace_destructive_disallow && 3620 dtrace_priv_proc_control(state, mstate) && 3621 !dtrace_istoxic(kaddr, size)) { 3622 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3623 dtrace_copyout(kaddr, uaddr, size, flags); 3624 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3625 } 3626 break; 3627 } 3628 3629 case DIF_SUBR_COPYOUTSTR: { 3630 uintptr_t kaddr = tupregs[0].dttk_value; 3631 uintptr_t uaddr = tupregs[1].dttk_value; 3632 uint64_t size = tupregs[2].dttk_value; 3633 3634 if (!dtrace_destructive_disallow && 3635 dtrace_priv_proc_control(state, mstate) && 3636 !dtrace_istoxic(kaddr, size)) { 3637 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3638 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3639 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3640 } 3641 break; 3642 } 3643 3644 case DIF_SUBR_STRLEN: { 3645 size_t sz; 3646 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3647 sz = dtrace_strlen((char *)addr, 3648 state->dts_options[DTRACEOPT_STRSIZE]); 3649 3650 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3651 regs[rd] = NULL; 3652 break; 3653 } 3654 3655 regs[rd] = sz; 3656 3657 break; 3658 } 3659 3660 case DIF_SUBR_STRCHR: 3661 case DIF_SUBR_STRRCHR: { 3662 /* 3663 * We're going to iterate over the string looking for the 3664 * specified character. We will iterate until we have reached 3665 * the string length or we have found the character. If this 3666 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3667 * of the specified character instead of the first. 3668 */ 3669 uintptr_t saddr = tupregs[0].dttk_value; 3670 uintptr_t addr = tupregs[0].dttk_value; 3671 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3672 char c, target = (char)tupregs[1].dttk_value; 3673 3674 for (regs[rd] = NULL; addr < limit; addr++) { 3675 if ((c = dtrace_load8(addr)) == target) { 3676 regs[rd] = addr; 3677 3678 if (subr == DIF_SUBR_STRCHR) 3679 break; 3680 } 3681 3682 if (c == '\0') 3683 break; 3684 } 3685 3686 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3687 regs[rd] = NULL; 3688 break; 3689 } 3690 3691 break; 3692 } 3693 3694 case DIF_SUBR_STRSTR: 3695 case DIF_SUBR_INDEX: 3696 case DIF_SUBR_RINDEX: { 3697 /* 3698 * We're going to iterate over the string looking for the 3699 * specified string. We will iterate until we have reached 3700 * the string length or we have found the string. (Yes, this 3701 * is done in the most naive way possible -- but considering 3702 * that the string we're searching for is likely to be 3703 * relatively short, the complexity of Rabin-Karp or similar 3704 * hardly seems merited.) 3705 */ 3706 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3707 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3708 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3709 size_t len = dtrace_strlen(addr, size); 3710 size_t sublen = dtrace_strlen(substr, size); 3711 char *limit = addr + len, *orig = addr; 3712 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3713 int inc = 1; 3714 3715 regs[rd] = notfound; 3716 3717 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3718 regs[rd] = NULL; 3719 break; 3720 } 3721 3722 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3723 vstate)) { 3724 regs[rd] = NULL; 3725 break; 3726 } 3727 3728 /* 3729 * strstr() and index()/rindex() have similar semantics if 3730 * both strings are the empty string: strstr() returns a 3731 * pointer to the (empty) string, and index() and rindex() 3732 * both return index 0 (regardless of any position argument). 3733 */ 3734 if (sublen == 0 && len == 0) { 3735 if (subr == DIF_SUBR_STRSTR) 3736 regs[rd] = (uintptr_t)addr; 3737 else 3738 regs[rd] = 0; 3739 break; 3740 } 3741 3742 if (subr != DIF_SUBR_STRSTR) { 3743 if (subr == DIF_SUBR_RINDEX) { 3744 limit = orig - 1; 3745 addr += len; 3746 inc = -1; 3747 } 3748 3749 /* 3750 * Both index() and rindex() take an optional position 3751 * argument that denotes the starting position. 3752 */ 3753 if (nargs == 3) { 3754 int64_t pos = (int64_t)tupregs[2].dttk_value; 3755 3756 /* 3757 * If the position argument to index() is 3758 * negative, Perl implicitly clamps it at 3759 * zero. This semantic is a little surprising 3760 * given the special meaning of negative 3761 * positions to similar Perl functions like 3762 * substr(), but it appears to reflect a 3763 * notion that index() can start from a 3764 * negative index and increment its way up to 3765 * the string. Given this notion, Perl's 3766 * rindex() is at least self-consistent in 3767 * that it implicitly clamps positions greater 3768 * than the string length to be the string 3769 * length. Where Perl completely loses 3770 * coherence, however, is when the specified 3771 * substring is the empty string (""). In 3772 * this case, even if the position is 3773 * negative, rindex() returns 0 -- and even if 3774 * the position is greater than the length, 3775 * index() returns the string length. These 3776 * semantics violate the notion that index() 3777 * should never return a value less than the 3778 * specified position and that rindex() should 3779 * never return a value greater than the 3780 * specified position. (One assumes that 3781 * these semantics are artifacts of Perl's 3782 * implementation and not the results of 3783 * deliberate design -- it beggars belief that 3784 * even Larry Wall could desire such oddness.) 3785 * While in the abstract one would wish for 3786 * consistent position semantics across 3787 * substr(), index() and rindex() -- or at the 3788 * very least self-consistent position 3789 * semantics for index() and rindex() -- we 3790 * instead opt to keep with the extant Perl 3791 * semantics, in all their broken glory. (Do 3792 * we have more desire to maintain Perl's 3793 * semantics than Perl does? Probably.) 3794 */ 3795 if (subr == DIF_SUBR_RINDEX) { 3796 if (pos < 0) { 3797 if (sublen == 0) 3798 regs[rd] = 0; 3799 break; 3800 } 3801 3802 if (pos > len) 3803 pos = len; 3804 } else { 3805 if (pos < 0) 3806 pos = 0; 3807 3808 if (pos >= len) { 3809 if (sublen == 0) 3810 regs[rd] = len; 3811 break; 3812 } 3813 } 3814 3815 addr = orig + pos; 3816 } 3817 } 3818 3819 for (regs[rd] = notfound; addr != limit; addr += inc) { 3820 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3821 if (subr != DIF_SUBR_STRSTR) { 3822 /* 3823 * As D index() and rindex() are 3824 * modeled on Perl (and not on awk), 3825 * we return a zero-based (and not a 3826 * one-based) index. (For you Perl 3827 * weenies: no, we're not going to add 3828 * $[ -- and shouldn't you be at a con 3829 * or something?) 3830 */ 3831 regs[rd] = (uintptr_t)(addr - orig); 3832 break; 3833 } 3834 3835 ASSERT(subr == DIF_SUBR_STRSTR); 3836 regs[rd] = (uintptr_t)addr; 3837 break; 3838 } 3839 } 3840 3841 break; 3842 } 3843 3844 case DIF_SUBR_STRTOK: { 3845 uintptr_t addr = tupregs[0].dttk_value; 3846 uintptr_t tokaddr = tupregs[1].dttk_value; 3847 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3848 uintptr_t limit, toklimit = tokaddr + size; 3849 uint8_t c, tokmap[32]; /* 256 / 8 */ 3850 char *dest = (char *)mstate->dtms_scratch_ptr; 3851 int i; 3852 3853 /* 3854 * Check both the token buffer and (later) the input buffer, 3855 * since both could be non-scratch addresses. 3856 */ 3857 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3858 regs[rd] = NULL; 3859 break; 3860 } 3861 3862 if (!DTRACE_INSCRATCH(mstate, size)) { 3863 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3864 regs[rd] = NULL; 3865 break; 3866 } 3867 3868 if (addr == NULL) { 3869 /* 3870 * If the address specified is NULL, we use our saved 3871 * strtok pointer from the mstate. Note that this 3872 * means that the saved strtok pointer is _only_ 3873 * valid within multiple enablings of the same probe -- 3874 * it behaves like an implicit clause-local variable. 3875 */ 3876 addr = mstate->dtms_strtok; 3877 } else { 3878 /* 3879 * If the user-specified address is non-NULL we must 3880 * access check it. This is the only time we have 3881 * a chance to do so, since this address may reside 3882 * in the string table of this clause-- future calls 3883 * (when we fetch addr from mstate->dtms_strtok) 3884 * would fail this access check. 3885 */ 3886 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3887 regs[rd] = NULL; 3888 break; 3889 } 3890 } 3891 3892 /* 3893 * First, zero the token map, and then process the token 3894 * string -- setting a bit in the map for every character 3895 * found in the token string. 3896 */ 3897 for (i = 0; i < sizeof (tokmap); i++) 3898 tokmap[i] = 0; 3899 3900 for (; tokaddr < toklimit; tokaddr++) { 3901 if ((c = dtrace_load8(tokaddr)) == '\0') 3902 break; 3903 3904 ASSERT((c >> 3) < sizeof (tokmap)); 3905 tokmap[c >> 3] |= (1 << (c & 0x7)); 3906 } 3907 3908 for (limit = addr + size; addr < limit; addr++) { 3909 /* 3910 * We're looking for a character that is _not_ contained 3911 * in the token string. 3912 */ 3913 if ((c = dtrace_load8(addr)) == '\0') 3914 break; 3915 3916 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3917 break; 3918 } 3919 3920 if (c == '\0') { 3921 /* 3922 * We reached the end of the string without finding 3923 * any character that was not in the token string. 3924 * We return NULL in this case, and we set the saved 3925 * address to NULL as well. 3926 */ 3927 regs[rd] = NULL; 3928 mstate->dtms_strtok = NULL; 3929 break; 3930 } 3931 3932 /* 3933 * From here on, we're copying into the destination string. 3934 */ 3935 for (i = 0; addr < limit && i < size - 1; addr++) { 3936 if ((c = dtrace_load8(addr)) == '\0') 3937 break; 3938 3939 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3940 break; 3941 3942 ASSERT(i < size); 3943 dest[i++] = c; 3944 } 3945 3946 ASSERT(i < size); 3947 dest[i] = '\0'; 3948 regs[rd] = (uintptr_t)dest; 3949 mstate->dtms_scratch_ptr += size; 3950 mstate->dtms_strtok = addr; 3951 break; 3952 } 3953 3954 case DIF_SUBR_SUBSTR: { 3955 uintptr_t s = tupregs[0].dttk_value; 3956 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3957 char *d = (char *)mstate->dtms_scratch_ptr; 3958 int64_t index = (int64_t)tupregs[1].dttk_value; 3959 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3960 size_t len = dtrace_strlen((char *)s, size); 3961 int64_t i; 3962 3963 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3964 regs[rd] = NULL; 3965 break; 3966 } 3967 3968 if (!DTRACE_INSCRATCH(mstate, size)) { 3969 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3970 regs[rd] = NULL; 3971 break; 3972 } 3973 3974 if (nargs <= 2) 3975 remaining = (int64_t)size; 3976 3977 if (index < 0) { 3978 index += len; 3979 3980 if (index < 0 && index + remaining > 0) { 3981 remaining += index; 3982 index = 0; 3983 } 3984 } 3985 3986 if (index >= len || index < 0) { 3987 remaining = 0; 3988 } else if (remaining < 0) { 3989 remaining += len - index; 3990 } else if (index + remaining > size) { 3991 remaining = size - index; 3992 } 3993 3994 for (i = 0; i < remaining; i++) { 3995 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 3996 break; 3997 } 3998 3999 d[i] = '\0'; 4000 4001 mstate->dtms_scratch_ptr += size; 4002 regs[rd] = (uintptr_t)d; 4003 break; 4004 } 4005 4006 case DIF_SUBR_TOUPPER: 4007 case DIF_SUBR_TOLOWER: { 4008 uintptr_t s = tupregs[0].dttk_value; 4009 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4010 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4011 size_t len = dtrace_strlen((char *)s, size); 4012 char lower, upper, convert; 4013 int64_t i; 4014 4015 if (subr == DIF_SUBR_TOUPPER) { 4016 lower = 'a'; 4017 upper = 'z'; 4018 convert = 'A'; 4019 } else { 4020 lower = 'A'; 4021 upper = 'Z'; 4022 convert = 'a'; 4023 } 4024 4025 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4026 regs[rd] = NULL; 4027 break; 4028 } 4029 4030 if (!DTRACE_INSCRATCH(mstate, size)) { 4031 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4032 regs[rd] = NULL; 4033 break; 4034 } 4035 4036 for (i = 0; i < size - 1; i++) { 4037 if ((c = dtrace_load8(s + i)) == '\0') 4038 break; 4039 4040 if (c >= lower && c <= upper) 4041 c = convert + (c - lower); 4042 4043 dest[i] = c; 4044 } 4045 4046 ASSERT(i < size); 4047 dest[i] = '\0'; 4048 regs[rd] = (uintptr_t)dest; 4049 mstate->dtms_scratch_ptr += size; 4050 break; 4051 } 4052 4053 case DIF_SUBR_GETMAJOR: 4054 #ifdef _LP64 4055 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4056 #else 4057 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4058 #endif 4059 break; 4060 4061 case DIF_SUBR_GETMINOR: 4062 #ifdef _LP64 4063 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4064 #else 4065 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4066 #endif 4067 break; 4068 4069 case DIF_SUBR_DDI_PATHNAME: { 4070 /* 4071 * This one is a galactic mess. We are going to roughly 4072 * emulate ddi_pathname(), but it's made more complicated 4073 * by the fact that we (a) want to include the minor name and 4074 * (b) must proceed iteratively instead of recursively. 4075 */ 4076 uintptr_t dest = mstate->dtms_scratch_ptr; 4077 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4078 char *start = (char *)dest, *end = start + size - 1; 4079 uintptr_t daddr = tupregs[0].dttk_value; 4080 int64_t minor = (int64_t)tupregs[1].dttk_value; 4081 char *s; 4082 int i, len, depth = 0; 4083 4084 /* 4085 * Due to all the pointer jumping we do and context we must 4086 * rely upon, we just mandate that the user must have kernel 4087 * read privileges to use this routine. 4088 */ 4089 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4090 *flags |= CPU_DTRACE_KPRIV; 4091 *illval = daddr; 4092 regs[rd] = NULL; 4093 } 4094 4095 if (!DTRACE_INSCRATCH(mstate, size)) { 4096 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4097 regs[rd] = NULL; 4098 break; 4099 } 4100 4101 *end = '\0'; 4102 4103 /* 4104 * We want to have a name for the minor. In order to do this, 4105 * we need to walk the minor list from the devinfo. We want 4106 * to be sure that we don't infinitely walk a circular list, 4107 * so we check for circularity by sending a scout pointer 4108 * ahead two elements for every element that we iterate over; 4109 * if the list is circular, these will ultimately point to the 4110 * same element. You may recognize this little trick as the 4111 * answer to a stupid interview question -- one that always 4112 * seems to be asked by those who had to have it laboriously 4113 * explained to them, and who can't even concisely describe 4114 * the conditions under which one would be forced to resort to 4115 * this technique. Needless to say, those conditions are 4116 * found here -- and probably only here. Is this the only use 4117 * of this infamous trick in shipping, production code? If it 4118 * isn't, it probably should be... 4119 */ 4120 if (minor != -1) { 4121 uintptr_t maddr = dtrace_loadptr(daddr + 4122 offsetof(struct dev_info, devi_minor)); 4123 4124 uintptr_t next = offsetof(struct ddi_minor_data, next); 4125 uintptr_t name = offsetof(struct ddi_minor_data, 4126 d_minor) + offsetof(struct ddi_minor, name); 4127 uintptr_t dev = offsetof(struct ddi_minor_data, 4128 d_minor) + offsetof(struct ddi_minor, dev); 4129 uintptr_t scout; 4130 4131 if (maddr != NULL) 4132 scout = dtrace_loadptr(maddr + next); 4133 4134 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4135 uint64_t m; 4136 #ifdef _LP64 4137 m = dtrace_load64(maddr + dev) & MAXMIN64; 4138 #else 4139 m = dtrace_load32(maddr + dev) & MAXMIN; 4140 #endif 4141 if (m != minor) { 4142 maddr = dtrace_loadptr(maddr + next); 4143 4144 if (scout == NULL) 4145 continue; 4146 4147 scout = dtrace_loadptr(scout + next); 4148 4149 if (scout == NULL) 4150 continue; 4151 4152 scout = dtrace_loadptr(scout + next); 4153 4154 if (scout == NULL) 4155 continue; 4156 4157 if (scout == maddr) { 4158 *flags |= CPU_DTRACE_ILLOP; 4159 break; 4160 } 4161 4162 continue; 4163 } 4164 4165 /* 4166 * We have the minor data. Now we need to 4167 * copy the minor's name into the end of the 4168 * pathname. 4169 */ 4170 s = (char *)dtrace_loadptr(maddr + name); 4171 len = dtrace_strlen(s, size); 4172 4173 if (*flags & CPU_DTRACE_FAULT) 4174 break; 4175 4176 if (len != 0) { 4177 if ((end -= (len + 1)) < start) 4178 break; 4179 4180 *end = ':'; 4181 } 4182 4183 for (i = 1; i <= len; i++) 4184 end[i] = dtrace_load8((uintptr_t)s++); 4185 break; 4186 } 4187 } 4188 4189 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4190 ddi_node_state_t devi_state; 4191 4192 devi_state = dtrace_load32(daddr + 4193 offsetof(struct dev_info, devi_node_state)); 4194 4195 if (*flags & CPU_DTRACE_FAULT) 4196 break; 4197 4198 if (devi_state >= DS_INITIALIZED) { 4199 s = (char *)dtrace_loadptr(daddr + 4200 offsetof(struct dev_info, devi_addr)); 4201 len = dtrace_strlen(s, size); 4202 4203 if (*flags & CPU_DTRACE_FAULT) 4204 break; 4205 4206 if (len != 0) { 4207 if ((end -= (len + 1)) < start) 4208 break; 4209 4210 *end = '@'; 4211 } 4212 4213 for (i = 1; i <= len; i++) 4214 end[i] = dtrace_load8((uintptr_t)s++); 4215 } 4216 4217 /* 4218 * Now for the node name... 4219 */ 4220 s = (char *)dtrace_loadptr(daddr + 4221 offsetof(struct dev_info, devi_node_name)); 4222 4223 daddr = dtrace_loadptr(daddr + 4224 offsetof(struct dev_info, devi_parent)); 4225 4226 /* 4227 * If our parent is NULL (that is, if we're the root 4228 * node), we're going to use the special path 4229 * "devices". 4230 */ 4231 if (daddr == NULL) 4232 s = "devices"; 4233 4234 len = dtrace_strlen(s, size); 4235 if (*flags & CPU_DTRACE_FAULT) 4236 break; 4237 4238 if ((end -= (len + 1)) < start) 4239 break; 4240 4241 for (i = 1; i <= len; i++) 4242 end[i] = dtrace_load8((uintptr_t)s++); 4243 *end = '/'; 4244 4245 if (depth++ > dtrace_devdepth_max) { 4246 *flags |= CPU_DTRACE_ILLOP; 4247 break; 4248 } 4249 } 4250 4251 if (end < start) 4252 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4253 4254 if (daddr == NULL) { 4255 regs[rd] = (uintptr_t)end; 4256 mstate->dtms_scratch_ptr += size; 4257 } 4258 4259 break; 4260 } 4261 4262 case DIF_SUBR_STRJOIN: { 4263 char *d = (char *)mstate->dtms_scratch_ptr; 4264 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4265 uintptr_t s1 = tupregs[0].dttk_value; 4266 uintptr_t s2 = tupregs[1].dttk_value; 4267 int i = 0; 4268 4269 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4270 !dtrace_strcanload(s2, size, mstate, vstate)) { 4271 regs[rd] = NULL; 4272 break; 4273 } 4274 4275 if (!DTRACE_INSCRATCH(mstate, size)) { 4276 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4277 regs[rd] = NULL; 4278 break; 4279 } 4280 4281 for (;;) { 4282 if (i >= size) { 4283 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4284 regs[rd] = NULL; 4285 break; 4286 } 4287 4288 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4289 i--; 4290 break; 4291 } 4292 } 4293 4294 for (;;) { 4295 if (i >= size) { 4296 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4297 regs[rd] = NULL; 4298 break; 4299 } 4300 4301 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4302 break; 4303 } 4304 4305 if (i < size) { 4306 mstate->dtms_scratch_ptr += i; 4307 regs[rd] = (uintptr_t)d; 4308 } 4309 4310 break; 4311 } 4312 4313 case DIF_SUBR_LLTOSTR: { 4314 int64_t i = (int64_t)tupregs[0].dttk_value; 4315 uint64_t val, digit; 4316 uint64_t size = 65; /* enough room for 2^64 in binary */ 4317 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4318 int base = 10; 4319 4320 if (nargs > 1) { 4321 if ((base = tupregs[1].dttk_value) <= 1 || 4322 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4323 *flags |= CPU_DTRACE_ILLOP; 4324 break; 4325 } 4326 } 4327 4328 val = (base == 10 && i < 0) ? i * -1 : i; 4329 4330 if (!DTRACE_INSCRATCH(mstate, size)) { 4331 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4332 regs[rd] = NULL; 4333 break; 4334 } 4335 4336 for (*end-- = '\0'; val; val /= base) { 4337 if ((digit = val % base) <= '9' - '0') { 4338 *end-- = '0' + digit; 4339 } else { 4340 *end-- = 'a' + (digit - ('9' - '0') - 1); 4341 } 4342 } 4343 4344 if (i == 0 && base == 16) 4345 *end-- = '0'; 4346 4347 if (base == 16) 4348 *end-- = 'x'; 4349 4350 if (i == 0 || base == 8 || base == 16) 4351 *end-- = '0'; 4352 4353 if (i < 0 && base == 10) 4354 *end-- = '-'; 4355 4356 regs[rd] = (uintptr_t)end + 1; 4357 mstate->dtms_scratch_ptr += size; 4358 break; 4359 } 4360 4361 case DIF_SUBR_HTONS: 4362 case DIF_SUBR_NTOHS: 4363 #ifdef _BIG_ENDIAN 4364 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4365 #else 4366 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4367 #endif 4368 break; 4369 4370 4371 case DIF_SUBR_HTONL: 4372 case DIF_SUBR_NTOHL: 4373 #ifdef _BIG_ENDIAN 4374 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4375 #else 4376 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4377 #endif 4378 break; 4379 4380 4381 case DIF_SUBR_HTONLL: 4382 case DIF_SUBR_NTOHLL: 4383 #ifdef _BIG_ENDIAN 4384 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4385 #else 4386 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4387 #endif 4388 break; 4389 4390 4391 case DIF_SUBR_DIRNAME: 4392 case DIF_SUBR_BASENAME: { 4393 char *dest = (char *)mstate->dtms_scratch_ptr; 4394 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4395 uintptr_t src = tupregs[0].dttk_value; 4396 int i, j, len = dtrace_strlen((char *)src, size); 4397 int lastbase = -1, firstbase = -1, lastdir = -1; 4398 int start, end; 4399 4400 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4401 regs[rd] = NULL; 4402 break; 4403 } 4404 4405 if (!DTRACE_INSCRATCH(mstate, size)) { 4406 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4407 regs[rd] = NULL; 4408 break; 4409 } 4410 4411 /* 4412 * The basename and dirname for a zero-length string is 4413 * defined to be "." 4414 */ 4415 if (len == 0) { 4416 len = 1; 4417 src = (uintptr_t)"."; 4418 } 4419 4420 /* 4421 * Start from the back of the string, moving back toward the 4422 * front until we see a character that isn't a slash. That 4423 * character is the last character in the basename. 4424 */ 4425 for (i = len - 1; i >= 0; i--) { 4426 if (dtrace_load8(src + i) != '/') 4427 break; 4428 } 4429 4430 if (i >= 0) 4431 lastbase = i; 4432 4433 /* 4434 * Starting from the last character in the basename, move 4435 * towards the front until we find a slash. The character 4436 * that we processed immediately before that is the first 4437 * character in the basename. 4438 */ 4439 for (; i >= 0; i--) { 4440 if (dtrace_load8(src + i) == '/') 4441 break; 4442 } 4443 4444 if (i >= 0) 4445 firstbase = i + 1; 4446 4447 /* 4448 * Now keep going until we find a non-slash character. That 4449 * character is the last character in the dirname. 4450 */ 4451 for (; i >= 0; i--) { 4452 if (dtrace_load8(src + i) != '/') 4453 break; 4454 } 4455 4456 if (i >= 0) 4457 lastdir = i; 4458 4459 ASSERT(!(lastbase == -1 && firstbase != -1)); 4460 ASSERT(!(firstbase == -1 && lastdir != -1)); 4461 4462 if (lastbase == -1) { 4463 /* 4464 * We didn't find a non-slash character. We know that 4465 * the length is non-zero, so the whole string must be 4466 * slashes. In either the dirname or the basename 4467 * case, we return '/'. 4468 */ 4469 ASSERT(firstbase == -1); 4470 firstbase = lastbase = lastdir = 0; 4471 } 4472 4473 if (firstbase == -1) { 4474 /* 4475 * The entire string consists only of a basename 4476 * component. If we're looking for dirname, we need 4477 * to change our string to be just "."; if we're 4478 * looking for a basename, we'll just set the first 4479 * character of the basename to be 0. 4480 */ 4481 if (subr == DIF_SUBR_DIRNAME) { 4482 ASSERT(lastdir == -1); 4483 src = (uintptr_t)"."; 4484 lastdir = 0; 4485 } else { 4486 firstbase = 0; 4487 } 4488 } 4489 4490 if (subr == DIF_SUBR_DIRNAME) { 4491 if (lastdir == -1) { 4492 /* 4493 * We know that we have a slash in the name -- 4494 * or lastdir would be set to 0, above. And 4495 * because lastdir is -1, we know that this 4496 * slash must be the first character. (That 4497 * is, the full string must be of the form 4498 * "/basename".) In this case, the last 4499 * character of the directory name is 0. 4500 */ 4501 lastdir = 0; 4502 } 4503 4504 start = 0; 4505 end = lastdir; 4506 } else { 4507 ASSERT(subr == DIF_SUBR_BASENAME); 4508 ASSERT(firstbase != -1 && lastbase != -1); 4509 start = firstbase; 4510 end = lastbase; 4511 } 4512 4513 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4514 dest[j] = dtrace_load8(src + i); 4515 4516 dest[j] = '\0'; 4517 regs[rd] = (uintptr_t)dest; 4518 mstate->dtms_scratch_ptr += size; 4519 break; 4520 } 4521 4522 case DIF_SUBR_GETF: { 4523 uintptr_t fd = tupregs[0].dttk_value; 4524 uf_info_t *finfo = &curthread->t_procp->p_user.u_finfo; 4525 file_t *fp; 4526 4527 if (!dtrace_priv_proc(state, mstate)) { 4528 regs[rd] = NULL; 4529 break; 4530 } 4531 4532 /* 4533 * This is safe because fi_nfiles only increases, and the 4534 * fi_list array is not freed when the array size doubles. 4535 * (See the comment in flist_grow() for details on the 4536 * management of the u_finfo structure.) 4537 */ 4538 fp = fd < finfo->fi_nfiles ? finfo->fi_list[fd].uf_file : NULL; 4539 4540 mstate->dtms_getf = fp; 4541 regs[rd] = (uintptr_t)fp; 4542 break; 4543 } 4544 4545 case DIF_SUBR_CLEANPATH: { 4546 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4547 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4548 uintptr_t src = tupregs[0].dttk_value; 4549 int i = 0, j = 0; 4550 zone_t *z; 4551 4552 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4553 regs[rd] = NULL; 4554 break; 4555 } 4556 4557 if (!DTRACE_INSCRATCH(mstate, size)) { 4558 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4559 regs[rd] = NULL; 4560 break; 4561 } 4562 4563 /* 4564 * Move forward, loading each character. 4565 */ 4566 do { 4567 c = dtrace_load8(src + i++); 4568 next: 4569 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4570 break; 4571 4572 if (c != '/') { 4573 dest[j++] = c; 4574 continue; 4575 } 4576 4577 c = dtrace_load8(src + i++); 4578 4579 if (c == '/') { 4580 /* 4581 * We have two slashes -- we can just advance 4582 * to the next character. 4583 */ 4584 goto next; 4585 } 4586 4587 if (c != '.') { 4588 /* 4589 * This is not "." and it's not ".." -- we can 4590 * just store the "/" and this character and 4591 * drive on. 4592 */ 4593 dest[j++] = '/'; 4594 dest[j++] = c; 4595 continue; 4596 } 4597 4598 c = dtrace_load8(src + i++); 4599 4600 if (c == '/') { 4601 /* 4602 * This is a "/./" component. We're not going 4603 * to store anything in the destination buffer; 4604 * we're just going to go to the next component. 4605 */ 4606 goto next; 4607 } 4608 4609 if (c != '.') { 4610 /* 4611 * This is not ".." -- we can just store the 4612 * "/." and this character and continue 4613 * processing. 4614 */ 4615 dest[j++] = '/'; 4616 dest[j++] = '.'; 4617 dest[j++] = c; 4618 continue; 4619 } 4620 4621 c = dtrace_load8(src + i++); 4622 4623 if (c != '/' && c != '\0') { 4624 /* 4625 * This is not ".." -- it's "..[mumble]". 4626 * We'll store the "/.." and this character 4627 * and continue processing. 4628 */ 4629 dest[j++] = '/'; 4630 dest[j++] = '.'; 4631 dest[j++] = '.'; 4632 dest[j++] = c; 4633 continue; 4634 } 4635 4636 /* 4637 * This is "/../" or "/..\0". We need to back up 4638 * our destination pointer until we find a "/". 4639 */ 4640 i--; 4641 while (j != 0 && dest[--j] != '/') 4642 continue; 4643 4644 if (c == '\0') 4645 dest[++j] = '/'; 4646 } while (c != '\0'); 4647 4648 dest[j] = '\0'; 4649 4650 if (mstate->dtms_getf != NULL && 4651 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && 4652 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { 4653 /* 4654 * If we've done a getf() as a part of this ECB and we 4655 * don't have kernel access (and we're not in the global 4656 * zone), check if the path we cleaned up begins with 4657 * the zone's root path, and trim it off if so. Note 4658 * that this is an output cleanliness issue, not a 4659 * security issue: knowing one's zone root path does 4660 * not enable privilege escalation. 4661 */ 4662 if (strstr(dest, z->zone_rootpath) == dest) 4663 dest += strlen(z->zone_rootpath) - 1; 4664 } 4665 4666 regs[rd] = (uintptr_t)dest; 4667 mstate->dtms_scratch_ptr += size; 4668 break; 4669 } 4670 4671 case DIF_SUBR_INET_NTOA: 4672 case DIF_SUBR_INET_NTOA6: 4673 case DIF_SUBR_INET_NTOP: { 4674 size_t size; 4675 int af, argi, i; 4676 char *base, *end; 4677 4678 if (subr == DIF_SUBR_INET_NTOP) { 4679 af = (int)tupregs[0].dttk_value; 4680 argi = 1; 4681 } else { 4682 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4683 argi = 0; 4684 } 4685 4686 if (af == AF_INET) { 4687 ipaddr_t ip4; 4688 uint8_t *ptr8, val; 4689 4690 /* 4691 * Safely load the IPv4 address. 4692 */ 4693 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4694 4695 /* 4696 * Check an IPv4 string will fit in scratch. 4697 */ 4698 size = INET_ADDRSTRLEN; 4699 if (!DTRACE_INSCRATCH(mstate, size)) { 4700 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4701 regs[rd] = NULL; 4702 break; 4703 } 4704 base = (char *)mstate->dtms_scratch_ptr; 4705 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4706 4707 /* 4708 * Stringify as a dotted decimal quad. 4709 */ 4710 *end-- = '\0'; 4711 ptr8 = (uint8_t *)&ip4; 4712 for (i = 3; i >= 0; i--) { 4713 val = ptr8[i]; 4714 4715 if (val == 0) { 4716 *end-- = '0'; 4717 } else { 4718 for (; val; val /= 10) { 4719 *end-- = '0' + (val % 10); 4720 } 4721 } 4722 4723 if (i > 0) 4724 *end-- = '.'; 4725 } 4726 ASSERT(end + 1 >= base); 4727 4728 } else if (af == AF_INET6) { 4729 struct in6_addr ip6; 4730 int firstzero, tryzero, numzero, v6end; 4731 uint16_t val; 4732 const char digits[] = "0123456789abcdef"; 4733 4734 /* 4735 * Stringify using RFC 1884 convention 2 - 16 bit 4736 * hexadecimal values with a zero-run compression. 4737 * Lower case hexadecimal digits are used. 4738 * eg, fe80::214:4fff:fe0b:76c8. 4739 * The IPv4 embedded form is returned for inet_ntop, 4740 * just the IPv4 string is returned for inet_ntoa6. 4741 */ 4742 4743 /* 4744 * Safely load the IPv6 address. 4745 */ 4746 dtrace_bcopy( 4747 (void *)(uintptr_t)tupregs[argi].dttk_value, 4748 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4749 4750 /* 4751 * Check an IPv6 string will fit in scratch. 4752 */ 4753 size = INET6_ADDRSTRLEN; 4754 if (!DTRACE_INSCRATCH(mstate, size)) { 4755 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4756 regs[rd] = NULL; 4757 break; 4758 } 4759 base = (char *)mstate->dtms_scratch_ptr; 4760 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4761 *end-- = '\0'; 4762 4763 /* 4764 * Find the longest run of 16 bit zero values 4765 * for the single allowed zero compression - "::". 4766 */ 4767 firstzero = -1; 4768 tryzero = -1; 4769 numzero = 1; 4770 for (i = 0; i < sizeof (struct in6_addr); i++) { 4771 if (ip6._S6_un._S6_u8[i] == 0 && 4772 tryzero == -1 && i % 2 == 0) { 4773 tryzero = i; 4774 continue; 4775 } 4776 4777 if (tryzero != -1 && 4778 (ip6._S6_un._S6_u8[i] != 0 || 4779 i == sizeof (struct in6_addr) - 1)) { 4780 4781 if (i - tryzero <= numzero) { 4782 tryzero = -1; 4783 continue; 4784 } 4785 4786 firstzero = tryzero; 4787 numzero = i - i % 2 - tryzero; 4788 tryzero = -1; 4789 4790 if (ip6._S6_un._S6_u8[i] == 0 && 4791 i == sizeof (struct in6_addr) - 1) 4792 numzero += 2; 4793 } 4794 } 4795 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4796 4797 /* 4798 * Check for an IPv4 embedded address. 4799 */ 4800 v6end = sizeof (struct in6_addr) - 2; 4801 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4802 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4803 for (i = sizeof (struct in6_addr) - 1; 4804 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4805 ASSERT(end >= base); 4806 4807 val = ip6._S6_un._S6_u8[i]; 4808 4809 if (val == 0) { 4810 *end-- = '0'; 4811 } else { 4812 for (; val; val /= 10) { 4813 *end-- = '0' + val % 10; 4814 } 4815 } 4816 4817 if (i > DTRACE_V4MAPPED_OFFSET) 4818 *end-- = '.'; 4819 } 4820 4821 if (subr == DIF_SUBR_INET_NTOA6) 4822 goto inetout; 4823 4824 /* 4825 * Set v6end to skip the IPv4 address that 4826 * we have already stringified. 4827 */ 4828 v6end = 10; 4829 } 4830 4831 /* 4832 * Build the IPv6 string by working through the 4833 * address in reverse. 4834 */ 4835 for (i = v6end; i >= 0; i -= 2) { 4836 ASSERT(end >= base); 4837 4838 if (i == firstzero + numzero - 2) { 4839 *end-- = ':'; 4840 *end-- = ':'; 4841 i -= numzero - 2; 4842 continue; 4843 } 4844 4845 if (i < 14 && i != firstzero - 2) 4846 *end-- = ':'; 4847 4848 val = (ip6._S6_un._S6_u8[i] << 8) + 4849 ip6._S6_un._S6_u8[i + 1]; 4850 4851 if (val == 0) { 4852 *end-- = '0'; 4853 } else { 4854 for (; val; val /= 16) { 4855 *end-- = digits[val % 16]; 4856 } 4857 } 4858 } 4859 ASSERT(end + 1 >= base); 4860 4861 } else { 4862 /* 4863 * The user didn't use AH_INET or AH_INET6. 4864 */ 4865 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4866 regs[rd] = NULL; 4867 break; 4868 } 4869 4870 inetout: regs[rd] = (uintptr_t)end + 1; 4871 mstate->dtms_scratch_ptr += size; 4872 break; 4873 } 4874 4875 } 4876 } 4877 4878 /* 4879 * Emulate the execution of DTrace IR instructions specified by the given 4880 * DIF object. This function is deliberately void of assertions as all of 4881 * the necessary checks are handled by a call to dtrace_difo_validate(). 4882 */ 4883 static uint64_t 4884 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4885 dtrace_vstate_t *vstate, dtrace_state_t *state) 4886 { 4887 const dif_instr_t *text = difo->dtdo_buf; 4888 const uint_t textlen = difo->dtdo_len; 4889 const char *strtab = difo->dtdo_strtab; 4890 const uint64_t *inttab = difo->dtdo_inttab; 4891 4892 uint64_t rval = 0; 4893 dtrace_statvar_t *svar; 4894 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4895 dtrace_difv_t *v; 4896 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4897 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 4898 4899 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4900 uint64_t regs[DIF_DIR_NREGS]; 4901 uint64_t *tmp; 4902 4903 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4904 int64_t cc_r; 4905 uint_t pc = 0, id, opc; 4906 uint8_t ttop = 0; 4907 dif_instr_t instr; 4908 uint_t r1, r2, rd; 4909 4910 /* 4911 * We stash the current DIF object into the machine state: we need it 4912 * for subsequent access checking. 4913 */ 4914 mstate->dtms_difo = difo; 4915 4916 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4917 4918 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4919 opc = pc; 4920 4921 instr = text[pc++]; 4922 r1 = DIF_INSTR_R1(instr); 4923 r2 = DIF_INSTR_R2(instr); 4924 rd = DIF_INSTR_RD(instr); 4925 4926 switch (DIF_INSTR_OP(instr)) { 4927 case DIF_OP_OR: 4928 regs[rd] = regs[r1] | regs[r2]; 4929 break; 4930 case DIF_OP_XOR: 4931 regs[rd] = regs[r1] ^ regs[r2]; 4932 break; 4933 case DIF_OP_AND: 4934 regs[rd] = regs[r1] & regs[r2]; 4935 break; 4936 case DIF_OP_SLL: 4937 regs[rd] = regs[r1] << regs[r2]; 4938 break; 4939 case DIF_OP_SRL: 4940 regs[rd] = regs[r1] >> regs[r2]; 4941 break; 4942 case DIF_OP_SUB: 4943 regs[rd] = regs[r1] - regs[r2]; 4944 break; 4945 case DIF_OP_ADD: 4946 regs[rd] = regs[r1] + regs[r2]; 4947 break; 4948 case DIF_OP_MUL: 4949 regs[rd] = regs[r1] * regs[r2]; 4950 break; 4951 case DIF_OP_SDIV: 4952 if (regs[r2] == 0) { 4953 regs[rd] = 0; 4954 *flags |= CPU_DTRACE_DIVZERO; 4955 } else { 4956 regs[rd] = (int64_t)regs[r1] / 4957 (int64_t)regs[r2]; 4958 } 4959 break; 4960 4961 case DIF_OP_UDIV: 4962 if (regs[r2] == 0) { 4963 regs[rd] = 0; 4964 *flags |= CPU_DTRACE_DIVZERO; 4965 } else { 4966 regs[rd] = regs[r1] / regs[r2]; 4967 } 4968 break; 4969 4970 case DIF_OP_SREM: 4971 if (regs[r2] == 0) { 4972 regs[rd] = 0; 4973 *flags |= CPU_DTRACE_DIVZERO; 4974 } else { 4975 regs[rd] = (int64_t)regs[r1] % 4976 (int64_t)regs[r2]; 4977 } 4978 break; 4979 4980 case DIF_OP_UREM: 4981 if (regs[r2] == 0) { 4982 regs[rd] = 0; 4983 *flags |= CPU_DTRACE_DIVZERO; 4984 } else { 4985 regs[rd] = regs[r1] % regs[r2]; 4986 } 4987 break; 4988 4989 case DIF_OP_NOT: 4990 regs[rd] = ~regs[r1]; 4991 break; 4992 case DIF_OP_MOV: 4993 regs[rd] = regs[r1]; 4994 break; 4995 case DIF_OP_CMP: 4996 cc_r = regs[r1] - regs[r2]; 4997 cc_n = cc_r < 0; 4998 cc_z = cc_r == 0; 4999 cc_v = 0; 5000 cc_c = regs[r1] < regs[r2]; 5001 break; 5002 case DIF_OP_TST: 5003 cc_n = cc_v = cc_c = 0; 5004 cc_z = regs[r1] == 0; 5005 break; 5006 case DIF_OP_BA: 5007 pc = DIF_INSTR_LABEL(instr); 5008 break; 5009 case DIF_OP_BE: 5010 if (cc_z) 5011 pc = DIF_INSTR_LABEL(instr); 5012 break; 5013 case DIF_OP_BNE: 5014 if (cc_z == 0) 5015 pc = DIF_INSTR_LABEL(instr); 5016 break; 5017 case DIF_OP_BG: 5018 if ((cc_z | (cc_n ^ cc_v)) == 0) 5019 pc = DIF_INSTR_LABEL(instr); 5020 break; 5021 case DIF_OP_BGU: 5022 if ((cc_c | cc_z) == 0) 5023 pc = DIF_INSTR_LABEL(instr); 5024 break; 5025 case DIF_OP_BGE: 5026 if ((cc_n ^ cc_v) == 0) 5027 pc = DIF_INSTR_LABEL(instr); 5028 break; 5029 case DIF_OP_BGEU: 5030 if (cc_c == 0) 5031 pc = DIF_INSTR_LABEL(instr); 5032 break; 5033 case DIF_OP_BL: 5034 if (cc_n ^ cc_v) 5035 pc = DIF_INSTR_LABEL(instr); 5036 break; 5037 case DIF_OP_BLU: 5038 if (cc_c) 5039 pc = DIF_INSTR_LABEL(instr); 5040 break; 5041 case DIF_OP_BLE: 5042 if (cc_z | (cc_n ^ cc_v)) 5043 pc = DIF_INSTR_LABEL(instr); 5044 break; 5045 case DIF_OP_BLEU: 5046 if (cc_c | cc_z) 5047 pc = DIF_INSTR_LABEL(instr); 5048 break; 5049 case DIF_OP_RLDSB: 5050 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 5051 break; 5052 /*FALLTHROUGH*/ 5053 case DIF_OP_LDSB: 5054 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5055 break; 5056 case DIF_OP_RLDSH: 5057 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 5058 break; 5059 /*FALLTHROUGH*/ 5060 case DIF_OP_LDSH: 5061 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5062 break; 5063 case DIF_OP_RLDSW: 5064 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 5065 break; 5066 /*FALLTHROUGH*/ 5067 case DIF_OP_LDSW: 5068 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5069 break; 5070 case DIF_OP_RLDUB: 5071 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 5072 break; 5073 /*FALLTHROUGH*/ 5074 case DIF_OP_LDUB: 5075 regs[rd] = dtrace_load8(regs[r1]); 5076 break; 5077 case DIF_OP_RLDUH: 5078 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 5079 break; 5080 /*FALLTHROUGH*/ 5081 case DIF_OP_LDUH: 5082 regs[rd] = dtrace_load16(regs[r1]); 5083 break; 5084 case DIF_OP_RLDUW: 5085 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 5086 break; 5087 /*FALLTHROUGH*/ 5088 case DIF_OP_LDUW: 5089 regs[rd] = dtrace_load32(regs[r1]); 5090 break; 5091 case DIF_OP_RLDX: 5092 if (!dtrace_canload(regs[r1], 8, mstate, vstate)) 5093 break; 5094 /*FALLTHROUGH*/ 5095 case DIF_OP_LDX: 5096 regs[rd] = dtrace_load64(regs[r1]); 5097 break; 5098 case DIF_OP_ULDSB: 5099 regs[rd] = (int8_t) 5100 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5101 break; 5102 case DIF_OP_ULDSH: 5103 regs[rd] = (int16_t) 5104 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5105 break; 5106 case DIF_OP_ULDSW: 5107 regs[rd] = (int32_t) 5108 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5109 break; 5110 case DIF_OP_ULDUB: 5111 regs[rd] = 5112 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5113 break; 5114 case DIF_OP_ULDUH: 5115 regs[rd] = 5116 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5117 break; 5118 case DIF_OP_ULDUW: 5119 regs[rd] = 5120 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5121 break; 5122 case DIF_OP_ULDX: 5123 regs[rd] = 5124 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5125 break; 5126 case DIF_OP_RET: 5127 rval = regs[rd]; 5128 pc = textlen; 5129 break; 5130 case DIF_OP_NOP: 5131 break; 5132 case DIF_OP_SETX: 5133 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5134 break; 5135 case DIF_OP_SETS: 5136 regs[rd] = (uint64_t)(uintptr_t) 5137 (strtab + DIF_INSTR_STRING(instr)); 5138 break; 5139 case DIF_OP_SCMP: { 5140 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5141 uintptr_t s1 = regs[r1]; 5142 uintptr_t s2 = regs[r2]; 5143 5144 if (s1 != NULL && 5145 !dtrace_strcanload(s1, sz, mstate, vstate)) 5146 break; 5147 if (s2 != NULL && 5148 !dtrace_strcanload(s2, sz, mstate, vstate)) 5149 break; 5150 5151 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5152 5153 cc_n = cc_r < 0; 5154 cc_z = cc_r == 0; 5155 cc_v = cc_c = 0; 5156 break; 5157 } 5158 case DIF_OP_LDGA: 5159 regs[rd] = dtrace_dif_variable(mstate, state, 5160 r1, regs[r2]); 5161 break; 5162 case DIF_OP_LDGS: 5163 id = DIF_INSTR_VAR(instr); 5164 5165 if (id >= DIF_VAR_OTHER_UBASE) { 5166 uintptr_t a; 5167 5168 id -= DIF_VAR_OTHER_UBASE; 5169 svar = vstate->dtvs_globals[id]; 5170 ASSERT(svar != NULL); 5171 v = &svar->dtsv_var; 5172 5173 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5174 regs[rd] = svar->dtsv_data; 5175 break; 5176 } 5177 5178 a = (uintptr_t)svar->dtsv_data; 5179 5180 if (*(uint8_t *)a == UINT8_MAX) { 5181 /* 5182 * If the 0th byte is set to UINT8_MAX 5183 * then this is to be treated as a 5184 * reference to a NULL variable. 5185 */ 5186 regs[rd] = NULL; 5187 } else { 5188 regs[rd] = a + sizeof (uint64_t); 5189 } 5190 5191 break; 5192 } 5193 5194 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5195 break; 5196 5197 case DIF_OP_STGS: 5198 id = DIF_INSTR_VAR(instr); 5199 5200 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5201 id -= DIF_VAR_OTHER_UBASE; 5202 5203 svar = vstate->dtvs_globals[id]; 5204 ASSERT(svar != NULL); 5205 v = &svar->dtsv_var; 5206 5207 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5208 uintptr_t a = (uintptr_t)svar->dtsv_data; 5209 5210 ASSERT(a != NULL); 5211 ASSERT(svar->dtsv_size != 0); 5212 5213 if (regs[rd] == NULL) { 5214 *(uint8_t *)a = UINT8_MAX; 5215 break; 5216 } else { 5217 *(uint8_t *)a = 0; 5218 a += sizeof (uint64_t); 5219 } 5220 if (!dtrace_vcanload( 5221 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5222 mstate, vstate)) 5223 break; 5224 5225 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5226 (void *)a, &v->dtdv_type); 5227 break; 5228 } 5229 5230 svar->dtsv_data = regs[rd]; 5231 break; 5232 5233 case DIF_OP_LDTA: 5234 /* 5235 * There are no DTrace built-in thread-local arrays at 5236 * present. This opcode is saved for future work. 5237 */ 5238 *flags |= CPU_DTRACE_ILLOP; 5239 regs[rd] = 0; 5240 break; 5241 5242 case DIF_OP_LDLS: 5243 id = DIF_INSTR_VAR(instr); 5244 5245 if (id < DIF_VAR_OTHER_UBASE) { 5246 /* 5247 * For now, this has no meaning. 5248 */ 5249 regs[rd] = 0; 5250 break; 5251 } 5252 5253 id -= DIF_VAR_OTHER_UBASE; 5254 5255 ASSERT(id < vstate->dtvs_nlocals); 5256 ASSERT(vstate->dtvs_locals != NULL); 5257 5258 svar = vstate->dtvs_locals[id]; 5259 ASSERT(svar != NULL); 5260 v = &svar->dtsv_var; 5261 5262 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5263 uintptr_t a = (uintptr_t)svar->dtsv_data; 5264 size_t sz = v->dtdv_type.dtdt_size; 5265 5266 sz += sizeof (uint64_t); 5267 ASSERT(svar->dtsv_size == NCPU * sz); 5268 a += CPU->cpu_id * sz; 5269 5270 if (*(uint8_t *)a == UINT8_MAX) { 5271 /* 5272 * If the 0th byte is set to UINT8_MAX 5273 * then this is to be treated as a 5274 * reference to a NULL variable. 5275 */ 5276 regs[rd] = NULL; 5277 } else { 5278 regs[rd] = a + sizeof (uint64_t); 5279 } 5280 5281 break; 5282 } 5283 5284 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5285 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5286 regs[rd] = tmp[CPU->cpu_id]; 5287 break; 5288 5289 case DIF_OP_STLS: 5290 id = DIF_INSTR_VAR(instr); 5291 5292 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5293 id -= DIF_VAR_OTHER_UBASE; 5294 ASSERT(id < vstate->dtvs_nlocals); 5295 5296 ASSERT(vstate->dtvs_locals != NULL); 5297 svar = vstate->dtvs_locals[id]; 5298 ASSERT(svar != NULL); 5299 v = &svar->dtsv_var; 5300 5301 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5302 uintptr_t a = (uintptr_t)svar->dtsv_data; 5303 size_t sz = v->dtdv_type.dtdt_size; 5304 5305 sz += sizeof (uint64_t); 5306 ASSERT(svar->dtsv_size == NCPU * sz); 5307 a += CPU->cpu_id * sz; 5308 5309 if (regs[rd] == NULL) { 5310 *(uint8_t *)a = UINT8_MAX; 5311 break; 5312 } else { 5313 *(uint8_t *)a = 0; 5314 a += sizeof (uint64_t); 5315 } 5316 5317 if (!dtrace_vcanload( 5318 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5319 mstate, vstate)) 5320 break; 5321 5322 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5323 (void *)a, &v->dtdv_type); 5324 break; 5325 } 5326 5327 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5328 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5329 tmp[CPU->cpu_id] = regs[rd]; 5330 break; 5331 5332 case DIF_OP_LDTS: { 5333 dtrace_dynvar_t *dvar; 5334 dtrace_key_t *key; 5335 5336 id = DIF_INSTR_VAR(instr); 5337 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5338 id -= DIF_VAR_OTHER_UBASE; 5339 v = &vstate->dtvs_tlocals[id]; 5340 5341 key = &tupregs[DIF_DTR_NREGS]; 5342 key[0].dttk_value = (uint64_t)id; 5343 key[0].dttk_size = 0; 5344 DTRACE_TLS_THRKEY(key[1].dttk_value); 5345 key[1].dttk_size = 0; 5346 5347 dvar = dtrace_dynvar(dstate, 2, key, 5348 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5349 mstate, vstate); 5350 5351 if (dvar == NULL) { 5352 regs[rd] = 0; 5353 break; 5354 } 5355 5356 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5357 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5358 } else { 5359 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5360 } 5361 5362 break; 5363 } 5364 5365 case DIF_OP_STTS: { 5366 dtrace_dynvar_t *dvar; 5367 dtrace_key_t *key; 5368 5369 id = DIF_INSTR_VAR(instr); 5370 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5371 id -= DIF_VAR_OTHER_UBASE; 5372 5373 key = &tupregs[DIF_DTR_NREGS]; 5374 key[0].dttk_value = (uint64_t)id; 5375 key[0].dttk_size = 0; 5376 DTRACE_TLS_THRKEY(key[1].dttk_value); 5377 key[1].dttk_size = 0; 5378 v = &vstate->dtvs_tlocals[id]; 5379 5380 dvar = dtrace_dynvar(dstate, 2, key, 5381 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5382 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5383 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5384 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5385 5386 /* 5387 * Given that we're storing to thread-local data, 5388 * we need to flush our predicate cache. 5389 */ 5390 curthread->t_predcache = NULL; 5391 5392 if (dvar == NULL) 5393 break; 5394 5395 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5396 if (!dtrace_vcanload( 5397 (void *)(uintptr_t)regs[rd], 5398 &v->dtdv_type, mstate, vstate)) 5399 break; 5400 5401 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5402 dvar->dtdv_data, &v->dtdv_type); 5403 } else { 5404 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5405 } 5406 5407 break; 5408 } 5409 5410 case DIF_OP_SRA: 5411 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5412 break; 5413 5414 case DIF_OP_CALL: 5415 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5416 regs, tupregs, ttop, mstate, state); 5417 break; 5418 5419 case DIF_OP_PUSHTR: 5420 if (ttop == DIF_DTR_NREGS) { 5421 *flags |= CPU_DTRACE_TUPOFLOW; 5422 break; 5423 } 5424 5425 if (r1 == DIF_TYPE_STRING) { 5426 /* 5427 * If this is a string type and the size is 0, 5428 * we'll use the system-wide default string 5429 * size. Note that we are _not_ looking at 5430 * the value of the DTRACEOPT_STRSIZE option; 5431 * had this been set, we would expect to have 5432 * a non-zero size value in the "pushtr". 5433 */ 5434 tupregs[ttop].dttk_size = 5435 dtrace_strlen((char *)(uintptr_t)regs[rd], 5436 regs[r2] ? regs[r2] : 5437 dtrace_strsize_default) + 1; 5438 } else { 5439 tupregs[ttop].dttk_size = regs[r2]; 5440 } 5441 5442 tupregs[ttop++].dttk_value = regs[rd]; 5443 break; 5444 5445 case DIF_OP_PUSHTV: 5446 if (ttop == DIF_DTR_NREGS) { 5447 *flags |= CPU_DTRACE_TUPOFLOW; 5448 break; 5449 } 5450 5451 tupregs[ttop].dttk_value = regs[rd]; 5452 tupregs[ttop++].dttk_size = 0; 5453 break; 5454 5455 case DIF_OP_POPTS: 5456 if (ttop != 0) 5457 ttop--; 5458 break; 5459 5460 case DIF_OP_FLUSHTS: 5461 ttop = 0; 5462 break; 5463 5464 case DIF_OP_LDGAA: 5465 case DIF_OP_LDTAA: { 5466 dtrace_dynvar_t *dvar; 5467 dtrace_key_t *key = tupregs; 5468 uint_t nkeys = ttop; 5469 5470 id = DIF_INSTR_VAR(instr); 5471 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5472 id -= DIF_VAR_OTHER_UBASE; 5473 5474 key[nkeys].dttk_value = (uint64_t)id; 5475 key[nkeys++].dttk_size = 0; 5476 5477 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5478 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5479 key[nkeys++].dttk_size = 0; 5480 v = &vstate->dtvs_tlocals[id]; 5481 } else { 5482 v = &vstate->dtvs_globals[id]->dtsv_var; 5483 } 5484 5485 dvar = dtrace_dynvar(dstate, nkeys, key, 5486 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5487 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5488 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5489 5490 if (dvar == NULL) { 5491 regs[rd] = 0; 5492 break; 5493 } 5494 5495 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5496 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5497 } else { 5498 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5499 } 5500 5501 break; 5502 } 5503 5504 case DIF_OP_STGAA: 5505 case DIF_OP_STTAA: { 5506 dtrace_dynvar_t *dvar; 5507 dtrace_key_t *key = tupregs; 5508 uint_t nkeys = ttop; 5509 5510 id = DIF_INSTR_VAR(instr); 5511 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5512 id -= DIF_VAR_OTHER_UBASE; 5513 5514 key[nkeys].dttk_value = (uint64_t)id; 5515 key[nkeys++].dttk_size = 0; 5516 5517 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5518 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5519 key[nkeys++].dttk_size = 0; 5520 v = &vstate->dtvs_tlocals[id]; 5521 } else { 5522 v = &vstate->dtvs_globals[id]->dtsv_var; 5523 } 5524 5525 dvar = dtrace_dynvar(dstate, nkeys, key, 5526 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5527 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5528 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5529 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5530 5531 if (dvar == NULL) 5532 break; 5533 5534 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5535 if (!dtrace_vcanload( 5536 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5537 mstate, vstate)) 5538 break; 5539 5540 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5541 dvar->dtdv_data, &v->dtdv_type); 5542 } else { 5543 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5544 } 5545 5546 break; 5547 } 5548 5549 case DIF_OP_ALLOCS: { 5550 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5551 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5552 5553 /* 5554 * Rounding up the user allocation size could have 5555 * overflowed large, bogus allocations (like -1ULL) to 5556 * 0. 5557 */ 5558 if (size < regs[r1] || 5559 !DTRACE_INSCRATCH(mstate, size)) { 5560 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5561 regs[rd] = NULL; 5562 break; 5563 } 5564 5565 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5566 mstate->dtms_scratch_ptr += size; 5567 regs[rd] = ptr; 5568 break; 5569 } 5570 5571 case DIF_OP_COPYS: 5572 if (!dtrace_canstore(regs[rd], regs[r2], 5573 mstate, vstate)) { 5574 *flags |= CPU_DTRACE_BADADDR; 5575 *illval = regs[rd]; 5576 break; 5577 } 5578 5579 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5580 break; 5581 5582 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5583 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5584 break; 5585 5586 case DIF_OP_STB: 5587 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5588 *flags |= CPU_DTRACE_BADADDR; 5589 *illval = regs[rd]; 5590 break; 5591 } 5592 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5593 break; 5594 5595 case DIF_OP_STH: 5596 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5597 *flags |= CPU_DTRACE_BADADDR; 5598 *illval = regs[rd]; 5599 break; 5600 } 5601 if (regs[rd] & 1) { 5602 *flags |= CPU_DTRACE_BADALIGN; 5603 *illval = regs[rd]; 5604 break; 5605 } 5606 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5607 break; 5608 5609 case DIF_OP_STW: 5610 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5611 *flags |= CPU_DTRACE_BADADDR; 5612 *illval = regs[rd]; 5613 break; 5614 } 5615 if (regs[rd] & 3) { 5616 *flags |= CPU_DTRACE_BADALIGN; 5617 *illval = regs[rd]; 5618 break; 5619 } 5620 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5621 break; 5622 5623 case DIF_OP_STX: 5624 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5625 *flags |= CPU_DTRACE_BADADDR; 5626 *illval = regs[rd]; 5627 break; 5628 } 5629 if (regs[rd] & 7) { 5630 *flags |= CPU_DTRACE_BADALIGN; 5631 *illval = regs[rd]; 5632 break; 5633 } 5634 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5635 break; 5636 } 5637 } 5638 5639 if (!(*flags & CPU_DTRACE_FAULT)) 5640 return (rval); 5641 5642 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5643 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5644 5645 return (0); 5646 } 5647 5648 static void 5649 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5650 { 5651 dtrace_probe_t *probe = ecb->dte_probe; 5652 dtrace_provider_t *prov = probe->dtpr_provider; 5653 char c[DTRACE_FULLNAMELEN + 80], *str; 5654 char *msg = "dtrace: breakpoint action at probe "; 5655 char *ecbmsg = " (ecb "; 5656 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5657 uintptr_t val = (uintptr_t)ecb; 5658 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5659 5660 if (dtrace_destructive_disallow) 5661 return; 5662 5663 /* 5664 * It's impossible to be taking action on the NULL probe. 5665 */ 5666 ASSERT(probe != NULL); 5667 5668 /* 5669 * This is a poor man's (destitute man's?) sprintf(): we want to 5670 * print the provider name, module name, function name and name of 5671 * the probe, along with the hex address of the ECB with the breakpoint 5672 * action -- all of which we must place in the character buffer by 5673 * hand. 5674 */ 5675 while (*msg != '\0') 5676 c[i++] = *msg++; 5677 5678 for (str = prov->dtpv_name; *str != '\0'; str++) 5679 c[i++] = *str; 5680 c[i++] = ':'; 5681 5682 for (str = probe->dtpr_mod; *str != '\0'; str++) 5683 c[i++] = *str; 5684 c[i++] = ':'; 5685 5686 for (str = probe->dtpr_func; *str != '\0'; str++) 5687 c[i++] = *str; 5688 c[i++] = ':'; 5689 5690 for (str = probe->dtpr_name; *str != '\0'; str++) 5691 c[i++] = *str; 5692 5693 while (*ecbmsg != '\0') 5694 c[i++] = *ecbmsg++; 5695 5696 while (shift >= 0) { 5697 mask = (uintptr_t)0xf << shift; 5698 5699 if (val >= ((uintptr_t)1 << shift)) 5700 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5701 shift -= 4; 5702 } 5703 5704 c[i++] = ')'; 5705 c[i] = '\0'; 5706 5707 debug_enter(c); 5708 } 5709 5710 static void 5711 dtrace_action_panic(dtrace_ecb_t *ecb) 5712 { 5713 dtrace_probe_t *probe = ecb->dte_probe; 5714 5715 /* 5716 * It's impossible to be taking action on the NULL probe. 5717 */ 5718 ASSERT(probe != NULL); 5719 5720 if (dtrace_destructive_disallow) 5721 return; 5722 5723 if (dtrace_panicked != NULL) 5724 return; 5725 5726 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5727 return; 5728 5729 /* 5730 * We won the right to panic. (We want to be sure that only one 5731 * thread calls panic() from dtrace_probe(), and that panic() is 5732 * called exactly once.) 5733 */ 5734 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5735 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5736 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5737 } 5738 5739 static void 5740 dtrace_action_raise(uint64_t sig) 5741 { 5742 if (dtrace_destructive_disallow) 5743 return; 5744 5745 if (sig >= NSIG) { 5746 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5747 return; 5748 } 5749 5750 /* 5751 * raise() has a queue depth of 1 -- we ignore all subsequent 5752 * invocations of the raise() action. 5753 */ 5754 if (curthread->t_dtrace_sig == 0) 5755 curthread->t_dtrace_sig = (uint8_t)sig; 5756 5757 curthread->t_sig_check = 1; 5758 aston(curthread); 5759 } 5760 5761 static void 5762 dtrace_action_stop(void) 5763 { 5764 if (dtrace_destructive_disallow) 5765 return; 5766 5767 if (!curthread->t_dtrace_stop) { 5768 curthread->t_dtrace_stop = 1; 5769 curthread->t_sig_check = 1; 5770 aston(curthread); 5771 } 5772 } 5773 5774 static void 5775 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5776 { 5777 hrtime_t now; 5778 volatile uint16_t *flags; 5779 cpu_t *cpu = CPU; 5780 5781 if (dtrace_destructive_disallow) 5782 return; 5783 5784 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5785 5786 now = dtrace_gethrtime(); 5787 5788 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5789 /* 5790 * We need to advance the mark to the current time. 5791 */ 5792 cpu->cpu_dtrace_chillmark = now; 5793 cpu->cpu_dtrace_chilled = 0; 5794 } 5795 5796 /* 5797 * Now check to see if the requested chill time would take us over 5798 * the maximum amount of time allowed in the chill interval. (Or 5799 * worse, if the calculation itself induces overflow.) 5800 */ 5801 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5802 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5803 *flags |= CPU_DTRACE_ILLOP; 5804 return; 5805 } 5806 5807 while (dtrace_gethrtime() - now < val) 5808 continue; 5809 5810 /* 5811 * Normally, we assure that the value of the variable "timestamp" does 5812 * not change within an ECB. The presence of chill() represents an 5813 * exception to this rule, however. 5814 */ 5815 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5816 cpu->cpu_dtrace_chilled += val; 5817 } 5818 5819 static void 5820 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5821 uint64_t *buf, uint64_t arg) 5822 { 5823 int nframes = DTRACE_USTACK_NFRAMES(arg); 5824 int strsize = DTRACE_USTACK_STRSIZE(arg); 5825 uint64_t *pcs = &buf[1], *fps; 5826 char *str = (char *)&pcs[nframes]; 5827 int size, offs = 0, i, j; 5828 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5829 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 5830 char *sym; 5831 5832 /* 5833 * Should be taking a faster path if string space has not been 5834 * allocated. 5835 */ 5836 ASSERT(strsize != 0); 5837 5838 /* 5839 * We will first allocate some temporary space for the frame pointers. 5840 */ 5841 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5842 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5843 (nframes * sizeof (uint64_t)); 5844 5845 if (!DTRACE_INSCRATCH(mstate, size)) { 5846 /* 5847 * Not enough room for our frame pointers -- need to indicate 5848 * that we ran out of scratch space. 5849 */ 5850 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5851 return; 5852 } 5853 5854 mstate->dtms_scratch_ptr += size; 5855 saved = mstate->dtms_scratch_ptr; 5856 5857 /* 5858 * Now get a stack with both program counters and frame pointers. 5859 */ 5860 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5861 dtrace_getufpstack(buf, fps, nframes + 1); 5862 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5863 5864 /* 5865 * If that faulted, we're cooked. 5866 */ 5867 if (*flags & CPU_DTRACE_FAULT) 5868 goto out; 5869 5870 /* 5871 * Now we want to walk up the stack, calling the USTACK helper. For 5872 * each iteration, we restore the scratch pointer. 5873 */ 5874 for (i = 0; i < nframes; i++) { 5875 mstate->dtms_scratch_ptr = saved; 5876 5877 if (offs >= strsize) 5878 break; 5879 5880 sym = (char *)(uintptr_t)dtrace_helper( 5881 DTRACE_HELPER_ACTION_USTACK, 5882 mstate, state, pcs[i], fps[i]); 5883 5884 /* 5885 * If we faulted while running the helper, we're going to 5886 * clear the fault and null out the corresponding string. 5887 */ 5888 if (*flags & CPU_DTRACE_FAULT) { 5889 *flags &= ~CPU_DTRACE_FAULT; 5890 str[offs++] = '\0'; 5891 continue; 5892 } 5893 5894 if (sym == NULL) { 5895 str[offs++] = '\0'; 5896 continue; 5897 } 5898 5899 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5900 5901 /* 5902 * Now copy in the string that the helper returned to us. 5903 */ 5904 for (j = 0; offs + j < strsize; j++) { 5905 if ((str[offs + j] = sym[j]) == '\0') 5906 break; 5907 } 5908 5909 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5910 5911 offs += j + 1; 5912 } 5913 5914 if (offs >= strsize) { 5915 /* 5916 * If we didn't have room for all of the strings, we don't 5917 * abort processing -- this needn't be a fatal error -- but we 5918 * still want to increment a counter (dts_stkstroverflows) to 5919 * allow this condition to be warned about. (If this is from 5920 * a jstack() action, it is easily tuned via jstackstrsize.) 5921 */ 5922 dtrace_error(&state->dts_stkstroverflows); 5923 } 5924 5925 while (offs < strsize) 5926 str[offs++] = '\0'; 5927 5928 out: 5929 mstate->dtms_scratch_ptr = old; 5930 } 5931 5932 /* 5933 * If you're looking for the epicenter of DTrace, you just found it. This 5934 * is the function called by the provider to fire a probe -- from which all 5935 * subsequent probe-context DTrace activity emanates. 5936 */ 5937 void 5938 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5939 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5940 { 5941 processorid_t cpuid; 5942 dtrace_icookie_t cookie; 5943 dtrace_probe_t *probe; 5944 dtrace_mstate_t mstate; 5945 dtrace_ecb_t *ecb; 5946 dtrace_action_t *act; 5947 intptr_t offs; 5948 size_t size; 5949 int vtime, onintr; 5950 volatile uint16_t *flags; 5951 hrtime_t now; 5952 5953 /* 5954 * Kick out immediately if this CPU is still being born (in which case 5955 * curthread will be set to -1) or the current thread can't allow 5956 * probes in its current context. 5957 */ 5958 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5959 return; 5960 5961 cookie = dtrace_interrupt_disable(); 5962 probe = dtrace_probes[id - 1]; 5963 cpuid = CPU->cpu_id; 5964 onintr = CPU_ON_INTR(CPU); 5965 5966 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5967 probe->dtpr_predcache == curthread->t_predcache) { 5968 /* 5969 * We have hit in the predicate cache; we know that 5970 * this predicate would evaluate to be false. 5971 */ 5972 dtrace_interrupt_enable(cookie); 5973 return; 5974 } 5975 5976 if (panic_quiesce) { 5977 /* 5978 * We don't trace anything if we're panicking. 5979 */ 5980 dtrace_interrupt_enable(cookie); 5981 return; 5982 } 5983 5984 now = dtrace_gethrtime(); 5985 vtime = dtrace_vtime_references != 0; 5986 5987 if (vtime && curthread->t_dtrace_start) 5988 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5989 5990 mstate.dtms_difo = NULL; 5991 mstate.dtms_probe = probe; 5992 mstate.dtms_strtok = NULL; 5993 mstate.dtms_arg[0] = arg0; 5994 mstate.dtms_arg[1] = arg1; 5995 mstate.dtms_arg[2] = arg2; 5996 mstate.dtms_arg[3] = arg3; 5997 mstate.dtms_arg[4] = arg4; 5998 5999 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6000 6001 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6002 dtrace_predicate_t *pred = ecb->dte_predicate; 6003 dtrace_state_t *state = ecb->dte_state; 6004 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6005 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6006 dtrace_vstate_t *vstate = &state->dts_vstate; 6007 dtrace_provider_t *prov = probe->dtpr_provider; 6008 uint64_t tracememsize = 0; 6009 int committed = 0; 6010 caddr_t tomax; 6011 6012 /* 6013 * A little subtlety with the following (seemingly innocuous) 6014 * declaration of the automatic 'val': by looking at the 6015 * code, you might think that it could be declared in the 6016 * action processing loop, below. (That is, it's only used in 6017 * the action processing loop.) However, it must be declared 6018 * out of that scope because in the case of DIF expression 6019 * arguments to aggregating actions, one iteration of the 6020 * action loop will use the last iteration's value. 6021 */ 6022 #ifdef lint 6023 uint64_t val = 0; 6024 #else 6025 uint64_t val; 6026 #endif 6027 6028 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6029 mstate.dtms_access = DTRACE_ACCESS_ARGS | DTRACE_ACCESS_PROC; 6030 mstate.dtms_getf = NULL; 6031 6032 *flags &= ~CPU_DTRACE_ERROR; 6033 6034 if (prov == dtrace_provider) { 6035 /* 6036 * If dtrace itself is the provider of this probe, 6037 * we're only going to continue processing the ECB if 6038 * arg0 (the dtrace_state_t) is equal to the ECB's 6039 * creating state. (This prevents disjoint consumers 6040 * from seeing one another's metaprobes.) 6041 */ 6042 if (arg0 != (uint64_t)(uintptr_t)state) 6043 continue; 6044 } 6045 6046 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6047 /* 6048 * We're not currently active. If our provider isn't 6049 * the dtrace pseudo provider, we're not interested. 6050 */ 6051 if (prov != dtrace_provider) 6052 continue; 6053 6054 /* 6055 * Now we must further check if we are in the BEGIN 6056 * probe. If we are, we will only continue processing 6057 * if we're still in WARMUP -- if one BEGIN enabling 6058 * has invoked the exit() action, we don't want to 6059 * evaluate subsequent BEGIN enablings. 6060 */ 6061 if (probe->dtpr_id == dtrace_probeid_begin && 6062 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6063 ASSERT(state->dts_activity == 6064 DTRACE_ACTIVITY_DRAINING); 6065 continue; 6066 } 6067 } 6068 6069 if (ecb->dte_cond && !dtrace_priv_probe(state, &mstate, ecb)) 6070 continue; 6071 6072 if (now - state->dts_alive > dtrace_deadman_timeout) { 6073 /* 6074 * We seem to be dead. Unless we (a) have kernel 6075 * destructive permissions (b) have expicitly enabled 6076 * destructive actions and (c) destructive actions have 6077 * not been disabled, we're going to transition into 6078 * the KILLED state, from which no further processing 6079 * on this state will be performed. 6080 */ 6081 if (!dtrace_priv_kernel_destructive(state) || 6082 !state->dts_cred.dcr_destructive || 6083 dtrace_destructive_disallow) { 6084 void *activity = &state->dts_activity; 6085 dtrace_activity_t current; 6086 6087 do { 6088 current = state->dts_activity; 6089 } while (dtrace_cas32(activity, current, 6090 DTRACE_ACTIVITY_KILLED) != current); 6091 6092 continue; 6093 } 6094 } 6095 6096 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6097 ecb->dte_alignment, state, &mstate)) < 0) 6098 continue; 6099 6100 tomax = buf->dtb_tomax; 6101 ASSERT(tomax != NULL); 6102 6103 if (ecb->dte_size != 0) 6104 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6105 6106 mstate.dtms_epid = ecb->dte_epid; 6107 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6108 6109 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6110 mstate.dtms_access |= DTRACE_ACCESS_KERNEL; 6111 6112 if (pred != NULL) { 6113 dtrace_difo_t *dp = pred->dtp_difo; 6114 int rval; 6115 6116 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6117 6118 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6119 dtrace_cacheid_t cid = probe->dtpr_predcache; 6120 6121 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6122 /* 6123 * Update the predicate cache... 6124 */ 6125 ASSERT(cid == pred->dtp_cacheid); 6126 curthread->t_predcache = cid; 6127 } 6128 6129 continue; 6130 } 6131 } 6132 6133 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6134 act != NULL; act = act->dta_next) { 6135 size_t valoffs; 6136 dtrace_difo_t *dp; 6137 dtrace_recdesc_t *rec = &act->dta_rec; 6138 6139 size = rec->dtrd_size; 6140 valoffs = offs + rec->dtrd_offset; 6141 6142 if (DTRACEACT_ISAGG(act->dta_kind)) { 6143 uint64_t v = 0xbad; 6144 dtrace_aggregation_t *agg; 6145 6146 agg = (dtrace_aggregation_t *)act; 6147 6148 if ((dp = act->dta_difo) != NULL) 6149 v = dtrace_dif_emulate(dp, 6150 &mstate, vstate, state); 6151 6152 if (*flags & CPU_DTRACE_ERROR) 6153 continue; 6154 6155 /* 6156 * Note that we always pass the expression 6157 * value from the previous iteration of the 6158 * action loop. This value will only be used 6159 * if there is an expression argument to the 6160 * aggregating action, denoted by the 6161 * dtag_hasarg field. 6162 */ 6163 dtrace_aggregate(agg, buf, 6164 offs, aggbuf, v, val); 6165 continue; 6166 } 6167 6168 switch (act->dta_kind) { 6169 case DTRACEACT_STOP: 6170 if (dtrace_priv_proc_destructive(state, 6171 &mstate)) 6172 dtrace_action_stop(); 6173 continue; 6174 6175 case DTRACEACT_BREAKPOINT: 6176 if (dtrace_priv_kernel_destructive(state)) 6177 dtrace_action_breakpoint(ecb); 6178 continue; 6179 6180 case DTRACEACT_PANIC: 6181 if (dtrace_priv_kernel_destructive(state)) 6182 dtrace_action_panic(ecb); 6183 continue; 6184 6185 case DTRACEACT_STACK: 6186 if (!dtrace_priv_kernel(state)) 6187 continue; 6188 6189 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6190 size / sizeof (pc_t), probe->dtpr_aframes, 6191 DTRACE_ANCHORED(probe) ? NULL : 6192 (uint32_t *)arg0); 6193 6194 continue; 6195 6196 case DTRACEACT_JSTACK: 6197 case DTRACEACT_USTACK: 6198 if (!dtrace_priv_proc(state, &mstate)) 6199 continue; 6200 6201 /* 6202 * See comment in DIF_VAR_PID. 6203 */ 6204 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6205 CPU_ON_INTR(CPU)) { 6206 int depth = DTRACE_USTACK_NFRAMES( 6207 rec->dtrd_arg) + 1; 6208 6209 dtrace_bzero((void *)(tomax + valoffs), 6210 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6211 + depth * sizeof (uint64_t)); 6212 6213 continue; 6214 } 6215 6216 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6217 curproc->p_dtrace_helpers != NULL) { 6218 /* 6219 * This is the slow path -- we have 6220 * allocated string space, and we're 6221 * getting the stack of a process that 6222 * has helpers. Call into a separate 6223 * routine to perform this processing. 6224 */ 6225 dtrace_action_ustack(&mstate, state, 6226 (uint64_t *)(tomax + valoffs), 6227 rec->dtrd_arg); 6228 continue; 6229 } 6230 6231 /* 6232 * Clear the string space, since there's no 6233 * helper to do it for us. 6234 */ 6235 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0) { 6236 int depth = DTRACE_USTACK_NFRAMES( 6237 rec->dtrd_arg); 6238 size_t strsize = DTRACE_USTACK_STRSIZE( 6239 rec->dtrd_arg); 6240 uint64_t *buf = (uint64_t *)(tomax + 6241 valoffs); 6242 void *strspace = &buf[depth + 1]; 6243 6244 dtrace_bzero(strspace, 6245 MIN(depth, strsize)); 6246 } 6247 6248 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6249 dtrace_getupcstack((uint64_t *) 6250 (tomax + valoffs), 6251 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6252 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6253 continue; 6254 6255 default: 6256 break; 6257 } 6258 6259 dp = act->dta_difo; 6260 ASSERT(dp != NULL); 6261 6262 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6263 6264 if (*flags & CPU_DTRACE_ERROR) 6265 continue; 6266 6267 switch (act->dta_kind) { 6268 case DTRACEACT_SPECULATE: 6269 ASSERT(buf == &state->dts_buffer[cpuid]); 6270 buf = dtrace_speculation_buffer(state, 6271 cpuid, val); 6272 6273 if (buf == NULL) { 6274 *flags |= CPU_DTRACE_DROP; 6275 continue; 6276 } 6277 6278 offs = dtrace_buffer_reserve(buf, 6279 ecb->dte_needed, ecb->dte_alignment, 6280 state, NULL); 6281 6282 if (offs < 0) { 6283 *flags |= CPU_DTRACE_DROP; 6284 continue; 6285 } 6286 6287 tomax = buf->dtb_tomax; 6288 ASSERT(tomax != NULL); 6289 6290 if (ecb->dte_size != 0) 6291 DTRACE_STORE(uint32_t, tomax, offs, 6292 ecb->dte_epid); 6293 continue; 6294 6295 case DTRACEACT_CHILL: 6296 if (dtrace_priv_kernel_destructive(state)) 6297 dtrace_action_chill(&mstate, val); 6298 continue; 6299 6300 case DTRACEACT_RAISE: 6301 if (dtrace_priv_proc_destructive(state, 6302 &mstate)) 6303 dtrace_action_raise(val); 6304 continue; 6305 6306 case DTRACEACT_COMMIT: 6307 ASSERT(!committed); 6308 6309 /* 6310 * We need to commit our buffer state. 6311 */ 6312 if (ecb->dte_size) 6313 buf->dtb_offset = offs + ecb->dte_size; 6314 buf = &state->dts_buffer[cpuid]; 6315 dtrace_speculation_commit(state, cpuid, val); 6316 committed = 1; 6317 continue; 6318 6319 case DTRACEACT_DISCARD: 6320 dtrace_speculation_discard(state, cpuid, val); 6321 continue; 6322 6323 case DTRACEACT_DIFEXPR: 6324 case DTRACEACT_LIBACT: 6325 case DTRACEACT_PRINTF: 6326 case DTRACEACT_PRINTA: 6327 case DTRACEACT_SYSTEM: 6328 case DTRACEACT_FREOPEN: 6329 case DTRACEACT_TRACEMEM: 6330 break; 6331 6332 case DTRACEACT_TRACEMEM_DYNSIZE: 6333 tracememsize = val; 6334 break; 6335 6336 case DTRACEACT_SYM: 6337 case DTRACEACT_MOD: 6338 if (!dtrace_priv_kernel(state)) 6339 continue; 6340 break; 6341 6342 case DTRACEACT_USYM: 6343 case DTRACEACT_UMOD: 6344 case DTRACEACT_UADDR: { 6345 struct pid *pid = curthread->t_procp->p_pidp; 6346 6347 if (!dtrace_priv_proc(state, &mstate)) 6348 continue; 6349 6350 DTRACE_STORE(uint64_t, tomax, 6351 valoffs, (uint64_t)pid->pid_id); 6352 DTRACE_STORE(uint64_t, tomax, 6353 valoffs + sizeof (uint64_t), val); 6354 6355 continue; 6356 } 6357 6358 case DTRACEACT_EXIT: { 6359 /* 6360 * For the exit action, we are going to attempt 6361 * to atomically set our activity to be 6362 * draining. If this fails (either because 6363 * another CPU has beat us to the exit action, 6364 * or because our current activity is something 6365 * other than ACTIVE or WARMUP), we will 6366 * continue. This assures that the exit action 6367 * can be successfully recorded at most once 6368 * when we're in the ACTIVE state. If we're 6369 * encountering the exit() action while in 6370 * COOLDOWN, however, we want to honor the new 6371 * status code. (We know that we're the only 6372 * thread in COOLDOWN, so there is no race.) 6373 */ 6374 void *activity = &state->dts_activity; 6375 dtrace_activity_t current = state->dts_activity; 6376 6377 if (current == DTRACE_ACTIVITY_COOLDOWN) 6378 break; 6379 6380 if (current != DTRACE_ACTIVITY_WARMUP) 6381 current = DTRACE_ACTIVITY_ACTIVE; 6382 6383 if (dtrace_cas32(activity, current, 6384 DTRACE_ACTIVITY_DRAINING) != current) { 6385 *flags |= CPU_DTRACE_DROP; 6386 continue; 6387 } 6388 6389 break; 6390 } 6391 6392 default: 6393 ASSERT(0); 6394 } 6395 6396 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6397 uintptr_t end = valoffs + size; 6398 6399 if (tracememsize != 0 && 6400 valoffs + tracememsize < end) { 6401 end = valoffs + tracememsize; 6402 tracememsize = 0; 6403 } 6404 6405 if (!dtrace_vcanload((void *)(uintptr_t)val, 6406 &dp->dtdo_rtype, &mstate, vstate)) 6407 continue; 6408 6409 /* 6410 * If this is a string, we're going to only 6411 * load until we find the zero byte -- after 6412 * which we'll store zero bytes. 6413 */ 6414 if (dp->dtdo_rtype.dtdt_kind == 6415 DIF_TYPE_STRING) { 6416 char c = '\0' + 1; 6417 int intuple = act->dta_intuple; 6418 size_t s; 6419 6420 for (s = 0; s < size; s++) { 6421 if (c != '\0') 6422 c = dtrace_load8(val++); 6423 6424 DTRACE_STORE(uint8_t, tomax, 6425 valoffs++, c); 6426 6427 if (c == '\0' && intuple) 6428 break; 6429 } 6430 6431 continue; 6432 } 6433 6434 while (valoffs < end) { 6435 DTRACE_STORE(uint8_t, tomax, valoffs++, 6436 dtrace_load8(val++)); 6437 } 6438 6439 continue; 6440 } 6441 6442 switch (size) { 6443 case 0: 6444 break; 6445 6446 case sizeof (uint8_t): 6447 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6448 break; 6449 case sizeof (uint16_t): 6450 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6451 break; 6452 case sizeof (uint32_t): 6453 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6454 break; 6455 case sizeof (uint64_t): 6456 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6457 break; 6458 default: 6459 /* 6460 * Any other size should have been returned by 6461 * reference, not by value. 6462 */ 6463 ASSERT(0); 6464 break; 6465 } 6466 } 6467 6468 if (*flags & CPU_DTRACE_DROP) 6469 continue; 6470 6471 if (*flags & CPU_DTRACE_FAULT) { 6472 int ndx; 6473 dtrace_action_t *err; 6474 6475 buf->dtb_errors++; 6476 6477 if (probe->dtpr_id == dtrace_probeid_error) { 6478 /* 6479 * There's nothing we can do -- we had an 6480 * error on the error probe. We bump an 6481 * error counter to at least indicate that 6482 * this condition happened. 6483 */ 6484 dtrace_error(&state->dts_dblerrors); 6485 continue; 6486 } 6487 6488 if (vtime) { 6489 /* 6490 * Before recursing on dtrace_probe(), we 6491 * need to explicitly clear out our start 6492 * time to prevent it from being accumulated 6493 * into t_dtrace_vtime. 6494 */ 6495 curthread->t_dtrace_start = 0; 6496 } 6497 6498 /* 6499 * Iterate over the actions to figure out which action 6500 * we were processing when we experienced the error. 6501 * Note that act points _past_ the faulting action; if 6502 * act is ecb->dte_action, the fault was in the 6503 * predicate, if it's ecb->dte_action->dta_next it's 6504 * in action #1, and so on. 6505 */ 6506 for (err = ecb->dte_action, ndx = 0; 6507 err != act; err = err->dta_next, ndx++) 6508 continue; 6509 6510 dtrace_probe_error(state, ecb->dte_epid, ndx, 6511 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6512 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6513 cpu_core[cpuid].cpuc_dtrace_illval); 6514 6515 continue; 6516 } 6517 6518 if (!committed) 6519 buf->dtb_offset = offs + ecb->dte_size; 6520 } 6521 6522 if (vtime) 6523 curthread->t_dtrace_start = dtrace_gethrtime(); 6524 6525 dtrace_interrupt_enable(cookie); 6526 } 6527 6528 /* 6529 * DTrace Probe Hashing Functions 6530 * 6531 * The functions in this section (and indeed, the functions in remaining 6532 * sections) are not _called_ from probe context. (Any exceptions to this are 6533 * marked with a "Note:".) Rather, they are called from elsewhere in the 6534 * DTrace framework to look-up probes in, add probes to and remove probes from 6535 * the DTrace probe hashes. (Each probe is hashed by each element of the 6536 * probe tuple -- allowing for fast lookups, regardless of what was 6537 * specified.) 6538 */ 6539 static uint_t 6540 dtrace_hash_str(char *p) 6541 { 6542 unsigned int g; 6543 uint_t hval = 0; 6544 6545 while (*p) { 6546 hval = (hval << 4) + *p++; 6547 if ((g = (hval & 0xf0000000)) != 0) 6548 hval ^= g >> 24; 6549 hval &= ~g; 6550 } 6551 return (hval); 6552 } 6553 6554 static dtrace_hash_t * 6555 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6556 { 6557 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6558 6559 hash->dth_stroffs = stroffs; 6560 hash->dth_nextoffs = nextoffs; 6561 hash->dth_prevoffs = prevoffs; 6562 6563 hash->dth_size = 1; 6564 hash->dth_mask = hash->dth_size - 1; 6565 6566 hash->dth_tab = kmem_zalloc(hash->dth_size * 6567 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6568 6569 return (hash); 6570 } 6571 6572 static void 6573 dtrace_hash_destroy(dtrace_hash_t *hash) 6574 { 6575 #ifdef DEBUG 6576 int i; 6577 6578 for (i = 0; i < hash->dth_size; i++) 6579 ASSERT(hash->dth_tab[i] == NULL); 6580 #endif 6581 6582 kmem_free(hash->dth_tab, 6583 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6584 kmem_free(hash, sizeof (dtrace_hash_t)); 6585 } 6586 6587 static void 6588 dtrace_hash_resize(dtrace_hash_t *hash) 6589 { 6590 int size = hash->dth_size, i, ndx; 6591 int new_size = hash->dth_size << 1; 6592 int new_mask = new_size - 1; 6593 dtrace_hashbucket_t **new_tab, *bucket, *next; 6594 6595 ASSERT((new_size & new_mask) == 0); 6596 6597 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6598 6599 for (i = 0; i < size; i++) { 6600 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6601 dtrace_probe_t *probe = bucket->dthb_chain; 6602 6603 ASSERT(probe != NULL); 6604 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6605 6606 next = bucket->dthb_next; 6607 bucket->dthb_next = new_tab[ndx]; 6608 new_tab[ndx] = bucket; 6609 } 6610 } 6611 6612 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6613 hash->dth_tab = new_tab; 6614 hash->dth_size = new_size; 6615 hash->dth_mask = new_mask; 6616 } 6617 6618 static void 6619 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6620 { 6621 int hashval = DTRACE_HASHSTR(hash, new); 6622 int ndx = hashval & hash->dth_mask; 6623 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6624 dtrace_probe_t **nextp, **prevp; 6625 6626 for (; bucket != NULL; bucket = bucket->dthb_next) { 6627 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6628 goto add; 6629 } 6630 6631 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6632 dtrace_hash_resize(hash); 6633 dtrace_hash_add(hash, new); 6634 return; 6635 } 6636 6637 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6638 bucket->dthb_next = hash->dth_tab[ndx]; 6639 hash->dth_tab[ndx] = bucket; 6640 hash->dth_nbuckets++; 6641 6642 add: 6643 nextp = DTRACE_HASHNEXT(hash, new); 6644 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6645 *nextp = bucket->dthb_chain; 6646 6647 if (bucket->dthb_chain != NULL) { 6648 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6649 ASSERT(*prevp == NULL); 6650 *prevp = new; 6651 } 6652 6653 bucket->dthb_chain = new; 6654 bucket->dthb_len++; 6655 } 6656 6657 static dtrace_probe_t * 6658 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6659 { 6660 int hashval = DTRACE_HASHSTR(hash, template); 6661 int ndx = hashval & hash->dth_mask; 6662 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6663 6664 for (; bucket != NULL; bucket = bucket->dthb_next) { 6665 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6666 return (bucket->dthb_chain); 6667 } 6668 6669 return (NULL); 6670 } 6671 6672 static int 6673 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6674 { 6675 int hashval = DTRACE_HASHSTR(hash, template); 6676 int ndx = hashval & hash->dth_mask; 6677 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6678 6679 for (; bucket != NULL; bucket = bucket->dthb_next) { 6680 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6681 return (bucket->dthb_len); 6682 } 6683 6684 return (NULL); 6685 } 6686 6687 static void 6688 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6689 { 6690 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6691 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6692 6693 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6694 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6695 6696 /* 6697 * Find the bucket that we're removing this probe from. 6698 */ 6699 for (; bucket != NULL; bucket = bucket->dthb_next) { 6700 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6701 break; 6702 } 6703 6704 ASSERT(bucket != NULL); 6705 6706 if (*prevp == NULL) { 6707 if (*nextp == NULL) { 6708 /* 6709 * The removed probe was the only probe on this 6710 * bucket; we need to remove the bucket. 6711 */ 6712 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6713 6714 ASSERT(bucket->dthb_chain == probe); 6715 ASSERT(b != NULL); 6716 6717 if (b == bucket) { 6718 hash->dth_tab[ndx] = bucket->dthb_next; 6719 } else { 6720 while (b->dthb_next != bucket) 6721 b = b->dthb_next; 6722 b->dthb_next = bucket->dthb_next; 6723 } 6724 6725 ASSERT(hash->dth_nbuckets > 0); 6726 hash->dth_nbuckets--; 6727 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6728 return; 6729 } 6730 6731 bucket->dthb_chain = *nextp; 6732 } else { 6733 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6734 } 6735 6736 if (*nextp != NULL) 6737 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6738 } 6739 6740 /* 6741 * DTrace Utility Functions 6742 * 6743 * These are random utility functions that are _not_ called from probe context. 6744 */ 6745 static int 6746 dtrace_badattr(const dtrace_attribute_t *a) 6747 { 6748 return (a->dtat_name > DTRACE_STABILITY_MAX || 6749 a->dtat_data > DTRACE_STABILITY_MAX || 6750 a->dtat_class > DTRACE_CLASS_MAX); 6751 } 6752 6753 /* 6754 * Return a duplicate copy of a string. If the specified string is NULL, 6755 * this function returns a zero-length string. 6756 */ 6757 static char * 6758 dtrace_strdup(const char *str) 6759 { 6760 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6761 6762 if (str != NULL) 6763 (void) strcpy(new, str); 6764 6765 return (new); 6766 } 6767 6768 #define DTRACE_ISALPHA(c) \ 6769 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6770 6771 static int 6772 dtrace_badname(const char *s) 6773 { 6774 char c; 6775 6776 if (s == NULL || (c = *s++) == '\0') 6777 return (0); 6778 6779 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6780 return (1); 6781 6782 while ((c = *s++) != '\0') { 6783 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6784 c != '-' && c != '_' && c != '.' && c != '`') 6785 return (1); 6786 } 6787 6788 return (0); 6789 } 6790 6791 static void 6792 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6793 { 6794 uint32_t priv; 6795 6796 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6797 /* 6798 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 6799 */ 6800 priv = DTRACE_PRIV_ALL; 6801 } else { 6802 *uidp = crgetuid(cr); 6803 *zoneidp = crgetzoneid(cr); 6804 6805 priv = 0; 6806 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6807 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6808 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6809 priv |= DTRACE_PRIV_USER; 6810 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6811 priv |= DTRACE_PRIV_PROC; 6812 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6813 priv |= DTRACE_PRIV_OWNER; 6814 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6815 priv |= DTRACE_PRIV_ZONEOWNER; 6816 } 6817 6818 *privp = priv; 6819 } 6820 6821 #ifdef DTRACE_ERRDEBUG 6822 static void 6823 dtrace_errdebug(const char *str) 6824 { 6825 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 6826 int occupied = 0; 6827 6828 mutex_enter(&dtrace_errlock); 6829 dtrace_errlast = str; 6830 dtrace_errthread = curthread; 6831 6832 while (occupied++ < DTRACE_ERRHASHSZ) { 6833 if (dtrace_errhash[hval].dter_msg == str) { 6834 dtrace_errhash[hval].dter_count++; 6835 goto out; 6836 } 6837 6838 if (dtrace_errhash[hval].dter_msg != NULL) { 6839 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6840 continue; 6841 } 6842 6843 dtrace_errhash[hval].dter_msg = str; 6844 dtrace_errhash[hval].dter_count = 1; 6845 goto out; 6846 } 6847 6848 panic("dtrace: undersized error hash"); 6849 out: 6850 mutex_exit(&dtrace_errlock); 6851 } 6852 #endif 6853 6854 /* 6855 * DTrace Matching Functions 6856 * 6857 * These functions are used to match groups of probes, given some elements of 6858 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6859 */ 6860 static int 6861 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6862 zoneid_t zoneid) 6863 { 6864 if (priv != DTRACE_PRIV_ALL) { 6865 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6866 uint32_t match = priv & ppriv; 6867 6868 /* 6869 * No PRIV_DTRACE_* privileges... 6870 */ 6871 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6872 DTRACE_PRIV_KERNEL)) == 0) 6873 return (0); 6874 6875 /* 6876 * No matching bits, but there were bits to match... 6877 */ 6878 if (match == 0 && ppriv != 0) 6879 return (0); 6880 6881 /* 6882 * Need to have permissions to the process, but don't... 6883 */ 6884 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6885 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6886 return (0); 6887 } 6888 6889 /* 6890 * Need to be in the same zone unless we possess the 6891 * privilege to examine all zones. 6892 */ 6893 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6894 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6895 return (0); 6896 } 6897 } 6898 6899 return (1); 6900 } 6901 6902 /* 6903 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6904 * consists of input pattern strings and an ops-vector to evaluate them. 6905 * This function returns >0 for match, 0 for no match, and <0 for error. 6906 */ 6907 static int 6908 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6909 uint32_t priv, uid_t uid, zoneid_t zoneid) 6910 { 6911 dtrace_provider_t *pvp = prp->dtpr_provider; 6912 int rv; 6913 6914 if (pvp->dtpv_defunct) 6915 return (0); 6916 6917 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6918 return (rv); 6919 6920 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6921 return (rv); 6922 6923 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6924 return (rv); 6925 6926 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6927 return (rv); 6928 6929 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6930 return (0); 6931 6932 return (rv); 6933 } 6934 6935 /* 6936 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6937 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6938 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6939 * In addition, all of the recursion cases except for '*' matching have been 6940 * unwound. For '*', we still implement recursive evaluation, but a depth 6941 * counter is maintained and matching is aborted if we recurse too deep. 6942 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6943 */ 6944 static int 6945 dtrace_match_glob(const char *s, const char *p, int depth) 6946 { 6947 const char *olds; 6948 char s1, c; 6949 int gs; 6950 6951 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 6952 return (-1); 6953 6954 if (s == NULL) 6955 s = ""; /* treat NULL as empty string */ 6956 6957 top: 6958 olds = s; 6959 s1 = *s++; 6960 6961 if (p == NULL) 6962 return (0); 6963 6964 if ((c = *p++) == '\0') 6965 return (s1 == '\0'); 6966 6967 switch (c) { 6968 case '[': { 6969 int ok = 0, notflag = 0; 6970 char lc = '\0'; 6971 6972 if (s1 == '\0') 6973 return (0); 6974 6975 if (*p == '!') { 6976 notflag = 1; 6977 p++; 6978 } 6979 6980 if ((c = *p++) == '\0') 6981 return (0); 6982 6983 do { 6984 if (c == '-' && lc != '\0' && *p != ']') { 6985 if ((c = *p++) == '\0') 6986 return (0); 6987 if (c == '\\' && (c = *p++) == '\0') 6988 return (0); 6989 6990 if (notflag) { 6991 if (s1 < lc || s1 > c) 6992 ok++; 6993 else 6994 return (0); 6995 } else if (lc <= s1 && s1 <= c) 6996 ok++; 6997 6998 } else if (c == '\\' && (c = *p++) == '\0') 6999 return (0); 7000 7001 lc = c; /* save left-hand 'c' for next iteration */ 7002 7003 if (notflag) { 7004 if (s1 != c) 7005 ok++; 7006 else 7007 return (0); 7008 } else if (s1 == c) 7009 ok++; 7010 7011 if ((c = *p++) == '\0') 7012 return (0); 7013 7014 } while (c != ']'); 7015 7016 if (ok) 7017 goto top; 7018 7019 return (0); 7020 } 7021 7022 case '\\': 7023 if ((c = *p++) == '\0') 7024 return (0); 7025 /*FALLTHRU*/ 7026 7027 default: 7028 if (c != s1) 7029 return (0); 7030 /*FALLTHRU*/ 7031 7032 case '?': 7033 if (s1 != '\0') 7034 goto top; 7035 return (0); 7036 7037 case '*': 7038 while (*p == '*') 7039 p++; /* consecutive *'s are identical to a single one */ 7040 7041 if (*p == '\0') 7042 return (1); 7043 7044 for (s = olds; *s != '\0'; s++) { 7045 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7046 return (gs); 7047 } 7048 7049 return (0); 7050 } 7051 } 7052 7053 /*ARGSUSED*/ 7054 static int 7055 dtrace_match_string(const char *s, const char *p, int depth) 7056 { 7057 return (s != NULL && strcmp(s, p) == 0); 7058 } 7059 7060 /*ARGSUSED*/ 7061 static int 7062 dtrace_match_nul(const char *s, const char *p, int depth) 7063 { 7064 return (1); /* always match the empty pattern */ 7065 } 7066 7067 /*ARGSUSED*/ 7068 static int 7069 dtrace_match_nonzero(const char *s, const char *p, int depth) 7070 { 7071 return (s != NULL && s[0] != '\0'); 7072 } 7073 7074 static int 7075 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7076 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7077 { 7078 dtrace_probe_t template, *probe; 7079 dtrace_hash_t *hash = NULL; 7080 int len, rc, best = INT_MAX, nmatched = 0; 7081 dtrace_id_t i; 7082 7083 ASSERT(MUTEX_HELD(&dtrace_lock)); 7084 7085 /* 7086 * If the probe ID is specified in the key, just lookup by ID and 7087 * invoke the match callback once if a matching probe is found. 7088 */ 7089 if (pkp->dtpk_id != DTRACE_IDNONE) { 7090 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7091 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7092 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL) 7093 return (DTRACE_MATCH_FAIL); 7094 nmatched++; 7095 } 7096 return (nmatched); 7097 } 7098 7099 template.dtpr_mod = (char *)pkp->dtpk_mod; 7100 template.dtpr_func = (char *)pkp->dtpk_func; 7101 template.dtpr_name = (char *)pkp->dtpk_name; 7102 7103 /* 7104 * We want to find the most distinct of the module name, function 7105 * name, and name. So for each one that is not a glob pattern or 7106 * empty string, we perform a lookup in the corresponding hash and 7107 * use the hash table with the fewest collisions to do our search. 7108 */ 7109 if (pkp->dtpk_mmatch == &dtrace_match_string && 7110 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7111 best = len; 7112 hash = dtrace_bymod; 7113 } 7114 7115 if (pkp->dtpk_fmatch == &dtrace_match_string && 7116 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7117 best = len; 7118 hash = dtrace_byfunc; 7119 } 7120 7121 if (pkp->dtpk_nmatch == &dtrace_match_string && 7122 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7123 best = len; 7124 hash = dtrace_byname; 7125 } 7126 7127 /* 7128 * If we did not select a hash table, iterate over every probe and 7129 * invoke our callback for each one that matches our input probe key. 7130 */ 7131 if (hash == NULL) { 7132 for (i = 0; i < dtrace_nprobes; i++) { 7133 if ((probe = dtrace_probes[i]) == NULL || 7134 dtrace_match_probe(probe, pkp, priv, uid, 7135 zoneid) <= 0) 7136 continue; 7137 7138 nmatched++; 7139 7140 if ((rc = (*matched)(probe, arg)) != 7141 DTRACE_MATCH_NEXT) { 7142 if (rc == DTRACE_MATCH_FAIL) 7143 return (DTRACE_MATCH_FAIL); 7144 break; 7145 } 7146 } 7147 7148 return (nmatched); 7149 } 7150 7151 /* 7152 * If we selected a hash table, iterate over each probe of the same key 7153 * name and invoke the callback for every probe that matches the other 7154 * attributes of our input probe key. 7155 */ 7156 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7157 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7158 7159 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7160 continue; 7161 7162 nmatched++; 7163 7164 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) { 7165 if (rc == DTRACE_MATCH_FAIL) 7166 return (DTRACE_MATCH_FAIL); 7167 break; 7168 } 7169 } 7170 7171 return (nmatched); 7172 } 7173 7174 /* 7175 * Return the function pointer dtrace_probecmp() should use to compare the 7176 * specified pattern with a string. For NULL or empty patterns, we select 7177 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7178 * For non-empty non-glob strings, we use dtrace_match_string(). 7179 */ 7180 static dtrace_probekey_f * 7181 dtrace_probekey_func(const char *p) 7182 { 7183 char c; 7184 7185 if (p == NULL || *p == '\0') 7186 return (&dtrace_match_nul); 7187 7188 while ((c = *p++) != '\0') { 7189 if (c == '[' || c == '?' || c == '*' || c == '\\') 7190 return (&dtrace_match_glob); 7191 } 7192 7193 return (&dtrace_match_string); 7194 } 7195 7196 /* 7197 * Build a probe comparison key for use with dtrace_match_probe() from the 7198 * given probe description. By convention, a null key only matches anchored 7199 * probes: if each field is the empty string, reset dtpk_fmatch to 7200 * dtrace_match_nonzero(). 7201 */ 7202 static void 7203 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7204 { 7205 pkp->dtpk_prov = pdp->dtpd_provider; 7206 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7207 7208 pkp->dtpk_mod = pdp->dtpd_mod; 7209 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7210 7211 pkp->dtpk_func = pdp->dtpd_func; 7212 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7213 7214 pkp->dtpk_name = pdp->dtpd_name; 7215 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7216 7217 pkp->dtpk_id = pdp->dtpd_id; 7218 7219 if (pkp->dtpk_id == DTRACE_IDNONE && 7220 pkp->dtpk_pmatch == &dtrace_match_nul && 7221 pkp->dtpk_mmatch == &dtrace_match_nul && 7222 pkp->dtpk_fmatch == &dtrace_match_nul && 7223 pkp->dtpk_nmatch == &dtrace_match_nul) 7224 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7225 } 7226 7227 /* 7228 * DTrace Provider-to-Framework API Functions 7229 * 7230 * These functions implement much of the Provider-to-Framework API, as 7231 * described in <sys/dtrace.h>. The parts of the API not in this section are 7232 * the functions in the API for probe management (found below), and 7233 * dtrace_probe() itself (found above). 7234 */ 7235 7236 /* 7237 * Register the calling provider with the DTrace framework. This should 7238 * generally be called by DTrace providers in their attach(9E) entry point. 7239 */ 7240 int 7241 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7242 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7243 { 7244 dtrace_provider_t *provider; 7245 7246 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7247 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7248 "arguments", name ? name : "<NULL>"); 7249 return (EINVAL); 7250 } 7251 7252 if (name[0] == '\0' || dtrace_badname(name)) { 7253 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7254 "provider name", name); 7255 return (EINVAL); 7256 } 7257 7258 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7259 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7260 pops->dtps_destroy == NULL || 7261 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7262 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7263 "provider ops", name); 7264 return (EINVAL); 7265 } 7266 7267 if (dtrace_badattr(&pap->dtpa_provider) || 7268 dtrace_badattr(&pap->dtpa_mod) || 7269 dtrace_badattr(&pap->dtpa_func) || 7270 dtrace_badattr(&pap->dtpa_name) || 7271 dtrace_badattr(&pap->dtpa_args)) { 7272 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7273 "provider attributes", name); 7274 return (EINVAL); 7275 } 7276 7277 if (priv & ~DTRACE_PRIV_ALL) { 7278 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7279 "privilege attributes", name); 7280 return (EINVAL); 7281 } 7282 7283 if ((priv & DTRACE_PRIV_KERNEL) && 7284 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7285 pops->dtps_mode == NULL) { 7286 cmn_err(CE_WARN, "failed to register provider '%s': need " 7287 "dtps_mode() op for given privilege attributes", name); 7288 return (EINVAL); 7289 } 7290 7291 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7292 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7293 (void) strcpy(provider->dtpv_name, name); 7294 7295 provider->dtpv_attr = *pap; 7296 provider->dtpv_priv.dtpp_flags = priv; 7297 if (cr != NULL) { 7298 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7299 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7300 } 7301 provider->dtpv_pops = *pops; 7302 7303 if (pops->dtps_provide == NULL) { 7304 ASSERT(pops->dtps_provide_module != NULL); 7305 provider->dtpv_pops.dtps_provide = 7306 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 7307 } 7308 7309 if (pops->dtps_provide_module == NULL) { 7310 ASSERT(pops->dtps_provide != NULL); 7311 provider->dtpv_pops.dtps_provide_module = 7312 (void (*)(void *, struct modctl *))dtrace_nullop; 7313 } 7314 7315 if (pops->dtps_suspend == NULL) { 7316 ASSERT(pops->dtps_resume == NULL); 7317 provider->dtpv_pops.dtps_suspend = 7318 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7319 provider->dtpv_pops.dtps_resume = 7320 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7321 } 7322 7323 provider->dtpv_arg = arg; 7324 *idp = (dtrace_provider_id_t)provider; 7325 7326 if (pops == &dtrace_provider_ops) { 7327 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7328 ASSERT(MUTEX_HELD(&dtrace_lock)); 7329 ASSERT(dtrace_anon.dta_enabling == NULL); 7330 7331 /* 7332 * We make sure that the DTrace provider is at the head of 7333 * the provider chain. 7334 */ 7335 provider->dtpv_next = dtrace_provider; 7336 dtrace_provider = provider; 7337 return (0); 7338 } 7339 7340 mutex_enter(&dtrace_provider_lock); 7341 mutex_enter(&dtrace_lock); 7342 7343 /* 7344 * If there is at least one provider registered, we'll add this 7345 * provider after the first provider. 7346 */ 7347 if (dtrace_provider != NULL) { 7348 provider->dtpv_next = dtrace_provider->dtpv_next; 7349 dtrace_provider->dtpv_next = provider; 7350 } else { 7351 dtrace_provider = provider; 7352 } 7353 7354 if (dtrace_retained != NULL) { 7355 dtrace_enabling_provide(provider); 7356 7357 /* 7358 * Now we need to call dtrace_enabling_matchall() -- which 7359 * will acquire cpu_lock and dtrace_lock. We therefore need 7360 * to drop all of our locks before calling into it... 7361 */ 7362 mutex_exit(&dtrace_lock); 7363 mutex_exit(&dtrace_provider_lock); 7364 dtrace_enabling_matchall(); 7365 7366 return (0); 7367 } 7368 7369 mutex_exit(&dtrace_lock); 7370 mutex_exit(&dtrace_provider_lock); 7371 7372 return (0); 7373 } 7374 7375 /* 7376 * Unregister the specified provider from the DTrace framework. This should 7377 * generally be called by DTrace providers in their detach(9E) entry point. 7378 */ 7379 int 7380 dtrace_unregister(dtrace_provider_id_t id) 7381 { 7382 dtrace_provider_t *old = (dtrace_provider_t *)id; 7383 dtrace_provider_t *prev = NULL; 7384 int i, self = 0, noreap = 0; 7385 dtrace_probe_t *probe, *first = NULL; 7386 7387 if (old->dtpv_pops.dtps_enable == 7388 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) { 7389 /* 7390 * If DTrace itself is the provider, we're called with locks 7391 * already held. 7392 */ 7393 ASSERT(old == dtrace_provider); 7394 ASSERT(dtrace_devi != NULL); 7395 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7396 ASSERT(MUTEX_HELD(&dtrace_lock)); 7397 self = 1; 7398 7399 if (dtrace_provider->dtpv_next != NULL) { 7400 /* 7401 * There's another provider here; return failure. 7402 */ 7403 return (EBUSY); 7404 } 7405 } else { 7406 mutex_enter(&dtrace_provider_lock); 7407 mutex_enter(&mod_lock); 7408 mutex_enter(&dtrace_lock); 7409 } 7410 7411 /* 7412 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7413 * probes, we refuse to let providers slither away, unless this 7414 * provider has already been explicitly invalidated. 7415 */ 7416 if (!old->dtpv_defunct && 7417 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7418 dtrace_anon.dta_state->dts_necbs > 0))) { 7419 if (!self) { 7420 mutex_exit(&dtrace_lock); 7421 mutex_exit(&mod_lock); 7422 mutex_exit(&dtrace_provider_lock); 7423 } 7424 return (EBUSY); 7425 } 7426 7427 /* 7428 * Attempt to destroy the probes associated with this provider. 7429 */ 7430 for (i = 0; i < dtrace_nprobes; i++) { 7431 if ((probe = dtrace_probes[i]) == NULL) 7432 continue; 7433 7434 if (probe->dtpr_provider != old) 7435 continue; 7436 7437 if (probe->dtpr_ecb == NULL) 7438 continue; 7439 7440 /* 7441 * If we are trying to unregister a defunct provider, and the 7442 * provider was made defunct within the interval dictated by 7443 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7444 * attempt to reap our enablings. To denote that the provider 7445 * should reattempt to unregister itself at some point in the 7446 * future, we will return a differentiable error code (EAGAIN 7447 * instead of EBUSY) in this case. 7448 */ 7449 if (dtrace_gethrtime() - old->dtpv_defunct > 7450 dtrace_unregister_defunct_reap) 7451 noreap = 1; 7452 7453 if (!self) { 7454 mutex_exit(&dtrace_lock); 7455 mutex_exit(&mod_lock); 7456 mutex_exit(&dtrace_provider_lock); 7457 } 7458 7459 if (noreap) 7460 return (EBUSY); 7461 7462 (void) taskq_dispatch(dtrace_taskq, 7463 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7464 7465 return (EAGAIN); 7466 } 7467 7468 /* 7469 * All of the probes for this provider are disabled; we can safely 7470 * remove all of them from their hash chains and from the probe array. 7471 */ 7472 for (i = 0; i < dtrace_nprobes; i++) { 7473 if ((probe = dtrace_probes[i]) == NULL) 7474 continue; 7475 7476 if (probe->dtpr_provider != old) 7477 continue; 7478 7479 dtrace_probes[i] = NULL; 7480 7481 dtrace_hash_remove(dtrace_bymod, probe); 7482 dtrace_hash_remove(dtrace_byfunc, probe); 7483 dtrace_hash_remove(dtrace_byname, probe); 7484 7485 if (first == NULL) { 7486 first = probe; 7487 probe->dtpr_nextmod = NULL; 7488 } else { 7489 probe->dtpr_nextmod = first; 7490 first = probe; 7491 } 7492 } 7493 7494 /* 7495 * The provider's probes have been removed from the hash chains and 7496 * from the probe array. Now issue a dtrace_sync() to be sure that 7497 * everyone has cleared out from any probe array processing. 7498 */ 7499 dtrace_sync(); 7500 7501 for (probe = first; probe != NULL; probe = first) { 7502 first = probe->dtpr_nextmod; 7503 7504 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7505 probe->dtpr_arg); 7506 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7507 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7508 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7509 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7510 kmem_free(probe, sizeof (dtrace_probe_t)); 7511 } 7512 7513 if ((prev = dtrace_provider) == old) { 7514 ASSERT(self || dtrace_devi == NULL); 7515 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7516 dtrace_provider = old->dtpv_next; 7517 } else { 7518 while (prev != NULL && prev->dtpv_next != old) 7519 prev = prev->dtpv_next; 7520 7521 if (prev == NULL) { 7522 panic("attempt to unregister non-existent " 7523 "dtrace provider %p\n", (void *)id); 7524 } 7525 7526 prev->dtpv_next = old->dtpv_next; 7527 } 7528 7529 if (!self) { 7530 mutex_exit(&dtrace_lock); 7531 mutex_exit(&mod_lock); 7532 mutex_exit(&dtrace_provider_lock); 7533 } 7534 7535 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7536 kmem_free(old, sizeof (dtrace_provider_t)); 7537 7538 return (0); 7539 } 7540 7541 /* 7542 * Invalidate the specified provider. All subsequent probe lookups for the 7543 * specified provider will fail, but its probes will not be removed. 7544 */ 7545 void 7546 dtrace_invalidate(dtrace_provider_id_t id) 7547 { 7548 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7549 7550 ASSERT(pvp->dtpv_pops.dtps_enable != 7551 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7552 7553 mutex_enter(&dtrace_provider_lock); 7554 mutex_enter(&dtrace_lock); 7555 7556 pvp->dtpv_defunct = dtrace_gethrtime(); 7557 7558 mutex_exit(&dtrace_lock); 7559 mutex_exit(&dtrace_provider_lock); 7560 } 7561 7562 /* 7563 * Indicate whether or not DTrace has attached. 7564 */ 7565 int 7566 dtrace_attached(void) 7567 { 7568 /* 7569 * dtrace_provider will be non-NULL iff the DTrace driver has 7570 * attached. (It's non-NULL because DTrace is always itself a 7571 * provider.) 7572 */ 7573 return (dtrace_provider != NULL); 7574 } 7575 7576 /* 7577 * Remove all the unenabled probes for the given provider. This function is 7578 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7579 * -- just as many of its associated probes as it can. 7580 */ 7581 int 7582 dtrace_condense(dtrace_provider_id_t id) 7583 { 7584 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7585 int i; 7586 dtrace_probe_t *probe; 7587 7588 /* 7589 * Make sure this isn't the dtrace provider itself. 7590 */ 7591 ASSERT(prov->dtpv_pops.dtps_enable != 7592 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7593 7594 mutex_enter(&dtrace_provider_lock); 7595 mutex_enter(&dtrace_lock); 7596 7597 /* 7598 * Attempt to destroy the probes associated with this provider. 7599 */ 7600 for (i = 0; i < dtrace_nprobes; i++) { 7601 if ((probe = dtrace_probes[i]) == NULL) 7602 continue; 7603 7604 if (probe->dtpr_provider != prov) 7605 continue; 7606 7607 if (probe->dtpr_ecb != NULL) 7608 continue; 7609 7610 dtrace_probes[i] = NULL; 7611 7612 dtrace_hash_remove(dtrace_bymod, probe); 7613 dtrace_hash_remove(dtrace_byfunc, probe); 7614 dtrace_hash_remove(dtrace_byname, probe); 7615 7616 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7617 probe->dtpr_arg); 7618 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7619 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7620 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7621 kmem_free(probe, sizeof (dtrace_probe_t)); 7622 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7623 } 7624 7625 mutex_exit(&dtrace_lock); 7626 mutex_exit(&dtrace_provider_lock); 7627 7628 return (0); 7629 } 7630 7631 /* 7632 * DTrace Probe Management Functions 7633 * 7634 * The functions in this section perform the DTrace probe management, 7635 * including functions to create probes, look-up probes, and call into the 7636 * providers to request that probes be provided. Some of these functions are 7637 * in the Provider-to-Framework API; these functions can be identified by the 7638 * fact that they are not declared "static". 7639 */ 7640 7641 /* 7642 * Create a probe with the specified module name, function name, and name. 7643 */ 7644 dtrace_id_t 7645 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7646 const char *func, const char *name, int aframes, void *arg) 7647 { 7648 dtrace_probe_t *probe, **probes; 7649 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7650 dtrace_id_t id; 7651 7652 if (provider == dtrace_provider) { 7653 ASSERT(MUTEX_HELD(&dtrace_lock)); 7654 } else { 7655 mutex_enter(&dtrace_lock); 7656 } 7657 7658 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7659 VM_BESTFIT | VM_SLEEP); 7660 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7661 7662 probe->dtpr_id = id; 7663 probe->dtpr_gen = dtrace_probegen++; 7664 probe->dtpr_mod = dtrace_strdup(mod); 7665 probe->dtpr_func = dtrace_strdup(func); 7666 probe->dtpr_name = dtrace_strdup(name); 7667 probe->dtpr_arg = arg; 7668 probe->dtpr_aframes = aframes; 7669 probe->dtpr_provider = provider; 7670 7671 dtrace_hash_add(dtrace_bymod, probe); 7672 dtrace_hash_add(dtrace_byfunc, probe); 7673 dtrace_hash_add(dtrace_byname, probe); 7674 7675 if (id - 1 >= dtrace_nprobes) { 7676 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7677 size_t nsize = osize << 1; 7678 7679 if (nsize == 0) { 7680 ASSERT(osize == 0); 7681 ASSERT(dtrace_probes == NULL); 7682 nsize = sizeof (dtrace_probe_t *); 7683 } 7684 7685 probes = kmem_zalloc(nsize, KM_SLEEP); 7686 7687 if (dtrace_probes == NULL) { 7688 ASSERT(osize == 0); 7689 dtrace_probes = probes; 7690 dtrace_nprobes = 1; 7691 } else { 7692 dtrace_probe_t **oprobes = dtrace_probes; 7693 7694 bcopy(oprobes, probes, osize); 7695 dtrace_membar_producer(); 7696 dtrace_probes = probes; 7697 7698 dtrace_sync(); 7699 7700 /* 7701 * All CPUs are now seeing the new probes array; we can 7702 * safely free the old array. 7703 */ 7704 kmem_free(oprobes, osize); 7705 dtrace_nprobes <<= 1; 7706 } 7707 7708 ASSERT(id - 1 < dtrace_nprobes); 7709 } 7710 7711 ASSERT(dtrace_probes[id - 1] == NULL); 7712 dtrace_probes[id - 1] = probe; 7713 7714 if (provider != dtrace_provider) 7715 mutex_exit(&dtrace_lock); 7716 7717 return (id); 7718 } 7719 7720 static dtrace_probe_t * 7721 dtrace_probe_lookup_id(dtrace_id_t id) 7722 { 7723 ASSERT(MUTEX_HELD(&dtrace_lock)); 7724 7725 if (id == 0 || id > dtrace_nprobes) 7726 return (NULL); 7727 7728 return (dtrace_probes[id - 1]); 7729 } 7730 7731 static int 7732 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7733 { 7734 *((dtrace_id_t *)arg) = probe->dtpr_id; 7735 7736 return (DTRACE_MATCH_DONE); 7737 } 7738 7739 /* 7740 * Look up a probe based on provider and one or more of module name, function 7741 * name and probe name. 7742 */ 7743 dtrace_id_t 7744 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 7745 const char *func, const char *name) 7746 { 7747 dtrace_probekey_t pkey; 7748 dtrace_id_t id; 7749 int match; 7750 7751 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7752 pkey.dtpk_pmatch = &dtrace_match_string; 7753 pkey.dtpk_mod = mod; 7754 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7755 pkey.dtpk_func = func; 7756 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7757 pkey.dtpk_name = name; 7758 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7759 pkey.dtpk_id = DTRACE_IDNONE; 7760 7761 mutex_enter(&dtrace_lock); 7762 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7763 dtrace_probe_lookup_match, &id); 7764 mutex_exit(&dtrace_lock); 7765 7766 ASSERT(match == 1 || match == 0); 7767 return (match ? id : 0); 7768 } 7769 7770 /* 7771 * Returns the probe argument associated with the specified probe. 7772 */ 7773 void * 7774 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7775 { 7776 dtrace_probe_t *probe; 7777 void *rval = NULL; 7778 7779 mutex_enter(&dtrace_lock); 7780 7781 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7782 probe->dtpr_provider == (dtrace_provider_t *)id) 7783 rval = probe->dtpr_arg; 7784 7785 mutex_exit(&dtrace_lock); 7786 7787 return (rval); 7788 } 7789 7790 /* 7791 * Copy a probe into a probe description. 7792 */ 7793 static void 7794 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7795 { 7796 bzero(pdp, sizeof (dtrace_probedesc_t)); 7797 pdp->dtpd_id = prp->dtpr_id; 7798 7799 (void) strncpy(pdp->dtpd_provider, 7800 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7801 7802 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7803 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7804 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7805 } 7806 7807 /* 7808 * Called to indicate that a probe -- or probes -- should be provided by a 7809 * specfied provider. If the specified description is NULL, the provider will 7810 * be told to provide all of its probes. (This is done whenever a new 7811 * consumer comes along, or whenever a retained enabling is to be matched.) If 7812 * the specified description is non-NULL, the provider is given the 7813 * opportunity to dynamically provide the specified probe, allowing providers 7814 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7815 * probes.) If the provider is NULL, the operations will be applied to all 7816 * providers; if the provider is non-NULL the operations will only be applied 7817 * to the specified provider. The dtrace_provider_lock must be held, and the 7818 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7819 * will need to grab the dtrace_lock when it reenters the framework through 7820 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7821 */ 7822 static void 7823 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7824 { 7825 struct modctl *ctl; 7826 int all = 0; 7827 7828 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7829 7830 if (prv == NULL) { 7831 all = 1; 7832 prv = dtrace_provider; 7833 } 7834 7835 do { 7836 /* 7837 * First, call the blanket provide operation. 7838 */ 7839 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7840 7841 /* 7842 * Now call the per-module provide operation. We will grab 7843 * mod_lock to prevent the list from being modified. Note 7844 * that this also prevents the mod_busy bits from changing. 7845 * (mod_busy can only be changed with mod_lock held.) 7846 */ 7847 mutex_enter(&mod_lock); 7848 7849 ctl = &modules; 7850 do { 7851 if (ctl->mod_busy || ctl->mod_mp == NULL) 7852 continue; 7853 7854 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7855 7856 } while ((ctl = ctl->mod_next) != &modules); 7857 7858 mutex_exit(&mod_lock); 7859 } while (all && (prv = prv->dtpv_next) != NULL); 7860 } 7861 7862 /* 7863 * Iterate over each probe, and call the Framework-to-Provider API function 7864 * denoted by offs. 7865 */ 7866 static void 7867 dtrace_probe_foreach(uintptr_t offs) 7868 { 7869 dtrace_provider_t *prov; 7870 void (*func)(void *, dtrace_id_t, void *); 7871 dtrace_probe_t *probe; 7872 dtrace_icookie_t cookie; 7873 int i; 7874 7875 /* 7876 * We disable interrupts to walk through the probe array. This is 7877 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7878 * won't see stale data. 7879 */ 7880 cookie = dtrace_interrupt_disable(); 7881 7882 for (i = 0; i < dtrace_nprobes; i++) { 7883 if ((probe = dtrace_probes[i]) == NULL) 7884 continue; 7885 7886 if (probe->dtpr_ecb == NULL) { 7887 /* 7888 * This probe isn't enabled -- don't call the function. 7889 */ 7890 continue; 7891 } 7892 7893 prov = probe->dtpr_provider; 7894 func = *((void(**)(void *, dtrace_id_t, void *)) 7895 ((uintptr_t)&prov->dtpv_pops + offs)); 7896 7897 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7898 } 7899 7900 dtrace_interrupt_enable(cookie); 7901 } 7902 7903 static int 7904 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7905 { 7906 dtrace_probekey_t pkey; 7907 uint32_t priv; 7908 uid_t uid; 7909 zoneid_t zoneid; 7910 7911 ASSERT(MUTEX_HELD(&dtrace_lock)); 7912 dtrace_ecb_create_cache = NULL; 7913 7914 if (desc == NULL) { 7915 /* 7916 * If we're passed a NULL description, we're being asked to 7917 * create an ECB with a NULL probe. 7918 */ 7919 (void) dtrace_ecb_create_enable(NULL, enab); 7920 return (0); 7921 } 7922 7923 dtrace_probekey(desc, &pkey); 7924 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7925 &priv, &uid, &zoneid); 7926 7927 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7928 enab)); 7929 } 7930 7931 /* 7932 * DTrace Helper Provider Functions 7933 */ 7934 static void 7935 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7936 { 7937 attr->dtat_name = DOF_ATTR_NAME(dofattr); 7938 attr->dtat_data = DOF_ATTR_DATA(dofattr); 7939 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 7940 } 7941 7942 static void 7943 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 7944 const dof_provider_t *dofprov, char *strtab) 7945 { 7946 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 7947 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 7948 dofprov->dofpv_provattr); 7949 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 7950 dofprov->dofpv_modattr); 7951 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 7952 dofprov->dofpv_funcattr); 7953 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 7954 dofprov->dofpv_nameattr); 7955 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 7956 dofprov->dofpv_argsattr); 7957 } 7958 7959 static void 7960 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7961 { 7962 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7963 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7964 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 7965 dof_provider_t *provider; 7966 dof_probe_t *probe; 7967 uint32_t *off, *enoff; 7968 uint8_t *arg; 7969 char *strtab; 7970 uint_t i, nprobes; 7971 dtrace_helper_provdesc_t dhpv; 7972 dtrace_helper_probedesc_t dhpb; 7973 dtrace_meta_t *meta = dtrace_meta_pid; 7974 dtrace_mops_t *mops = &meta->dtm_mops; 7975 void *parg; 7976 7977 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7978 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7979 provider->dofpv_strtab * dof->dofh_secsize); 7980 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7981 provider->dofpv_probes * dof->dofh_secsize); 7982 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7983 provider->dofpv_prargs * dof->dofh_secsize); 7984 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7985 provider->dofpv_proffs * dof->dofh_secsize); 7986 7987 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7988 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 7989 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 7990 enoff = NULL; 7991 7992 /* 7993 * See dtrace_helper_provider_validate(). 7994 */ 7995 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 7996 provider->dofpv_prenoffs != DOF_SECT_NONE) { 7997 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7998 provider->dofpv_prenoffs * dof->dofh_secsize); 7999 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8000 } 8001 8002 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8003 8004 /* 8005 * Create the provider. 8006 */ 8007 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8008 8009 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8010 return; 8011 8012 meta->dtm_count++; 8013 8014 /* 8015 * Create the probes. 8016 */ 8017 for (i = 0; i < nprobes; i++) { 8018 probe = (dof_probe_t *)(uintptr_t)(daddr + 8019 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8020 8021 dhpb.dthpb_mod = dhp->dofhp_mod; 8022 dhpb.dthpb_func = strtab + probe->dofpr_func; 8023 dhpb.dthpb_name = strtab + probe->dofpr_name; 8024 dhpb.dthpb_base = probe->dofpr_addr; 8025 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8026 dhpb.dthpb_noffs = probe->dofpr_noffs; 8027 if (enoff != NULL) { 8028 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8029 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8030 } else { 8031 dhpb.dthpb_enoffs = NULL; 8032 dhpb.dthpb_nenoffs = 0; 8033 } 8034 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8035 dhpb.dthpb_nargc = probe->dofpr_nargc; 8036 dhpb.dthpb_xargc = probe->dofpr_xargc; 8037 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8038 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8039 8040 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8041 } 8042 } 8043 8044 static void 8045 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8046 { 8047 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8048 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8049 int i; 8050 8051 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8052 8053 for (i = 0; i < dof->dofh_secnum; i++) { 8054 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8055 dof->dofh_secoff + i * dof->dofh_secsize); 8056 8057 if (sec->dofs_type != DOF_SECT_PROVIDER) 8058 continue; 8059 8060 dtrace_helper_provide_one(dhp, sec, pid); 8061 } 8062 8063 /* 8064 * We may have just created probes, so we must now rematch against 8065 * any retained enablings. Note that this call will acquire both 8066 * cpu_lock and dtrace_lock; the fact that we are holding 8067 * dtrace_meta_lock now is what defines the ordering with respect to 8068 * these three locks. 8069 */ 8070 dtrace_enabling_matchall(); 8071 } 8072 8073 static void 8074 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8075 { 8076 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8077 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8078 dof_sec_t *str_sec; 8079 dof_provider_t *provider; 8080 char *strtab; 8081 dtrace_helper_provdesc_t dhpv; 8082 dtrace_meta_t *meta = dtrace_meta_pid; 8083 dtrace_mops_t *mops = &meta->dtm_mops; 8084 8085 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8086 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8087 provider->dofpv_strtab * dof->dofh_secsize); 8088 8089 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8090 8091 /* 8092 * Create the provider. 8093 */ 8094 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8095 8096 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8097 8098 meta->dtm_count--; 8099 } 8100 8101 static void 8102 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8103 { 8104 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8105 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8106 int i; 8107 8108 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8109 8110 for (i = 0; i < dof->dofh_secnum; i++) { 8111 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8112 dof->dofh_secoff + i * dof->dofh_secsize); 8113 8114 if (sec->dofs_type != DOF_SECT_PROVIDER) 8115 continue; 8116 8117 dtrace_helper_provider_remove_one(dhp, sec, pid); 8118 } 8119 } 8120 8121 /* 8122 * DTrace Meta Provider-to-Framework API Functions 8123 * 8124 * These functions implement the Meta Provider-to-Framework API, as described 8125 * in <sys/dtrace.h>. 8126 */ 8127 int 8128 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8129 dtrace_meta_provider_id_t *idp) 8130 { 8131 dtrace_meta_t *meta; 8132 dtrace_helpers_t *help, *next; 8133 int i; 8134 8135 *idp = DTRACE_METAPROVNONE; 8136 8137 /* 8138 * We strictly don't need the name, but we hold onto it for 8139 * debuggability. All hail error queues! 8140 */ 8141 if (name == NULL) { 8142 cmn_err(CE_WARN, "failed to register meta-provider: " 8143 "invalid name"); 8144 return (EINVAL); 8145 } 8146 8147 if (mops == NULL || 8148 mops->dtms_create_probe == NULL || 8149 mops->dtms_provide_pid == NULL || 8150 mops->dtms_remove_pid == NULL) { 8151 cmn_err(CE_WARN, "failed to register meta-register %s: " 8152 "invalid ops", name); 8153 return (EINVAL); 8154 } 8155 8156 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8157 meta->dtm_mops = *mops; 8158 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8159 (void) strcpy(meta->dtm_name, name); 8160 meta->dtm_arg = arg; 8161 8162 mutex_enter(&dtrace_meta_lock); 8163 mutex_enter(&dtrace_lock); 8164 8165 if (dtrace_meta_pid != NULL) { 8166 mutex_exit(&dtrace_lock); 8167 mutex_exit(&dtrace_meta_lock); 8168 cmn_err(CE_WARN, "failed to register meta-register %s: " 8169 "user-land meta-provider exists", name); 8170 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8171 kmem_free(meta, sizeof (dtrace_meta_t)); 8172 return (EINVAL); 8173 } 8174 8175 dtrace_meta_pid = meta; 8176 *idp = (dtrace_meta_provider_id_t)meta; 8177 8178 /* 8179 * If there are providers and probes ready to go, pass them 8180 * off to the new meta provider now. 8181 */ 8182 8183 help = dtrace_deferred_pid; 8184 dtrace_deferred_pid = NULL; 8185 8186 mutex_exit(&dtrace_lock); 8187 8188 while (help != NULL) { 8189 for (i = 0; i < help->dthps_nprovs; i++) { 8190 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8191 help->dthps_pid); 8192 } 8193 8194 next = help->dthps_next; 8195 help->dthps_next = NULL; 8196 help->dthps_prev = NULL; 8197 help->dthps_deferred = 0; 8198 help = next; 8199 } 8200 8201 mutex_exit(&dtrace_meta_lock); 8202 8203 return (0); 8204 } 8205 8206 int 8207 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8208 { 8209 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8210 8211 mutex_enter(&dtrace_meta_lock); 8212 mutex_enter(&dtrace_lock); 8213 8214 if (old == dtrace_meta_pid) { 8215 pp = &dtrace_meta_pid; 8216 } else { 8217 panic("attempt to unregister non-existent " 8218 "dtrace meta-provider %p\n", (void *)old); 8219 } 8220 8221 if (old->dtm_count != 0) { 8222 mutex_exit(&dtrace_lock); 8223 mutex_exit(&dtrace_meta_lock); 8224 return (EBUSY); 8225 } 8226 8227 *pp = NULL; 8228 8229 mutex_exit(&dtrace_lock); 8230 mutex_exit(&dtrace_meta_lock); 8231 8232 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8233 kmem_free(old, sizeof (dtrace_meta_t)); 8234 8235 return (0); 8236 } 8237 8238 8239 /* 8240 * DTrace DIF Object Functions 8241 */ 8242 static int 8243 dtrace_difo_err(uint_t pc, const char *format, ...) 8244 { 8245 if (dtrace_err_verbose) { 8246 va_list alist; 8247 8248 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8249 va_start(alist, format); 8250 (void) vuprintf(format, alist); 8251 va_end(alist); 8252 } 8253 8254 #ifdef DTRACE_ERRDEBUG 8255 dtrace_errdebug(format); 8256 #endif 8257 return (1); 8258 } 8259 8260 /* 8261 * Validate a DTrace DIF object by checking the IR instructions. The following 8262 * rules are currently enforced by dtrace_difo_validate(): 8263 * 8264 * 1. Each instruction must have a valid opcode 8265 * 2. Each register, string, variable, or subroutine reference must be valid 8266 * 3. No instruction can modify register %r0 (must be zero) 8267 * 4. All instruction reserved bits must be set to zero 8268 * 5. The last instruction must be a "ret" instruction 8269 * 6. All branch targets must reference a valid instruction _after_ the branch 8270 */ 8271 static int 8272 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8273 cred_t *cr) 8274 { 8275 int err = 0, i; 8276 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8277 int kcheckload; 8278 uint_t pc; 8279 8280 kcheckload = cr == NULL || 8281 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8282 8283 dp->dtdo_destructive = 0; 8284 8285 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8286 dif_instr_t instr = dp->dtdo_buf[pc]; 8287 8288 uint_t r1 = DIF_INSTR_R1(instr); 8289 uint_t r2 = DIF_INSTR_R2(instr); 8290 uint_t rd = DIF_INSTR_RD(instr); 8291 uint_t rs = DIF_INSTR_RS(instr); 8292 uint_t label = DIF_INSTR_LABEL(instr); 8293 uint_t v = DIF_INSTR_VAR(instr); 8294 uint_t subr = DIF_INSTR_SUBR(instr); 8295 uint_t type = DIF_INSTR_TYPE(instr); 8296 uint_t op = DIF_INSTR_OP(instr); 8297 8298 switch (op) { 8299 case DIF_OP_OR: 8300 case DIF_OP_XOR: 8301 case DIF_OP_AND: 8302 case DIF_OP_SLL: 8303 case DIF_OP_SRL: 8304 case DIF_OP_SRA: 8305 case DIF_OP_SUB: 8306 case DIF_OP_ADD: 8307 case DIF_OP_MUL: 8308 case DIF_OP_SDIV: 8309 case DIF_OP_UDIV: 8310 case DIF_OP_SREM: 8311 case DIF_OP_UREM: 8312 case DIF_OP_COPYS: 8313 if (r1 >= nregs) 8314 err += efunc(pc, "invalid register %u\n", r1); 8315 if (r2 >= nregs) 8316 err += efunc(pc, "invalid register %u\n", r2); 8317 if (rd >= nregs) 8318 err += efunc(pc, "invalid register %u\n", rd); 8319 if (rd == 0) 8320 err += efunc(pc, "cannot write to %r0\n"); 8321 break; 8322 case DIF_OP_NOT: 8323 case DIF_OP_MOV: 8324 case DIF_OP_ALLOCS: 8325 if (r1 >= nregs) 8326 err += efunc(pc, "invalid register %u\n", r1); 8327 if (r2 != 0) 8328 err += efunc(pc, "non-zero reserved bits\n"); 8329 if (rd >= nregs) 8330 err += efunc(pc, "invalid register %u\n", rd); 8331 if (rd == 0) 8332 err += efunc(pc, "cannot write to %r0\n"); 8333 break; 8334 case DIF_OP_LDSB: 8335 case DIF_OP_LDSH: 8336 case DIF_OP_LDSW: 8337 case DIF_OP_LDUB: 8338 case DIF_OP_LDUH: 8339 case DIF_OP_LDUW: 8340 case DIF_OP_LDX: 8341 if (r1 >= nregs) 8342 err += efunc(pc, "invalid register %u\n", r1); 8343 if (r2 != 0) 8344 err += efunc(pc, "non-zero reserved bits\n"); 8345 if (rd >= nregs) 8346 err += efunc(pc, "invalid register %u\n", rd); 8347 if (rd == 0) 8348 err += efunc(pc, "cannot write to %r0\n"); 8349 if (kcheckload) 8350 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8351 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8352 break; 8353 case DIF_OP_RLDSB: 8354 case DIF_OP_RLDSH: 8355 case DIF_OP_RLDSW: 8356 case DIF_OP_RLDUB: 8357 case DIF_OP_RLDUH: 8358 case DIF_OP_RLDUW: 8359 case DIF_OP_RLDX: 8360 if (r1 >= nregs) 8361 err += efunc(pc, "invalid register %u\n", r1); 8362 if (r2 != 0) 8363 err += efunc(pc, "non-zero reserved bits\n"); 8364 if (rd >= nregs) 8365 err += efunc(pc, "invalid register %u\n", rd); 8366 if (rd == 0) 8367 err += efunc(pc, "cannot write to %r0\n"); 8368 break; 8369 case DIF_OP_ULDSB: 8370 case DIF_OP_ULDSH: 8371 case DIF_OP_ULDSW: 8372 case DIF_OP_ULDUB: 8373 case DIF_OP_ULDUH: 8374 case DIF_OP_ULDUW: 8375 case DIF_OP_ULDX: 8376 if (r1 >= nregs) 8377 err += efunc(pc, "invalid register %u\n", r1); 8378 if (r2 != 0) 8379 err += efunc(pc, "non-zero reserved bits\n"); 8380 if (rd >= nregs) 8381 err += efunc(pc, "invalid register %u\n", rd); 8382 if (rd == 0) 8383 err += efunc(pc, "cannot write to %r0\n"); 8384 break; 8385 case DIF_OP_STB: 8386 case DIF_OP_STH: 8387 case DIF_OP_STW: 8388 case DIF_OP_STX: 8389 if (r1 >= nregs) 8390 err += efunc(pc, "invalid register %u\n", r1); 8391 if (r2 != 0) 8392 err += efunc(pc, "non-zero reserved bits\n"); 8393 if (rd >= nregs) 8394 err += efunc(pc, "invalid register %u\n", rd); 8395 if (rd == 0) 8396 err += efunc(pc, "cannot write to 0 address\n"); 8397 break; 8398 case DIF_OP_CMP: 8399 case DIF_OP_SCMP: 8400 if (r1 >= nregs) 8401 err += efunc(pc, "invalid register %u\n", r1); 8402 if (r2 >= nregs) 8403 err += efunc(pc, "invalid register %u\n", r2); 8404 if (rd != 0) 8405 err += efunc(pc, "non-zero reserved bits\n"); 8406 break; 8407 case DIF_OP_TST: 8408 if (r1 >= nregs) 8409 err += efunc(pc, "invalid register %u\n", r1); 8410 if (r2 != 0 || rd != 0) 8411 err += efunc(pc, "non-zero reserved bits\n"); 8412 break; 8413 case DIF_OP_BA: 8414 case DIF_OP_BE: 8415 case DIF_OP_BNE: 8416 case DIF_OP_BG: 8417 case DIF_OP_BGU: 8418 case DIF_OP_BGE: 8419 case DIF_OP_BGEU: 8420 case DIF_OP_BL: 8421 case DIF_OP_BLU: 8422 case DIF_OP_BLE: 8423 case DIF_OP_BLEU: 8424 if (label >= dp->dtdo_len) { 8425 err += efunc(pc, "invalid branch target %u\n", 8426 label); 8427 } 8428 if (label <= pc) { 8429 err += efunc(pc, "backward branch to %u\n", 8430 label); 8431 } 8432 break; 8433 case DIF_OP_RET: 8434 if (r1 != 0 || r2 != 0) 8435 err += efunc(pc, "non-zero reserved bits\n"); 8436 if (rd >= nregs) 8437 err += efunc(pc, "invalid register %u\n", rd); 8438 break; 8439 case DIF_OP_NOP: 8440 case DIF_OP_POPTS: 8441 case DIF_OP_FLUSHTS: 8442 if (r1 != 0 || r2 != 0 || rd != 0) 8443 err += efunc(pc, "non-zero reserved bits\n"); 8444 break; 8445 case DIF_OP_SETX: 8446 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8447 err += efunc(pc, "invalid integer ref %u\n", 8448 DIF_INSTR_INTEGER(instr)); 8449 } 8450 if (rd >= nregs) 8451 err += efunc(pc, "invalid register %u\n", rd); 8452 if (rd == 0) 8453 err += efunc(pc, "cannot write to %r0\n"); 8454 break; 8455 case DIF_OP_SETS: 8456 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8457 err += efunc(pc, "invalid string ref %u\n", 8458 DIF_INSTR_STRING(instr)); 8459 } 8460 if (rd >= nregs) 8461 err += efunc(pc, "invalid register %u\n", rd); 8462 if (rd == 0) 8463 err += efunc(pc, "cannot write to %r0\n"); 8464 break; 8465 case DIF_OP_LDGA: 8466 case DIF_OP_LDTA: 8467 if (r1 > DIF_VAR_ARRAY_MAX) 8468 err += efunc(pc, "invalid array %u\n", r1); 8469 if (r2 >= nregs) 8470 err += efunc(pc, "invalid register %u\n", r2); 8471 if (rd >= nregs) 8472 err += efunc(pc, "invalid register %u\n", rd); 8473 if (rd == 0) 8474 err += efunc(pc, "cannot write to %r0\n"); 8475 break; 8476 case DIF_OP_LDGS: 8477 case DIF_OP_LDTS: 8478 case DIF_OP_LDLS: 8479 case DIF_OP_LDGAA: 8480 case DIF_OP_LDTAA: 8481 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8482 err += efunc(pc, "invalid variable %u\n", v); 8483 if (rd >= nregs) 8484 err += efunc(pc, "invalid register %u\n", rd); 8485 if (rd == 0) 8486 err += efunc(pc, "cannot write to %r0\n"); 8487 break; 8488 case DIF_OP_STGS: 8489 case DIF_OP_STTS: 8490 case DIF_OP_STLS: 8491 case DIF_OP_STGAA: 8492 case DIF_OP_STTAA: 8493 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8494 err += efunc(pc, "invalid variable %u\n", v); 8495 if (rs >= nregs) 8496 err += efunc(pc, "invalid register %u\n", rd); 8497 break; 8498 case DIF_OP_CALL: 8499 if (subr > DIF_SUBR_MAX) 8500 err += efunc(pc, "invalid subr %u\n", subr); 8501 if (rd >= nregs) 8502 err += efunc(pc, "invalid register %u\n", rd); 8503 if (rd == 0) 8504 err += efunc(pc, "cannot write to %r0\n"); 8505 8506 if (subr == DIF_SUBR_COPYOUT || 8507 subr == DIF_SUBR_COPYOUTSTR) { 8508 dp->dtdo_destructive = 1; 8509 } 8510 8511 if (subr == DIF_SUBR_GETF) { 8512 /* 8513 * If we have a getf() we need to record that 8514 * in our state. Note that our state can be 8515 * NULL if this is a helper -- but in that 8516 * case, the call to getf() is itself illegal, 8517 * and will be caught (slightly later) when 8518 * the helper is validated. 8519 */ 8520 if (vstate->dtvs_state != NULL) 8521 vstate->dtvs_state->dts_getf++; 8522 } 8523 8524 break; 8525 case DIF_OP_PUSHTR: 8526 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8527 err += efunc(pc, "invalid ref type %u\n", type); 8528 if (r2 >= nregs) 8529 err += efunc(pc, "invalid register %u\n", r2); 8530 if (rs >= nregs) 8531 err += efunc(pc, "invalid register %u\n", rs); 8532 break; 8533 case DIF_OP_PUSHTV: 8534 if (type != DIF_TYPE_CTF) 8535 err += efunc(pc, "invalid val type %u\n", type); 8536 if (r2 >= nregs) 8537 err += efunc(pc, "invalid register %u\n", r2); 8538 if (rs >= nregs) 8539 err += efunc(pc, "invalid register %u\n", rs); 8540 break; 8541 default: 8542 err += efunc(pc, "invalid opcode %u\n", 8543 DIF_INSTR_OP(instr)); 8544 } 8545 } 8546 8547 if (dp->dtdo_len != 0 && 8548 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8549 err += efunc(dp->dtdo_len - 1, 8550 "expected 'ret' as last DIF instruction\n"); 8551 } 8552 8553 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8554 /* 8555 * If we're not returning by reference, the size must be either 8556 * 0 or the size of one of the base types. 8557 */ 8558 switch (dp->dtdo_rtype.dtdt_size) { 8559 case 0: 8560 case sizeof (uint8_t): 8561 case sizeof (uint16_t): 8562 case sizeof (uint32_t): 8563 case sizeof (uint64_t): 8564 break; 8565 8566 default: 8567 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 8568 } 8569 } 8570 8571 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8572 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8573 dtrace_diftype_t *vt, *et; 8574 uint_t id, ndx; 8575 8576 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8577 v->dtdv_scope != DIFV_SCOPE_THREAD && 8578 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8579 err += efunc(i, "unrecognized variable scope %d\n", 8580 v->dtdv_scope); 8581 break; 8582 } 8583 8584 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8585 v->dtdv_kind != DIFV_KIND_SCALAR) { 8586 err += efunc(i, "unrecognized variable type %d\n", 8587 v->dtdv_kind); 8588 break; 8589 } 8590 8591 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8592 err += efunc(i, "%d exceeds variable id limit\n", id); 8593 break; 8594 } 8595 8596 if (id < DIF_VAR_OTHER_UBASE) 8597 continue; 8598 8599 /* 8600 * For user-defined variables, we need to check that this 8601 * definition is identical to any previous definition that we 8602 * encountered. 8603 */ 8604 ndx = id - DIF_VAR_OTHER_UBASE; 8605 8606 switch (v->dtdv_scope) { 8607 case DIFV_SCOPE_GLOBAL: 8608 if (ndx < vstate->dtvs_nglobals) { 8609 dtrace_statvar_t *svar; 8610 8611 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8612 existing = &svar->dtsv_var; 8613 } 8614 8615 break; 8616 8617 case DIFV_SCOPE_THREAD: 8618 if (ndx < vstate->dtvs_ntlocals) 8619 existing = &vstate->dtvs_tlocals[ndx]; 8620 break; 8621 8622 case DIFV_SCOPE_LOCAL: 8623 if (ndx < vstate->dtvs_nlocals) { 8624 dtrace_statvar_t *svar; 8625 8626 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8627 existing = &svar->dtsv_var; 8628 } 8629 8630 break; 8631 } 8632 8633 vt = &v->dtdv_type; 8634 8635 if (vt->dtdt_flags & DIF_TF_BYREF) { 8636 if (vt->dtdt_size == 0) { 8637 err += efunc(i, "zero-sized variable\n"); 8638 break; 8639 } 8640 8641 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8642 vt->dtdt_size > dtrace_global_maxsize) { 8643 err += efunc(i, "oversized by-ref global\n"); 8644 break; 8645 } 8646 } 8647 8648 if (existing == NULL || existing->dtdv_id == 0) 8649 continue; 8650 8651 ASSERT(existing->dtdv_id == v->dtdv_id); 8652 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8653 8654 if (existing->dtdv_kind != v->dtdv_kind) 8655 err += efunc(i, "%d changed variable kind\n", id); 8656 8657 et = &existing->dtdv_type; 8658 8659 if (vt->dtdt_flags != et->dtdt_flags) { 8660 err += efunc(i, "%d changed variable type flags\n", id); 8661 break; 8662 } 8663 8664 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8665 err += efunc(i, "%d changed variable type size\n", id); 8666 break; 8667 } 8668 } 8669 8670 return (err); 8671 } 8672 8673 /* 8674 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 8675 * are much more constrained than normal DIFOs. Specifically, they may 8676 * not: 8677 * 8678 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8679 * miscellaneous string routines 8680 * 2. Access DTrace variables other than the args[] array, and the 8681 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8682 * 3. Have thread-local variables. 8683 * 4. Have dynamic variables. 8684 */ 8685 static int 8686 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8687 { 8688 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8689 int err = 0; 8690 uint_t pc; 8691 8692 for (pc = 0; pc < dp->dtdo_len; pc++) { 8693 dif_instr_t instr = dp->dtdo_buf[pc]; 8694 8695 uint_t v = DIF_INSTR_VAR(instr); 8696 uint_t subr = DIF_INSTR_SUBR(instr); 8697 uint_t op = DIF_INSTR_OP(instr); 8698 8699 switch (op) { 8700 case DIF_OP_OR: 8701 case DIF_OP_XOR: 8702 case DIF_OP_AND: 8703 case DIF_OP_SLL: 8704 case DIF_OP_SRL: 8705 case DIF_OP_SRA: 8706 case DIF_OP_SUB: 8707 case DIF_OP_ADD: 8708 case DIF_OP_MUL: 8709 case DIF_OP_SDIV: 8710 case DIF_OP_UDIV: 8711 case DIF_OP_SREM: 8712 case DIF_OP_UREM: 8713 case DIF_OP_COPYS: 8714 case DIF_OP_NOT: 8715 case DIF_OP_MOV: 8716 case DIF_OP_RLDSB: 8717 case DIF_OP_RLDSH: 8718 case DIF_OP_RLDSW: 8719 case DIF_OP_RLDUB: 8720 case DIF_OP_RLDUH: 8721 case DIF_OP_RLDUW: 8722 case DIF_OP_RLDX: 8723 case DIF_OP_ULDSB: 8724 case DIF_OP_ULDSH: 8725 case DIF_OP_ULDSW: 8726 case DIF_OP_ULDUB: 8727 case DIF_OP_ULDUH: 8728 case DIF_OP_ULDUW: 8729 case DIF_OP_ULDX: 8730 case DIF_OP_STB: 8731 case DIF_OP_STH: 8732 case DIF_OP_STW: 8733 case DIF_OP_STX: 8734 case DIF_OP_ALLOCS: 8735 case DIF_OP_CMP: 8736 case DIF_OP_SCMP: 8737 case DIF_OP_TST: 8738 case DIF_OP_BA: 8739 case DIF_OP_BE: 8740 case DIF_OP_BNE: 8741 case DIF_OP_BG: 8742 case DIF_OP_BGU: 8743 case DIF_OP_BGE: 8744 case DIF_OP_BGEU: 8745 case DIF_OP_BL: 8746 case DIF_OP_BLU: 8747 case DIF_OP_BLE: 8748 case DIF_OP_BLEU: 8749 case DIF_OP_RET: 8750 case DIF_OP_NOP: 8751 case DIF_OP_POPTS: 8752 case DIF_OP_FLUSHTS: 8753 case DIF_OP_SETX: 8754 case DIF_OP_SETS: 8755 case DIF_OP_LDGA: 8756 case DIF_OP_LDLS: 8757 case DIF_OP_STGS: 8758 case DIF_OP_STLS: 8759 case DIF_OP_PUSHTR: 8760 case DIF_OP_PUSHTV: 8761 break; 8762 8763 case DIF_OP_LDGS: 8764 if (v >= DIF_VAR_OTHER_UBASE) 8765 break; 8766 8767 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8768 break; 8769 8770 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8771 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8772 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8773 v == DIF_VAR_UID || v == DIF_VAR_GID) 8774 break; 8775 8776 err += efunc(pc, "illegal variable %u\n", v); 8777 break; 8778 8779 case DIF_OP_LDTA: 8780 case DIF_OP_LDTS: 8781 case DIF_OP_LDGAA: 8782 case DIF_OP_LDTAA: 8783 err += efunc(pc, "illegal dynamic variable load\n"); 8784 break; 8785 8786 case DIF_OP_STTS: 8787 case DIF_OP_STGAA: 8788 case DIF_OP_STTAA: 8789 err += efunc(pc, "illegal dynamic variable store\n"); 8790 break; 8791 8792 case DIF_OP_CALL: 8793 if (subr == DIF_SUBR_ALLOCA || 8794 subr == DIF_SUBR_BCOPY || 8795 subr == DIF_SUBR_COPYIN || 8796 subr == DIF_SUBR_COPYINTO || 8797 subr == DIF_SUBR_COPYINSTR || 8798 subr == DIF_SUBR_INDEX || 8799 subr == DIF_SUBR_INET_NTOA || 8800 subr == DIF_SUBR_INET_NTOA6 || 8801 subr == DIF_SUBR_INET_NTOP || 8802 subr == DIF_SUBR_LLTOSTR || 8803 subr == DIF_SUBR_RINDEX || 8804 subr == DIF_SUBR_STRCHR || 8805 subr == DIF_SUBR_STRJOIN || 8806 subr == DIF_SUBR_STRRCHR || 8807 subr == DIF_SUBR_STRSTR || 8808 subr == DIF_SUBR_HTONS || 8809 subr == DIF_SUBR_HTONL || 8810 subr == DIF_SUBR_HTONLL || 8811 subr == DIF_SUBR_NTOHS || 8812 subr == DIF_SUBR_NTOHL || 8813 subr == DIF_SUBR_NTOHLL) 8814 break; 8815 8816 err += efunc(pc, "invalid subr %u\n", subr); 8817 break; 8818 8819 default: 8820 err += efunc(pc, "invalid opcode %u\n", 8821 DIF_INSTR_OP(instr)); 8822 } 8823 } 8824 8825 return (err); 8826 } 8827 8828 /* 8829 * Returns 1 if the expression in the DIF object can be cached on a per-thread 8830 * basis; 0 if not. 8831 */ 8832 static int 8833 dtrace_difo_cacheable(dtrace_difo_t *dp) 8834 { 8835 int i; 8836 8837 if (dp == NULL) 8838 return (0); 8839 8840 for (i = 0; i < dp->dtdo_varlen; i++) { 8841 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8842 8843 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8844 continue; 8845 8846 switch (v->dtdv_id) { 8847 case DIF_VAR_CURTHREAD: 8848 case DIF_VAR_PID: 8849 case DIF_VAR_TID: 8850 case DIF_VAR_EXECNAME: 8851 case DIF_VAR_ZONENAME: 8852 break; 8853 8854 default: 8855 return (0); 8856 } 8857 } 8858 8859 /* 8860 * This DIF object may be cacheable. Now we need to look for any 8861 * array loading instructions, any memory loading instructions, or 8862 * any stores to thread-local variables. 8863 */ 8864 for (i = 0; i < dp->dtdo_len; i++) { 8865 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8866 8867 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8868 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8869 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8870 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8871 return (0); 8872 } 8873 8874 return (1); 8875 } 8876 8877 static void 8878 dtrace_difo_hold(dtrace_difo_t *dp) 8879 { 8880 int i; 8881 8882 ASSERT(MUTEX_HELD(&dtrace_lock)); 8883 8884 dp->dtdo_refcnt++; 8885 ASSERT(dp->dtdo_refcnt != 0); 8886 8887 /* 8888 * We need to check this DIF object for references to the variable 8889 * DIF_VAR_VTIMESTAMP. 8890 */ 8891 for (i = 0; i < dp->dtdo_varlen; i++) { 8892 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8893 8894 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8895 continue; 8896 8897 if (dtrace_vtime_references++ == 0) 8898 dtrace_vtime_enable(); 8899 } 8900 } 8901 8902 /* 8903 * This routine calculates the dynamic variable chunksize for a given DIF 8904 * object. The calculation is not fool-proof, and can probably be tricked by 8905 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8906 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8907 * if a dynamic variable size exceeds the chunksize. 8908 */ 8909 static void 8910 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8911 { 8912 uint64_t sval; 8913 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8914 const dif_instr_t *text = dp->dtdo_buf; 8915 uint_t pc, srd = 0; 8916 uint_t ttop = 0; 8917 size_t size, ksize; 8918 uint_t id, i; 8919 8920 for (pc = 0; pc < dp->dtdo_len; pc++) { 8921 dif_instr_t instr = text[pc]; 8922 uint_t op = DIF_INSTR_OP(instr); 8923 uint_t rd = DIF_INSTR_RD(instr); 8924 uint_t r1 = DIF_INSTR_R1(instr); 8925 uint_t nkeys = 0; 8926 uchar_t scope; 8927 8928 dtrace_key_t *key = tupregs; 8929 8930 switch (op) { 8931 case DIF_OP_SETX: 8932 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8933 srd = rd; 8934 continue; 8935 8936 case DIF_OP_STTS: 8937 key = &tupregs[DIF_DTR_NREGS]; 8938 key[0].dttk_size = 0; 8939 key[1].dttk_size = 0; 8940 nkeys = 2; 8941 scope = DIFV_SCOPE_THREAD; 8942 break; 8943 8944 case DIF_OP_STGAA: 8945 case DIF_OP_STTAA: 8946 nkeys = ttop; 8947 8948 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 8949 key[nkeys++].dttk_size = 0; 8950 8951 key[nkeys++].dttk_size = 0; 8952 8953 if (op == DIF_OP_STTAA) { 8954 scope = DIFV_SCOPE_THREAD; 8955 } else { 8956 scope = DIFV_SCOPE_GLOBAL; 8957 } 8958 8959 break; 8960 8961 case DIF_OP_PUSHTR: 8962 if (ttop == DIF_DTR_NREGS) 8963 return; 8964 8965 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 8966 /* 8967 * If the register for the size of the "pushtr" 8968 * is %r0 (or the value is 0) and the type is 8969 * a string, we'll use the system-wide default 8970 * string size. 8971 */ 8972 tupregs[ttop++].dttk_size = 8973 dtrace_strsize_default; 8974 } else { 8975 if (srd == 0) 8976 return; 8977 8978 tupregs[ttop++].dttk_size = sval; 8979 } 8980 8981 break; 8982 8983 case DIF_OP_PUSHTV: 8984 if (ttop == DIF_DTR_NREGS) 8985 return; 8986 8987 tupregs[ttop++].dttk_size = 0; 8988 break; 8989 8990 case DIF_OP_FLUSHTS: 8991 ttop = 0; 8992 break; 8993 8994 case DIF_OP_POPTS: 8995 if (ttop != 0) 8996 ttop--; 8997 break; 8998 } 8999 9000 sval = 0; 9001 srd = 0; 9002 9003 if (nkeys == 0) 9004 continue; 9005 9006 /* 9007 * We have a dynamic variable allocation; calculate its size. 9008 */ 9009 for (ksize = 0, i = 0; i < nkeys; i++) 9010 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9011 9012 size = sizeof (dtrace_dynvar_t); 9013 size += sizeof (dtrace_key_t) * (nkeys - 1); 9014 size += ksize; 9015 9016 /* 9017 * Now we need to determine the size of the stored data. 9018 */ 9019 id = DIF_INSTR_VAR(instr); 9020 9021 for (i = 0; i < dp->dtdo_varlen; i++) { 9022 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9023 9024 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9025 size += v->dtdv_type.dtdt_size; 9026 break; 9027 } 9028 } 9029 9030 if (i == dp->dtdo_varlen) 9031 return; 9032 9033 /* 9034 * We have the size. If this is larger than the chunk size 9035 * for our dynamic variable state, reset the chunk size. 9036 */ 9037 size = P2ROUNDUP(size, sizeof (uint64_t)); 9038 9039 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9040 vstate->dtvs_dynvars.dtds_chunksize = size; 9041 } 9042 } 9043 9044 static void 9045 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9046 { 9047 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9048 uint_t id; 9049 9050 ASSERT(MUTEX_HELD(&dtrace_lock)); 9051 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9052 9053 for (i = 0; i < dp->dtdo_varlen; i++) { 9054 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9055 dtrace_statvar_t *svar, ***svarp; 9056 size_t dsize = 0; 9057 uint8_t scope = v->dtdv_scope; 9058 int *np; 9059 9060 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9061 continue; 9062 9063 id -= DIF_VAR_OTHER_UBASE; 9064 9065 switch (scope) { 9066 case DIFV_SCOPE_THREAD: 9067 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9068 dtrace_difv_t *tlocals; 9069 9070 if ((ntlocals = (otlocals << 1)) == 0) 9071 ntlocals = 1; 9072 9073 osz = otlocals * sizeof (dtrace_difv_t); 9074 nsz = ntlocals * sizeof (dtrace_difv_t); 9075 9076 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9077 9078 if (osz != 0) { 9079 bcopy(vstate->dtvs_tlocals, 9080 tlocals, osz); 9081 kmem_free(vstate->dtvs_tlocals, osz); 9082 } 9083 9084 vstate->dtvs_tlocals = tlocals; 9085 vstate->dtvs_ntlocals = ntlocals; 9086 } 9087 9088 vstate->dtvs_tlocals[id] = *v; 9089 continue; 9090 9091 case DIFV_SCOPE_LOCAL: 9092 np = &vstate->dtvs_nlocals; 9093 svarp = &vstate->dtvs_locals; 9094 9095 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9096 dsize = NCPU * (v->dtdv_type.dtdt_size + 9097 sizeof (uint64_t)); 9098 else 9099 dsize = NCPU * sizeof (uint64_t); 9100 9101 break; 9102 9103 case DIFV_SCOPE_GLOBAL: 9104 np = &vstate->dtvs_nglobals; 9105 svarp = &vstate->dtvs_globals; 9106 9107 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9108 dsize = v->dtdv_type.dtdt_size + 9109 sizeof (uint64_t); 9110 9111 break; 9112 9113 default: 9114 ASSERT(0); 9115 } 9116 9117 while (id >= (oldsvars = *np)) { 9118 dtrace_statvar_t **statics; 9119 int newsvars, oldsize, newsize; 9120 9121 if ((newsvars = (oldsvars << 1)) == 0) 9122 newsvars = 1; 9123 9124 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9125 newsize = newsvars * sizeof (dtrace_statvar_t *); 9126 9127 statics = kmem_zalloc(newsize, KM_SLEEP); 9128 9129 if (oldsize != 0) { 9130 bcopy(*svarp, statics, oldsize); 9131 kmem_free(*svarp, oldsize); 9132 } 9133 9134 *svarp = statics; 9135 *np = newsvars; 9136 } 9137 9138 if ((svar = (*svarp)[id]) == NULL) { 9139 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9140 svar->dtsv_var = *v; 9141 9142 if ((svar->dtsv_size = dsize) != 0) { 9143 svar->dtsv_data = (uint64_t)(uintptr_t) 9144 kmem_zalloc(dsize, KM_SLEEP); 9145 } 9146 9147 (*svarp)[id] = svar; 9148 } 9149 9150 svar->dtsv_refcnt++; 9151 } 9152 9153 dtrace_difo_chunksize(dp, vstate); 9154 dtrace_difo_hold(dp); 9155 } 9156 9157 static dtrace_difo_t * 9158 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9159 { 9160 dtrace_difo_t *new; 9161 size_t sz; 9162 9163 ASSERT(dp->dtdo_buf != NULL); 9164 ASSERT(dp->dtdo_refcnt != 0); 9165 9166 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9167 9168 ASSERT(dp->dtdo_buf != NULL); 9169 sz = dp->dtdo_len * sizeof (dif_instr_t); 9170 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9171 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9172 new->dtdo_len = dp->dtdo_len; 9173 9174 if (dp->dtdo_strtab != NULL) { 9175 ASSERT(dp->dtdo_strlen != 0); 9176 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9177 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9178 new->dtdo_strlen = dp->dtdo_strlen; 9179 } 9180 9181 if (dp->dtdo_inttab != NULL) { 9182 ASSERT(dp->dtdo_intlen != 0); 9183 sz = dp->dtdo_intlen * sizeof (uint64_t); 9184 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9185 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9186 new->dtdo_intlen = dp->dtdo_intlen; 9187 } 9188 9189 if (dp->dtdo_vartab != NULL) { 9190 ASSERT(dp->dtdo_varlen != 0); 9191 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9192 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9193 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9194 new->dtdo_varlen = dp->dtdo_varlen; 9195 } 9196 9197 dtrace_difo_init(new, vstate); 9198 return (new); 9199 } 9200 9201 static void 9202 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9203 { 9204 int i; 9205 9206 ASSERT(dp->dtdo_refcnt == 0); 9207 9208 for (i = 0; i < dp->dtdo_varlen; i++) { 9209 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9210 dtrace_statvar_t *svar, **svarp; 9211 uint_t id; 9212 uint8_t scope = v->dtdv_scope; 9213 int *np; 9214 9215 switch (scope) { 9216 case DIFV_SCOPE_THREAD: 9217 continue; 9218 9219 case DIFV_SCOPE_LOCAL: 9220 np = &vstate->dtvs_nlocals; 9221 svarp = vstate->dtvs_locals; 9222 break; 9223 9224 case DIFV_SCOPE_GLOBAL: 9225 np = &vstate->dtvs_nglobals; 9226 svarp = vstate->dtvs_globals; 9227 break; 9228 9229 default: 9230 ASSERT(0); 9231 } 9232 9233 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9234 continue; 9235 9236 id -= DIF_VAR_OTHER_UBASE; 9237 ASSERT(id < *np); 9238 9239 svar = svarp[id]; 9240 ASSERT(svar != NULL); 9241 ASSERT(svar->dtsv_refcnt > 0); 9242 9243 if (--svar->dtsv_refcnt > 0) 9244 continue; 9245 9246 if (svar->dtsv_size != 0) { 9247 ASSERT(svar->dtsv_data != NULL); 9248 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9249 svar->dtsv_size); 9250 } 9251 9252 kmem_free(svar, sizeof (dtrace_statvar_t)); 9253 svarp[id] = NULL; 9254 } 9255 9256 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9257 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9258 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9259 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9260 9261 kmem_free(dp, sizeof (dtrace_difo_t)); 9262 } 9263 9264 static void 9265 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9266 { 9267 int i; 9268 9269 ASSERT(MUTEX_HELD(&dtrace_lock)); 9270 ASSERT(dp->dtdo_refcnt != 0); 9271 9272 for (i = 0; i < dp->dtdo_varlen; i++) { 9273 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9274 9275 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9276 continue; 9277 9278 ASSERT(dtrace_vtime_references > 0); 9279 if (--dtrace_vtime_references == 0) 9280 dtrace_vtime_disable(); 9281 } 9282 9283 if (--dp->dtdo_refcnt == 0) 9284 dtrace_difo_destroy(dp, vstate); 9285 } 9286 9287 /* 9288 * DTrace Format Functions 9289 */ 9290 static uint16_t 9291 dtrace_format_add(dtrace_state_t *state, char *str) 9292 { 9293 char *fmt, **new; 9294 uint16_t ndx, len = strlen(str) + 1; 9295 9296 fmt = kmem_zalloc(len, KM_SLEEP); 9297 bcopy(str, fmt, len); 9298 9299 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9300 if (state->dts_formats[ndx] == NULL) { 9301 state->dts_formats[ndx] = fmt; 9302 return (ndx + 1); 9303 } 9304 } 9305 9306 if (state->dts_nformats == USHRT_MAX) { 9307 /* 9308 * This is only likely if a denial-of-service attack is being 9309 * attempted. As such, it's okay to fail silently here. 9310 */ 9311 kmem_free(fmt, len); 9312 return (0); 9313 } 9314 9315 /* 9316 * For simplicity, we always resize the formats array to be exactly the 9317 * number of formats. 9318 */ 9319 ndx = state->dts_nformats++; 9320 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9321 9322 if (state->dts_formats != NULL) { 9323 ASSERT(ndx != 0); 9324 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9325 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9326 } 9327 9328 state->dts_formats = new; 9329 state->dts_formats[ndx] = fmt; 9330 9331 return (ndx + 1); 9332 } 9333 9334 static void 9335 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9336 { 9337 char *fmt; 9338 9339 ASSERT(state->dts_formats != NULL); 9340 ASSERT(format <= state->dts_nformats); 9341 ASSERT(state->dts_formats[format - 1] != NULL); 9342 9343 fmt = state->dts_formats[format - 1]; 9344 kmem_free(fmt, strlen(fmt) + 1); 9345 state->dts_formats[format - 1] = NULL; 9346 } 9347 9348 static void 9349 dtrace_format_destroy(dtrace_state_t *state) 9350 { 9351 int i; 9352 9353 if (state->dts_nformats == 0) { 9354 ASSERT(state->dts_formats == NULL); 9355 return; 9356 } 9357 9358 ASSERT(state->dts_formats != NULL); 9359 9360 for (i = 0; i < state->dts_nformats; i++) { 9361 char *fmt = state->dts_formats[i]; 9362 9363 if (fmt == NULL) 9364 continue; 9365 9366 kmem_free(fmt, strlen(fmt) + 1); 9367 } 9368 9369 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9370 state->dts_nformats = 0; 9371 state->dts_formats = NULL; 9372 } 9373 9374 /* 9375 * DTrace Predicate Functions 9376 */ 9377 static dtrace_predicate_t * 9378 dtrace_predicate_create(dtrace_difo_t *dp) 9379 { 9380 dtrace_predicate_t *pred; 9381 9382 ASSERT(MUTEX_HELD(&dtrace_lock)); 9383 ASSERT(dp->dtdo_refcnt != 0); 9384 9385 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9386 pred->dtp_difo = dp; 9387 pred->dtp_refcnt = 1; 9388 9389 if (!dtrace_difo_cacheable(dp)) 9390 return (pred); 9391 9392 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9393 /* 9394 * This is only theoretically possible -- we have had 2^32 9395 * cacheable predicates on this machine. We cannot allow any 9396 * more predicates to become cacheable: as unlikely as it is, 9397 * there may be a thread caching a (now stale) predicate cache 9398 * ID. (N.B.: the temptation is being successfully resisted to 9399 * have this cmn_err() "Holy shit -- we executed this code!") 9400 */ 9401 return (pred); 9402 } 9403 9404 pred->dtp_cacheid = dtrace_predcache_id++; 9405 9406 return (pred); 9407 } 9408 9409 static void 9410 dtrace_predicate_hold(dtrace_predicate_t *pred) 9411 { 9412 ASSERT(MUTEX_HELD(&dtrace_lock)); 9413 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9414 ASSERT(pred->dtp_refcnt > 0); 9415 9416 pred->dtp_refcnt++; 9417 } 9418 9419 static void 9420 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9421 { 9422 dtrace_difo_t *dp = pred->dtp_difo; 9423 9424 ASSERT(MUTEX_HELD(&dtrace_lock)); 9425 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9426 ASSERT(pred->dtp_refcnt > 0); 9427 9428 if (--pred->dtp_refcnt == 0) { 9429 dtrace_difo_release(pred->dtp_difo, vstate); 9430 kmem_free(pred, sizeof (dtrace_predicate_t)); 9431 } 9432 } 9433 9434 /* 9435 * DTrace Action Description Functions 9436 */ 9437 static dtrace_actdesc_t * 9438 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9439 uint64_t uarg, uint64_t arg) 9440 { 9441 dtrace_actdesc_t *act; 9442 9443 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9444 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9445 9446 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9447 act->dtad_kind = kind; 9448 act->dtad_ntuple = ntuple; 9449 act->dtad_uarg = uarg; 9450 act->dtad_arg = arg; 9451 act->dtad_refcnt = 1; 9452 9453 return (act); 9454 } 9455 9456 static void 9457 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9458 { 9459 ASSERT(act->dtad_refcnt >= 1); 9460 act->dtad_refcnt++; 9461 } 9462 9463 static void 9464 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9465 { 9466 dtrace_actkind_t kind = act->dtad_kind; 9467 dtrace_difo_t *dp; 9468 9469 ASSERT(act->dtad_refcnt >= 1); 9470 9471 if (--act->dtad_refcnt != 0) 9472 return; 9473 9474 if ((dp = act->dtad_difo) != NULL) 9475 dtrace_difo_release(dp, vstate); 9476 9477 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9478 char *str = (char *)(uintptr_t)act->dtad_arg; 9479 9480 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9481 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9482 9483 if (str != NULL) 9484 kmem_free(str, strlen(str) + 1); 9485 } 9486 9487 kmem_free(act, sizeof (dtrace_actdesc_t)); 9488 } 9489 9490 /* 9491 * DTrace ECB Functions 9492 */ 9493 static dtrace_ecb_t * 9494 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9495 { 9496 dtrace_ecb_t *ecb; 9497 dtrace_epid_t epid; 9498 9499 ASSERT(MUTEX_HELD(&dtrace_lock)); 9500 9501 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9502 ecb->dte_predicate = NULL; 9503 ecb->dte_probe = probe; 9504 9505 /* 9506 * The default size is the size of the default action: recording 9507 * the epid. 9508 */ 9509 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9510 ecb->dte_alignment = sizeof (dtrace_epid_t); 9511 9512 epid = state->dts_epid++; 9513 9514 if (epid - 1 >= state->dts_necbs) { 9515 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9516 int necbs = state->dts_necbs << 1; 9517 9518 ASSERT(epid == state->dts_necbs + 1); 9519 9520 if (necbs == 0) { 9521 ASSERT(oecbs == NULL); 9522 necbs = 1; 9523 } 9524 9525 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9526 9527 if (oecbs != NULL) 9528 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9529 9530 dtrace_membar_producer(); 9531 state->dts_ecbs = ecbs; 9532 9533 if (oecbs != NULL) { 9534 /* 9535 * If this state is active, we must dtrace_sync() 9536 * before we can free the old dts_ecbs array: we're 9537 * coming in hot, and there may be active ring 9538 * buffer processing (which indexes into the dts_ecbs 9539 * array) on another CPU. 9540 */ 9541 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9542 dtrace_sync(); 9543 9544 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9545 } 9546 9547 dtrace_membar_producer(); 9548 state->dts_necbs = necbs; 9549 } 9550 9551 ecb->dte_state = state; 9552 9553 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9554 dtrace_membar_producer(); 9555 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9556 9557 return (ecb); 9558 } 9559 9560 static int 9561 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9562 { 9563 dtrace_probe_t *probe = ecb->dte_probe; 9564 9565 ASSERT(MUTEX_HELD(&cpu_lock)); 9566 ASSERT(MUTEX_HELD(&dtrace_lock)); 9567 ASSERT(ecb->dte_next == NULL); 9568 9569 if (probe == NULL) { 9570 /* 9571 * This is the NULL probe -- there's nothing to do. 9572 */ 9573 return (0); 9574 } 9575 9576 if (probe->dtpr_ecb == NULL) { 9577 dtrace_provider_t *prov = probe->dtpr_provider; 9578 9579 /* 9580 * We're the first ECB on this probe. 9581 */ 9582 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9583 9584 if (ecb->dte_predicate != NULL) 9585 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9586 9587 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9588 probe->dtpr_id, probe->dtpr_arg)); 9589 } else { 9590 /* 9591 * This probe is already active. Swing the last pointer to 9592 * point to the new ECB, and issue a dtrace_sync() to assure 9593 * that all CPUs have seen the change. 9594 */ 9595 ASSERT(probe->dtpr_ecb_last != NULL); 9596 probe->dtpr_ecb_last->dte_next = ecb; 9597 probe->dtpr_ecb_last = ecb; 9598 probe->dtpr_predcache = 0; 9599 9600 dtrace_sync(); 9601 return (0); 9602 } 9603 } 9604 9605 static void 9606 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9607 { 9608 uint32_t maxalign = sizeof (dtrace_epid_t); 9609 uint32_t align = sizeof (uint8_t), offs, diff; 9610 dtrace_action_t *act; 9611 int wastuple = 0; 9612 uint32_t aggbase = UINT32_MAX; 9613 dtrace_state_t *state = ecb->dte_state; 9614 9615 /* 9616 * If we record anything, we always record the epid. (And we always 9617 * record it first.) 9618 */ 9619 offs = sizeof (dtrace_epid_t); 9620 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9621 9622 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9623 dtrace_recdesc_t *rec = &act->dta_rec; 9624 9625 if ((align = rec->dtrd_alignment) > maxalign) 9626 maxalign = align; 9627 9628 if (!wastuple && act->dta_intuple) { 9629 /* 9630 * This is the first record in a tuple. Align the 9631 * offset to be at offset 4 in an 8-byte aligned 9632 * block. 9633 */ 9634 diff = offs + sizeof (dtrace_aggid_t); 9635 9636 if (diff = (diff & (sizeof (uint64_t) - 1))) 9637 offs += sizeof (uint64_t) - diff; 9638 9639 aggbase = offs - sizeof (dtrace_aggid_t); 9640 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9641 } 9642 9643 /*LINTED*/ 9644 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9645 /* 9646 * The current offset is not properly aligned; align it. 9647 */ 9648 offs += align - diff; 9649 } 9650 9651 rec->dtrd_offset = offs; 9652 9653 if (offs + rec->dtrd_size > ecb->dte_needed) { 9654 ecb->dte_needed = offs + rec->dtrd_size; 9655 9656 if (ecb->dte_needed > state->dts_needed) 9657 state->dts_needed = ecb->dte_needed; 9658 } 9659 9660 if (DTRACEACT_ISAGG(act->dta_kind)) { 9661 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9662 dtrace_action_t *first = agg->dtag_first, *prev; 9663 9664 ASSERT(rec->dtrd_size != 0 && first != NULL); 9665 ASSERT(wastuple); 9666 ASSERT(aggbase != UINT32_MAX); 9667 9668 agg->dtag_base = aggbase; 9669 9670 while ((prev = first->dta_prev) != NULL && 9671 DTRACEACT_ISAGG(prev->dta_kind)) { 9672 agg = (dtrace_aggregation_t *)prev; 9673 first = agg->dtag_first; 9674 } 9675 9676 if (prev != NULL) { 9677 offs = prev->dta_rec.dtrd_offset + 9678 prev->dta_rec.dtrd_size; 9679 } else { 9680 offs = sizeof (dtrace_epid_t); 9681 } 9682 wastuple = 0; 9683 } else { 9684 if (!act->dta_intuple) 9685 ecb->dte_size = offs + rec->dtrd_size; 9686 9687 offs += rec->dtrd_size; 9688 } 9689 9690 wastuple = act->dta_intuple; 9691 } 9692 9693 if ((act = ecb->dte_action) != NULL && 9694 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9695 ecb->dte_size == sizeof (dtrace_epid_t)) { 9696 /* 9697 * If the size is still sizeof (dtrace_epid_t), then all 9698 * actions store no data; set the size to 0. 9699 */ 9700 ecb->dte_alignment = maxalign; 9701 ecb->dte_size = 0; 9702 9703 /* 9704 * If the needed space is still sizeof (dtrace_epid_t), then 9705 * all actions need no additional space; set the needed 9706 * size to 0. 9707 */ 9708 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9709 ecb->dte_needed = 0; 9710 9711 return; 9712 } 9713 9714 /* 9715 * Set our alignment, and make sure that the dte_size and dte_needed 9716 * are aligned to the size of an EPID. 9717 */ 9718 ecb->dte_alignment = maxalign; 9719 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9720 ~(sizeof (dtrace_epid_t) - 1); 9721 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9722 ~(sizeof (dtrace_epid_t) - 1); 9723 ASSERT(ecb->dte_size <= ecb->dte_needed); 9724 } 9725 9726 static dtrace_action_t * 9727 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9728 { 9729 dtrace_aggregation_t *agg; 9730 size_t size = sizeof (uint64_t); 9731 int ntuple = desc->dtad_ntuple; 9732 dtrace_action_t *act; 9733 dtrace_recdesc_t *frec; 9734 dtrace_aggid_t aggid; 9735 dtrace_state_t *state = ecb->dte_state; 9736 9737 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9738 agg->dtag_ecb = ecb; 9739 9740 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9741 9742 switch (desc->dtad_kind) { 9743 case DTRACEAGG_MIN: 9744 agg->dtag_initial = INT64_MAX; 9745 agg->dtag_aggregate = dtrace_aggregate_min; 9746 break; 9747 9748 case DTRACEAGG_MAX: 9749 agg->dtag_initial = INT64_MIN; 9750 agg->dtag_aggregate = dtrace_aggregate_max; 9751 break; 9752 9753 case DTRACEAGG_COUNT: 9754 agg->dtag_aggregate = dtrace_aggregate_count; 9755 break; 9756 9757 case DTRACEAGG_QUANTIZE: 9758 agg->dtag_aggregate = dtrace_aggregate_quantize; 9759 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9760 sizeof (uint64_t); 9761 break; 9762 9763 case DTRACEAGG_LQUANTIZE: { 9764 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9765 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9766 9767 agg->dtag_initial = desc->dtad_arg; 9768 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9769 9770 if (step == 0 || levels == 0) 9771 goto err; 9772 9773 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9774 break; 9775 } 9776 9777 case DTRACEAGG_LLQUANTIZE: { 9778 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 9779 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 9780 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 9781 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 9782 int64_t v; 9783 9784 agg->dtag_initial = desc->dtad_arg; 9785 agg->dtag_aggregate = dtrace_aggregate_llquantize; 9786 9787 if (factor < 2 || low >= high || nsteps < factor) 9788 goto err; 9789 9790 /* 9791 * Now check that the number of steps evenly divides a power 9792 * of the factor. (This assures both integer bucket size and 9793 * linearity within each magnitude.) 9794 */ 9795 for (v = factor; v < nsteps; v *= factor) 9796 continue; 9797 9798 if ((v % nsteps) || (nsteps % factor)) 9799 goto err; 9800 9801 size = (dtrace_aggregate_llquantize_bucket(factor, 9802 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 9803 break; 9804 } 9805 9806 case DTRACEAGG_AVG: 9807 agg->dtag_aggregate = dtrace_aggregate_avg; 9808 size = sizeof (uint64_t) * 2; 9809 break; 9810 9811 case DTRACEAGG_STDDEV: 9812 agg->dtag_aggregate = dtrace_aggregate_stddev; 9813 size = sizeof (uint64_t) * 4; 9814 break; 9815 9816 case DTRACEAGG_SUM: 9817 agg->dtag_aggregate = dtrace_aggregate_sum; 9818 break; 9819 9820 default: 9821 goto err; 9822 } 9823 9824 agg->dtag_action.dta_rec.dtrd_size = size; 9825 9826 if (ntuple == 0) 9827 goto err; 9828 9829 /* 9830 * We must make sure that we have enough actions for the n-tuple. 9831 */ 9832 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9833 if (DTRACEACT_ISAGG(act->dta_kind)) 9834 break; 9835 9836 if (--ntuple == 0) { 9837 /* 9838 * This is the action with which our n-tuple begins. 9839 */ 9840 agg->dtag_first = act; 9841 goto success; 9842 } 9843 } 9844 9845 /* 9846 * This n-tuple is short by ntuple elements. Return failure. 9847 */ 9848 ASSERT(ntuple != 0); 9849 err: 9850 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9851 return (NULL); 9852 9853 success: 9854 /* 9855 * If the last action in the tuple has a size of zero, it's actually 9856 * an expression argument for the aggregating action. 9857 */ 9858 ASSERT(ecb->dte_action_last != NULL); 9859 act = ecb->dte_action_last; 9860 9861 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9862 ASSERT(act->dta_difo != NULL); 9863 9864 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9865 agg->dtag_hasarg = 1; 9866 } 9867 9868 /* 9869 * We need to allocate an id for this aggregation. 9870 */ 9871 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9872 VM_BESTFIT | VM_SLEEP); 9873 9874 if (aggid - 1 >= state->dts_naggregations) { 9875 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9876 dtrace_aggregation_t **aggs; 9877 int naggs = state->dts_naggregations << 1; 9878 int onaggs = state->dts_naggregations; 9879 9880 ASSERT(aggid == state->dts_naggregations + 1); 9881 9882 if (naggs == 0) { 9883 ASSERT(oaggs == NULL); 9884 naggs = 1; 9885 } 9886 9887 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9888 9889 if (oaggs != NULL) { 9890 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9891 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9892 } 9893 9894 state->dts_aggregations = aggs; 9895 state->dts_naggregations = naggs; 9896 } 9897 9898 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9899 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9900 9901 frec = &agg->dtag_first->dta_rec; 9902 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9903 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9904 9905 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9906 ASSERT(!act->dta_intuple); 9907 act->dta_intuple = 1; 9908 } 9909 9910 return (&agg->dtag_action); 9911 } 9912 9913 static void 9914 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9915 { 9916 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9917 dtrace_state_t *state = ecb->dte_state; 9918 dtrace_aggid_t aggid = agg->dtag_id; 9919 9920 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9921 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9922 9923 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9924 state->dts_aggregations[aggid - 1] = NULL; 9925 9926 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9927 } 9928 9929 static int 9930 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9931 { 9932 dtrace_action_t *action, *last; 9933 dtrace_difo_t *dp = desc->dtad_difo; 9934 uint32_t size = 0, align = sizeof (uint8_t), mask; 9935 uint16_t format = 0; 9936 dtrace_recdesc_t *rec; 9937 dtrace_state_t *state = ecb->dte_state; 9938 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 9939 uint64_t arg = desc->dtad_arg; 9940 9941 ASSERT(MUTEX_HELD(&dtrace_lock)); 9942 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9943 9944 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9945 /* 9946 * If this is an aggregating action, there must be neither 9947 * a speculate nor a commit on the action chain. 9948 */ 9949 dtrace_action_t *act; 9950 9951 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9952 if (act->dta_kind == DTRACEACT_COMMIT) 9953 return (EINVAL); 9954 9955 if (act->dta_kind == DTRACEACT_SPECULATE) 9956 return (EINVAL); 9957 } 9958 9959 action = dtrace_ecb_aggregation_create(ecb, desc); 9960 9961 if (action == NULL) 9962 return (EINVAL); 9963 } else { 9964 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 9965 (desc->dtad_kind == DTRACEACT_DIFEXPR && 9966 dp != NULL && dp->dtdo_destructive)) { 9967 state->dts_destructive = 1; 9968 } 9969 9970 switch (desc->dtad_kind) { 9971 case DTRACEACT_PRINTF: 9972 case DTRACEACT_PRINTA: 9973 case DTRACEACT_SYSTEM: 9974 case DTRACEACT_FREOPEN: 9975 case DTRACEACT_DIFEXPR: 9976 /* 9977 * We know that our arg is a string -- turn it into a 9978 * format. 9979 */ 9980 if (arg == NULL) { 9981 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 9982 desc->dtad_kind == DTRACEACT_DIFEXPR); 9983 format = 0; 9984 } else { 9985 ASSERT(arg != NULL); 9986 ASSERT(arg > KERNELBASE); 9987 format = dtrace_format_add(state, 9988 (char *)(uintptr_t)arg); 9989 } 9990 9991 /*FALLTHROUGH*/ 9992 case DTRACEACT_LIBACT: 9993 case DTRACEACT_TRACEMEM: 9994 case DTRACEACT_TRACEMEM_DYNSIZE: 9995 if (dp == NULL) 9996 return (EINVAL); 9997 9998 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 9999 break; 10000 10001 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10002 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10003 return (EINVAL); 10004 10005 size = opt[DTRACEOPT_STRSIZE]; 10006 } 10007 10008 break; 10009 10010 case DTRACEACT_STACK: 10011 if ((nframes = arg) == 0) { 10012 nframes = opt[DTRACEOPT_STACKFRAMES]; 10013 ASSERT(nframes > 0); 10014 arg = nframes; 10015 } 10016 10017 size = nframes * sizeof (pc_t); 10018 break; 10019 10020 case DTRACEACT_JSTACK: 10021 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10022 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10023 10024 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10025 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10026 10027 arg = DTRACE_USTACK_ARG(nframes, strsize); 10028 10029 /*FALLTHROUGH*/ 10030 case DTRACEACT_USTACK: 10031 if (desc->dtad_kind != DTRACEACT_JSTACK && 10032 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10033 strsize = DTRACE_USTACK_STRSIZE(arg); 10034 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10035 ASSERT(nframes > 0); 10036 arg = DTRACE_USTACK_ARG(nframes, strsize); 10037 } 10038 10039 /* 10040 * Save a slot for the pid. 10041 */ 10042 size = (nframes + 1) * sizeof (uint64_t); 10043 size += DTRACE_USTACK_STRSIZE(arg); 10044 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10045 10046 break; 10047 10048 case DTRACEACT_SYM: 10049 case DTRACEACT_MOD: 10050 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10051 sizeof (uint64_t)) || 10052 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10053 return (EINVAL); 10054 break; 10055 10056 case DTRACEACT_USYM: 10057 case DTRACEACT_UMOD: 10058 case DTRACEACT_UADDR: 10059 if (dp == NULL || 10060 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10061 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10062 return (EINVAL); 10063 10064 /* 10065 * We have a slot for the pid, plus a slot for the 10066 * argument. To keep things simple (aligned with 10067 * bitness-neutral sizing), we store each as a 64-bit 10068 * quantity. 10069 */ 10070 size = 2 * sizeof (uint64_t); 10071 break; 10072 10073 case DTRACEACT_STOP: 10074 case DTRACEACT_BREAKPOINT: 10075 case DTRACEACT_PANIC: 10076 break; 10077 10078 case DTRACEACT_CHILL: 10079 case DTRACEACT_DISCARD: 10080 case DTRACEACT_RAISE: 10081 if (dp == NULL) 10082 return (EINVAL); 10083 break; 10084 10085 case DTRACEACT_EXIT: 10086 if (dp == NULL || 10087 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10088 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10089 return (EINVAL); 10090 break; 10091 10092 case DTRACEACT_SPECULATE: 10093 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10094 return (EINVAL); 10095 10096 if (dp == NULL) 10097 return (EINVAL); 10098 10099 state->dts_speculates = 1; 10100 break; 10101 10102 case DTRACEACT_COMMIT: { 10103 dtrace_action_t *act = ecb->dte_action; 10104 10105 for (; act != NULL; act = act->dta_next) { 10106 if (act->dta_kind == DTRACEACT_COMMIT) 10107 return (EINVAL); 10108 } 10109 10110 if (dp == NULL) 10111 return (EINVAL); 10112 break; 10113 } 10114 10115 default: 10116 return (EINVAL); 10117 } 10118 10119 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10120 /* 10121 * If this is a data-storing action or a speculate, 10122 * we must be sure that there isn't a commit on the 10123 * action chain. 10124 */ 10125 dtrace_action_t *act = ecb->dte_action; 10126 10127 for (; act != NULL; act = act->dta_next) { 10128 if (act->dta_kind == DTRACEACT_COMMIT) 10129 return (EINVAL); 10130 } 10131 } 10132 10133 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10134 action->dta_rec.dtrd_size = size; 10135 } 10136 10137 action->dta_refcnt = 1; 10138 rec = &action->dta_rec; 10139 size = rec->dtrd_size; 10140 10141 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10142 if (!(size & mask)) { 10143 align = mask + 1; 10144 break; 10145 } 10146 } 10147 10148 action->dta_kind = desc->dtad_kind; 10149 10150 if ((action->dta_difo = dp) != NULL) 10151 dtrace_difo_hold(dp); 10152 10153 rec->dtrd_action = action->dta_kind; 10154 rec->dtrd_arg = arg; 10155 rec->dtrd_uarg = desc->dtad_uarg; 10156 rec->dtrd_alignment = (uint16_t)align; 10157 rec->dtrd_format = format; 10158 10159 if ((last = ecb->dte_action_last) != NULL) { 10160 ASSERT(ecb->dte_action != NULL); 10161 action->dta_prev = last; 10162 last->dta_next = action; 10163 } else { 10164 ASSERT(ecb->dte_action == NULL); 10165 ecb->dte_action = action; 10166 } 10167 10168 ecb->dte_action_last = action; 10169 10170 return (0); 10171 } 10172 10173 static void 10174 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10175 { 10176 dtrace_action_t *act = ecb->dte_action, *next; 10177 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10178 dtrace_difo_t *dp; 10179 uint16_t format; 10180 10181 if (act != NULL && act->dta_refcnt > 1) { 10182 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10183 act->dta_refcnt--; 10184 } else { 10185 for (; act != NULL; act = next) { 10186 next = act->dta_next; 10187 ASSERT(next != NULL || act == ecb->dte_action_last); 10188 ASSERT(act->dta_refcnt == 1); 10189 10190 if ((format = act->dta_rec.dtrd_format) != 0) 10191 dtrace_format_remove(ecb->dte_state, format); 10192 10193 if ((dp = act->dta_difo) != NULL) 10194 dtrace_difo_release(dp, vstate); 10195 10196 if (DTRACEACT_ISAGG(act->dta_kind)) { 10197 dtrace_ecb_aggregation_destroy(ecb, act); 10198 } else { 10199 kmem_free(act, sizeof (dtrace_action_t)); 10200 } 10201 } 10202 } 10203 10204 ecb->dte_action = NULL; 10205 ecb->dte_action_last = NULL; 10206 ecb->dte_size = sizeof (dtrace_epid_t); 10207 } 10208 10209 static void 10210 dtrace_ecb_disable(dtrace_ecb_t *ecb) 10211 { 10212 /* 10213 * We disable the ECB by removing it from its probe. 10214 */ 10215 dtrace_ecb_t *pecb, *prev = NULL; 10216 dtrace_probe_t *probe = ecb->dte_probe; 10217 10218 ASSERT(MUTEX_HELD(&dtrace_lock)); 10219 10220 if (probe == NULL) { 10221 /* 10222 * This is the NULL probe; there is nothing to disable. 10223 */ 10224 return; 10225 } 10226 10227 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10228 if (pecb == ecb) 10229 break; 10230 prev = pecb; 10231 } 10232 10233 ASSERT(pecb != NULL); 10234 10235 if (prev == NULL) { 10236 probe->dtpr_ecb = ecb->dte_next; 10237 } else { 10238 prev->dte_next = ecb->dte_next; 10239 } 10240 10241 if (ecb == probe->dtpr_ecb_last) { 10242 ASSERT(ecb->dte_next == NULL); 10243 probe->dtpr_ecb_last = prev; 10244 } 10245 10246 /* 10247 * The ECB has been disconnected from the probe; now sync to assure 10248 * that all CPUs have seen the change before returning. 10249 */ 10250 dtrace_sync(); 10251 10252 if (probe->dtpr_ecb == NULL) { 10253 /* 10254 * That was the last ECB on the probe; clear the predicate 10255 * cache ID for the probe, disable it and sync one more time 10256 * to assure that we'll never hit it again. 10257 */ 10258 dtrace_provider_t *prov = probe->dtpr_provider; 10259 10260 ASSERT(ecb->dte_next == NULL); 10261 ASSERT(probe->dtpr_ecb_last == NULL); 10262 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10263 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10264 probe->dtpr_id, probe->dtpr_arg); 10265 dtrace_sync(); 10266 } else { 10267 /* 10268 * There is at least one ECB remaining on the probe. If there 10269 * is _exactly_ one, set the probe's predicate cache ID to be 10270 * the predicate cache ID of the remaining ECB. 10271 */ 10272 ASSERT(probe->dtpr_ecb_last != NULL); 10273 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10274 10275 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10276 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10277 10278 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10279 10280 if (p != NULL) 10281 probe->dtpr_predcache = p->dtp_cacheid; 10282 } 10283 10284 ecb->dte_next = NULL; 10285 } 10286 } 10287 10288 static void 10289 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10290 { 10291 dtrace_state_t *state = ecb->dte_state; 10292 dtrace_vstate_t *vstate = &state->dts_vstate; 10293 dtrace_predicate_t *pred; 10294 dtrace_epid_t epid = ecb->dte_epid; 10295 10296 ASSERT(MUTEX_HELD(&dtrace_lock)); 10297 ASSERT(ecb->dte_next == NULL); 10298 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10299 10300 if ((pred = ecb->dte_predicate) != NULL) 10301 dtrace_predicate_release(pred, vstate); 10302 10303 dtrace_ecb_action_remove(ecb); 10304 10305 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10306 state->dts_ecbs[epid - 1] = NULL; 10307 10308 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10309 } 10310 10311 static dtrace_ecb_t * 10312 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10313 dtrace_enabling_t *enab) 10314 { 10315 dtrace_ecb_t *ecb; 10316 dtrace_predicate_t *pred; 10317 dtrace_actdesc_t *act; 10318 dtrace_provider_t *prov; 10319 dtrace_ecbdesc_t *desc = enab->dten_current; 10320 10321 ASSERT(MUTEX_HELD(&dtrace_lock)); 10322 ASSERT(state != NULL); 10323 10324 ecb = dtrace_ecb_add(state, probe); 10325 ecb->dte_uarg = desc->dted_uarg; 10326 10327 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10328 dtrace_predicate_hold(pred); 10329 ecb->dte_predicate = pred; 10330 } 10331 10332 if (probe != NULL) { 10333 /* 10334 * If the provider shows more leg than the consumer is old 10335 * enough to see, we need to enable the appropriate implicit 10336 * predicate bits to prevent the ecb from activating at 10337 * revealing times. 10338 * 10339 * Providers specifying DTRACE_PRIV_USER at register time 10340 * are stating that they need the /proc-style privilege 10341 * model to be enforced, and this is what DTRACE_COND_OWNER 10342 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10343 */ 10344 prov = probe->dtpr_provider; 10345 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10346 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10347 ecb->dte_cond |= DTRACE_COND_OWNER; 10348 10349 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10350 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10351 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10352 10353 /* 10354 * If the provider shows us kernel innards and the user 10355 * is lacking sufficient privilege, enable the 10356 * DTRACE_COND_USERMODE implicit predicate. 10357 */ 10358 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10359 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10360 ecb->dte_cond |= DTRACE_COND_USERMODE; 10361 } 10362 10363 if (dtrace_ecb_create_cache != NULL) { 10364 /* 10365 * If we have a cached ecb, we'll use its action list instead 10366 * of creating our own (saving both time and space). 10367 */ 10368 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10369 dtrace_action_t *act = cached->dte_action; 10370 10371 if (act != NULL) { 10372 ASSERT(act->dta_refcnt > 0); 10373 act->dta_refcnt++; 10374 ecb->dte_action = act; 10375 ecb->dte_action_last = cached->dte_action_last; 10376 ecb->dte_needed = cached->dte_needed; 10377 ecb->dte_size = cached->dte_size; 10378 ecb->dte_alignment = cached->dte_alignment; 10379 } 10380 10381 return (ecb); 10382 } 10383 10384 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10385 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10386 dtrace_ecb_destroy(ecb); 10387 return (NULL); 10388 } 10389 } 10390 10391 dtrace_ecb_resize(ecb); 10392 10393 return (dtrace_ecb_create_cache = ecb); 10394 } 10395 10396 static int 10397 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10398 { 10399 dtrace_ecb_t *ecb; 10400 dtrace_enabling_t *enab = arg; 10401 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10402 10403 ASSERT(state != NULL); 10404 10405 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10406 /* 10407 * This probe was created in a generation for which this 10408 * enabling has previously created ECBs; we don't want to 10409 * enable it again, so just kick out. 10410 */ 10411 return (DTRACE_MATCH_NEXT); 10412 } 10413 10414 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10415 return (DTRACE_MATCH_DONE); 10416 10417 if (dtrace_ecb_enable(ecb) < 0) 10418 return (DTRACE_MATCH_FAIL); 10419 10420 return (DTRACE_MATCH_NEXT); 10421 } 10422 10423 static dtrace_ecb_t * 10424 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10425 { 10426 dtrace_ecb_t *ecb; 10427 10428 ASSERT(MUTEX_HELD(&dtrace_lock)); 10429 10430 if (id == 0 || id > state->dts_necbs) 10431 return (NULL); 10432 10433 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10434 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10435 10436 return (state->dts_ecbs[id - 1]); 10437 } 10438 10439 static dtrace_aggregation_t * 10440 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10441 { 10442 dtrace_aggregation_t *agg; 10443 10444 ASSERT(MUTEX_HELD(&dtrace_lock)); 10445 10446 if (id == 0 || id > state->dts_naggregations) 10447 return (NULL); 10448 10449 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10450 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10451 agg->dtag_id == id); 10452 10453 return (state->dts_aggregations[id - 1]); 10454 } 10455 10456 /* 10457 * DTrace Buffer Functions 10458 * 10459 * The following functions manipulate DTrace buffers. Most of these functions 10460 * are called in the context of establishing or processing consumer state; 10461 * exceptions are explicitly noted. 10462 */ 10463 10464 /* 10465 * Note: called from cross call context. This function switches the two 10466 * buffers on a given CPU. The atomicity of this operation is assured by 10467 * disabling interrupts while the actual switch takes place; the disabling of 10468 * interrupts serializes the execution with any execution of dtrace_probe() on 10469 * the same CPU. 10470 */ 10471 static void 10472 dtrace_buffer_switch(dtrace_buffer_t *buf) 10473 { 10474 caddr_t tomax = buf->dtb_tomax; 10475 caddr_t xamot = buf->dtb_xamot; 10476 dtrace_icookie_t cookie; 10477 hrtime_t now = dtrace_gethrtime(); 10478 10479 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10480 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10481 10482 cookie = dtrace_interrupt_disable(); 10483 buf->dtb_tomax = xamot; 10484 buf->dtb_xamot = tomax; 10485 buf->dtb_xamot_drops = buf->dtb_drops; 10486 buf->dtb_xamot_offset = buf->dtb_offset; 10487 buf->dtb_xamot_errors = buf->dtb_errors; 10488 buf->dtb_xamot_flags = buf->dtb_flags; 10489 buf->dtb_offset = 0; 10490 buf->dtb_drops = 0; 10491 buf->dtb_errors = 0; 10492 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10493 buf->dtb_interval = now - buf->dtb_switched; 10494 buf->dtb_switched = now; 10495 dtrace_interrupt_enable(cookie); 10496 } 10497 10498 /* 10499 * Note: called from cross call context. This function activates a buffer 10500 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10501 * is guaranteed by the disabling of interrupts. 10502 */ 10503 static void 10504 dtrace_buffer_activate(dtrace_state_t *state) 10505 { 10506 dtrace_buffer_t *buf; 10507 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10508 10509 buf = &state->dts_buffer[CPU->cpu_id]; 10510 10511 if (buf->dtb_tomax != NULL) { 10512 /* 10513 * We might like to assert that the buffer is marked inactive, 10514 * but this isn't necessarily true: the buffer for the CPU 10515 * that processes the BEGIN probe has its buffer activated 10516 * manually. In this case, we take the (harmless) action 10517 * re-clearing the bit INACTIVE bit. 10518 */ 10519 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10520 } 10521 10522 dtrace_interrupt_enable(cookie); 10523 } 10524 10525 static int 10526 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10527 processorid_t cpu, int *factor) 10528 { 10529 cpu_t *cp; 10530 dtrace_buffer_t *buf; 10531 int allocated = 0, desired = 0; 10532 10533 ASSERT(MUTEX_HELD(&cpu_lock)); 10534 ASSERT(MUTEX_HELD(&dtrace_lock)); 10535 10536 *factor = 1; 10537 10538 if (size > dtrace_nonroot_maxsize && 10539 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10540 return (EFBIG); 10541 10542 cp = cpu_list; 10543 10544 do { 10545 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10546 continue; 10547 10548 buf = &bufs[cp->cpu_id]; 10549 10550 /* 10551 * If there is already a buffer allocated for this CPU, it 10552 * is only possible that this is a DR event. In this case, 10553 * the buffer size must match our specified size. 10554 */ 10555 if (buf->dtb_tomax != NULL) { 10556 ASSERT(buf->dtb_size == size); 10557 continue; 10558 } 10559 10560 ASSERT(buf->dtb_xamot == NULL); 10561 10562 if ((buf->dtb_tomax = kmem_zalloc(size, 10563 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 10564 goto err; 10565 10566 buf->dtb_size = size; 10567 buf->dtb_flags = flags; 10568 buf->dtb_offset = 0; 10569 buf->dtb_drops = 0; 10570 10571 if (flags & DTRACEBUF_NOSWITCH) 10572 continue; 10573 10574 if ((buf->dtb_xamot = kmem_zalloc(size, 10575 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 10576 goto err; 10577 } while ((cp = cp->cpu_next) != cpu_list); 10578 10579 return (0); 10580 10581 err: 10582 cp = cpu_list; 10583 10584 do { 10585 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10586 continue; 10587 10588 buf = &bufs[cp->cpu_id]; 10589 desired += 2; 10590 10591 if (buf->dtb_xamot != NULL) { 10592 ASSERT(buf->dtb_tomax != NULL); 10593 ASSERT(buf->dtb_size == size); 10594 kmem_free(buf->dtb_xamot, size); 10595 allocated++; 10596 } 10597 10598 if (buf->dtb_tomax != NULL) { 10599 ASSERT(buf->dtb_size == size); 10600 kmem_free(buf->dtb_tomax, size); 10601 allocated++; 10602 } 10603 10604 buf->dtb_tomax = NULL; 10605 buf->dtb_xamot = NULL; 10606 buf->dtb_size = 0; 10607 } while ((cp = cp->cpu_next) != cpu_list); 10608 10609 *factor = desired / (allocated > 0 ? allocated : 1); 10610 10611 return (ENOMEM); 10612 } 10613 10614 /* 10615 * Note: called from probe context. This function just increments the drop 10616 * count on a buffer. It has been made a function to allow for the 10617 * possibility of understanding the source of mysterious drop counts. (A 10618 * problem for which one may be particularly disappointed that DTrace cannot 10619 * be used to understand DTrace.) 10620 */ 10621 static void 10622 dtrace_buffer_drop(dtrace_buffer_t *buf) 10623 { 10624 buf->dtb_drops++; 10625 } 10626 10627 /* 10628 * Note: called from probe context. This function is called to reserve space 10629 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10630 * mstate. Returns the new offset in the buffer, or a negative value if an 10631 * error has occurred. 10632 */ 10633 static intptr_t 10634 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10635 dtrace_state_t *state, dtrace_mstate_t *mstate) 10636 { 10637 intptr_t offs = buf->dtb_offset, soffs; 10638 intptr_t woffs; 10639 caddr_t tomax; 10640 size_t total; 10641 10642 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10643 return (-1); 10644 10645 if ((tomax = buf->dtb_tomax) == NULL) { 10646 dtrace_buffer_drop(buf); 10647 return (-1); 10648 } 10649 10650 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10651 while (offs & (align - 1)) { 10652 /* 10653 * Assert that our alignment is off by a number which 10654 * is itself sizeof (uint32_t) aligned. 10655 */ 10656 ASSERT(!((align - (offs & (align - 1))) & 10657 (sizeof (uint32_t) - 1))); 10658 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10659 offs += sizeof (uint32_t); 10660 } 10661 10662 if ((soffs = offs + needed) > buf->dtb_size) { 10663 dtrace_buffer_drop(buf); 10664 return (-1); 10665 } 10666 10667 if (mstate == NULL) 10668 return (offs); 10669 10670 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10671 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10672 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10673 10674 return (offs); 10675 } 10676 10677 if (buf->dtb_flags & DTRACEBUF_FILL) { 10678 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10679 (buf->dtb_flags & DTRACEBUF_FULL)) 10680 return (-1); 10681 goto out; 10682 } 10683 10684 total = needed + (offs & (align - 1)); 10685 10686 /* 10687 * For a ring buffer, life is quite a bit more complicated. Before 10688 * we can store any padding, we need to adjust our wrapping offset. 10689 * (If we've never before wrapped or we're not about to, no adjustment 10690 * is required.) 10691 */ 10692 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10693 offs + total > buf->dtb_size) { 10694 woffs = buf->dtb_xamot_offset; 10695 10696 if (offs + total > buf->dtb_size) { 10697 /* 10698 * We can't fit in the end of the buffer. First, a 10699 * sanity check that we can fit in the buffer at all. 10700 */ 10701 if (total > buf->dtb_size) { 10702 dtrace_buffer_drop(buf); 10703 return (-1); 10704 } 10705 10706 /* 10707 * We're going to be storing at the top of the buffer, 10708 * so now we need to deal with the wrapped offset. We 10709 * only reset our wrapped offset to 0 if it is 10710 * currently greater than the current offset. If it 10711 * is less than the current offset, it is because a 10712 * previous allocation induced a wrap -- but the 10713 * allocation didn't subsequently take the space due 10714 * to an error or false predicate evaluation. In this 10715 * case, we'll just leave the wrapped offset alone: if 10716 * the wrapped offset hasn't been advanced far enough 10717 * for this allocation, it will be adjusted in the 10718 * lower loop. 10719 */ 10720 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10721 if (woffs >= offs) 10722 woffs = 0; 10723 } else { 10724 woffs = 0; 10725 } 10726 10727 /* 10728 * Now we know that we're going to be storing to the 10729 * top of the buffer and that there is room for us 10730 * there. We need to clear the buffer from the current 10731 * offset to the end (there may be old gunk there). 10732 */ 10733 while (offs < buf->dtb_size) 10734 tomax[offs++] = 0; 10735 10736 /* 10737 * We need to set our offset to zero. And because we 10738 * are wrapping, we need to set the bit indicating as 10739 * much. We can also adjust our needed space back 10740 * down to the space required by the ECB -- we know 10741 * that the top of the buffer is aligned. 10742 */ 10743 offs = 0; 10744 total = needed; 10745 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10746 } else { 10747 /* 10748 * There is room for us in the buffer, so we simply 10749 * need to check the wrapped offset. 10750 */ 10751 if (woffs < offs) { 10752 /* 10753 * The wrapped offset is less than the offset. 10754 * This can happen if we allocated buffer space 10755 * that induced a wrap, but then we didn't 10756 * subsequently take the space due to an error 10757 * or false predicate evaluation. This is 10758 * okay; we know that _this_ allocation isn't 10759 * going to induce a wrap. We still can't 10760 * reset the wrapped offset to be zero, 10761 * however: the space may have been trashed in 10762 * the previous failed probe attempt. But at 10763 * least the wrapped offset doesn't need to 10764 * be adjusted at all... 10765 */ 10766 goto out; 10767 } 10768 } 10769 10770 while (offs + total > woffs) { 10771 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10772 size_t size; 10773 10774 if (epid == DTRACE_EPIDNONE) { 10775 size = sizeof (uint32_t); 10776 } else { 10777 ASSERT(epid <= state->dts_necbs); 10778 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10779 10780 size = state->dts_ecbs[epid - 1]->dte_size; 10781 } 10782 10783 ASSERT(woffs + size <= buf->dtb_size); 10784 ASSERT(size != 0); 10785 10786 if (woffs + size == buf->dtb_size) { 10787 /* 10788 * We've reached the end of the buffer; we want 10789 * to set the wrapped offset to 0 and break 10790 * out. However, if the offs is 0, then we're 10791 * in a strange edge-condition: the amount of 10792 * space that we want to reserve plus the size 10793 * of the record that we're overwriting is 10794 * greater than the size of the buffer. This 10795 * is problematic because if we reserve the 10796 * space but subsequently don't consume it (due 10797 * to a failed predicate or error) the wrapped 10798 * offset will be 0 -- yet the EPID at offset 0 10799 * will not be committed. This situation is 10800 * relatively easy to deal with: if we're in 10801 * this case, the buffer is indistinguishable 10802 * from one that hasn't wrapped; we need only 10803 * finish the job by clearing the wrapped bit, 10804 * explicitly setting the offset to be 0, and 10805 * zero'ing out the old data in the buffer. 10806 */ 10807 if (offs == 0) { 10808 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10809 buf->dtb_offset = 0; 10810 woffs = total; 10811 10812 while (woffs < buf->dtb_size) 10813 tomax[woffs++] = 0; 10814 } 10815 10816 woffs = 0; 10817 break; 10818 } 10819 10820 woffs += size; 10821 } 10822 10823 /* 10824 * We have a wrapped offset. It may be that the wrapped offset 10825 * has become zero -- that's okay. 10826 */ 10827 buf->dtb_xamot_offset = woffs; 10828 } 10829 10830 out: 10831 /* 10832 * Now we can plow the buffer with any necessary padding. 10833 */ 10834 while (offs & (align - 1)) { 10835 /* 10836 * Assert that our alignment is off by a number which 10837 * is itself sizeof (uint32_t) aligned. 10838 */ 10839 ASSERT(!((align - (offs & (align - 1))) & 10840 (sizeof (uint32_t) - 1))); 10841 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10842 offs += sizeof (uint32_t); 10843 } 10844 10845 if (buf->dtb_flags & DTRACEBUF_FILL) { 10846 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10847 buf->dtb_flags |= DTRACEBUF_FULL; 10848 return (-1); 10849 } 10850 } 10851 10852 if (mstate == NULL) 10853 return (offs); 10854 10855 /* 10856 * For ring buffers and fill buffers, the scratch space is always 10857 * the inactive buffer. 10858 */ 10859 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10860 mstate->dtms_scratch_size = buf->dtb_size; 10861 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10862 10863 return (offs); 10864 } 10865 10866 static void 10867 dtrace_buffer_polish(dtrace_buffer_t *buf) 10868 { 10869 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 10870 ASSERT(MUTEX_HELD(&dtrace_lock)); 10871 10872 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 10873 return; 10874 10875 /* 10876 * We need to polish the ring buffer. There are three cases: 10877 * 10878 * - The first (and presumably most common) is that there is no gap 10879 * between the buffer offset and the wrapped offset. In this case, 10880 * there is nothing in the buffer that isn't valid data; we can 10881 * mark the buffer as polished and return. 10882 * 10883 * - The second (less common than the first but still more common 10884 * than the third) is that there is a gap between the buffer offset 10885 * and the wrapped offset, and the wrapped offset is larger than the 10886 * buffer offset. This can happen because of an alignment issue, or 10887 * can happen because of a call to dtrace_buffer_reserve() that 10888 * didn't subsequently consume the buffer space. In this case, 10889 * we need to zero the data from the buffer offset to the wrapped 10890 * offset. 10891 * 10892 * - The third (and least common) is that there is a gap between the 10893 * buffer offset and the wrapped offset, but the wrapped offset is 10894 * _less_ than the buffer offset. This can only happen because a 10895 * call to dtrace_buffer_reserve() induced a wrap, but the space 10896 * was not subsequently consumed. In this case, we need to zero the 10897 * space from the offset to the end of the buffer _and_ from the 10898 * top of the buffer to the wrapped offset. 10899 */ 10900 if (buf->dtb_offset < buf->dtb_xamot_offset) { 10901 bzero(buf->dtb_tomax + buf->dtb_offset, 10902 buf->dtb_xamot_offset - buf->dtb_offset); 10903 } 10904 10905 if (buf->dtb_offset > buf->dtb_xamot_offset) { 10906 bzero(buf->dtb_tomax + buf->dtb_offset, 10907 buf->dtb_size - buf->dtb_offset); 10908 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 10909 } 10910 } 10911 10912 /* 10913 * This routine determines if data generated at the specified time has likely 10914 * been entirely consumed at user-level. This routine is called to determine 10915 * if an ECB on a defunct probe (but for an active enabling) can be safely 10916 * disabled and destroyed. 10917 */ 10918 static int 10919 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 10920 { 10921 int i; 10922 10923 for (i = 0; i < NCPU; i++) { 10924 dtrace_buffer_t *buf = &bufs[i]; 10925 10926 if (buf->dtb_size == 0) 10927 continue; 10928 10929 if (buf->dtb_flags & DTRACEBUF_RING) 10930 return (0); 10931 10932 if (!buf->dtb_switched && buf->dtb_offset != 0) 10933 return (0); 10934 10935 if (buf->dtb_switched - buf->dtb_interval < when) 10936 return (0); 10937 } 10938 10939 return (1); 10940 } 10941 10942 static void 10943 dtrace_buffer_free(dtrace_buffer_t *bufs) 10944 { 10945 int i; 10946 10947 for (i = 0; i < NCPU; i++) { 10948 dtrace_buffer_t *buf = &bufs[i]; 10949 10950 if (buf->dtb_tomax == NULL) { 10951 ASSERT(buf->dtb_xamot == NULL); 10952 ASSERT(buf->dtb_size == 0); 10953 continue; 10954 } 10955 10956 if (buf->dtb_xamot != NULL) { 10957 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10958 kmem_free(buf->dtb_xamot, buf->dtb_size); 10959 } 10960 10961 kmem_free(buf->dtb_tomax, buf->dtb_size); 10962 buf->dtb_size = 0; 10963 buf->dtb_tomax = NULL; 10964 buf->dtb_xamot = NULL; 10965 } 10966 } 10967 10968 /* 10969 * DTrace Enabling Functions 10970 */ 10971 static dtrace_enabling_t * 10972 dtrace_enabling_create(dtrace_vstate_t *vstate) 10973 { 10974 dtrace_enabling_t *enab; 10975 10976 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 10977 enab->dten_vstate = vstate; 10978 10979 return (enab); 10980 } 10981 10982 static void 10983 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 10984 { 10985 dtrace_ecbdesc_t **ndesc; 10986 size_t osize, nsize; 10987 10988 /* 10989 * We can't add to enablings after we've enabled them, or after we've 10990 * retained them. 10991 */ 10992 ASSERT(enab->dten_probegen == 0); 10993 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10994 10995 if (enab->dten_ndesc < enab->dten_maxdesc) { 10996 enab->dten_desc[enab->dten_ndesc++] = ecb; 10997 return; 10998 } 10999 11000 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11001 11002 if (enab->dten_maxdesc == 0) { 11003 enab->dten_maxdesc = 1; 11004 } else { 11005 enab->dten_maxdesc <<= 1; 11006 } 11007 11008 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11009 11010 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11011 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11012 bcopy(enab->dten_desc, ndesc, osize); 11013 kmem_free(enab->dten_desc, osize); 11014 11015 enab->dten_desc = ndesc; 11016 enab->dten_desc[enab->dten_ndesc++] = ecb; 11017 } 11018 11019 static void 11020 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11021 dtrace_probedesc_t *pd) 11022 { 11023 dtrace_ecbdesc_t *new; 11024 dtrace_predicate_t *pred; 11025 dtrace_actdesc_t *act; 11026 11027 /* 11028 * We're going to create a new ECB description that matches the 11029 * specified ECB in every way, but has the specified probe description. 11030 */ 11031 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11032 11033 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11034 dtrace_predicate_hold(pred); 11035 11036 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11037 dtrace_actdesc_hold(act); 11038 11039 new->dted_action = ecb->dted_action; 11040 new->dted_pred = ecb->dted_pred; 11041 new->dted_probe = *pd; 11042 new->dted_uarg = ecb->dted_uarg; 11043 11044 dtrace_enabling_add(enab, new); 11045 } 11046 11047 static void 11048 dtrace_enabling_dump(dtrace_enabling_t *enab) 11049 { 11050 int i; 11051 11052 for (i = 0; i < enab->dten_ndesc; i++) { 11053 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11054 11055 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11056 desc->dtpd_provider, desc->dtpd_mod, 11057 desc->dtpd_func, desc->dtpd_name); 11058 } 11059 } 11060 11061 static void 11062 dtrace_enabling_destroy(dtrace_enabling_t *enab) 11063 { 11064 int i; 11065 dtrace_ecbdesc_t *ep; 11066 dtrace_vstate_t *vstate = enab->dten_vstate; 11067 11068 ASSERT(MUTEX_HELD(&dtrace_lock)); 11069 11070 for (i = 0; i < enab->dten_ndesc; i++) { 11071 dtrace_actdesc_t *act, *next; 11072 dtrace_predicate_t *pred; 11073 11074 ep = enab->dten_desc[i]; 11075 11076 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11077 dtrace_predicate_release(pred, vstate); 11078 11079 for (act = ep->dted_action; act != NULL; act = next) { 11080 next = act->dtad_next; 11081 dtrace_actdesc_release(act, vstate); 11082 } 11083 11084 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11085 } 11086 11087 kmem_free(enab->dten_desc, 11088 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11089 11090 /* 11091 * If this was a retained enabling, decrement the dts_nretained count 11092 * and take it off of the dtrace_retained list. 11093 */ 11094 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11095 dtrace_retained == enab) { 11096 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11097 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11098 enab->dten_vstate->dtvs_state->dts_nretained--; 11099 dtrace_retained_gen++; 11100 } 11101 11102 if (enab->dten_prev == NULL) { 11103 if (dtrace_retained == enab) { 11104 dtrace_retained = enab->dten_next; 11105 11106 if (dtrace_retained != NULL) 11107 dtrace_retained->dten_prev = NULL; 11108 } 11109 } else { 11110 ASSERT(enab != dtrace_retained); 11111 ASSERT(dtrace_retained != NULL); 11112 enab->dten_prev->dten_next = enab->dten_next; 11113 } 11114 11115 if (enab->dten_next != NULL) { 11116 ASSERT(dtrace_retained != NULL); 11117 enab->dten_next->dten_prev = enab->dten_prev; 11118 } 11119 11120 kmem_free(enab, sizeof (dtrace_enabling_t)); 11121 } 11122 11123 static int 11124 dtrace_enabling_retain(dtrace_enabling_t *enab) 11125 { 11126 dtrace_state_t *state; 11127 11128 ASSERT(MUTEX_HELD(&dtrace_lock)); 11129 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11130 ASSERT(enab->dten_vstate != NULL); 11131 11132 state = enab->dten_vstate->dtvs_state; 11133 ASSERT(state != NULL); 11134 11135 /* 11136 * We only allow each state to retain dtrace_retain_max enablings. 11137 */ 11138 if (state->dts_nretained >= dtrace_retain_max) 11139 return (ENOSPC); 11140 11141 state->dts_nretained++; 11142 dtrace_retained_gen++; 11143 11144 if (dtrace_retained == NULL) { 11145 dtrace_retained = enab; 11146 return (0); 11147 } 11148 11149 enab->dten_next = dtrace_retained; 11150 dtrace_retained->dten_prev = enab; 11151 dtrace_retained = enab; 11152 11153 return (0); 11154 } 11155 11156 static int 11157 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11158 dtrace_probedesc_t *create) 11159 { 11160 dtrace_enabling_t *new, *enab; 11161 int found = 0, err = ENOENT; 11162 11163 ASSERT(MUTEX_HELD(&dtrace_lock)); 11164 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11165 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11166 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11167 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11168 11169 new = dtrace_enabling_create(&state->dts_vstate); 11170 11171 /* 11172 * Iterate over all retained enablings, looking for enablings that 11173 * match the specified state. 11174 */ 11175 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11176 int i; 11177 11178 /* 11179 * dtvs_state can only be NULL for helper enablings -- and 11180 * helper enablings can't be retained. 11181 */ 11182 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11183 11184 if (enab->dten_vstate->dtvs_state != state) 11185 continue; 11186 11187 /* 11188 * Now iterate over each probe description; we're looking for 11189 * an exact match to the specified probe description. 11190 */ 11191 for (i = 0; i < enab->dten_ndesc; i++) { 11192 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11193 dtrace_probedesc_t *pd = &ep->dted_probe; 11194 11195 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11196 continue; 11197 11198 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11199 continue; 11200 11201 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11202 continue; 11203 11204 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11205 continue; 11206 11207 /* 11208 * We have a winning probe! Add it to our growing 11209 * enabling. 11210 */ 11211 found = 1; 11212 dtrace_enabling_addlike(new, ep, create); 11213 } 11214 } 11215 11216 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11217 dtrace_enabling_destroy(new); 11218 return (err); 11219 } 11220 11221 return (0); 11222 } 11223 11224 static void 11225 dtrace_enabling_retract(dtrace_state_t *state) 11226 { 11227 dtrace_enabling_t *enab, *next; 11228 11229 ASSERT(MUTEX_HELD(&dtrace_lock)); 11230 11231 /* 11232 * Iterate over all retained enablings, destroy the enablings retained 11233 * for the specified state. 11234 */ 11235 for (enab = dtrace_retained; enab != NULL; enab = next) { 11236 next = enab->dten_next; 11237 11238 /* 11239 * dtvs_state can only be NULL for helper enablings -- and 11240 * helper enablings can't be retained. 11241 */ 11242 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11243 11244 if (enab->dten_vstate->dtvs_state == state) { 11245 ASSERT(state->dts_nretained > 0); 11246 dtrace_enabling_destroy(enab); 11247 } 11248 } 11249 11250 ASSERT(state->dts_nretained == 0); 11251 } 11252 11253 static int 11254 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11255 { 11256 int i = 0; 11257 int total_matched = 0, matched = 0; 11258 11259 ASSERT(MUTEX_HELD(&cpu_lock)); 11260 ASSERT(MUTEX_HELD(&dtrace_lock)); 11261 11262 for (i = 0; i < enab->dten_ndesc; i++) { 11263 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11264 11265 enab->dten_current = ep; 11266 enab->dten_error = 0; 11267 11268 /* 11269 * If a provider failed to enable a probe then get out and 11270 * let the consumer know we failed. 11271 */ 11272 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0) 11273 return (EBUSY); 11274 11275 total_matched += matched; 11276 11277 if (enab->dten_error != 0) { 11278 /* 11279 * If we get an error half-way through enabling the 11280 * probes, we kick out -- perhaps with some number of 11281 * them enabled. Leaving enabled probes enabled may 11282 * be slightly confusing for user-level, but we expect 11283 * that no one will attempt to actually drive on in 11284 * the face of such errors. If this is an anonymous 11285 * enabling (indicated with a NULL nmatched pointer), 11286 * we cmn_err() a message. We aren't expecting to 11287 * get such an error -- such as it can exist at all, 11288 * it would be a result of corrupted DOF in the driver 11289 * properties. 11290 */ 11291 if (nmatched == NULL) { 11292 cmn_err(CE_WARN, "dtrace_enabling_match() " 11293 "error on %p: %d", (void *)ep, 11294 enab->dten_error); 11295 } 11296 11297 return (enab->dten_error); 11298 } 11299 } 11300 11301 enab->dten_probegen = dtrace_probegen; 11302 if (nmatched != NULL) 11303 *nmatched = total_matched; 11304 11305 return (0); 11306 } 11307 11308 static void 11309 dtrace_enabling_matchall(void) 11310 { 11311 dtrace_enabling_t *enab; 11312 11313 mutex_enter(&cpu_lock); 11314 mutex_enter(&dtrace_lock); 11315 11316 /* 11317 * Iterate over all retained enablings to see if any probes match 11318 * against them. We only perform this operation on enablings for which 11319 * we have sufficient permissions by virtue of being in the global zone 11320 * or in the same zone as the DTrace client. Because we can be called 11321 * after dtrace_detach() has been called, we cannot assert that there 11322 * are retained enablings. We can safely load from dtrace_retained, 11323 * however: the taskq_destroy() at the end of dtrace_detach() will 11324 * block pending our completion. 11325 */ 11326 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11327 dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred; 11328 cred_t *cr = dcr->dcr_cred; 11329 zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0; 11330 11331 if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL && 11332 (zone == GLOBAL_ZONEID || getzoneid() == zone))) 11333 (void) dtrace_enabling_match(enab, NULL); 11334 } 11335 11336 mutex_exit(&dtrace_lock); 11337 mutex_exit(&cpu_lock); 11338 } 11339 11340 /* 11341 * If an enabling is to be enabled without having matched probes (that is, if 11342 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11343 * enabling must be _primed_ by creating an ECB for every ECB description. 11344 * This must be done to assure that we know the number of speculations, the 11345 * number of aggregations, the minimum buffer size needed, etc. before we 11346 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11347 * enabling any probes, we create ECBs for every ECB decription, but with a 11348 * NULL probe -- which is exactly what this function does. 11349 */ 11350 static void 11351 dtrace_enabling_prime(dtrace_state_t *state) 11352 { 11353 dtrace_enabling_t *enab; 11354 int i; 11355 11356 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11357 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11358 11359 if (enab->dten_vstate->dtvs_state != state) 11360 continue; 11361 11362 /* 11363 * We don't want to prime an enabling more than once, lest 11364 * we allow a malicious user to induce resource exhaustion. 11365 * (The ECBs that result from priming an enabling aren't 11366 * leaked -- but they also aren't deallocated until the 11367 * consumer state is destroyed.) 11368 */ 11369 if (enab->dten_primed) 11370 continue; 11371 11372 for (i = 0; i < enab->dten_ndesc; i++) { 11373 enab->dten_current = enab->dten_desc[i]; 11374 (void) dtrace_probe_enable(NULL, enab); 11375 } 11376 11377 enab->dten_primed = 1; 11378 } 11379 } 11380 11381 /* 11382 * Called to indicate that probes should be provided due to retained 11383 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11384 * must take an initial lap through the enabling calling the dtps_provide() 11385 * entry point explicitly to allow for autocreated probes. 11386 */ 11387 static void 11388 dtrace_enabling_provide(dtrace_provider_t *prv) 11389 { 11390 int i, all = 0; 11391 dtrace_probedesc_t desc; 11392 dtrace_genid_t gen; 11393 11394 ASSERT(MUTEX_HELD(&dtrace_lock)); 11395 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11396 11397 if (prv == NULL) { 11398 all = 1; 11399 prv = dtrace_provider; 11400 } 11401 11402 do { 11403 dtrace_enabling_t *enab; 11404 void *parg = prv->dtpv_arg; 11405 11406 retry: 11407 gen = dtrace_retained_gen; 11408 for (enab = dtrace_retained; enab != NULL; 11409 enab = enab->dten_next) { 11410 for (i = 0; i < enab->dten_ndesc; i++) { 11411 desc = enab->dten_desc[i]->dted_probe; 11412 mutex_exit(&dtrace_lock); 11413 prv->dtpv_pops.dtps_provide(parg, &desc); 11414 mutex_enter(&dtrace_lock); 11415 /* 11416 * Process the retained enablings again if 11417 * they have changed while we weren't holding 11418 * dtrace_lock. 11419 */ 11420 if (gen != dtrace_retained_gen) 11421 goto retry; 11422 } 11423 } 11424 } while (all && (prv = prv->dtpv_next) != NULL); 11425 11426 mutex_exit(&dtrace_lock); 11427 dtrace_probe_provide(NULL, all ? NULL : prv); 11428 mutex_enter(&dtrace_lock); 11429 } 11430 11431 /* 11432 * Called to reap ECBs that are attached to probes from defunct providers. 11433 */ 11434 static void 11435 dtrace_enabling_reap(void) 11436 { 11437 dtrace_provider_t *prov; 11438 dtrace_probe_t *probe; 11439 dtrace_ecb_t *ecb; 11440 hrtime_t when; 11441 int i; 11442 11443 mutex_enter(&cpu_lock); 11444 mutex_enter(&dtrace_lock); 11445 11446 for (i = 0; i < dtrace_nprobes; i++) { 11447 if ((probe = dtrace_probes[i]) == NULL) 11448 continue; 11449 11450 if (probe->dtpr_ecb == NULL) 11451 continue; 11452 11453 prov = probe->dtpr_provider; 11454 11455 if ((when = prov->dtpv_defunct) == 0) 11456 continue; 11457 11458 /* 11459 * We have ECBs on a defunct provider: we want to reap these 11460 * ECBs to allow the provider to unregister. The destruction 11461 * of these ECBs must be done carefully: if we destroy the ECB 11462 * and the consumer later wishes to consume an EPID that 11463 * corresponds to the destroyed ECB (and if the EPID metadata 11464 * has not been previously consumed), the consumer will abort 11465 * processing on the unknown EPID. To reduce (but not, sadly, 11466 * eliminate) the possibility of this, we will only destroy an 11467 * ECB for a defunct provider if, for the state that 11468 * corresponds to the ECB: 11469 * 11470 * (a) There is no speculative tracing (which can effectively 11471 * cache an EPID for an arbitrary amount of time). 11472 * 11473 * (b) The principal buffers have been switched twice since the 11474 * provider became defunct. 11475 * 11476 * (c) The aggregation buffers are of zero size or have been 11477 * switched twice since the provider became defunct. 11478 * 11479 * We use dts_speculates to determine (a) and call a function 11480 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11481 * that as soon as we've been unable to destroy one of the ECBs 11482 * associated with the probe, we quit trying -- reaping is only 11483 * fruitful in as much as we can destroy all ECBs associated 11484 * with the defunct provider's probes. 11485 */ 11486 while ((ecb = probe->dtpr_ecb) != NULL) { 11487 dtrace_state_t *state = ecb->dte_state; 11488 dtrace_buffer_t *buf = state->dts_buffer; 11489 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11490 11491 if (state->dts_speculates) 11492 break; 11493 11494 if (!dtrace_buffer_consumed(buf, when)) 11495 break; 11496 11497 if (!dtrace_buffer_consumed(aggbuf, when)) 11498 break; 11499 11500 dtrace_ecb_disable(ecb); 11501 ASSERT(probe->dtpr_ecb != ecb); 11502 dtrace_ecb_destroy(ecb); 11503 } 11504 } 11505 11506 mutex_exit(&dtrace_lock); 11507 mutex_exit(&cpu_lock); 11508 } 11509 11510 /* 11511 * DTrace DOF Functions 11512 */ 11513 /*ARGSUSED*/ 11514 static void 11515 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11516 { 11517 if (dtrace_err_verbose) 11518 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11519 11520 #ifdef DTRACE_ERRDEBUG 11521 dtrace_errdebug(str); 11522 #endif 11523 } 11524 11525 /* 11526 * Create DOF out of a currently enabled state. Right now, we only create 11527 * DOF containing the run-time options -- but this could be expanded to create 11528 * complete DOF representing the enabled state. 11529 */ 11530 static dof_hdr_t * 11531 dtrace_dof_create(dtrace_state_t *state) 11532 { 11533 dof_hdr_t *dof; 11534 dof_sec_t *sec; 11535 dof_optdesc_t *opt; 11536 int i, len = sizeof (dof_hdr_t) + 11537 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11538 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11539 11540 ASSERT(MUTEX_HELD(&dtrace_lock)); 11541 11542 dof = kmem_zalloc(len, KM_SLEEP); 11543 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11544 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11545 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11546 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11547 11548 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11549 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11550 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11551 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11552 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11553 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11554 11555 dof->dofh_flags = 0; 11556 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11557 dof->dofh_secsize = sizeof (dof_sec_t); 11558 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11559 dof->dofh_secoff = sizeof (dof_hdr_t); 11560 dof->dofh_loadsz = len; 11561 dof->dofh_filesz = len; 11562 dof->dofh_pad = 0; 11563 11564 /* 11565 * Fill in the option section header... 11566 */ 11567 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11568 sec->dofs_type = DOF_SECT_OPTDESC; 11569 sec->dofs_align = sizeof (uint64_t); 11570 sec->dofs_flags = DOF_SECF_LOAD; 11571 sec->dofs_entsize = sizeof (dof_optdesc_t); 11572 11573 opt = (dof_optdesc_t *)((uintptr_t)sec + 11574 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11575 11576 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11577 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11578 11579 for (i = 0; i < DTRACEOPT_MAX; i++) { 11580 opt[i].dofo_option = i; 11581 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11582 opt[i].dofo_value = state->dts_options[i]; 11583 } 11584 11585 return (dof); 11586 } 11587 11588 static dof_hdr_t * 11589 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11590 { 11591 dof_hdr_t hdr, *dof; 11592 11593 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11594 11595 /* 11596 * First, we're going to copyin() the sizeof (dof_hdr_t). 11597 */ 11598 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11599 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11600 *errp = EFAULT; 11601 return (NULL); 11602 } 11603 11604 /* 11605 * Now we'll allocate the entire DOF and copy it in -- provided 11606 * that the length isn't outrageous. 11607 */ 11608 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11609 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11610 *errp = E2BIG; 11611 return (NULL); 11612 } 11613 11614 if (hdr.dofh_loadsz < sizeof (hdr)) { 11615 dtrace_dof_error(&hdr, "invalid load size"); 11616 *errp = EINVAL; 11617 return (NULL); 11618 } 11619 11620 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11621 11622 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 11623 dof->dofh_loadsz != hdr.dofh_loadsz) { 11624 kmem_free(dof, hdr.dofh_loadsz); 11625 *errp = EFAULT; 11626 return (NULL); 11627 } 11628 11629 return (dof); 11630 } 11631 11632 static dof_hdr_t * 11633 dtrace_dof_property(const char *name) 11634 { 11635 uchar_t *buf; 11636 uint64_t loadsz; 11637 unsigned int len, i; 11638 dof_hdr_t *dof; 11639 11640 /* 11641 * Unfortunately, array of values in .conf files are always (and 11642 * only) interpreted to be integer arrays. We must read our DOF 11643 * as an integer array, and then squeeze it into a byte array. 11644 */ 11645 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11646 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11647 return (NULL); 11648 11649 for (i = 0; i < len; i++) 11650 buf[i] = (uchar_t)(((int *)buf)[i]); 11651 11652 if (len < sizeof (dof_hdr_t)) { 11653 ddi_prop_free(buf); 11654 dtrace_dof_error(NULL, "truncated header"); 11655 return (NULL); 11656 } 11657 11658 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11659 ddi_prop_free(buf); 11660 dtrace_dof_error(NULL, "truncated DOF"); 11661 return (NULL); 11662 } 11663 11664 if (loadsz >= dtrace_dof_maxsize) { 11665 ddi_prop_free(buf); 11666 dtrace_dof_error(NULL, "oversized DOF"); 11667 return (NULL); 11668 } 11669 11670 dof = kmem_alloc(loadsz, KM_SLEEP); 11671 bcopy(buf, dof, loadsz); 11672 ddi_prop_free(buf); 11673 11674 return (dof); 11675 } 11676 11677 static void 11678 dtrace_dof_destroy(dof_hdr_t *dof) 11679 { 11680 kmem_free(dof, dof->dofh_loadsz); 11681 } 11682 11683 /* 11684 * Return the dof_sec_t pointer corresponding to a given section index. If the 11685 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11686 * a type other than DOF_SECT_NONE is specified, the header is checked against 11687 * this type and NULL is returned if the types do not match. 11688 */ 11689 static dof_sec_t * 11690 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11691 { 11692 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11693 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11694 11695 if (i >= dof->dofh_secnum) { 11696 dtrace_dof_error(dof, "referenced section index is invalid"); 11697 return (NULL); 11698 } 11699 11700 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11701 dtrace_dof_error(dof, "referenced section is not loadable"); 11702 return (NULL); 11703 } 11704 11705 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11706 dtrace_dof_error(dof, "referenced section is the wrong type"); 11707 return (NULL); 11708 } 11709 11710 return (sec); 11711 } 11712 11713 static dtrace_probedesc_t * 11714 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11715 { 11716 dof_probedesc_t *probe; 11717 dof_sec_t *strtab; 11718 uintptr_t daddr = (uintptr_t)dof; 11719 uintptr_t str; 11720 size_t size; 11721 11722 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11723 dtrace_dof_error(dof, "invalid probe section"); 11724 return (NULL); 11725 } 11726 11727 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11728 dtrace_dof_error(dof, "bad alignment in probe description"); 11729 return (NULL); 11730 } 11731 11732 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11733 dtrace_dof_error(dof, "truncated probe description"); 11734 return (NULL); 11735 } 11736 11737 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11738 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11739 11740 if (strtab == NULL) 11741 return (NULL); 11742 11743 str = daddr + strtab->dofs_offset; 11744 size = strtab->dofs_size; 11745 11746 if (probe->dofp_provider >= strtab->dofs_size) { 11747 dtrace_dof_error(dof, "corrupt probe provider"); 11748 return (NULL); 11749 } 11750 11751 (void) strncpy(desc->dtpd_provider, 11752 (char *)(str + probe->dofp_provider), 11753 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11754 11755 if (probe->dofp_mod >= strtab->dofs_size) { 11756 dtrace_dof_error(dof, "corrupt probe module"); 11757 return (NULL); 11758 } 11759 11760 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11761 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11762 11763 if (probe->dofp_func >= strtab->dofs_size) { 11764 dtrace_dof_error(dof, "corrupt probe function"); 11765 return (NULL); 11766 } 11767 11768 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11769 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11770 11771 if (probe->dofp_name >= strtab->dofs_size) { 11772 dtrace_dof_error(dof, "corrupt probe name"); 11773 return (NULL); 11774 } 11775 11776 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11777 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11778 11779 return (desc); 11780 } 11781 11782 static dtrace_difo_t * 11783 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11784 cred_t *cr) 11785 { 11786 dtrace_difo_t *dp; 11787 size_t ttl = 0; 11788 dof_difohdr_t *dofd; 11789 uintptr_t daddr = (uintptr_t)dof; 11790 size_t max = dtrace_difo_maxsize; 11791 int i, l, n; 11792 11793 static const struct { 11794 int section; 11795 int bufoffs; 11796 int lenoffs; 11797 int entsize; 11798 int align; 11799 const char *msg; 11800 } difo[] = { 11801 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11802 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11803 sizeof (dif_instr_t), "multiple DIF sections" }, 11804 11805 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11806 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11807 sizeof (uint64_t), "multiple integer tables" }, 11808 11809 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11810 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11811 sizeof (char), "multiple string tables" }, 11812 11813 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11814 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11815 sizeof (uint_t), "multiple variable tables" }, 11816 11817 { DOF_SECT_NONE, 0, 0, 0, NULL } 11818 }; 11819 11820 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11821 dtrace_dof_error(dof, "invalid DIFO header section"); 11822 return (NULL); 11823 } 11824 11825 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11826 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11827 return (NULL); 11828 } 11829 11830 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11831 sec->dofs_size % sizeof (dof_secidx_t)) { 11832 dtrace_dof_error(dof, "bad size in DIFO header"); 11833 return (NULL); 11834 } 11835 11836 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11837 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11838 11839 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11840 dp->dtdo_rtype = dofd->dofd_rtype; 11841 11842 for (l = 0; l < n; l++) { 11843 dof_sec_t *subsec; 11844 void **bufp; 11845 uint32_t *lenp; 11846 11847 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11848 dofd->dofd_links[l])) == NULL) 11849 goto err; /* invalid section link */ 11850 11851 if (ttl + subsec->dofs_size > max) { 11852 dtrace_dof_error(dof, "exceeds maximum size"); 11853 goto err; 11854 } 11855 11856 ttl += subsec->dofs_size; 11857 11858 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11859 if (subsec->dofs_type != difo[i].section) 11860 continue; 11861 11862 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11863 dtrace_dof_error(dof, "section not loaded"); 11864 goto err; 11865 } 11866 11867 if (subsec->dofs_align != difo[i].align) { 11868 dtrace_dof_error(dof, "bad alignment"); 11869 goto err; 11870 } 11871 11872 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 11873 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 11874 11875 if (*bufp != NULL) { 11876 dtrace_dof_error(dof, difo[i].msg); 11877 goto err; 11878 } 11879 11880 if (difo[i].entsize != subsec->dofs_entsize) { 11881 dtrace_dof_error(dof, "entry size mismatch"); 11882 goto err; 11883 } 11884 11885 if (subsec->dofs_entsize != 0 && 11886 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 11887 dtrace_dof_error(dof, "corrupt entry size"); 11888 goto err; 11889 } 11890 11891 *lenp = subsec->dofs_size; 11892 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 11893 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 11894 *bufp, subsec->dofs_size); 11895 11896 if (subsec->dofs_entsize != 0) 11897 *lenp /= subsec->dofs_entsize; 11898 11899 break; 11900 } 11901 11902 /* 11903 * If we encounter a loadable DIFO sub-section that is not 11904 * known to us, assume this is a broken program and fail. 11905 */ 11906 if (difo[i].section == DOF_SECT_NONE && 11907 (subsec->dofs_flags & DOF_SECF_LOAD)) { 11908 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 11909 goto err; 11910 } 11911 } 11912 11913 if (dp->dtdo_buf == NULL) { 11914 /* 11915 * We can't have a DIF object without DIF text. 11916 */ 11917 dtrace_dof_error(dof, "missing DIF text"); 11918 goto err; 11919 } 11920 11921 /* 11922 * Before we validate the DIF object, run through the variable table 11923 * looking for the strings -- if any of their size are under, we'll set 11924 * their size to be the system-wide default string size. Note that 11925 * this should _not_ happen if the "strsize" option has been set -- 11926 * in this case, the compiler should have set the size to reflect the 11927 * setting of the option. 11928 */ 11929 for (i = 0; i < dp->dtdo_varlen; i++) { 11930 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 11931 dtrace_diftype_t *t = &v->dtdv_type; 11932 11933 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 11934 continue; 11935 11936 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 11937 t->dtdt_size = dtrace_strsize_default; 11938 } 11939 11940 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 11941 goto err; 11942 11943 dtrace_difo_init(dp, vstate); 11944 return (dp); 11945 11946 err: 11947 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 11948 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 11949 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 11950 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 11951 11952 kmem_free(dp, sizeof (dtrace_difo_t)); 11953 return (NULL); 11954 } 11955 11956 static dtrace_predicate_t * 11957 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11958 cred_t *cr) 11959 { 11960 dtrace_difo_t *dp; 11961 11962 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 11963 return (NULL); 11964 11965 return (dtrace_predicate_create(dp)); 11966 } 11967 11968 static dtrace_actdesc_t * 11969 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11970 cred_t *cr) 11971 { 11972 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 11973 dof_actdesc_t *desc; 11974 dof_sec_t *difosec; 11975 size_t offs; 11976 uintptr_t daddr = (uintptr_t)dof; 11977 uint64_t arg; 11978 dtrace_actkind_t kind; 11979 11980 if (sec->dofs_type != DOF_SECT_ACTDESC) { 11981 dtrace_dof_error(dof, "invalid action section"); 11982 return (NULL); 11983 } 11984 11985 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 11986 dtrace_dof_error(dof, "truncated action description"); 11987 return (NULL); 11988 } 11989 11990 if (sec->dofs_align != sizeof (uint64_t)) { 11991 dtrace_dof_error(dof, "bad alignment in action description"); 11992 return (NULL); 11993 } 11994 11995 if (sec->dofs_size < sec->dofs_entsize) { 11996 dtrace_dof_error(dof, "section entry size exceeds total size"); 11997 return (NULL); 11998 } 11999 12000 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12001 dtrace_dof_error(dof, "bad entry size in action description"); 12002 return (NULL); 12003 } 12004 12005 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12006 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12007 return (NULL); 12008 } 12009 12010 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12011 desc = (dof_actdesc_t *)(daddr + 12012 (uintptr_t)sec->dofs_offset + offs); 12013 kind = (dtrace_actkind_t)desc->dofa_kind; 12014 12015 if ((DTRACEACT_ISPRINTFLIKE(kind) && 12016 (kind != DTRACEACT_PRINTA || 12017 desc->dofa_strtab != DOF_SECIDX_NONE)) || 12018 (kind == DTRACEACT_DIFEXPR && 12019 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12020 dof_sec_t *strtab; 12021 char *str, *fmt; 12022 uint64_t i; 12023 12024 /* 12025 * The argument to these actions is an index into the 12026 * DOF string table. For printf()-like actions, this 12027 * is the format string. For print(), this is the 12028 * CTF type of the expression result. 12029 */ 12030 if ((strtab = dtrace_dof_sect(dof, 12031 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12032 goto err; 12033 12034 str = (char *)((uintptr_t)dof + 12035 (uintptr_t)strtab->dofs_offset); 12036 12037 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12038 if (str[i] == '\0') 12039 break; 12040 } 12041 12042 if (i >= strtab->dofs_size) { 12043 dtrace_dof_error(dof, "bogus format string"); 12044 goto err; 12045 } 12046 12047 if (i == desc->dofa_arg) { 12048 dtrace_dof_error(dof, "empty format string"); 12049 goto err; 12050 } 12051 12052 i -= desc->dofa_arg; 12053 fmt = kmem_alloc(i + 1, KM_SLEEP); 12054 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12055 arg = (uint64_t)(uintptr_t)fmt; 12056 } else { 12057 if (kind == DTRACEACT_PRINTA) { 12058 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12059 arg = 0; 12060 } else { 12061 arg = desc->dofa_arg; 12062 } 12063 } 12064 12065 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12066 desc->dofa_uarg, arg); 12067 12068 if (last != NULL) { 12069 last->dtad_next = act; 12070 } else { 12071 first = act; 12072 } 12073 12074 last = act; 12075 12076 if (desc->dofa_difo == DOF_SECIDX_NONE) 12077 continue; 12078 12079 if ((difosec = dtrace_dof_sect(dof, 12080 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12081 goto err; 12082 12083 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12084 12085 if (act->dtad_difo == NULL) 12086 goto err; 12087 } 12088 12089 ASSERT(first != NULL); 12090 return (first); 12091 12092 err: 12093 for (act = first; act != NULL; act = next) { 12094 next = act->dtad_next; 12095 dtrace_actdesc_release(act, vstate); 12096 } 12097 12098 return (NULL); 12099 } 12100 12101 static dtrace_ecbdesc_t * 12102 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12103 cred_t *cr) 12104 { 12105 dtrace_ecbdesc_t *ep; 12106 dof_ecbdesc_t *ecb; 12107 dtrace_probedesc_t *desc; 12108 dtrace_predicate_t *pred = NULL; 12109 12110 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12111 dtrace_dof_error(dof, "truncated ECB description"); 12112 return (NULL); 12113 } 12114 12115 if (sec->dofs_align != sizeof (uint64_t)) { 12116 dtrace_dof_error(dof, "bad alignment in ECB description"); 12117 return (NULL); 12118 } 12119 12120 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12121 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12122 12123 if (sec == NULL) 12124 return (NULL); 12125 12126 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12127 ep->dted_uarg = ecb->dofe_uarg; 12128 desc = &ep->dted_probe; 12129 12130 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12131 goto err; 12132 12133 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12134 if ((sec = dtrace_dof_sect(dof, 12135 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12136 goto err; 12137 12138 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12139 goto err; 12140 12141 ep->dted_pred.dtpdd_predicate = pred; 12142 } 12143 12144 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12145 if ((sec = dtrace_dof_sect(dof, 12146 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12147 goto err; 12148 12149 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12150 12151 if (ep->dted_action == NULL) 12152 goto err; 12153 } 12154 12155 return (ep); 12156 12157 err: 12158 if (pred != NULL) 12159 dtrace_predicate_release(pred, vstate); 12160 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12161 return (NULL); 12162 } 12163 12164 /* 12165 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12166 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12167 * site of any user SETX relocations to account for load object base address. 12168 * In the future, if we need other relocations, this function can be extended. 12169 */ 12170 static int 12171 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12172 { 12173 uintptr_t daddr = (uintptr_t)dof; 12174 dof_relohdr_t *dofr = 12175 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12176 dof_sec_t *ss, *rs, *ts; 12177 dof_relodesc_t *r; 12178 uint_t i, n; 12179 12180 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12181 sec->dofs_align != sizeof (dof_secidx_t)) { 12182 dtrace_dof_error(dof, "invalid relocation header"); 12183 return (-1); 12184 } 12185 12186 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12187 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12188 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12189 12190 if (ss == NULL || rs == NULL || ts == NULL) 12191 return (-1); /* dtrace_dof_error() has been called already */ 12192 12193 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12194 rs->dofs_align != sizeof (uint64_t)) { 12195 dtrace_dof_error(dof, "invalid relocation section"); 12196 return (-1); 12197 } 12198 12199 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12200 n = rs->dofs_size / rs->dofs_entsize; 12201 12202 for (i = 0; i < n; i++) { 12203 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12204 12205 switch (r->dofr_type) { 12206 case DOF_RELO_NONE: 12207 break; 12208 case DOF_RELO_SETX: 12209 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12210 sizeof (uint64_t) > ts->dofs_size) { 12211 dtrace_dof_error(dof, "bad relocation offset"); 12212 return (-1); 12213 } 12214 12215 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12216 dtrace_dof_error(dof, "misaligned setx relo"); 12217 return (-1); 12218 } 12219 12220 *(uint64_t *)taddr += ubase; 12221 break; 12222 default: 12223 dtrace_dof_error(dof, "invalid relocation type"); 12224 return (-1); 12225 } 12226 12227 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12228 } 12229 12230 return (0); 12231 } 12232 12233 /* 12234 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12235 * header: it should be at the front of a memory region that is at least 12236 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12237 * size. It need not be validated in any other way. 12238 */ 12239 static int 12240 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12241 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12242 { 12243 uint64_t len = dof->dofh_loadsz, seclen; 12244 uintptr_t daddr = (uintptr_t)dof; 12245 dtrace_ecbdesc_t *ep; 12246 dtrace_enabling_t *enab; 12247 uint_t i; 12248 12249 ASSERT(MUTEX_HELD(&dtrace_lock)); 12250 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12251 12252 /* 12253 * Check the DOF header identification bytes. In addition to checking 12254 * valid settings, we also verify that unused bits/bytes are zeroed so 12255 * we can use them later without fear of regressing existing binaries. 12256 */ 12257 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12258 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12259 dtrace_dof_error(dof, "DOF magic string mismatch"); 12260 return (-1); 12261 } 12262 12263 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12264 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12265 dtrace_dof_error(dof, "DOF has invalid data model"); 12266 return (-1); 12267 } 12268 12269 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12270 dtrace_dof_error(dof, "DOF encoding mismatch"); 12271 return (-1); 12272 } 12273 12274 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12275 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12276 dtrace_dof_error(dof, "DOF version mismatch"); 12277 return (-1); 12278 } 12279 12280 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12281 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12282 return (-1); 12283 } 12284 12285 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12286 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12287 return (-1); 12288 } 12289 12290 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12291 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12292 return (-1); 12293 } 12294 12295 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12296 if (dof->dofh_ident[i] != 0) { 12297 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12298 return (-1); 12299 } 12300 } 12301 12302 if (dof->dofh_flags & ~DOF_FL_VALID) { 12303 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12304 return (-1); 12305 } 12306 12307 if (dof->dofh_secsize == 0) { 12308 dtrace_dof_error(dof, "zero section header size"); 12309 return (-1); 12310 } 12311 12312 /* 12313 * Check that the section headers don't exceed the amount of DOF 12314 * data. Note that we cast the section size and number of sections 12315 * to uint64_t's to prevent possible overflow in the multiplication. 12316 */ 12317 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12318 12319 if (dof->dofh_secoff > len || seclen > len || 12320 dof->dofh_secoff + seclen > len) { 12321 dtrace_dof_error(dof, "truncated section headers"); 12322 return (-1); 12323 } 12324 12325 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12326 dtrace_dof_error(dof, "misaligned section headers"); 12327 return (-1); 12328 } 12329 12330 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12331 dtrace_dof_error(dof, "misaligned section size"); 12332 return (-1); 12333 } 12334 12335 /* 12336 * Take an initial pass through the section headers to be sure that 12337 * the headers don't have stray offsets. If the 'noprobes' flag is 12338 * set, do not permit sections relating to providers, probes, or args. 12339 */ 12340 for (i = 0; i < dof->dofh_secnum; i++) { 12341 dof_sec_t *sec = (dof_sec_t *)(daddr + 12342 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12343 12344 if (noprobes) { 12345 switch (sec->dofs_type) { 12346 case DOF_SECT_PROVIDER: 12347 case DOF_SECT_PROBES: 12348 case DOF_SECT_PRARGS: 12349 case DOF_SECT_PROFFS: 12350 dtrace_dof_error(dof, "illegal sections " 12351 "for enabling"); 12352 return (-1); 12353 } 12354 } 12355 12356 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 12357 !(sec->dofs_flags & DOF_SECF_LOAD)) { 12358 dtrace_dof_error(dof, "loadable section with load " 12359 "flag unset"); 12360 return (-1); 12361 } 12362 12363 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12364 continue; /* just ignore non-loadable sections */ 12365 12366 if (sec->dofs_align & (sec->dofs_align - 1)) { 12367 dtrace_dof_error(dof, "bad section alignment"); 12368 return (-1); 12369 } 12370 12371 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12372 dtrace_dof_error(dof, "misaligned section"); 12373 return (-1); 12374 } 12375 12376 if (sec->dofs_offset > len || sec->dofs_size > len || 12377 sec->dofs_offset + sec->dofs_size > len) { 12378 dtrace_dof_error(dof, "corrupt section header"); 12379 return (-1); 12380 } 12381 12382 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12383 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12384 dtrace_dof_error(dof, "non-terminating string table"); 12385 return (-1); 12386 } 12387 } 12388 12389 /* 12390 * Take a second pass through the sections and locate and perform any 12391 * relocations that are present. We do this after the first pass to 12392 * be sure that all sections have had their headers validated. 12393 */ 12394 for (i = 0; i < dof->dofh_secnum; i++) { 12395 dof_sec_t *sec = (dof_sec_t *)(daddr + 12396 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12397 12398 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12399 continue; /* skip sections that are not loadable */ 12400 12401 switch (sec->dofs_type) { 12402 case DOF_SECT_URELHDR: 12403 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12404 return (-1); 12405 break; 12406 } 12407 } 12408 12409 if ((enab = *enabp) == NULL) 12410 enab = *enabp = dtrace_enabling_create(vstate); 12411 12412 for (i = 0; i < dof->dofh_secnum; i++) { 12413 dof_sec_t *sec = (dof_sec_t *)(daddr + 12414 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12415 12416 if (sec->dofs_type != DOF_SECT_ECBDESC) 12417 continue; 12418 12419 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12420 dtrace_enabling_destroy(enab); 12421 *enabp = NULL; 12422 return (-1); 12423 } 12424 12425 dtrace_enabling_add(enab, ep); 12426 } 12427 12428 return (0); 12429 } 12430 12431 /* 12432 * Process DOF for any options. This routine assumes that the DOF has been 12433 * at least processed by dtrace_dof_slurp(). 12434 */ 12435 static int 12436 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12437 { 12438 int i, rval; 12439 uint32_t entsize; 12440 size_t offs; 12441 dof_optdesc_t *desc; 12442 12443 for (i = 0; i < dof->dofh_secnum; i++) { 12444 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12445 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12446 12447 if (sec->dofs_type != DOF_SECT_OPTDESC) 12448 continue; 12449 12450 if (sec->dofs_align != sizeof (uint64_t)) { 12451 dtrace_dof_error(dof, "bad alignment in " 12452 "option description"); 12453 return (EINVAL); 12454 } 12455 12456 if ((entsize = sec->dofs_entsize) == 0) { 12457 dtrace_dof_error(dof, "zeroed option entry size"); 12458 return (EINVAL); 12459 } 12460 12461 if (entsize < sizeof (dof_optdesc_t)) { 12462 dtrace_dof_error(dof, "bad option entry size"); 12463 return (EINVAL); 12464 } 12465 12466 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12467 desc = (dof_optdesc_t *)((uintptr_t)dof + 12468 (uintptr_t)sec->dofs_offset + offs); 12469 12470 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12471 dtrace_dof_error(dof, "non-zero option string"); 12472 return (EINVAL); 12473 } 12474 12475 if (desc->dofo_value == DTRACEOPT_UNSET) { 12476 dtrace_dof_error(dof, "unset option"); 12477 return (EINVAL); 12478 } 12479 12480 if ((rval = dtrace_state_option(state, 12481 desc->dofo_option, desc->dofo_value)) != 0) { 12482 dtrace_dof_error(dof, "rejected option"); 12483 return (rval); 12484 } 12485 } 12486 } 12487 12488 return (0); 12489 } 12490 12491 /* 12492 * DTrace Consumer State Functions 12493 */ 12494 int 12495 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12496 { 12497 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12498 void *base; 12499 uintptr_t limit; 12500 dtrace_dynvar_t *dvar, *next, *start; 12501 int i; 12502 12503 ASSERT(MUTEX_HELD(&dtrace_lock)); 12504 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12505 12506 bzero(dstate, sizeof (dtrace_dstate_t)); 12507 12508 if ((dstate->dtds_chunksize = chunksize) == 0) 12509 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12510 12511 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12512 size = min; 12513 12514 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12515 return (ENOMEM); 12516 12517 dstate->dtds_size = size; 12518 dstate->dtds_base = base; 12519 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12520 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12521 12522 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12523 12524 if (hashsize != 1 && (hashsize & 1)) 12525 hashsize--; 12526 12527 dstate->dtds_hashsize = hashsize; 12528 dstate->dtds_hash = dstate->dtds_base; 12529 12530 /* 12531 * Set all of our hash buckets to point to the single sink, and (if 12532 * it hasn't already been set), set the sink's hash value to be the 12533 * sink sentinel value. The sink is needed for dynamic variable 12534 * lookups to know that they have iterated over an entire, valid hash 12535 * chain. 12536 */ 12537 for (i = 0; i < hashsize; i++) 12538 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12539 12540 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12541 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12542 12543 /* 12544 * Determine number of active CPUs. Divide free list evenly among 12545 * active CPUs. 12546 */ 12547 start = (dtrace_dynvar_t *) 12548 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12549 limit = (uintptr_t)base + size; 12550 12551 maxper = (limit - (uintptr_t)start) / NCPU; 12552 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12553 12554 for (i = 0; i < NCPU; i++) { 12555 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12556 12557 /* 12558 * If we don't even have enough chunks to make it once through 12559 * NCPUs, we're just going to allocate everything to the first 12560 * CPU. And if we're on the last CPU, we're going to allocate 12561 * whatever is left over. In either case, we set the limit to 12562 * be the limit of the dynamic variable space. 12563 */ 12564 if (maxper == 0 || i == NCPU - 1) { 12565 limit = (uintptr_t)base + size; 12566 start = NULL; 12567 } else { 12568 limit = (uintptr_t)start + maxper; 12569 start = (dtrace_dynvar_t *)limit; 12570 } 12571 12572 ASSERT(limit <= (uintptr_t)base + size); 12573 12574 for (;;) { 12575 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12576 dstate->dtds_chunksize); 12577 12578 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12579 break; 12580 12581 dvar->dtdv_next = next; 12582 dvar = next; 12583 } 12584 12585 if (maxper == 0) 12586 break; 12587 } 12588 12589 return (0); 12590 } 12591 12592 void 12593 dtrace_dstate_fini(dtrace_dstate_t *dstate) 12594 { 12595 ASSERT(MUTEX_HELD(&cpu_lock)); 12596 12597 if (dstate->dtds_base == NULL) 12598 return; 12599 12600 kmem_free(dstate->dtds_base, dstate->dtds_size); 12601 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12602 } 12603 12604 static void 12605 dtrace_vstate_fini(dtrace_vstate_t *vstate) 12606 { 12607 /* 12608 * Logical XOR, where are you? 12609 */ 12610 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12611 12612 if (vstate->dtvs_nglobals > 0) { 12613 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12614 sizeof (dtrace_statvar_t *)); 12615 } 12616 12617 if (vstate->dtvs_ntlocals > 0) { 12618 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12619 sizeof (dtrace_difv_t)); 12620 } 12621 12622 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12623 12624 if (vstate->dtvs_nlocals > 0) { 12625 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12626 sizeof (dtrace_statvar_t *)); 12627 } 12628 } 12629 12630 static void 12631 dtrace_state_clean(dtrace_state_t *state) 12632 { 12633 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12634 return; 12635 12636 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12637 dtrace_speculation_clean(state); 12638 } 12639 12640 static void 12641 dtrace_state_deadman(dtrace_state_t *state) 12642 { 12643 hrtime_t now; 12644 12645 dtrace_sync(); 12646 12647 now = dtrace_gethrtime(); 12648 12649 if (state != dtrace_anon.dta_state && 12650 now - state->dts_laststatus >= dtrace_deadman_user) 12651 return; 12652 12653 /* 12654 * We must be sure that dts_alive never appears to be less than the 12655 * value upon entry to dtrace_state_deadman(), and because we lack a 12656 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12657 * store INT64_MAX to it, followed by a memory barrier, followed by 12658 * the new value. This assures that dts_alive never appears to be 12659 * less than its true value, regardless of the order in which the 12660 * stores to the underlying storage are issued. 12661 */ 12662 state->dts_alive = INT64_MAX; 12663 dtrace_membar_producer(); 12664 state->dts_alive = now; 12665 } 12666 12667 dtrace_state_t * 12668 dtrace_state_create(dev_t *devp, cred_t *cr) 12669 { 12670 minor_t minor; 12671 major_t major; 12672 char c[30]; 12673 dtrace_state_t *state; 12674 dtrace_optval_t *opt; 12675 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12676 12677 ASSERT(MUTEX_HELD(&dtrace_lock)); 12678 ASSERT(MUTEX_HELD(&cpu_lock)); 12679 12680 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12681 VM_BESTFIT | VM_SLEEP); 12682 12683 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12684 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12685 return (NULL); 12686 } 12687 12688 state = ddi_get_soft_state(dtrace_softstate, minor); 12689 state->dts_epid = DTRACE_EPIDNONE + 1; 12690 12691 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 12692 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12693 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12694 12695 if (devp != NULL) { 12696 major = getemajor(*devp); 12697 } else { 12698 major = ddi_driver_major(dtrace_devi); 12699 } 12700 12701 state->dts_dev = makedevice(major, minor); 12702 12703 if (devp != NULL) 12704 *devp = state->dts_dev; 12705 12706 /* 12707 * We allocate NCPU buffers. On the one hand, this can be quite 12708 * a bit of memory per instance (nearly 36K on a Starcat). On the 12709 * other hand, it saves an additional memory reference in the probe 12710 * path. 12711 */ 12712 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12713 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12714 state->dts_cleaner = CYCLIC_NONE; 12715 state->dts_deadman = CYCLIC_NONE; 12716 state->dts_vstate.dtvs_state = state; 12717 12718 for (i = 0; i < DTRACEOPT_MAX; i++) 12719 state->dts_options[i] = DTRACEOPT_UNSET; 12720 12721 /* 12722 * Set the default options. 12723 */ 12724 opt = state->dts_options; 12725 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12726 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12727 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12728 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12729 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12730 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12731 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12732 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12733 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12734 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12735 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12736 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12737 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12738 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12739 12740 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12741 12742 /* 12743 * Depending on the user credentials, we set flag bits which alter probe 12744 * visibility or the amount of destructiveness allowed. In the case of 12745 * actual anonymous tracing, or the possession of all privileges, all of 12746 * the normal checks are bypassed. 12747 */ 12748 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 12749 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 12750 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 12751 } else { 12752 /* 12753 * Set up the credentials for this instantiation. We take a 12754 * hold on the credential to prevent it from disappearing on 12755 * us; this in turn prevents the zone_t referenced by this 12756 * credential from disappearing. This means that we can 12757 * examine the credential and the zone from probe context. 12758 */ 12759 crhold(cr); 12760 state->dts_cred.dcr_cred = cr; 12761 12762 /* 12763 * CRA_PROC means "we have *some* privilege for dtrace" and 12764 * unlocks the use of variables like pid, zonename, etc. 12765 */ 12766 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 12767 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12768 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 12769 } 12770 12771 /* 12772 * dtrace_user allows use of syscall and profile providers. 12773 * If the user also has proc_owner and/or proc_zone, we 12774 * extend the scope to include additional visibility and 12775 * destructive power. 12776 */ 12777 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 12778 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 12779 state->dts_cred.dcr_visible |= 12780 DTRACE_CRV_ALLPROC; 12781 12782 state->dts_cred.dcr_action |= 12783 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12784 } 12785 12786 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 12787 state->dts_cred.dcr_visible |= 12788 DTRACE_CRV_ALLZONE; 12789 12790 state->dts_cred.dcr_action |= 12791 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12792 } 12793 12794 /* 12795 * If we have all privs in whatever zone this is, 12796 * we can do destructive things to processes which 12797 * have altered credentials. 12798 */ 12799 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12800 cr->cr_zone->zone_privset)) { 12801 state->dts_cred.dcr_action |= 12802 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12803 } 12804 } 12805 12806 /* 12807 * Holding the dtrace_kernel privilege also implies that 12808 * the user has the dtrace_user privilege from a visibility 12809 * perspective. But without further privileges, some 12810 * destructive actions are not available. 12811 */ 12812 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12813 /* 12814 * Make all probes in all zones visible. However, 12815 * this doesn't mean that all actions become available 12816 * to all zones. 12817 */ 12818 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12819 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12820 12821 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12822 DTRACE_CRA_PROC; 12823 /* 12824 * Holding proc_owner means that destructive actions 12825 * for *this* zone are allowed. 12826 */ 12827 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12828 state->dts_cred.dcr_action |= 12829 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12830 12831 /* 12832 * Holding proc_zone means that destructive actions 12833 * for this user/group ID in all zones is allowed. 12834 */ 12835 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12836 state->dts_cred.dcr_action |= 12837 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12838 12839 /* 12840 * If we have all privs in whatever zone this is, 12841 * we can do destructive things to processes which 12842 * have altered credentials. 12843 */ 12844 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12845 cr->cr_zone->zone_privset)) { 12846 state->dts_cred.dcr_action |= 12847 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12848 } 12849 } 12850 12851 /* 12852 * Holding the dtrace_proc privilege gives control over fasttrap 12853 * and pid providers. We need to grant wider destructive 12854 * privileges in the event that the user has proc_owner and/or 12855 * proc_zone. 12856 */ 12857 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12858 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12859 state->dts_cred.dcr_action |= 12860 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12861 12862 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12863 state->dts_cred.dcr_action |= 12864 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12865 } 12866 } 12867 12868 return (state); 12869 } 12870 12871 static int 12872 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 12873 { 12874 dtrace_optval_t *opt = state->dts_options, size; 12875 processorid_t cpu; 12876 int flags = 0, rval, factor, divisor = 1; 12877 12878 ASSERT(MUTEX_HELD(&dtrace_lock)); 12879 ASSERT(MUTEX_HELD(&cpu_lock)); 12880 ASSERT(which < DTRACEOPT_MAX); 12881 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 12882 (state == dtrace_anon.dta_state && 12883 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 12884 12885 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 12886 return (0); 12887 12888 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 12889 cpu = opt[DTRACEOPT_CPU]; 12890 12891 if (which == DTRACEOPT_SPECSIZE) 12892 flags |= DTRACEBUF_NOSWITCH; 12893 12894 if (which == DTRACEOPT_BUFSIZE) { 12895 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 12896 flags |= DTRACEBUF_RING; 12897 12898 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 12899 flags |= DTRACEBUF_FILL; 12900 12901 if (state != dtrace_anon.dta_state || 12902 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 12903 flags |= DTRACEBUF_INACTIVE; 12904 } 12905 12906 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) { 12907 /* 12908 * The size must be 8-byte aligned. If the size is not 8-byte 12909 * aligned, drop it down by the difference. 12910 */ 12911 if (size & (sizeof (uint64_t) - 1)) 12912 size -= size & (sizeof (uint64_t) - 1); 12913 12914 if (size < state->dts_reserve) { 12915 /* 12916 * Buffers always must be large enough to accommodate 12917 * their prereserved space. We return E2BIG instead 12918 * of ENOMEM in this case to allow for user-level 12919 * software to differentiate the cases. 12920 */ 12921 return (E2BIG); 12922 } 12923 12924 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor); 12925 12926 if (rval != ENOMEM) { 12927 opt[which] = size; 12928 return (rval); 12929 } 12930 12931 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12932 return (rval); 12933 12934 for (divisor = 2; divisor < factor; divisor <<= 1) 12935 continue; 12936 } 12937 12938 return (ENOMEM); 12939 } 12940 12941 static int 12942 dtrace_state_buffers(dtrace_state_t *state) 12943 { 12944 dtrace_speculation_t *spec = state->dts_speculations; 12945 int rval, i; 12946 12947 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 12948 DTRACEOPT_BUFSIZE)) != 0) 12949 return (rval); 12950 12951 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 12952 DTRACEOPT_AGGSIZE)) != 0) 12953 return (rval); 12954 12955 for (i = 0; i < state->dts_nspeculations; i++) { 12956 if ((rval = dtrace_state_buffer(state, 12957 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 12958 return (rval); 12959 } 12960 12961 return (0); 12962 } 12963 12964 static void 12965 dtrace_state_prereserve(dtrace_state_t *state) 12966 { 12967 dtrace_ecb_t *ecb; 12968 dtrace_probe_t *probe; 12969 12970 state->dts_reserve = 0; 12971 12972 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 12973 return; 12974 12975 /* 12976 * If our buffer policy is a "fill" buffer policy, we need to set the 12977 * prereserved space to be the space required by the END probes. 12978 */ 12979 probe = dtrace_probes[dtrace_probeid_end - 1]; 12980 ASSERT(probe != NULL); 12981 12982 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 12983 if (ecb->dte_state != state) 12984 continue; 12985 12986 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 12987 } 12988 } 12989 12990 static int 12991 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 12992 { 12993 dtrace_optval_t *opt = state->dts_options, sz, nspec; 12994 dtrace_speculation_t *spec; 12995 dtrace_buffer_t *buf; 12996 cyc_handler_t hdlr; 12997 cyc_time_t when; 12998 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12999 dtrace_icookie_t cookie; 13000 13001 mutex_enter(&cpu_lock); 13002 mutex_enter(&dtrace_lock); 13003 13004 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13005 rval = EBUSY; 13006 goto out; 13007 } 13008 13009 /* 13010 * Before we can perform any checks, we must prime all of the 13011 * retained enablings that correspond to this state. 13012 */ 13013 dtrace_enabling_prime(state); 13014 13015 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13016 rval = EACCES; 13017 goto out; 13018 } 13019 13020 dtrace_state_prereserve(state); 13021 13022 /* 13023 * Now we want to do is try to allocate our speculations. 13024 * We do not automatically resize the number of speculations; if 13025 * this fails, we will fail the operation. 13026 */ 13027 nspec = opt[DTRACEOPT_NSPEC]; 13028 ASSERT(nspec != DTRACEOPT_UNSET); 13029 13030 if (nspec > INT_MAX) { 13031 rval = ENOMEM; 13032 goto out; 13033 } 13034 13035 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), 13036 KM_NOSLEEP | KM_NORMALPRI); 13037 13038 if (spec == NULL) { 13039 rval = ENOMEM; 13040 goto out; 13041 } 13042 13043 state->dts_speculations = spec; 13044 state->dts_nspeculations = (int)nspec; 13045 13046 for (i = 0; i < nspec; i++) { 13047 if ((buf = kmem_zalloc(bufsize, 13048 KM_NOSLEEP | KM_NORMALPRI)) == NULL) { 13049 rval = ENOMEM; 13050 goto err; 13051 } 13052 13053 spec[i].dtsp_buffer = buf; 13054 } 13055 13056 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13057 if (dtrace_anon.dta_state == NULL) { 13058 rval = ENOENT; 13059 goto out; 13060 } 13061 13062 if (state->dts_necbs != 0) { 13063 rval = EALREADY; 13064 goto out; 13065 } 13066 13067 state->dts_anon = dtrace_anon_grab(); 13068 ASSERT(state->dts_anon != NULL); 13069 state = state->dts_anon; 13070 13071 /* 13072 * We want "grabanon" to be set in the grabbed state, so we'll 13073 * copy that option value from the grabbing state into the 13074 * grabbed state. 13075 */ 13076 state->dts_options[DTRACEOPT_GRABANON] = 13077 opt[DTRACEOPT_GRABANON]; 13078 13079 *cpu = dtrace_anon.dta_beganon; 13080 13081 /* 13082 * If the anonymous state is active (as it almost certainly 13083 * is if the anonymous enabling ultimately matched anything), 13084 * we don't allow any further option processing -- but we 13085 * don't return failure. 13086 */ 13087 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13088 goto out; 13089 } 13090 13091 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13092 opt[DTRACEOPT_AGGSIZE] != 0) { 13093 if (state->dts_aggregations == NULL) { 13094 /* 13095 * We're not going to create an aggregation buffer 13096 * because we don't have any ECBs that contain 13097 * aggregations -- set this option to 0. 13098 */ 13099 opt[DTRACEOPT_AGGSIZE] = 0; 13100 } else { 13101 /* 13102 * If we have an aggregation buffer, we must also have 13103 * a buffer to use as scratch. 13104 */ 13105 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13106 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13107 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13108 } 13109 } 13110 } 13111 13112 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13113 opt[DTRACEOPT_SPECSIZE] != 0) { 13114 if (!state->dts_speculates) { 13115 /* 13116 * We're not going to create speculation buffers 13117 * because we don't have any ECBs that actually 13118 * speculate -- set the speculation size to 0. 13119 */ 13120 opt[DTRACEOPT_SPECSIZE] = 0; 13121 } 13122 } 13123 13124 /* 13125 * The bare minimum size for any buffer that we're actually going to 13126 * do anything to is sizeof (uint64_t). 13127 */ 13128 sz = sizeof (uint64_t); 13129 13130 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13131 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13132 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13133 /* 13134 * A buffer size has been explicitly set to 0 (or to a size 13135 * that will be adjusted to 0) and we need the space -- we 13136 * need to return failure. We return ENOSPC to differentiate 13137 * it from failing to allocate a buffer due to failure to meet 13138 * the reserve (for which we return E2BIG). 13139 */ 13140 rval = ENOSPC; 13141 goto out; 13142 } 13143 13144 if ((rval = dtrace_state_buffers(state)) != 0) 13145 goto err; 13146 13147 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13148 sz = dtrace_dstate_defsize; 13149 13150 do { 13151 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13152 13153 if (rval == 0) 13154 break; 13155 13156 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13157 goto err; 13158 } while (sz >>= 1); 13159 13160 opt[DTRACEOPT_DYNVARSIZE] = sz; 13161 13162 if (rval != 0) 13163 goto err; 13164 13165 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13166 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13167 13168 if (opt[DTRACEOPT_CLEANRATE] == 0) 13169 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13170 13171 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13172 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13173 13174 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13175 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13176 13177 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13178 hdlr.cyh_arg = state; 13179 hdlr.cyh_level = CY_LOW_LEVEL; 13180 13181 when.cyt_when = 0; 13182 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13183 13184 state->dts_cleaner = cyclic_add(&hdlr, &when); 13185 13186 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13187 hdlr.cyh_arg = state; 13188 hdlr.cyh_level = CY_LOW_LEVEL; 13189 13190 when.cyt_when = 0; 13191 when.cyt_interval = dtrace_deadman_interval; 13192 13193 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13194 state->dts_deadman = cyclic_add(&hdlr, &when); 13195 13196 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13197 13198 if (state->dts_getf != 0 && 13199 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 13200 /* 13201 * We don't have kernel privs but we have at least one call 13202 * to getf(); we need to bump our zone's count, and (if 13203 * this is the first enabling to have an unprivileged call 13204 * to getf()) we need to hook into closef(). 13205 */ 13206 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; 13207 13208 if (dtrace_getf++ == 0) { 13209 ASSERT(dtrace_closef == NULL); 13210 dtrace_closef = dtrace_getf_barrier; 13211 } 13212 } 13213 13214 /* 13215 * Now it's time to actually fire the BEGIN probe. We need to disable 13216 * interrupts here both to record the CPU on which we fired the BEGIN 13217 * probe (the data from this CPU will be processed first at user 13218 * level) and to manually activate the buffer for this CPU. 13219 */ 13220 cookie = dtrace_interrupt_disable(); 13221 *cpu = CPU->cpu_id; 13222 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13223 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13224 13225 dtrace_probe(dtrace_probeid_begin, 13226 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13227 dtrace_interrupt_enable(cookie); 13228 /* 13229 * We may have had an exit action from a BEGIN probe; only change our 13230 * state to ACTIVE if we're still in WARMUP. 13231 */ 13232 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13233 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13234 13235 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13236 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13237 13238 /* 13239 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 13240 * want each CPU to transition its principal buffer out of the 13241 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13242 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13243 * atomically transition from processing none of a state's ECBs to 13244 * processing all of them. 13245 */ 13246 dtrace_xcall(DTRACE_CPUALL, 13247 (dtrace_xcall_t)dtrace_buffer_activate, state); 13248 goto out; 13249 13250 err: 13251 dtrace_buffer_free(state->dts_buffer); 13252 dtrace_buffer_free(state->dts_aggbuffer); 13253 13254 if ((nspec = state->dts_nspeculations) == 0) { 13255 ASSERT(state->dts_speculations == NULL); 13256 goto out; 13257 } 13258 13259 spec = state->dts_speculations; 13260 ASSERT(spec != NULL); 13261 13262 for (i = 0; i < state->dts_nspeculations; i++) { 13263 if ((buf = spec[i].dtsp_buffer) == NULL) 13264 break; 13265 13266 dtrace_buffer_free(buf); 13267 kmem_free(buf, bufsize); 13268 } 13269 13270 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13271 state->dts_nspeculations = 0; 13272 state->dts_speculations = NULL; 13273 13274 out: 13275 mutex_exit(&dtrace_lock); 13276 mutex_exit(&cpu_lock); 13277 13278 return (rval); 13279 } 13280 13281 static int 13282 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13283 { 13284 dtrace_icookie_t cookie; 13285 13286 ASSERT(MUTEX_HELD(&dtrace_lock)); 13287 13288 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13289 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13290 return (EINVAL); 13291 13292 /* 13293 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13294 * to be sure that every CPU has seen it. See below for the details 13295 * on why this is done. 13296 */ 13297 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13298 dtrace_sync(); 13299 13300 /* 13301 * By this point, it is impossible for any CPU to be still processing 13302 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13303 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13304 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13305 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13306 * iff we're in the END probe. 13307 */ 13308 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13309 dtrace_sync(); 13310 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13311 13312 /* 13313 * Finally, we can release the reserve and call the END probe. We 13314 * disable interrupts across calling the END probe to allow us to 13315 * return the CPU on which we actually called the END probe. This 13316 * allows user-land to be sure that this CPU's principal buffer is 13317 * processed last. 13318 */ 13319 state->dts_reserve = 0; 13320 13321 cookie = dtrace_interrupt_disable(); 13322 *cpu = CPU->cpu_id; 13323 dtrace_probe(dtrace_probeid_end, 13324 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13325 dtrace_interrupt_enable(cookie); 13326 13327 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13328 dtrace_sync(); 13329 13330 if (state->dts_getf != 0 && 13331 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 13332 /* 13333 * We don't have kernel privs but we have at least one call 13334 * to getf(); we need to lower our zone's count, and (if 13335 * this is the last enabling to have an unprivileged call 13336 * to getf()) we need to clear the closef() hook. 13337 */ 13338 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0); 13339 ASSERT(dtrace_closef == dtrace_getf_barrier); 13340 ASSERT(dtrace_getf > 0); 13341 13342 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--; 13343 13344 if (--dtrace_getf == 0) 13345 dtrace_closef = NULL; 13346 } 13347 13348 return (0); 13349 } 13350 13351 static int 13352 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13353 dtrace_optval_t val) 13354 { 13355 ASSERT(MUTEX_HELD(&dtrace_lock)); 13356 13357 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13358 return (EBUSY); 13359 13360 if (option >= DTRACEOPT_MAX) 13361 return (EINVAL); 13362 13363 if (option != DTRACEOPT_CPU && val < 0) 13364 return (EINVAL); 13365 13366 switch (option) { 13367 case DTRACEOPT_DESTRUCTIVE: 13368 if (dtrace_destructive_disallow) 13369 return (EACCES); 13370 13371 state->dts_cred.dcr_destructive = 1; 13372 break; 13373 13374 case DTRACEOPT_BUFSIZE: 13375 case DTRACEOPT_DYNVARSIZE: 13376 case DTRACEOPT_AGGSIZE: 13377 case DTRACEOPT_SPECSIZE: 13378 case DTRACEOPT_STRSIZE: 13379 if (val < 0) 13380 return (EINVAL); 13381 13382 if (val >= LONG_MAX) { 13383 /* 13384 * If this is an otherwise negative value, set it to 13385 * the highest multiple of 128m less than LONG_MAX. 13386 * Technically, we're adjusting the size without 13387 * regard to the buffer resizing policy, but in fact, 13388 * this has no effect -- if we set the buffer size to 13389 * ~LONG_MAX and the buffer policy is ultimately set to 13390 * be "manual", the buffer allocation is guaranteed to 13391 * fail, if only because the allocation requires two 13392 * buffers. (We set the the size to the highest 13393 * multiple of 128m because it ensures that the size 13394 * will remain a multiple of a megabyte when 13395 * repeatedly halved -- all the way down to 15m.) 13396 */ 13397 val = LONG_MAX - (1 << 27) + 1; 13398 } 13399 } 13400 13401 state->dts_options[option] = val; 13402 13403 return (0); 13404 } 13405 13406 static void 13407 dtrace_state_destroy(dtrace_state_t *state) 13408 { 13409 dtrace_ecb_t *ecb; 13410 dtrace_vstate_t *vstate = &state->dts_vstate; 13411 minor_t minor = getminor(state->dts_dev); 13412 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13413 dtrace_speculation_t *spec = state->dts_speculations; 13414 int nspec = state->dts_nspeculations; 13415 uint32_t match; 13416 13417 ASSERT(MUTEX_HELD(&dtrace_lock)); 13418 ASSERT(MUTEX_HELD(&cpu_lock)); 13419 13420 /* 13421 * First, retract any retained enablings for this state. 13422 */ 13423 dtrace_enabling_retract(state); 13424 ASSERT(state->dts_nretained == 0); 13425 13426 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13427 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13428 /* 13429 * We have managed to come into dtrace_state_destroy() on a 13430 * hot enabling -- almost certainly because of a disorderly 13431 * shutdown of a consumer. (That is, a consumer that is 13432 * exiting without having called dtrace_stop().) In this case, 13433 * we're going to set our activity to be KILLED, and then 13434 * issue a sync to be sure that everyone is out of probe 13435 * context before we start blowing away ECBs. 13436 */ 13437 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13438 dtrace_sync(); 13439 } 13440 13441 /* 13442 * Release the credential hold we took in dtrace_state_create(). 13443 */ 13444 if (state->dts_cred.dcr_cred != NULL) 13445 crfree(state->dts_cred.dcr_cred); 13446 13447 /* 13448 * Now we can safely disable and destroy any enabled probes. Because 13449 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13450 * (especially if they're all enabled), we take two passes through the 13451 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13452 * in the second we disable whatever is left over. 13453 */ 13454 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13455 for (i = 0; i < state->dts_necbs; i++) { 13456 if ((ecb = state->dts_ecbs[i]) == NULL) 13457 continue; 13458 13459 if (match && ecb->dte_probe != NULL) { 13460 dtrace_probe_t *probe = ecb->dte_probe; 13461 dtrace_provider_t *prov = probe->dtpr_provider; 13462 13463 if (!(prov->dtpv_priv.dtpp_flags & match)) 13464 continue; 13465 } 13466 13467 dtrace_ecb_disable(ecb); 13468 dtrace_ecb_destroy(ecb); 13469 } 13470 13471 if (!match) 13472 break; 13473 } 13474 13475 /* 13476 * Before we free the buffers, perform one more sync to assure that 13477 * every CPU is out of probe context. 13478 */ 13479 dtrace_sync(); 13480 13481 dtrace_buffer_free(state->dts_buffer); 13482 dtrace_buffer_free(state->dts_aggbuffer); 13483 13484 for (i = 0; i < nspec; i++) 13485 dtrace_buffer_free(spec[i].dtsp_buffer); 13486 13487 if (state->dts_cleaner != CYCLIC_NONE) 13488 cyclic_remove(state->dts_cleaner); 13489 13490 if (state->dts_deadman != CYCLIC_NONE) 13491 cyclic_remove(state->dts_deadman); 13492 13493 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13494 dtrace_vstate_fini(vstate); 13495 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13496 13497 if (state->dts_aggregations != NULL) { 13498 #ifdef DEBUG 13499 for (i = 0; i < state->dts_naggregations; i++) 13500 ASSERT(state->dts_aggregations[i] == NULL); 13501 #endif 13502 ASSERT(state->dts_naggregations > 0); 13503 kmem_free(state->dts_aggregations, 13504 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13505 } 13506 13507 kmem_free(state->dts_buffer, bufsize); 13508 kmem_free(state->dts_aggbuffer, bufsize); 13509 13510 for (i = 0; i < nspec; i++) 13511 kmem_free(spec[i].dtsp_buffer, bufsize); 13512 13513 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13514 13515 dtrace_format_destroy(state); 13516 13517 vmem_destroy(state->dts_aggid_arena); 13518 ddi_soft_state_free(dtrace_softstate, minor); 13519 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13520 } 13521 13522 /* 13523 * DTrace Anonymous Enabling Functions 13524 */ 13525 static dtrace_state_t * 13526 dtrace_anon_grab(void) 13527 { 13528 dtrace_state_t *state; 13529 13530 ASSERT(MUTEX_HELD(&dtrace_lock)); 13531 13532 if ((state = dtrace_anon.dta_state) == NULL) { 13533 ASSERT(dtrace_anon.dta_enabling == NULL); 13534 return (NULL); 13535 } 13536 13537 ASSERT(dtrace_anon.dta_enabling != NULL); 13538 ASSERT(dtrace_retained != NULL); 13539 13540 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13541 dtrace_anon.dta_enabling = NULL; 13542 dtrace_anon.dta_state = NULL; 13543 13544 return (state); 13545 } 13546 13547 static void 13548 dtrace_anon_property(void) 13549 { 13550 int i, rv; 13551 dtrace_state_t *state; 13552 dof_hdr_t *dof; 13553 char c[32]; /* enough for "dof-data-" + digits */ 13554 13555 ASSERT(MUTEX_HELD(&dtrace_lock)); 13556 ASSERT(MUTEX_HELD(&cpu_lock)); 13557 13558 for (i = 0; ; i++) { 13559 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13560 13561 dtrace_err_verbose = 1; 13562 13563 if ((dof = dtrace_dof_property(c)) == NULL) { 13564 dtrace_err_verbose = 0; 13565 break; 13566 } 13567 13568 /* 13569 * We want to create anonymous state, so we need to transition 13570 * the kernel debugger to indicate that DTrace is active. If 13571 * this fails (e.g. because the debugger has modified text in 13572 * some way), we won't continue with the processing. 13573 */ 13574 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13575 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13576 "enabling ignored."); 13577 dtrace_dof_destroy(dof); 13578 break; 13579 } 13580 13581 /* 13582 * If we haven't allocated an anonymous state, we'll do so now. 13583 */ 13584 if ((state = dtrace_anon.dta_state) == NULL) { 13585 state = dtrace_state_create(NULL, NULL); 13586 dtrace_anon.dta_state = state; 13587 13588 if (state == NULL) { 13589 /* 13590 * This basically shouldn't happen: the only 13591 * failure mode from dtrace_state_create() is a 13592 * failure of ddi_soft_state_zalloc() that 13593 * itself should never happen. Still, the 13594 * interface allows for a failure mode, and 13595 * we want to fail as gracefully as possible: 13596 * we'll emit an error message and cease 13597 * processing anonymous state in this case. 13598 */ 13599 cmn_err(CE_WARN, "failed to create " 13600 "anonymous state"); 13601 dtrace_dof_destroy(dof); 13602 break; 13603 } 13604 } 13605 13606 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13607 &dtrace_anon.dta_enabling, 0, B_TRUE); 13608 13609 if (rv == 0) 13610 rv = dtrace_dof_options(dof, state); 13611 13612 dtrace_err_verbose = 0; 13613 dtrace_dof_destroy(dof); 13614 13615 if (rv != 0) { 13616 /* 13617 * This is malformed DOF; chuck any anonymous state 13618 * that we created. 13619 */ 13620 ASSERT(dtrace_anon.dta_enabling == NULL); 13621 dtrace_state_destroy(state); 13622 dtrace_anon.dta_state = NULL; 13623 break; 13624 } 13625 13626 ASSERT(dtrace_anon.dta_enabling != NULL); 13627 } 13628 13629 if (dtrace_anon.dta_enabling != NULL) { 13630 int rval; 13631 13632 /* 13633 * dtrace_enabling_retain() can only fail because we are 13634 * trying to retain more enablings than are allowed -- but 13635 * we only have one anonymous enabling, and we are guaranteed 13636 * to be allowed at least one retained enabling; we assert 13637 * that dtrace_enabling_retain() returns success. 13638 */ 13639 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13640 ASSERT(rval == 0); 13641 13642 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13643 } 13644 } 13645 13646 /* 13647 * DTrace Helper Functions 13648 */ 13649 static void 13650 dtrace_helper_trace(dtrace_helper_action_t *helper, 13651 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13652 { 13653 uint32_t size, next, nnext, i; 13654 dtrace_helptrace_t *ent; 13655 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 13656 13657 if (!dtrace_helptrace_enabled) 13658 return; 13659 13660 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13661 13662 /* 13663 * What would a tracing framework be without its own tracing 13664 * framework? (Well, a hell of a lot simpler, for starters...) 13665 */ 13666 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13667 sizeof (uint64_t) - sizeof (uint64_t); 13668 13669 /* 13670 * Iterate until we can allocate a slot in the trace buffer. 13671 */ 13672 do { 13673 next = dtrace_helptrace_next; 13674 13675 if (next + size < dtrace_helptrace_bufsize) { 13676 nnext = next + size; 13677 } else { 13678 nnext = size; 13679 } 13680 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13681 13682 /* 13683 * We have our slot; fill it in. 13684 */ 13685 if (nnext == size) 13686 next = 0; 13687 13688 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13689 ent->dtht_helper = helper; 13690 ent->dtht_where = where; 13691 ent->dtht_nlocals = vstate->dtvs_nlocals; 13692 13693 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13694 mstate->dtms_fltoffs : -1; 13695 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13696 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 13697 13698 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13699 dtrace_statvar_t *svar; 13700 13701 if ((svar = vstate->dtvs_locals[i]) == NULL) 13702 continue; 13703 13704 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13705 ent->dtht_locals[i] = 13706 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 13707 } 13708 } 13709 13710 static uint64_t 13711 dtrace_helper(int which, dtrace_mstate_t *mstate, 13712 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13713 { 13714 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 13715 uint64_t sarg0 = mstate->dtms_arg[0]; 13716 uint64_t sarg1 = mstate->dtms_arg[1]; 13717 uint64_t rval; 13718 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13719 dtrace_helper_action_t *helper; 13720 dtrace_vstate_t *vstate; 13721 dtrace_difo_t *pred; 13722 int i, trace = dtrace_helptrace_enabled; 13723 13724 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13725 13726 if (helpers == NULL) 13727 return (0); 13728 13729 if ((helper = helpers->dthps_actions[which]) == NULL) 13730 return (0); 13731 13732 vstate = &helpers->dthps_vstate; 13733 mstate->dtms_arg[0] = arg0; 13734 mstate->dtms_arg[1] = arg1; 13735 13736 /* 13737 * Now iterate over each helper. If its predicate evaluates to 'true', 13738 * we'll call the corresponding actions. Note that the below calls 13739 * to dtrace_dif_emulate() may set faults in machine state. This is 13740 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13741 * the stored DIF offset with its own (which is the desired behavior). 13742 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13743 * from machine state; this is okay, too. 13744 */ 13745 for (; helper != NULL; helper = helper->dtha_next) { 13746 if ((pred = helper->dtha_predicate) != NULL) { 13747 if (trace) 13748 dtrace_helper_trace(helper, mstate, vstate, 0); 13749 13750 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 13751 goto next; 13752 13753 if (*flags & CPU_DTRACE_FAULT) 13754 goto err; 13755 } 13756 13757 for (i = 0; i < helper->dtha_nactions; i++) { 13758 if (trace) 13759 dtrace_helper_trace(helper, 13760 mstate, vstate, i + 1); 13761 13762 rval = dtrace_dif_emulate(helper->dtha_actions[i], 13763 mstate, vstate, state); 13764 13765 if (*flags & CPU_DTRACE_FAULT) 13766 goto err; 13767 } 13768 13769 next: 13770 if (trace) 13771 dtrace_helper_trace(helper, mstate, vstate, 13772 DTRACE_HELPTRACE_NEXT); 13773 } 13774 13775 if (trace) 13776 dtrace_helper_trace(helper, mstate, vstate, 13777 DTRACE_HELPTRACE_DONE); 13778 13779 /* 13780 * Restore the arg0 that we saved upon entry. 13781 */ 13782 mstate->dtms_arg[0] = sarg0; 13783 mstate->dtms_arg[1] = sarg1; 13784 13785 return (rval); 13786 13787 err: 13788 if (trace) 13789 dtrace_helper_trace(helper, mstate, vstate, 13790 DTRACE_HELPTRACE_ERR); 13791 13792 /* 13793 * Restore the arg0 that we saved upon entry. 13794 */ 13795 mstate->dtms_arg[0] = sarg0; 13796 mstate->dtms_arg[1] = sarg1; 13797 13798 return (NULL); 13799 } 13800 13801 static void 13802 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 13803 dtrace_vstate_t *vstate) 13804 { 13805 int i; 13806 13807 if (helper->dtha_predicate != NULL) 13808 dtrace_difo_release(helper->dtha_predicate, vstate); 13809 13810 for (i = 0; i < helper->dtha_nactions; i++) { 13811 ASSERT(helper->dtha_actions[i] != NULL); 13812 dtrace_difo_release(helper->dtha_actions[i], vstate); 13813 } 13814 13815 kmem_free(helper->dtha_actions, 13816 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 13817 kmem_free(helper, sizeof (dtrace_helper_action_t)); 13818 } 13819 13820 static int 13821 dtrace_helper_destroygen(int gen) 13822 { 13823 proc_t *p = curproc; 13824 dtrace_helpers_t *help = p->p_dtrace_helpers; 13825 dtrace_vstate_t *vstate; 13826 int i; 13827 13828 ASSERT(MUTEX_HELD(&dtrace_lock)); 13829 13830 if (help == NULL || gen > help->dthps_generation) 13831 return (EINVAL); 13832 13833 vstate = &help->dthps_vstate; 13834 13835 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13836 dtrace_helper_action_t *last = NULL, *h, *next; 13837 13838 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13839 next = h->dtha_next; 13840 13841 if (h->dtha_generation == gen) { 13842 if (last != NULL) { 13843 last->dtha_next = next; 13844 } else { 13845 help->dthps_actions[i] = next; 13846 } 13847 13848 dtrace_helper_action_destroy(h, vstate); 13849 } else { 13850 last = h; 13851 } 13852 } 13853 } 13854 13855 /* 13856 * Interate until we've cleared out all helper providers with the 13857 * given generation number. 13858 */ 13859 for (;;) { 13860 dtrace_helper_provider_t *prov; 13861 13862 /* 13863 * Look for a helper provider with the right generation. We 13864 * have to start back at the beginning of the list each time 13865 * because we drop dtrace_lock. It's unlikely that we'll make 13866 * more than two passes. 13867 */ 13868 for (i = 0; i < help->dthps_nprovs; i++) { 13869 prov = help->dthps_provs[i]; 13870 13871 if (prov->dthp_generation == gen) 13872 break; 13873 } 13874 13875 /* 13876 * If there were no matches, we're done. 13877 */ 13878 if (i == help->dthps_nprovs) 13879 break; 13880 13881 /* 13882 * Move the last helper provider into this slot. 13883 */ 13884 help->dthps_nprovs--; 13885 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 13886 help->dthps_provs[help->dthps_nprovs] = NULL; 13887 13888 mutex_exit(&dtrace_lock); 13889 13890 /* 13891 * If we have a meta provider, remove this helper provider. 13892 */ 13893 mutex_enter(&dtrace_meta_lock); 13894 if (dtrace_meta_pid != NULL) { 13895 ASSERT(dtrace_deferred_pid == NULL); 13896 dtrace_helper_provider_remove(&prov->dthp_prov, 13897 p->p_pid); 13898 } 13899 mutex_exit(&dtrace_meta_lock); 13900 13901 dtrace_helper_provider_destroy(prov); 13902 13903 mutex_enter(&dtrace_lock); 13904 } 13905 13906 return (0); 13907 } 13908 13909 static int 13910 dtrace_helper_validate(dtrace_helper_action_t *helper) 13911 { 13912 int err = 0, i; 13913 dtrace_difo_t *dp; 13914 13915 if ((dp = helper->dtha_predicate) != NULL) 13916 err += dtrace_difo_validate_helper(dp); 13917 13918 for (i = 0; i < helper->dtha_nactions; i++) 13919 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 13920 13921 return (err == 0); 13922 } 13923 13924 static int 13925 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 13926 { 13927 dtrace_helpers_t *help; 13928 dtrace_helper_action_t *helper, *last; 13929 dtrace_actdesc_t *act; 13930 dtrace_vstate_t *vstate; 13931 dtrace_predicate_t *pred; 13932 int count = 0, nactions = 0, i; 13933 13934 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 13935 return (EINVAL); 13936 13937 help = curproc->p_dtrace_helpers; 13938 last = help->dthps_actions[which]; 13939 vstate = &help->dthps_vstate; 13940 13941 for (count = 0; last != NULL; last = last->dtha_next) { 13942 count++; 13943 if (last->dtha_next == NULL) 13944 break; 13945 } 13946 13947 /* 13948 * If we already have dtrace_helper_actions_max helper actions for this 13949 * helper action type, we'll refuse to add a new one. 13950 */ 13951 if (count >= dtrace_helper_actions_max) 13952 return (ENOSPC); 13953 13954 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 13955 helper->dtha_generation = help->dthps_generation; 13956 13957 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 13958 ASSERT(pred->dtp_difo != NULL); 13959 dtrace_difo_hold(pred->dtp_difo); 13960 helper->dtha_predicate = pred->dtp_difo; 13961 } 13962 13963 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 13964 if (act->dtad_kind != DTRACEACT_DIFEXPR) 13965 goto err; 13966 13967 if (act->dtad_difo == NULL) 13968 goto err; 13969 13970 nactions++; 13971 } 13972 13973 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 13974 (helper->dtha_nactions = nactions), KM_SLEEP); 13975 13976 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 13977 dtrace_difo_hold(act->dtad_difo); 13978 helper->dtha_actions[i++] = act->dtad_difo; 13979 } 13980 13981 if (!dtrace_helper_validate(helper)) 13982 goto err; 13983 13984 if (last == NULL) { 13985 help->dthps_actions[which] = helper; 13986 } else { 13987 last->dtha_next = helper; 13988 } 13989 13990 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 13991 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 13992 dtrace_helptrace_next = 0; 13993 } 13994 13995 return (0); 13996 err: 13997 dtrace_helper_action_destroy(helper, vstate); 13998 return (EINVAL); 13999 } 14000 14001 static void 14002 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14003 dof_helper_t *dofhp) 14004 { 14005 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14006 14007 mutex_enter(&dtrace_meta_lock); 14008 mutex_enter(&dtrace_lock); 14009 14010 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14011 /* 14012 * If the dtrace module is loaded but not attached, or if 14013 * there aren't isn't a meta provider registered to deal with 14014 * these provider descriptions, we need to postpone creating 14015 * the actual providers until later. 14016 */ 14017 14018 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14019 dtrace_deferred_pid != help) { 14020 help->dthps_deferred = 1; 14021 help->dthps_pid = p->p_pid; 14022 help->dthps_next = dtrace_deferred_pid; 14023 help->dthps_prev = NULL; 14024 if (dtrace_deferred_pid != NULL) 14025 dtrace_deferred_pid->dthps_prev = help; 14026 dtrace_deferred_pid = help; 14027 } 14028 14029 mutex_exit(&dtrace_lock); 14030 14031 } else if (dofhp != NULL) { 14032 /* 14033 * If the dtrace module is loaded and we have a particular 14034 * helper provider description, pass that off to the 14035 * meta provider. 14036 */ 14037 14038 mutex_exit(&dtrace_lock); 14039 14040 dtrace_helper_provide(dofhp, p->p_pid); 14041 14042 } else { 14043 /* 14044 * Otherwise, just pass all the helper provider descriptions 14045 * off to the meta provider. 14046 */ 14047 14048 int i; 14049 mutex_exit(&dtrace_lock); 14050 14051 for (i = 0; i < help->dthps_nprovs; i++) { 14052 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14053 p->p_pid); 14054 } 14055 } 14056 14057 mutex_exit(&dtrace_meta_lock); 14058 } 14059 14060 static int 14061 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14062 { 14063 dtrace_helpers_t *help; 14064 dtrace_helper_provider_t *hprov, **tmp_provs; 14065 uint_t tmp_maxprovs, i; 14066 14067 ASSERT(MUTEX_HELD(&dtrace_lock)); 14068 14069 help = curproc->p_dtrace_helpers; 14070 ASSERT(help != NULL); 14071 14072 /* 14073 * If we already have dtrace_helper_providers_max helper providers, 14074 * we're refuse to add a new one. 14075 */ 14076 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14077 return (ENOSPC); 14078 14079 /* 14080 * Check to make sure this isn't a duplicate. 14081 */ 14082 for (i = 0; i < help->dthps_nprovs; i++) { 14083 if (dofhp->dofhp_addr == 14084 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14085 return (EALREADY); 14086 } 14087 14088 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14089 hprov->dthp_prov = *dofhp; 14090 hprov->dthp_ref = 1; 14091 hprov->dthp_generation = gen; 14092 14093 /* 14094 * Allocate a bigger table for helper providers if it's already full. 14095 */ 14096 if (help->dthps_maxprovs == help->dthps_nprovs) { 14097 tmp_maxprovs = help->dthps_maxprovs; 14098 tmp_provs = help->dthps_provs; 14099 14100 if (help->dthps_maxprovs == 0) 14101 help->dthps_maxprovs = 2; 14102 else 14103 help->dthps_maxprovs *= 2; 14104 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14105 help->dthps_maxprovs = dtrace_helper_providers_max; 14106 14107 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14108 14109 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14110 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14111 14112 if (tmp_provs != NULL) { 14113 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14114 sizeof (dtrace_helper_provider_t *)); 14115 kmem_free(tmp_provs, tmp_maxprovs * 14116 sizeof (dtrace_helper_provider_t *)); 14117 } 14118 } 14119 14120 help->dthps_provs[help->dthps_nprovs] = hprov; 14121 help->dthps_nprovs++; 14122 14123 return (0); 14124 } 14125 14126 static void 14127 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14128 { 14129 mutex_enter(&dtrace_lock); 14130 14131 if (--hprov->dthp_ref == 0) { 14132 dof_hdr_t *dof; 14133 mutex_exit(&dtrace_lock); 14134 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14135 dtrace_dof_destroy(dof); 14136 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14137 } else { 14138 mutex_exit(&dtrace_lock); 14139 } 14140 } 14141 14142 static int 14143 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14144 { 14145 uintptr_t daddr = (uintptr_t)dof; 14146 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14147 dof_provider_t *provider; 14148 dof_probe_t *probe; 14149 uint8_t *arg; 14150 char *strtab, *typestr; 14151 dof_stridx_t typeidx; 14152 size_t typesz; 14153 uint_t nprobes, j, k; 14154 14155 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14156 14157 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14158 dtrace_dof_error(dof, "misaligned section offset"); 14159 return (-1); 14160 } 14161 14162 /* 14163 * The section needs to be large enough to contain the DOF provider 14164 * structure appropriate for the given version. 14165 */ 14166 if (sec->dofs_size < 14167 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 14168 offsetof(dof_provider_t, dofpv_prenoffs) : 14169 sizeof (dof_provider_t))) { 14170 dtrace_dof_error(dof, "provider section too small"); 14171 return (-1); 14172 } 14173 14174 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14175 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14176 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14177 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14178 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14179 14180 if (str_sec == NULL || prb_sec == NULL || 14181 arg_sec == NULL || off_sec == NULL) 14182 return (-1); 14183 14184 enoff_sec = NULL; 14185 14186 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14187 provider->dofpv_prenoffs != DOF_SECT_NONE && 14188 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14189 provider->dofpv_prenoffs)) == NULL) 14190 return (-1); 14191 14192 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14193 14194 if (provider->dofpv_name >= str_sec->dofs_size || 14195 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14196 dtrace_dof_error(dof, "invalid provider name"); 14197 return (-1); 14198 } 14199 14200 if (prb_sec->dofs_entsize == 0 || 14201 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14202 dtrace_dof_error(dof, "invalid entry size"); 14203 return (-1); 14204 } 14205 14206 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14207 dtrace_dof_error(dof, "misaligned entry size"); 14208 return (-1); 14209 } 14210 14211 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14212 dtrace_dof_error(dof, "invalid entry size"); 14213 return (-1); 14214 } 14215 14216 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14217 dtrace_dof_error(dof, "misaligned section offset"); 14218 return (-1); 14219 } 14220 14221 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14222 dtrace_dof_error(dof, "invalid entry size"); 14223 return (-1); 14224 } 14225 14226 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14227 14228 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14229 14230 /* 14231 * Take a pass through the probes to check for errors. 14232 */ 14233 for (j = 0; j < nprobes; j++) { 14234 probe = (dof_probe_t *)(uintptr_t)(daddr + 14235 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14236 14237 if (probe->dofpr_func >= str_sec->dofs_size) { 14238 dtrace_dof_error(dof, "invalid function name"); 14239 return (-1); 14240 } 14241 14242 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14243 dtrace_dof_error(dof, "function name too long"); 14244 return (-1); 14245 } 14246 14247 if (probe->dofpr_name >= str_sec->dofs_size || 14248 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14249 dtrace_dof_error(dof, "invalid probe name"); 14250 return (-1); 14251 } 14252 14253 /* 14254 * The offset count must not wrap the index, and the offsets 14255 * must also not overflow the section's data. 14256 */ 14257 if (probe->dofpr_offidx + probe->dofpr_noffs < 14258 probe->dofpr_offidx || 14259 (probe->dofpr_offidx + probe->dofpr_noffs) * 14260 off_sec->dofs_entsize > off_sec->dofs_size) { 14261 dtrace_dof_error(dof, "invalid probe offset"); 14262 return (-1); 14263 } 14264 14265 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14266 /* 14267 * If there's no is-enabled offset section, make sure 14268 * there aren't any is-enabled offsets. Otherwise 14269 * perform the same checks as for probe offsets 14270 * (immediately above). 14271 */ 14272 if (enoff_sec == NULL) { 14273 if (probe->dofpr_enoffidx != 0 || 14274 probe->dofpr_nenoffs != 0) { 14275 dtrace_dof_error(dof, "is-enabled " 14276 "offsets with null section"); 14277 return (-1); 14278 } 14279 } else if (probe->dofpr_enoffidx + 14280 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14281 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14282 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14283 dtrace_dof_error(dof, "invalid is-enabled " 14284 "offset"); 14285 return (-1); 14286 } 14287 14288 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14289 dtrace_dof_error(dof, "zero probe and " 14290 "is-enabled offsets"); 14291 return (-1); 14292 } 14293 } else if (probe->dofpr_noffs == 0) { 14294 dtrace_dof_error(dof, "zero probe offsets"); 14295 return (-1); 14296 } 14297 14298 if (probe->dofpr_argidx + probe->dofpr_xargc < 14299 probe->dofpr_argidx || 14300 (probe->dofpr_argidx + probe->dofpr_xargc) * 14301 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14302 dtrace_dof_error(dof, "invalid args"); 14303 return (-1); 14304 } 14305 14306 typeidx = probe->dofpr_nargv; 14307 typestr = strtab + probe->dofpr_nargv; 14308 for (k = 0; k < probe->dofpr_nargc; k++) { 14309 if (typeidx >= str_sec->dofs_size) { 14310 dtrace_dof_error(dof, "bad " 14311 "native argument type"); 14312 return (-1); 14313 } 14314 14315 typesz = strlen(typestr) + 1; 14316 if (typesz > DTRACE_ARGTYPELEN) { 14317 dtrace_dof_error(dof, "native " 14318 "argument type too long"); 14319 return (-1); 14320 } 14321 typeidx += typesz; 14322 typestr += typesz; 14323 } 14324 14325 typeidx = probe->dofpr_xargv; 14326 typestr = strtab + probe->dofpr_xargv; 14327 for (k = 0; k < probe->dofpr_xargc; k++) { 14328 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14329 dtrace_dof_error(dof, "bad " 14330 "native argument index"); 14331 return (-1); 14332 } 14333 14334 if (typeidx >= str_sec->dofs_size) { 14335 dtrace_dof_error(dof, "bad " 14336 "translated argument type"); 14337 return (-1); 14338 } 14339 14340 typesz = strlen(typestr) + 1; 14341 if (typesz > DTRACE_ARGTYPELEN) { 14342 dtrace_dof_error(dof, "translated argument " 14343 "type too long"); 14344 return (-1); 14345 } 14346 14347 typeidx += typesz; 14348 typestr += typesz; 14349 } 14350 } 14351 14352 return (0); 14353 } 14354 14355 static int 14356 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14357 { 14358 dtrace_helpers_t *help; 14359 dtrace_vstate_t *vstate; 14360 dtrace_enabling_t *enab = NULL; 14361 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14362 uintptr_t daddr = (uintptr_t)dof; 14363 14364 ASSERT(MUTEX_HELD(&dtrace_lock)); 14365 14366 if ((help = curproc->p_dtrace_helpers) == NULL) 14367 help = dtrace_helpers_create(curproc); 14368 14369 vstate = &help->dthps_vstate; 14370 14371 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14372 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14373 dtrace_dof_destroy(dof); 14374 return (rv); 14375 } 14376 14377 /* 14378 * Look for helper providers and validate their descriptions. 14379 */ 14380 if (dhp != NULL) { 14381 for (i = 0; i < dof->dofh_secnum; i++) { 14382 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14383 dof->dofh_secoff + i * dof->dofh_secsize); 14384 14385 if (sec->dofs_type != DOF_SECT_PROVIDER) 14386 continue; 14387 14388 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14389 dtrace_enabling_destroy(enab); 14390 dtrace_dof_destroy(dof); 14391 return (-1); 14392 } 14393 14394 nprovs++; 14395 } 14396 } 14397 14398 /* 14399 * Now we need to walk through the ECB descriptions in the enabling. 14400 */ 14401 for (i = 0; i < enab->dten_ndesc; i++) { 14402 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14403 dtrace_probedesc_t *desc = &ep->dted_probe; 14404 14405 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14406 continue; 14407 14408 if (strcmp(desc->dtpd_mod, "helper") != 0) 14409 continue; 14410 14411 if (strcmp(desc->dtpd_func, "ustack") != 0) 14412 continue; 14413 14414 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14415 ep)) != 0) { 14416 /* 14417 * Adding this helper action failed -- we are now going 14418 * to rip out the entire generation and return failure. 14419 */ 14420 (void) dtrace_helper_destroygen(help->dthps_generation); 14421 dtrace_enabling_destroy(enab); 14422 dtrace_dof_destroy(dof); 14423 return (-1); 14424 } 14425 14426 nhelpers++; 14427 } 14428 14429 if (nhelpers < enab->dten_ndesc) 14430 dtrace_dof_error(dof, "unmatched helpers"); 14431 14432 gen = help->dthps_generation++; 14433 dtrace_enabling_destroy(enab); 14434 14435 if (dhp != NULL && nprovs > 0) { 14436 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14437 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14438 mutex_exit(&dtrace_lock); 14439 dtrace_helper_provider_register(curproc, help, dhp); 14440 mutex_enter(&dtrace_lock); 14441 14442 destroy = 0; 14443 } 14444 } 14445 14446 if (destroy) 14447 dtrace_dof_destroy(dof); 14448 14449 return (gen); 14450 } 14451 14452 static dtrace_helpers_t * 14453 dtrace_helpers_create(proc_t *p) 14454 { 14455 dtrace_helpers_t *help; 14456 14457 ASSERT(MUTEX_HELD(&dtrace_lock)); 14458 ASSERT(p->p_dtrace_helpers == NULL); 14459 14460 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14461 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14462 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14463 14464 p->p_dtrace_helpers = help; 14465 dtrace_helpers++; 14466 14467 return (help); 14468 } 14469 14470 static void 14471 dtrace_helpers_destroy(void) 14472 { 14473 dtrace_helpers_t *help; 14474 dtrace_vstate_t *vstate; 14475 proc_t *p = curproc; 14476 int i; 14477 14478 mutex_enter(&dtrace_lock); 14479 14480 ASSERT(p->p_dtrace_helpers != NULL); 14481 ASSERT(dtrace_helpers > 0); 14482 14483 help = p->p_dtrace_helpers; 14484 vstate = &help->dthps_vstate; 14485 14486 /* 14487 * We're now going to lose the help from this process. 14488 */ 14489 p->p_dtrace_helpers = NULL; 14490 dtrace_sync(); 14491 14492 /* 14493 * Destory the helper actions. 14494 */ 14495 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14496 dtrace_helper_action_t *h, *next; 14497 14498 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14499 next = h->dtha_next; 14500 dtrace_helper_action_destroy(h, vstate); 14501 h = next; 14502 } 14503 } 14504 14505 mutex_exit(&dtrace_lock); 14506 14507 /* 14508 * Destroy the helper providers. 14509 */ 14510 if (help->dthps_maxprovs > 0) { 14511 mutex_enter(&dtrace_meta_lock); 14512 if (dtrace_meta_pid != NULL) { 14513 ASSERT(dtrace_deferred_pid == NULL); 14514 14515 for (i = 0; i < help->dthps_nprovs; i++) { 14516 dtrace_helper_provider_remove( 14517 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14518 } 14519 } else { 14520 mutex_enter(&dtrace_lock); 14521 ASSERT(help->dthps_deferred == 0 || 14522 help->dthps_next != NULL || 14523 help->dthps_prev != NULL || 14524 help == dtrace_deferred_pid); 14525 14526 /* 14527 * Remove the helper from the deferred list. 14528 */ 14529 if (help->dthps_next != NULL) 14530 help->dthps_next->dthps_prev = help->dthps_prev; 14531 if (help->dthps_prev != NULL) 14532 help->dthps_prev->dthps_next = help->dthps_next; 14533 if (dtrace_deferred_pid == help) { 14534 dtrace_deferred_pid = help->dthps_next; 14535 ASSERT(help->dthps_prev == NULL); 14536 } 14537 14538 mutex_exit(&dtrace_lock); 14539 } 14540 14541 mutex_exit(&dtrace_meta_lock); 14542 14543 for (i = 0; i < help->dthps_nprovs; i++) { 14544 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14545 } 14546 14547 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14548 sizeof (dtrace_helper_provider_t *)); 14549 } 14550 14551 mutex_enter(&dtrace_lock); 14552 14553 dtrace_vstate_fini(&help->dthps_vstate); 14554 kmem_free(help->dthps_actions, 14555 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14556 kmem_free(help, sizeof (dtrace_helpers_t)); 14557 14558 --dtrace_helpers; 14559 mutex_exit(&dtrace_lock); 14560 } 14561 14562 static void 14563 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14564 { 14565 dtrace_helpers_t *help, *newhelp; 14566 dtrace_helper_action_t *helper, *new, *last; 14567 dtrace_difo_t *dp; 14568 dtrace_vstate_t *vstate; 14569 int i, j, sz, hasprovs = 0; 14570 14571 mutex_enter(&dtrace_lock); 14572 ASSERT(from->p_dtrace_helpers != NULL); 14573 ASSERT(dtrace_helpers > 0); 14574 14575 help = from->p_dtrace_helpers; 14576 newhelp = dtrace_helpers_create(to); 14577 ASSERT(to->p_dtrace_helpers != NULL); 14578 14579 newhelp->dthps_generation = help->dthps_generation; 14580 vstate = &newhelp->dthps_vstate; 14581 14582 /* 14583 * Duplicate the helper actions. 14584 */ 14585 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14586 if ((helper = help->dthps_actions[i]) == NULL) 14587 continue; 14588 14589 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14590 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14591 KM_SLEEP); 14592 new->dtha_generation = helper->dtha_generation; 14593 14594 if ((dp = helper->dtha_predicate) != NULL) { 14595 dp = dtrace_difo_duplicate(dp, vstate); 14596 new->dtha_predicate = dp; 14597 } 14598 14599 new->dtha_nactions = helper->dtha_nactions; 14600 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14601 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14602 14603 for (j = 0; j < new->dtha_nactions; j++) { 14604 dtrace_difo_t *dp = helper->dtha_actions[j]; 14605 14606 ASSERT(dp != NULL); 14607 dp = dtrace_difo_duplicate(dp, vstate); 14608 new->dtha_actions[j] = dp; 14609 } 14610 14611 if (last != NULL) { 14612 last->dtha_next = new; 14613 } else { 14614 newhelp->dthps_actions[i] = new; 14615 } 14616 14617 last = new; 14618 } 14619 } 14620 14621 /* 14622 * Duplicate the helper providers and register them with the 14623 * DTrace framework. 14624 */ 14625 if (help->dthps_nprovs > 0) { 14626 newhelp->dthps_nprovs = help->dthps_nprovs; 14627 newhelp->dthps_maxprovs = help->dthps_nprovs; 14628 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 14629 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14630 for (i = 0; i < newhelp->dthps_nprovs; i++) { 14631 newhelp->dthps_provs[i] = help->dthps_provs[i]; 14632 newhelp->dthps_provs[i]->dthp_ref++; 14633 } 14634 14635 hasprovs = 1; 14636 } 14637 14638 mutex_exit(&dtrace_lock); 14639 14640 if (hasprovs) 14641 dtrace_helper_provider_register(to, newhelp, NULL); 14642 } 14643 14644 /* 14645 * DTrace Hook Functions 14646 */ 14647 static void 14648 dtrace_module_loaded(struct modctl *ctl) 14649 { 14650 dtrace_provider_t *prv; 14651 14652 mutex_enter(&dtrace_provider_lock); 14653 mutex_enter(&mod_lock); 14654 14655 ASSERT(ctl->mod_busy); 14656 14657 /* 14658 * We're going to call each providers per-module provide operation 14659 * specifying only this module. 14660 */ 14661 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 14662 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 14663 14664 mutex_exit(&mod_lock); 14665 mutex_exit(&dtrace_provider_lock); 14666 14667 /* 14668 * If we have any retained enablings, we need to match against them. 14669 * Enabling probes requires that cpu_lock be held, and we cannot hold 14670 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 14671 * module. (In particular, this happens when loading scheduling 14672 * classes.) So if we have any retained enablings, we need to dispatch 14673 * our task queue to do the match for us. 14674 */ 14675 mutex_enter(&dtrace_lock); 14676 14677 if (dtrace_retained == NULL) { 14678 mutex_exit(&dtrace_lock); 14679 return; 14680 } 14681 14682 (void) taskq_dispatch(dtrace_taskq, 14683 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 14684 14685 mutex_exit(&dtrace_lock); 14686 14687 /* 14688 * And now, for a little heuristic sleaze: in general, we want to 14689 * match modules as soon as they load. However, we cannot guarantee 14690 * this, because it would lead us to the lock ordering violation 14691 * outlined above. The common case, of course, is that cpu_lock is 14692 * _not_ held -- so we delay here for a clock tick, hoping that that's 14693 * long enough for the task queue to do its work. If it's not, it's 14694 * not a serious problem -- it just means that the module that we 14695 * just loaded may not be immediately instrumentable. 14696 */ 14697 delay(1); 14698 } 14699 14700 static void 14701 dtrace_module_unloaded(struct modctl *ctl) 14702 { 14703 dtrace_probe_t template, *probe, *first, *next; 14704 dtrace_provider_t *prov; 14705 14706 template.dtpr_mod = ctl->mod_modname; 14707 14708 mutex_enter(&dtrace_provider_lock); 14709 mutex_enter(&mod_lock); 14710 mutex_enter(&dtrace_lock); 14711 14712 if (dtrace_bymod == NULL) { 14713 /* 14714 * The DTrace module is loaded (obviously) but not attached; 14715 * we don't have any work to do. 14716 */ 14717 mutex_exit(&dtrace_provider_lock); 14718 mutex_exit(&mod_lock); 14719 mutex_exit(&dtrace_lock); 14720 return; 14721 } 14722 14723 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 14724 probe != NULL; probe = probe->dtpr_nextmod) { 14725 if (probe->dtpr_ecb != NULL) { 14726 mutex_exit(&dtrace_provider_lock); 14727 mutex_exit(&mod_lock); 14728 mutex_exit(&dtrace_lock); 14729 14730 /* 14731 * This shouldn't _actually_ be possible -- we're 14732 * unloading a module that has an enabled probe in it. 14733 * (It's normally up to the provider to make sure that 14734 * this can't happen.) However, because dtps_enable() 14735 * doesn't have a failure mode, there can be an 14736 * enable/unload race. Upshot: we don't want to 14737 * assert, but we're not going to disable the 14738 * probe, either. 14739 */ 14740 if (dtrace_err_verbose) { 14741 cmn_err(CE_WARN, "unloaded module '%s' had " 14742 "enabled probes", ctl->mod_modname); 14743 } 14744 14745 return; 14746 } 14747 } 14748 14749 probe = first; 14750 14751 for (first = NULL; probe != NULL; probe = next) { 14752 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 14753 14754 dtrace_probes[probe->dtpr_id - 1] = NULL; 14755 14756 next = probe->dtpr_nextmod; 14757 dtrace_hash_remove(dtrace_bymod, probe); 14758 dtrace_hash_remove(dtrace_byfunc, probe); 14759 dtrace_hash_remove(dtrace_byname, probe); 14760 14761 if (first == NULL) { 14762 first = probe; 14763 probe->dtpr_nextmod = NULL; 14764 } else { 14765 probe->dtpr_nextmod = first; 14766 first = probe; 14767 } 14768 } 14769 14770 /* 14771 * We've removed all of the module's probes from the hash chains and 14772 * from the probe array. Now issue a dtrace_sync() to be sure that 14773 * everyone has cleared out from any probe array processing. 14774 */ 14775 dtrace_sync(); 14776 14777 for (probe = first; probe != NULL; probe = first) { 14778 first = probe->dtpr_nextmod; 14779 prov = probe->dtpr_provider; 14780 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 14781 probe->dtpr_arg); 14782 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 14783 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 14784 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 14785 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 14786 kmem_free(probe, sizeof (dtrace_probe_t)); 14787 } 14788 14789 mutex_exit(&dtrace_lock); 14790 mutex_exit(&mod_lock); 14791 mutex_exit(&dtrace_provider_lock); 14792 } 14793 14794 void 14795 dtrace_suspend(void) 14796 { 14797 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 14798 } 14799 14800 void 14801 dtrace_resume(void) 14802 { 14803 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 14804 } 14805 14806 static int 14807 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 14808 { 14809 ASSERT(MUTEX_HELD(&cpu_lock)); 14810 mutex_enter(&dtrace_lock); 14811 14812 switch (what) { 14813 case CPU_CONFIG: { 14814 dtrace_state_t *state; 14815 dtrace_optval_t *opt, rs, c; 14816 14817 /* 14818 * For now, we only allocate a new buffer for anonymous state. 14819 */ 14820 if ((state = dtrace_anon.dta_state) == NULL) 14821 break; 14822 14823 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14824 break; 14825 14826 opt = state->dts_options; 14827 c = opt[DTRACEOPT_CPU]; 14828 14829 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 14830 break; 14831 14832 /* 14833 * Regardless of what the actual policy is, we're going to 14834 * temporarily set our resize policy to be manual. We're 14835 * also going to temporarily set our CPU option to denote 14836 * the newly configured CPU. 14837 */ 14838 rs = opt[DTRACEOPT_BUFRESIZE]; 14839 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 14840 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 14841 14842 (void) dtrace_state_buffers(state); 14843 14844 opt[DTRACEOPT_BUFRESIZE] = rs; 14845 opt[DTRACEOPT_CPU] = c; 14846 14847 break; 14848 } 14849 14850 case CPU_UNCONFIG: 14851 /* 14852 * We don't free the buffer in the CPU_UNCONFIG case. (The 14853 * buffer will be freed when the consumer exits.) 14854 */ 14855 break; 14856 14857 default: 14858 break; 14859 } 14860 14861 mutex_exit(&dtrace_lock); 14862 return (0); 14863 } 14864 14865 static void 14866 dtrace_cpu_setup_initial(processorid_t cpu) 14867 { 14868 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 14869 } 14870 14871 static void 14872 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 14873 { 14874 if (dtrace_toxranges >= dtrace_toxranges_max) { 14875 int osize, nsize; 14876 dtrace_toxrange_t *range; 14877 14878 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14879 14880 if (osize == 0) { 14881 ASSERT(dtrace_toxrange == NULL); 14882 ASSERT(dtrace_toxranges_max == 0); 14883 dtrace_toxranges_max = 1; 14884 } else { 14885 dtrace_toxranges_max <<= 1; 14886 } 14887 14888 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14889 range = kmem_zalloc(nsize, KM_SLEEP); 14890 14891 if (dtrace_toxrange != NULL) { 14892 ASSERT(osize != 0); 14893 bcopy(dtrace_toxrange, range, osize); 14894 kmem_free(dtrace_toxrange, osize); 14895 } 14896 14897 dtrace_toxrange = range; 14898 } 14899 14900 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 14901 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 14902 14903 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 14904 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 14905 dtrace_toxranges++; 14906 } 14907 14908 static void 14909 dtrace_getf_barrier() 14910 { 14911 /* 14912 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings 14913 * that contain calls to getf(), this routine will be called on every 14914 * closef() before either the underlying vnode is released or the 14915 * file_t itself is freed. By the time we are here, it is essential 14916 * that the file_t can no longer be accessed from a call to getf() 14917 * in probe context -- that assures that a dtrace_sync() can be used 14918 * to clear out any enablings referring to the old structures. 14919 */ 14920 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || 14921 kcred->cr_zone->zone_dtrace_getf != 0) 14922 dtrace_sync(); 14923 } 14924 14925 /* 14926 * DTrace Driver Cookbook Functions 14927 */ 14928 /*ARGSUSED*/ 14929 static int 14930 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 14931 { 14932 dtrace_provider_id_t id; 14933 dtrace_state_t *state = NULL; 14934 dtrace_enabling_t *enab; 14935 14936 mutex_enter(&cpu_lock); 14937 mutex_enter(&dtrace_provider_lock); 14938 mutex_enter(&dtrace_lock); 14939 14940 if (ddi_soft_state_init(&dtrace_softstate, 14941 sizeof (dtrace_state_t), 0) != 0) { 14942 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 14943 mutex_exit(&cpu_lock); 14944 mutex_exit(&dtrace_provider_lock); 14945 mutex_exit(&dtrace_lock); 14946 return (DDI_FAILURE); 14947 } 14948 14949 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 14950 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 14951 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 14952 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 14953 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 14954 ddi_remove_minor_node(devi, NULL); 14955 ddi_soft_state_fini(&dtrace_softstate); 14956 mutex_exit(&cpu_lock); 14957 mutex_exit(&dtrace_provider_lock); 14958 mutex_exit(&dtrace_lock); 14959 return (DDI_FAILURE); 14960 } 14961 14962 ddi_report_dev(devi); 14963 dtrace_devi = devi; 14964 14965 dtrace_modload = dtrace_module_loaded; 14966 dtrace_modunload = dtrace_module_unloaded; 14967 dtrace_cpu_init = dtrace_cpu_setup_initial; 14968 dtrace_helpers_cleanup = dtrace_helpers_destroy; 14969 dtrace_helpers_fork = dtrace_helpers_duplicate; 14970 dtrace_cpustart_init = dtrace_suspend; 14971 dtrace_cpustart_fini = dtrace_resume; 14972 dtrace_debugger_init = dtrace_suspend; 14973 dtrace_debugger_fini = dtrace_resume; 14974 14975 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 14976 14977 ASSERT(MUTEX_HELD(&cpu_lock)); 14978 14979 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 14980 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14981 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 14982 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 14983 VM_SLEEP | VMC_IDENTIFIER); 14984 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 14985 1, INT_MAX, 0); 14986 14987 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 14988 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 14989 NULL, NULL, NULL, NULL, NULL, 0); 14990 14991 ASSERT(MUTEX_HELD(&cpu_lock)); 14992 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 14993 offsetof(dtrace_probe_t, dtpr_nextmod), 14994 offsetof(dtrace_probe_t, dtpr_prevmod)); 14995 14996 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 14997 offsetof(dtrace_probe_t, dtpr_nextfunc), 14998 offsetof(dtrace_probe_t, dtpr_prevfunc)); 14999 15000 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15001 offsetof(dtrace_probe_t, dtpr_nextname), 15002 offsetof(dtrace_probe_t, dtpr_prevname)); 15003 15004 if (dtrace_retain_max < 1) { 15005 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15006 "setting to 1", dtrace_retain_max); 15007 dtrace_retain_max = 1; 15008 } 15009 15010 /* 15011 * Now discover our toxic ranges. 15012 */ 15013 dtrace_toxic_ranges(dtrace_toxrange_add); 15014 15015 /* 15016 * Before we register ourselves as a provider to our own framework, 15017 * we would like to assert that dtrace_provider is NULL -- but that's 15018 * not true if we were loaded as a dependency of a DTrace provider. 15019 * Once we've registered, we can assert that dtrace_provider is our 15020 * pseudo provider. 15021 */ 15022 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15023 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15024 15025 ASSERT(dtrace_provider != NULL); 15026 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15027 15028 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15029 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15030 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15031 dtrace_provider, NULL, NULL, "END", 0, NULL); 15032 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15033 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15034 15035 dtrace_anon_property(); 15036 mutex_exit(&cpu_lock); 15037 15038 /* 15039 * If DTrace helper tracing is enabled, we need to allocate the 15040 * trace buffer and initialize the values. 15041 */ 15042 if (dtrace_helptrace_enabled) { 15043 ASSERT(dtrace_helptrace_buffer == NULL); 15044 dtrace_helptrace_buffer = 15045 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15046 dtrace_helptrace_next = 0; 15047 } 15048 15049 /* 15050 * If there are already providers, we must ask them to provide their 15051 * probes, and then match any anonymous enabling against them. Note 15052 * that there should be no other retained enablings at this time: 15053 * the only retained enablings at this time should be the anonymous 15054 * enabling. 15055 */ 15056 if (dtrace_anon.dta_enabling != NULL) { 15057 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15058 15059 dtrace_enabling_provide(NULL); 15060 state = dtrace_anon.dta_state; 15061 15062 /* 15063 * We couldn't hold cpu_lock across the above call to 15064 * dtrace_enabling_provide(), but we must hold it to actually 15065 * enable the probes. We have to drop all of our locks, pick 15066 * up cpu_lock, and regain our locks before matching the 15067 * retained anonymous enabling. 15068 */ 15069 mutex_exit(&dtrace_lock); 15070 mutex_exit(&dtrace_provider_lock); 15071 15072 mutex_enter(&cpu_lock); 15073 mutex_enter(&dtrace_provider_lock); 15074 mutex_enter(&dtrace_lock); 15075 15076 if ((enab = dtrace_anon.dta_enabling) != NULL) 15077 (void) dtrace_enabling_match(enab, NULL); 15078 15079 mutex_exit(&cpu_lock); 15080 } 15081 15082 mutex_exit(&dtrace_lock); 15083 mutex_exit(&dtrace_provider_lock); 15084 15085 if (state != NULL) { 15086 /* 15087 * If we created any anonymous state, set it going now. 15088 */ 15089 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15090 } 15091 15092 return (DDI_SUCCESS); 15093 } 15094 15095 /*ARGSUSED*/ 15096 static int 15097 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15098 { 15099 dtrace_state_t *state; 15100 uint32_t priv; 15101 uid_t uid; 15102 zoneid_t zoneid; 15103 15104 if (getminor(*devp) == DTRACEMNRN_HELPER) 15105 return (0); 15106 15107 /* 15108 * If this wasn't an open with the "helper" minor, then it must be 15109 * the "dtrace" minor. 15110 */ 15111 if (getminor(*devp) != DTRACEMNRN_DTRACE) 15112 return (ENXIO); 15113 15114 /* 15115 * If no DTRACE_PRIV_* bits are set in the credential, then the 15116 * caller lacks sufficient permission to do anything with DTrace. 15117 */ 15118 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15119 if (priv == DTRACE_PRIV_NONE) 15120 return (EACCES); 15121 15122 /* 15123 * Ask all providers to provide all their probes. 15124 */ 15125 mutex_enter(&dtrace_provider_lock); 15126 dtrace_probe_provide(NULL, NULL); 15127 mutex_exit(&dtrace_provider_lock); 15128 15129 mutex_enter(&cpu_lock); 15130 mutex_enter(&dtrace_lock); 15131 dtrace_opens++; 15132 dtrace_membar_producer(); 15133 15134 /* 15135 * If the kernel debugger is active (that is, if the kernel debugger 15136 * modified text in some way), we won't allow the open. 15137 */ 15138 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15139 dtrace_opens--; 15140 mutex_exit(&cpu_lock); 15141 mutex_exit(&dtrace_lock); 15142 return (EBUSY); 15143 } 15144 15145 state = dtrace_state_create(devp, cred_p); 15146 mutex_exit(&cpu_lock); 15147 15148 if (state == NULL) { 15149 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 15150 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15151 mutex_exit(&dtrace_lock); 15152 return (EAGAIN); 15153 } 15154 15155 mutex_exit(&dtrace_lock); 15156 15157 return (0); 15158 } 15159 15160 /*ARGSUSED*/ 15161 static int 15162 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15163 { 15164 minor_t minor = getminor(dev); 15165 dtrace_state_t *state; 15166 15167 if (minor == DTRACEMNRN_HELPER) 15168 return (0); 15169 15170 state = ddi_get_soft_state(dtrace_softstate, minor); 15171 15172 mutex_enter(&cpu_lock); 15173 mutex_enter(&dtrace_lock); 15174 15175 if (state->dts_anon) { 15176 /* 15177 * There is anonymous state. Destroy that first. 15178 */ 15179 ASSERT(dtrace_anon.dta_state == NULL); 15180 dtrace_state_destroy(state->dts_anon); 15181 } 15182 15183 dtrace_state_destroy(state); 15184 ASSERT(dtrace_opens > 0); 15185 15186 /* 15187 * Only relinquish control of the kernel debugger interface when there 15188 * are no consumers and no anonymous enablings. 15189 */ 15190 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 15191 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15192 15193 mutex_exit(&dtrace_lock); 15194 mutex_exit(&cpu_lock); 15195 15196 return (0); 15197 } 15198 15199 /*ARGSUSED*/ 15200 static int 15201 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15202 { 15203 int rval; 15204 dof_helper_t help, *dhp = NULL; 15205 15206 switch (cmd) { 15207 case DTRACEHIOC_ADDDOF: 15208 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15209 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15210 return (EFAULT); 15211 } 15212 15213 dhp = &help; 15214 arg = (intptr_t)help.dofhp_dof; 15215 /*FALLTHROUGH*/ 15216 15217 case DTRACEHIOC_ADD: { 15218 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15219 15220 if (dof == NULL) 15221 return (rval); 15222 15223 mutex_enter(&dtrace_lock); 15224 15225 /* 15226 * dtrace_helper_slurp() takes responsibility for the dof -- 15227 * it may free it now or it may save it and free it later. 15228 */ 15229 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15230 *rv = rval; 15231 rval = 0; 15232 } else { 15233 rval = EINVAL; 15234 } 15235 15236 mutex_exit(&dtrace_lock); 15237 return (rval); 15238 } 15239 15240 case DTRACEHIOC_REMOVE: { 15241 mutex_enter(&dtrace_lock); 15242 rval = dtrace_helper_destroygen(arg); 15243 mutex_exit(&dtrace_lock); 15244 15245 return (rval); 15246 } 15247 15248 default: 15249 break; 15250 } 15251 15252 return (ENOTTY); 15253 } 15254 15255 /*ARGSUSED*/ 15256 static int 15257 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15258 { 15259 minor_t minor = getminor(dev); 15260 dtrace_state_t *state; 15261 int rval; 15262 15263 if (minor == DTRACEMNRN_HELPER) 15264 return (dtrace_ioctl_helper(cmd, arg, rv)); 15265 15266 state = ddi_get_soft_state(dtrace_softstate, minor); 15267 15268 if (state->dts_anon) { 15269 ASSERT(dtrace_anon.dta_state == NULL); 15270 state = state->dts_anon; 15271 } 15272 15273 switch (cmd) { 15274 case DTRACEIOC_PROVIDER: { 15275 dtrace_providerdesc_t pvd; 15276 dtrace_provider_t *pvp; 15277 15278 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15279 return (EFAULT); 15280 15281 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15282 mutex_enter(&dtrace_provider_lock); 15283 15284 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15285 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15286 break; 15287 } 15288 15289 mutex_exit(&dtrace_provider_lock); 15290 15291 if (pvp == NULL) 15292 return (ESRCH); 15293 15294 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15295 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15296 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15297 return (EFAULT); 15298 15299 return (0); 15300 } 15301 15302 case DTRACEIOC_EPROBE: { 15303 dtrace_eprobedesc_t epdesc; 15304 dtrace_ecb_t *ecb; 15305 dtrace_action_t *act; 15306 void *buf; 15307 size_t size; 15308 uintptr_t dest; 15309 int nrecs; 15310 15311 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15312 return (EFAULT); 15313 15314 mutex_enter(&dtrace_lock); 15315 15316 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15317 mutex_exit(&dtrace_lock); 15318 return (EINVAL); 15319 } 15320 15321 if (ecb->dte_probe == NULL) { 15322 mutex_exit(&dtrace_lock); 15323 return (EINVAL); 15324 } 15325 15326 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15327 epdesc.dtepd_uarg = ecb->dte_uarg; 15328 epdesc.dtepd_size = ecb->dte_size; 15329 15330 nrecs = epdesc.dtepd_nrecs; 15331 epdesc.dtepd_nrecs = 0; 15332 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15333 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15334 continue; 15335 15336 epdesc.dtepd_nrecs++; 15337 } 15338 15339 /* 15340 * Now that we have the size, we need to allocate a temporary 15341 * buffer in which to store the complete description. We need 15342 * the temporary buffer to be able to drop dtrace_lock() 15343 * across the copyout(), below. 15344 */ 15345 size = sizeof (dtrace_eprobedesc_t) + 15346 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15347 15348 buf = kmem_alloc(size, KM_SLEEP); 15349 dest = (uintptr_t)buf; 15350 15351 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15352 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15353 15354 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15355 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15356 continue; 15357 15358 if (nrecs-- == 0) 15359 break; 15360 15361 bcopy(&act->dta_rec, (void *)dest, 15362 sizeof (dtrace_recdesc_t)); 15363 dest += sizeof (dtrace_recdesc_t); 15364 } 15365 15366 mutex_exit(&dtrace_lock); 15367 15368 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15369 kmem_free(buf, size); 15370 return (EFAULT); 15371 } 15372 15373 kmem_free(buf, size); 15374 return (0); 15375 } 15376 15377 case DTRACEIOC_AGGDESC: { 15378 dtrace_aggdesc_t aggdesc; 15379 dtrace_action_t *act; 15380 dtrace_aggregation_t *agg; 15381 int nrecs; 15382 uint32_t offs; 15383 dtrace_recdesc_t *lrec; 15384 void *buf; 15385 size_t size; 15386 uintptr_t dest; 15387 15388 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15389 return (EFAULT); 15390 15391 mutex_enter(&dtrace_lock); 15392 15393 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15394 mutex_exit(&dtrace_lock); 15395 return (EINVAL); 15396 } 15397 15398 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15399 15400 nrecs = aggdesc.dtagd_nrecs; 15401 aggdesc.dtagd_nrecs = 0; 15402 15403 offs = agg->dtag_base; 15404 lrec = &agg->dtag_action.dta_rec; 15405 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15406 15407 for (act = agg->dtag_first; ; act = act->dta_next) { 15408 ASSERT(act->dta_intuple || 15409 DTRACEACT_ISAGG(act->dta_kind)); 15410 15411 /* 15412 * If this action has a record size of zero, it 15413 * denotes an argument to the aggregating action. 15414 * Because the presence of this record doesn't (or 15415 * shouldn't) affect the way the data is interpreted, 15416 * we don't copy it out to save user-level the 15417 * confusion of dealing with a zero-length record. 15418 */ 15419 if (act->dta_rec.dtrd_size == 0) { 15420 ASSERT(agg->dtag_hasarg); 15421 continue; 15422 } 15423 15424 aggdesc.dtagd_nrecs++; 15425 15426 if (act == &agg->dtag_action) 15427 break; 15428 } 15429 15430 /* 15431 * Now that we have the size, we need to allocate a temporary 15432 * buffer in which to store the complete description. We need 15433 * the temporary buffer to be able to drop dtrace_lock() 15434 * across the copyout(), below. 15435 */ 15436 size = sizeof (dtrace_aggdesc_t) + 15437 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15438 15439 buf = kmem_alloc(size, KM_SLEEP); 15440 dest = (uintptr_t)buf; 15441 15442 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15443 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15444 15445 for (act = agg->dtag_first; ; act = act->dta_next) { 15446 dtrace_recdesc_t rec = act->dta_rec; 15447 15448 /* 15449 * See the comment in the above loop for why we pass 15450 * over zero-length records. 15451 */ 15452 if (rec.dtrd_size == 0) { 15453 ASSERT(agg->dtag_hasarg); 15454 continue; 15455 } 15456 15457 if (nrecs-- == 0) 15458 break; 15459 15460 rec.dtrd_offset -= offs; 15461 bcopy(&rec, (void *)dest, sizeof (rec)); 15462 dest += sizeof (dtrace_recdesc_t); 15463 15464 if (act == &agg->dtag_action) 15465 break; 15466 } 15467 15468 mutex_exit(&dtrace_lock); 15469 15470 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15471 kmem_free(buf, size); 15472 return (EFAULT); 15473 } 15474 15475 kmem_free(buf, size); 15476 return (0); 15477 } 15478 15479 case DTRACEIOC_ENABLE: { 15480 dof_hdr_t *dof; 15481 dtrace_enabling_t *enab = NULL; 15482 dtrace_vstate_t *vstate; 15483 int err = 0; 15484 15485 *rv = 0; 15486 15487 /* 15488 * If a NULL argument has been passed, we take this as our 15489 * cue to reevaluate our enablings. 15490 */ 15491 if (arg == NULL) { 15492 dtrace_enabling_matchall(); 15493 15494 return (0); 15495 } 15496 15497 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15498 return (rval); 15499 15500 mutex_enter(&cpu_lock); 15501 mutex_enter(&dtrace_lock); 15502 vstate = &state->dts_vstate; 15503 15504 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15505 mutex_exit(&dtrace_lock); 15506 mutex_exit(&cpu_lock); 15507 dtrace_dof_destroy(dof); 15508 return (EBUSY); 15509 } 15510 15511 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15512 mutex_exit(&dtrace_lock); 15513 mutex_exit(&cpu_lock); 15514 dtrace_dof_destroy(dof); 15515 return (EINVAL); 15516 } 15517 15518 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15519 dtrace_enabling_destroy(enab); 15520 mutex_exit(&dtrace_lock); 15521 mutex_exit(&cpu_lock); 15522 dtrace_dof_destroy(dof); 15523 return (rval); 15524 } 15525 15526 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15527 err = dtrace_enabling_retain(enab); 15528 } else { 15529 dtrace_enabling_destroy(enab); 15530 } 15531 15532 mutex_exit(&cpu_lock); 15533 mutex_exit(&dtrace_lock); 15534 dtrace_dof_destroy(dof); 15535 15536 return (err); 15537 } 15538 15539 case DTRACEIOC_REPLICATE: { 15540 dtrace_repldesc_t desc; 15541 dtrace_probedesc_t *match = &desc.dtrpd_match; 15542 dtrace_probedesc_t *create = &desc.dtrpd_create; 15543 int err; 15544 15545 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15546 return (EFAULT); 15547 15548 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15549 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15550 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15551 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15552 15553 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15554 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15555 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15556 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15557 15558 mutex_enter(&dtrace_lock); 15559 err = dtrace_enabling_replicate(state, match, create); 15560 mutex_exit(&dtrace_lock); 15561 15562 return (err); 15563 } 15564 15565 case DTRACEIOC_PROBEMATCH: 15566 case DTRACEIOC_PROBES: { 15567 dtrace_probe_t *probe = NULL; 15568 dtrace_probedesc_t desc; 15569 dtrace_probekey_t pkey; 15570 dtrace_id_t i; 15571 int m = 0; 15572 uint32_t priv; 15573 uid_t uid; 15574 zoneid_t zoneid; 15575 15576 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15577 return (EFAULT); 15578 15579 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15580 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15581 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15582 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15583 15584 /* 15585 * Before we attempt to match this probe, we want to give 15586 * all providers the opportunity to provide it. 15587 */ 15588 if (desc.dtpd_id == DTRACE_IDNONE) { 15589 mutex_enter(&dtrace_provider_lock); 15590 dtrace_probe_provide(&desc, NULL); 15591 mutex_exit(&dtrace_provider_lock); 15592 desc.dtpd_id++; 15593 } 15594 15595 if (cmd == DTRACEIOC_PROBEMATCH) { 15596 dtrace_probekey(&desc, &pkey); 15597 pkey.dtpk_id = DTRACE_IDNONE; 15598 } 15599 15600 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15601 15602 mutex_enter(&dtrace_lock); 15603 15604 if (cmd == DTRACEIOC_PROBEMATCH) { 15605 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15606 if ((probe = dtrace_probes[i - 1]) != NULL && 15607 (m = dtrace_match_probe(probe, &pkey, 15608 priv, uid, zoneid)) != 0) 15609 break; 15610 } 15611 15612 if (m < 0) { 15613 mutex_exit(&dtrace_lock); 15614 return (EINVAL); 15615 } 15616 15617 } else { 15618 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15619 if ((probe = dtrace_probes[i - 1]) != NULL && 15620 dtrace_match_priv(probe, priv, uid, zoneid)) 15621 break; 15622 } 15623 } 15624 15625 if (probe == NULL) { 15626 mutex_exit(&dtrace_lock); 15627 return (ESRCH); 15628 } 15629 15630 dtrace_probe_description(probe, &desc); 15631 mutex_exit(&dtrace_lock); 15632 15633 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15634 return (EFAULT); 15635 15636 return (0); 15637 } 15638 15639 case DTRACEIOC_PROBEARG: { 15640 dtrace_argdesc_t desc; 15641 dtrace_probe_t *probe; 15642 dtrace_provider_t *prov; 15643 15644 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15645 return (EFAULT); 15646 15647 if (desc.dtargd_id == DTRACE_IDNONE) 15648 return (EINVAL); 15649 15650 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15651 return (EINVAL); 15652 15653 mutex_enter(&dtrace_provider_lock); 15654 mutex_enter(&mod_lock); 15655 mutex_enter(&dtrace_lock); 15656 15657 if (desc.dtargd_id > dtrace_nprobes) { 15658 mutex_exit(&dtrace_lock); 15659 mutex_exit(&mod_lock); 15660 mutex_exit(&dtrace_provider_lock); 15661 return (EINVAL); 15662 } 15663 15664 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 15665 mutex_exit(&dtrace_lock); 15666 mutex_exit(&mod_lock); 15667 mutex_exit(&dtrace_provider_lock); 15668 return (EINVAL); 15669 } 15670 15671 mutex_exit(&dtrace_lock); 15672 15673 prov = probe->dtpr_provider; 15674 15675 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 15676 /* 15677 * There isn't any typed information for this probe. 15678 * Set the argument number to DTRACE_ARGNONE. 15679 */ 15680 desc.dtargd_ndx = DTRACE_ARGNONE; 15681 } else { 15682 desc.dtargd_native[0] = '\0'; 15683 desc.dtargd_xlate[0] = '\0'; 15684 desc.dtargd_mapping = desc.dtargd_ndx; 15685 15686 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 15687 probe->dtpr_id, probe->dtpr_arg, &desc); 15688 } 15689 15690 mutex_exit(&mod_lock); 15691 mutex_exit(&dtrace_provider_lock); 15692 15693 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15694 return (EFAULT); 15695 15696 return (0); 15697 } 15698 15699 case DTRACEIOC_GO: { 15700 processorid_t cpuid; 15701 rval = dtrace_state_go(state, &cpuid); 15702 15703 if (rval != 0) 15704 return (rval); 15705 15706 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15707 return (EFAULT); 15708 15709 return (0); 15710 } 15711 15712 case DTRACEIOC_STOP: { 15713 processorid_t cpuid; 15714 15715 mutex_enter(&dtrace_lock); 15716 rval = dtrace_state_stop(state, &cpuid); 15717 mutex_exit(&dtrace_lock); 15718 15719 if (rval != 0) 15720 return (rval); 15721 15722 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15723 return (EFAULT); 15724 15725 return (0); 15726 } 15727 15728 case DTRACEIOC_DOFGET: { 15729 dof_hdr_t hdr, *dof; 15730 uint64_t len; 15731 15732 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 15733 return (EFAULT); 15734 15735 mutex_enter(&dtrace_lock); 15736 dof = dtrace_dof_create(state); 15737 mutex_exit(&dtrace_lock); 15738 15739 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 15740 rval = copyout(dof, (void *)arg, len); 15741 dtrace_dof_destroy(dof); 15742 15743 return (rval == 0 ? 0 : EFAULT); 15744 } 15745 15746 case DTRACEIOC_AGGSNAP: 15747 case DTRACEIOC_BUFSNAP: { 15748 dtrace_bufdesc_t desc; 15749 caddr_t cached; 15750 dtrace_buffer_t *buf; 15751 15752 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15753 return (EFAULT); 15754 15755 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 15756 return (EINVAL); 15757 15758 mutex_enter(&dtrace_lock); 15759 15760 if (cmd == DTRACEIOC_BUFSNAP) { 15761 buf = &state->dts_buffer[desc.dtbd_cpu]; 15762 } else { 15763 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 15764 } 15765 15766 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 15767 size_t sz = buf->dtb_offset; 15768 15769 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 15770 mutex_exit(&dtrace_lock); 15771 return (EBUSY); 15772 } 15773 15774 /* 15775 * If this buffer has already been consumed, we're 15776 * going to indicate that there's nothing left here 15777 * to consume. 15778 */ 15779 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 15780 mutex_exit(&dtrace_lock); 15781 15782 desc.dtbd_size = 0; 15783 desc.dtbd_drops = 0; 15784 desc.dtbd_errors = 0; 15785 desc.dtbd_oldest = 0; 15786 sz = sizeof (desc); 15787 15788 if (copyout(&desc, (void *)arg, sz) != 0) 15789 return (EFAULT); 15790 15791 return (0); 15792 } 15793 15794 /* 15795 * If this is a ring buffer that has wrapped, we want 15796 * to copy the whole thing out. 15797 */ 15798 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 15799 dtrace_buffer_polish(buf); 15800 sz = buf->dtb_size; 15801 } 15802 15803 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 15804 mutex_exit(&dtrace_lock); 15805 return (EFAULT); 15806 } 15807 15808 desc.dtbd_size = sz; 15809 desc.dtbd_drops = buf->dtb_drops; 15810 desc.dtbd_errors = buf->dtb_errors; 15811 desc.dtbd_oldest = buf->dtb_xamot_offset; 15812 15813 mutex_exit(&dtrace_lock); 15814 15815 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15816 return (EFAULT); 15817 15818 buf->dtb_flags |= DTRACEBUF_CONSUMED; 15819 15820 return (0); 15821 } 15822 15823 if (buf->dtb_tomax == NULL) { 15824 ASSERT(buf->dtb_xamot == NULL); 15825 mutex_exit(&dtrace_lock); 15826 return (ENOENT); 15827 } 15828 15829 cached = buf->dtb_tomax; 15830 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 15831 15832 dtrace_xcall(desc.dtbd_cpu, 15833 (dtrace_xcall_t)dtrace_buffer_switch, buf); 15834 15835 state->dts_errors += buf->dtb_xamot_errors; 15836 15837 /* 15838 * If the buffers did not actually switch, then the cross call 15839 * did not take place -- presumably because the given CPU is 15840 * not in the ready set. If this is the case, we'll return 15841 * ENOENT. 15842 */ 15843 if (buf->dtb_tomax == cached) { 15844 ASSERT(buf->dtb_xamot != cached); 15845 mutex_exit(&dtrace_lock); 15846 return (ENOENT); 15847 } 15848 15849 ASSERT(cached == buf->dtb_xamot); 15850 15851 /* 15852 * We have our snapshot; now copy it out. 15853 */ 15854 if (copyout(buf->dtb_xamot, desc.dtbd_data, 15855 buf->dtb_xamot_offset) != 0) { 15856 mutex_exit(&dtrace_lock); 15857 return (EFAULT); 15858 } 15859 15860 desc.dtbd_size = buf->dtb_xamot_offset; 15861 desc.dtbd_drops = buf->dtb_xamot_drops; 15862 desc.dtbd_errors = buf->dtb_xamot_errors; 15863 desc.dtbd_oldest = 0; 15864 15865 mutex_exit(&dtrace_lock); 15866 15867 /* 15868 * Finally, copy out the buffer description. 15869 */ 15870 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15871 return (EFAULT); 15872 15873 return (0); 15874 } 15875 15876 case DTRACEIOC_CONF: { 15877 dtrace_conf_t conf; 15878 15879 bzero(&conf, sizeof (conf)); 15880 conf.dtc_difversion = DIF_VERSION; 15881 conf.dtc_difintregs = DIF_DIR_NREGS; 15882 conf.dtc_diftupregs = DIF_DTR_NREGS; 15883 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 15884 15885 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 15886 return (EFAULT); 15887 15888 return (0); 15889 } 15890 15891 case DTRACEIOC_STATUS: { 15892 dtrace_status_t stat; 15893 dtrace_dstate_t *dstate; 15894 int i, j; 15895 uint64_t nerrs; 15896 15897 /* 15898 * See the comment in dtrace_state_deadman() for the reason 15899 * for setting dts_laststatus to INT64_MAX before setting 15900 * it to the correct value. 15901 */ 15902 state->dts_laststatus = INT64_MAX; 15903 dtrace_membar_producer(); 15904 state->dts_laststatus = dtrace_gethrtime(); 15905 15906 bzero(&stat, sizeof (stat)); 15907 15908 mutex_enter(&dtrace_lock); 15909 15910 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 15911 mutex_exit(&dtrace_lock); 15912 return (ENOENT); 15913 } 15914 15915 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 15916 stat.dtst_exiting = 1; 15917 15918 nerrs = state->dts_errors; 15919 dstate = &state->dts_vstate.dtvs_dynvars; 15920 15921 for (i = 0; i < NCPU; i++) { 15922 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 15923 15924 stat.dtst_dyndrops += dcpu->dtdsc_drops; 15925 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 15926 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 15927 15928 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 15929 stat.dtst_filled++; 15930 15931 nerrs += state->dts_buffer[i].dtb_errors; 15932 15933 for (j = 0; j < state->dts_nspeculations; j++) { 15934 dtrace_speculation_t *spec; 15935 dtrace_buffer_t *buf; 15936 15937 spec = &state->dts_speculations[j]; 15938 buf = &spec->dtsp_buffer[i]; 15939 stat.dtst_specdrops += buf->dtb_xamot_drops; 15940 } 15941 } 15942 15943 stat.dtst_specdrops_busy = state->dts_speculations_busy; 15944 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 15945 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 15946 stat.dtst_dblerrors = state->dts_dblerrors; 15947 stat.dtst_killed = 15948 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 15949 stat.dtst_errors = nerrs; 15950 15951 mutex_exit(&dtrace_lock); 15952 15953 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 15954 return (EFAULT); 15955 15956 return (0); 15957 } 15958 15959 case DTRACEIOC_FORMAT: { 15960 dtrace_fmtdesc_t fmt; 15961 char *str; 15962 int len; 15963 15964 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 15965 return (EFAULT); 15966 15967 mutex_enter(&dtrace_lock); 15968 15969 if (fmt.dtfd_format == 0 || 15970 fmt.dtfd_format > state->dts_nformats) { 15971 mutex_exit(&dtrace_lock); 15972 return (EINVAL); 15973 } 15974 15975 /* 15976 * Format strings are allocated contiguously and they are 15977 * never freed; if a format index is less than the number 15978 * of formats, we can assert that the format map is non-NULL 15979 * and that the format for the specified index is non-NULL. 15980 */ 15981 ASSERT(state->dts_formats != NULL); 15982 str = state->dts_formats[fmt.dtfd_format - 1]; 15983 ASSERT(str != NULL); 15984 15985 len = strlen(str) + 1; 15986 15987 if (len > fmt.dtfd_length) { 15988 fmt.dtfd_length = len; 15989 15990 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 15991 mutex_exit(&dtrace_lock); 15992 return (EINVAL); 15993 } 15994 } else { 15995 if (copyout(str, fmt.dtfd_string, len) != 0) { 15996 mutex_exit(&dtrace_lock); 15997 return (EINVAL); 15998 } 15999 } 16000 16001 mutex_exit(&dtrace_lock); 16002 return (0); 16003 } 16004 16005 default: 16006 break; 16007 } 16008 16009 return (ENOTTY); 16010 } 16011 16012 /*ARGSUSED*/ 16013 static int 16014 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 16015 { 16016 dtrace_state_t *state; 16017 16018 switch (cmd) { 16019 case DDI_DETACH: 16020 break; 16021 16022 case DDI_SUSPEND: 16023 return (DDI_SUCCESS); 16024 16025 default: 16026 return (DDI_FAILURE); 16027 } 16028 16029 mutex_enter(&cpu_lock); 16030 mutex_enter(&dtrace_provider_lock); 16031 mutex_enter(&dtrace_lock); 16032 16033 ASSERT(dtrace_opens == 0); 16034 16035 if (dtrace_helpers > 0) { 16036 mutex_exit(&dtrace_provider_lock); 16037 mutex_exit(&dtrace_lock); 16038 mutex_exit(&cpu_lock); 16039 return (DDI_FAILURE); 16040 } 16041 16042 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 16043 mutex_exit(&dtrace_provider_lock); 16044 mutex_exit(&dtrace_lock); 16045 mutex_exit(&cpu_lock); 16046 return (DDI_FAILURE); 16047 } 16048 16049 dtrace_provider = NULL; 16050 16051 if ((state = dtrace_anon_grab()) != NULL) { 16052 /* 16053 * If there were ECBs on this state, the provider should 16054 * have not been allowed to detach; assert that there is 16055 * none. 16056 */ 16057 ASSERT(state->dts_necbs == 0); 16058 dtrace_state_destroy(state); 16059 16060 /* 16061 * If we're being detached with anonymous state, we need to 16062 * indicate to the kernel debugger that DTrace is now inactive. 16063 */ 16064 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16065 } 16066 16067 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 16068 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16069 dtrace_cpu_init = NULL; 16070 dtrace_helpers_cleanup = NULL; 16071 dtrace_helpers_fork = NULL; 16072 dtrace_cpustart_init = NULL; 16073 dtrace_cpustart_fini = NULL; 16074 dtrace_debugger_init = NULL; 16075 dtrace_debugger_fini = NULL; 16076 dtrace_modload = NULL; 16077 dtrace_modunload = NULL; 16078 16079 ASSERT(dtrace_getf == 0); 16080 ASSERT(dtrace_closef == NULL); 16081 16082 mutex_exit(&cpu_lock); 16083 16084 if (dtrace_helptrace_enabled) { 16085 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 16086 dtrace_helptrace_buffer = NULL; 16087 } 16088 16089 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 16090 dtrace_probes = NULL; 16091 dtrace_nprobes = 0; 16092 16093 dtrace_hash_destroy(dtrace_bymod); 16094 dtrace_hash_destroy(dtrace_byfunc); 16095 dtrace_hash_destroy(dtrace_byname); 16096 dtrace_bymod = NULL; 16097 dtrace_byfunc = NULL; 16098 dtrace_byname = NULL; 16099 16100 kmem_cache_destroy(dtrace_state_cache); 16101 vmem_destroy(dtrace_minor); 16102 vmem_destroy(dtrace_arena); 16103 16104 if (dtrace_toxrange != NULL) { 16105 kmem_free(dtrace_toxrange, 16106 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 16107 dtrace_toxrange = NULL; 16108 dtrace_toxranges = 0; 16109 dtrace_toxranges_max = 0; 16110 } 16111 16112 ddi_remove_minor_node(dtrace_devi, NULL); 16113 dtrace_devi = NULL; 16114 16115 ddi_soft_state_fini(&dtrace_softstate); 16116 16117 ASSERT(dtrace_vtime_references == 0); 16118 ASSERT(dtrace_opens == 0); 16119 ASSERT(dtrace_retained == NULL); 16120 16121 mutex_exit(&dtrace_lock); 16122 mutex_exit(&dtrace_provider_lock); 16123 16124 /* 16125 * We don't destroy the task queue until after we have dropped our 16126 * locks (taskq_destroy() may block on running tasks). To prevent 16127 * attempting to do work after we have effectively detached but before 16128 * the task queue has been destroyed, all tasks dispatched via the 16129 * task queue must check that DTrace is still attached before 16130 * performing any operation. 16131 */ 16132 taskq_destroy(dtrace_taskq); 16133 dtrace_taskq = NULL; 16134 16135 return (DDI_SUCCESS); 16136 } 16137 16138 /*ARGSUSED*/ 16139 static int 16140 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 16141 { 16142 int error; 16143 16144 switch (infocmd) { 16145 case DDI_INFO_DEVT2DEVINFO: 16146 *result = (void *)dtrace_devi; 16147 error = DDI_SUCCESS; 16148 break; 16149 case DDI_INFO_DEVT2INSTANCE: 16150 *result = (void *)0; 16151 error = DDI_SUCCESS; 16152 break; 16153 default: 16154 error = DDI_FAILURE; 16155 } 16156 return (error); 16157 } 16158 16159 static struct cb_ops dtrace_cb_ops = { 16160 dtrace_open, /* open */ 16161 dtrace_close, /* close */ 16162 nulldev, /* strategy */ 16163 nulldev, /* print */ 16164 nodev, /* dump */ 16165 nodev, /* read */ 16166 nodev, /* write */ 16167 dtrace_ioctl, /* ioctl */ 16168 nodev, /* devmap */ 16169 nodev, /* mmap */ 16170 nodev, /* segmap */ 16171 nochpoll, /* poll */ 16172 ddi_prop_op, /* cb_prop_op */ 16173 0, /* streamtab */ 16174 D_NEW | D_MP /* Driver compatibility flag */ 16175 }; 16176 16177 static struct dev_ops dtrace_ops = { 16178 DEVO_REV, /* devo_rev */ 16179 0, /* refcnt */ 16180 dtrace_info, /* get_dev_info */ 16181 nulldev, /* identify */ 16182 nulldev, /* probe */ 16183 dtrace_attach, /* attach */ 16184 dtrace_detach, /* detach */ 16185 nodev, /* reset */ 16186 &dtrace_cb_ops, /* driver operations */ 16187 NULL, /* bus operations */ 16188 nodev, /* dev power */ 16189 ddi_quiesce_not_needed, /* quiesce */ 16190 }; 16191 16192 static struct modldrv modldrv = { 16193 &mod_driverops, /* module type (this is a pseudo driver) */ 16194 "Dynamic Tracing", /* name of module */ 16195 &dtrace_ops, /* driver ops */ 16196 }; 16197 16198 static struct modlinkage modlinkage = { 16199 MODREV_1, 16200 (void *)&modldrv, 16201 NULL 16202 }; 16203 16204 int 16205 _init(void) 16206 { 16207 return (mod_install(&modlinkage)); 16208 } 16209 16210 int 16211 _info(struct modinfo *modinfop) 16212 { 16213 return (mod_info(&modlinkage, modinfop)); 16214 } 16215 16216 int 16217 _fini(void) 16218 { 16219 return (mod_remove(&modlinkage)); 16220 }