2916 DTrace in a zone should be able to access fds[]
--- old/usr/src/uts/common/sys/dtrace_impl.h
+++ new/usr/src/uts/common/sys/dtrace_impl.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
29 29 */
30 30
31 31 #ifndef _SYS_DTRACE_IMPL_H
32 32 #define _SYS_DTRACE_IMPL_H
33 33
34 34 #ifdef __cplusplus
35 35 extern "C" {
36 36 #endif
37 37
38 38 /*
39 39 * DTrace Dynamic Tracing Software: Kernel Implementation Interfaces
40 40 *
41 41 * Note: The contents of this file are private to the implementation of the
42 42 * Solaris system and DTrace subsystem and are subject to change at any time
43 43 * without notice. Applications and drivers using these interfaces will fail
44 44 * to run on future releases. These interfaces should not be used for any
45 45 * purpose except those expressly outlined in dtrace(7D) and libdtrace(3LIB).
46 46 * Please refer to the "Solaris Dynamic Tracing Guide" for more information.
47 47 */
48 48
49 49 #include <sys/dtrace.h>
50 50
51 51 /*
52 52 * DTrace Implementation Constants and Typedefs
53 53 */
54 54 #define DTRACE_MAXPROPLEN 128
55 55 #define DTRACE_DYNVAR_CHUNKSIZE 256
56 56
57 57 struct dtrace_probe;
58 58 struct dtrace_ecb;
59 59 struct dtrace_predicate;
60 60 struct dtrace_action;
61 61 struct dtrace_provider;
62 62 struct dtrace_state;
63 63
64 64 typedef struct dtrace_probe dtrace_probe_t;
65 65 typedef struct dtrace_ecb dtrace_ecb_t;
66 66 typedef struct dtrace_predicate dtrace_predicate_t;
67 67 typedef struct dtrace_action dtrace_action_t;
68 68 typedef struct dtrace_provider dtrace_provider_t;
69 69 typedef struct dtrace_meta dtrace_meta_t;
70 70 typedef struct dtrace_state dtrace_state_t;
71 71 typedef uint32_t dtrace_optid_t;
72 72 typedef uint32_t dtrace_specid_t;
73 73 typedef uint64_t dtrace_genid_t;
74 74
75 75 /*
76 76 * DTrace Probes
77 77 *
78 78 * The probe is the fundamental unit of the DTrace architecture. Probes are
79 79 * created by DTrace providers, and managed by the DTrace framework. A probe
80 80 * is identified by a unique <provider, module, function, name> tuple, and has
81 81 * a unique probe identifier assigned to it. (Some probes are not associated
82 82 * with a specific point in text; these are called _unanchored probes_ and have
83 83 * no module or function associated with them.) Probes are represented as a
84 84 * dtrace_probe structure. To allow quick lookups based on each element of the
85 85 * probe tuple, probes are hashed by each of provider, module, function and
86 86 * name. (If a lookup is performed based on a regular expression, a
87 87 * dtrace_probekey is prepared, and a linear search is performed.) Each probe
88 88 * is additionally pointed to by a linear array indexed by its identifier. The
89 89 * identifier is the provider's mechanism for indicating to the DTrace
90 90 * framework that a probe has fired: the identifier is passed as the first
91 91 * argument to dtrace_probe(), where it is then mapped into the corresponding
92 92 * dtrace_probe structure. From the dtrace_probe structure, dtrace_probe() can
93 93 * iterate over the probe's list of enabling control blocks; see "DTrace
94 94 * Enabling Control Blocks", below.)
95 95 */
96 96 struct dtrace_probe {
97 97 dtrace_id_t dtpr_id; /* probe identifier */
98 98 dtrace_ecb_t *dtpr_ecb; /* ECB list; see below */
99 99 dtrace_ecb_t *dtpr_ecb_last; /* last ECB in list */
100 100 void *dtpr_arg; /* provider argument */
101 101 dtrace_cacheid_t dtpr_predcache; /* predicate cache ID */
102 102 int dtpr_aframes; /* artificial frames */
103 103 dtrace_provider_t *dtpr_provider; /* pointer to provider */
104 104 char *dtpr_mod; /* probe's module name */
105 105 char *dtpr_func; /* probe's function name */
106 106 char *dtpr_name; /* probe's name */
107 107 dtrace_probe_t *dtpr_nextmod; /* next in module hash */
108 108 dtrace_probe_t *dtpr_prevmod; /* previous in module hash */
109 109 dtrace_probe_t *dtpr_nextfunc; /* next in function hash */
110 110 dtrace_probe_t *dtpr_prevfunc; /* previous in function hash */
111 111 dtrace_probe_t *dtpr_nextname; /* next in name hash */
112 112 dtrace_probe_t *dtpr_prevname; /* previous in name hash */
113 113 dtrace_genid_t dtpr_gen; /* probe generation ID */
114 114 };
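/*
 * A minimal sketch of the firing path described above, assuming a
 * hypothetical kernel provider ("xmpl") that has registered with the
 * framework and created a probe with dtrace_probe_create(). The provider
 * retains only the dtrace_id_t; when the probe site is hit, the identifier
 * (plus up to five arguments) is handed to dtrace_probe(), which maps it
 * back to the dtrace_probe structure and walks the ECB list.
 */
static dtrace_id_t xmpl_probe_id;	/* hypothetical cached identifier */

static void
xmpl_create_probe(dtrace_provider_id_t prov)
{
	/* <provider, module, function, name>; NULL mod/func => unanchored */
	xmpl_probe_id = dtrace_probe_create(prov, NULL, NULL, "start", 0, NULL);
}

static void
xmpl_probe_hit(uintptr_t arg0, uintptr_t arg1)
{
	dtrace_probe(xmpl_probe_id, arg0, arg1, 0, 0, 0);
}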
115 115
116 116 typedef int dtrace_probekey_f(const char *, const char *, int);
117 117
118 118 typedef struct dtrace_probekey {
119 119 const char *dtpk_prov; /* provider name to match */
120 120 dtrace_probekey_f *dtpk_pmatch; /* provider matching function */
121 121 const char *dtpk_mod; /* module name to match */
122 122 dtrace_probekey_f *dtpk_mmatch; /* module matching function */
123 123 const char *dtpk_func; /* func name to match */
124 124 dtrace_probekey_f *dtpk_fmatch; /* func matching function */
125 125 const char *dtpk_name; /* name to match */
126 126 dtrace_probekey_f *dtpk_nmatch; /* name matching function */
127 127 dtrace_id_t dtpk_id; /* identifier to match */
128 128 } dtrace_probekey_t;
129 129
130 130 typedef struct dtrace_hashbucket {
131 131 struct dtrace_hashbucket *dthb_next; /* next on hash chain */
132 132 dtrace_probe_t *dthb_chain; /* chain of probes */
133 133 int dthb_len; /* number of probes here */
134 134 } dtrace_hashbucket_t;
135 135
136 136 typedef struct dtrace_hash {
137 137 dtrace_hashbucket_t **dth_tab; /* hash table */
138 138 int dth_size; /* size of hash table */
139 139 int dth_mask; /* mask to index into table */
140 140 int dth_nbuckets; /* total number of buckets */
141 141 uintptr_t dth_nextoffs; /* offset of next in probe */
142 142 uintptr_t dth_prevoffs; /* offset of prev in probe */
143 143 uintptr_t dth_stroffs; /* offset of str in probe */
144 144 } dtrace_hash_t;
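/*
 * A sketch of how the offsets above let a single hash implementation serve
 * the module, function and name hashes: each dtrace_hash_t records where,
 * within a dtrace_probe_t, its string and its next/prev links live. These
 * helpers are illustrative only; the kernel uses equivalent macros.
 */
static char *
xmpl_hash_str(dtrace_hash_t *hash, dtrace_probe_t *probe)
{
	return (*(char **)((uintptr_t)probe + hash->dth_stroffs));
}

static dtrace_probe_t **
xmpl_hash_next(dtrace_hash_t *hash, dtrace_probe_t *probe)
{
	return ((dtrace_probe_t **)((uintptr_t)probe + hash->dth_nextoffs));
}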
145 145
146 146 /*
147 147 * DTrace Enabling Control Blocks
148 148 *
149 149 * When a provider wishes to fire a probe, it calls into dtrace_probe(),
150 150 * passing the probe identifier as the first argument. As described above,
151 151 * dtrace_probe() maps the identifier into a pointer to a dtrace_probe_t
152 152 * structure. This structure contains information about the probe, and a
153 153 * pointer to the list of Enabling Control Blocks (ECBs). Each ECB points to
154 154 * DTrace consumer state, and contains an optional predicate, and a list of
155 155 * actions. (Shown schematically below.) The ECB abstraction allows a single
156 156 * probe to be multiplexed across disjoint consumers, or across disjoint
157 157 * enablings of a single probe within one consumer.
158 158 *
159 159 * Enabling Control Block
160 160 * dtrace_ecb_t
161 161 * +------------------------+
162 162 * | dtrace_epid_t ---------+--------------> Enabled Probe ID (EPID)
163 163 * | dtrace_state_t * ------+--------------> State associated with this ECB
164 164 * | dtrace_predicate_t * --+---------+
165 165 * | dtrace_action_t * -----+----+ |
166 166 * | dtrace_ecb_t * ---+ | | | Predicate (if any)
167 167 * +-------------------+----+ | | dtrace_predicate_t
168 168 * | | +---> +--------------------+
169 169 * | | | dtrace_difo_t * ---+----> DIFO
170 170 * | | +--------------------+
171 171 * | |
172 172 * Next ECB | | Action
173 173 * (if any) | | dtrace_action_t
174 174 * : +--> +-------------------+
175 175 * : | dtrace_actkind_t -+------> kind
176 176 * v | dtrace_difo_t * --+------> DIFO (if any)
177 177 * | dtrace_recdesc_t -+------> record descr.
178 178 * | dtrace_action_t * +------+
179 179 * +-------------------+ |
180 180 * | Next action
181 181 * +-------------------------------+ (if any)
182 182 * |
183 183 * | Action
184 184 * | dtrace_action_t
185 185 * +--> +-------------------+
186 186 * | dtrace_actkind_t -+------> kind
187 187 * | dtrace_difo_t * --+------> DIFO (if any)
188 188 * | dtrace_action_t * +------+
189 189 * +-------------------+ |
190 190 * | Next action
191 191 * +-------------------------------+ (if any)
192 192 * |
193 193 * :
194 194 * v
195 195 *
196 196 *
197 197 * dtrace_probe() iterates over the ECB list. If the ECB needs less space
198 198 * than is available in the principal buffer, the ECB is processed: if the
199 199 * predicate is non-NULL, the DIF object is executed. If the result is
200 200 * non-zero, the action list is processed, with each action being executed
201 201 * accordingly. When the action list has been completely executed, processing
 202 202  * advances to the next ECB. (If the predicate's result is zero, the action
 203 203  * list is skipped and processing likewise advances to the next ECB.) The ECB
204 204 * abstraction allows disjoint consumers to multiplex on single probes.
205 205 */
206 206 struct dtrace_ecb {
207 207 dtrace_epid_t dte_epid; /* enabled probe ID */
208 208 uint32_t dte_alignment; /* required alignment */
209 209 size_t dte_needed; /* bytes needed */
210 210 size_t dte_size; /* total size of payload */
211 211 dtrace_predicate_t *dte_predicate; /* predicate, if any */
212 212 dtrace_action_t *dte_action; /* actions, if any */
213 213 dtrace_ecb_t *dte_next; /* next ECB on probe */
214 214 dtrace_state_t *dte_state; /* pointer to state */
215 215 uint32_t dte_cond; /* security condition */
216 216 dtrace_probe_t *dte_probe; /* pointer to probe */
217 217 dtrace_action_t *dte_action_last; /* last action on ECB */
218 218 uint64_t dte_uarg; /* library argument */
219 219 };
220 220
221 221 struct dtrace_predicate {
222 222 dtrace_difo_t *dtp_difo; /* DIF object */
223 223 dtrace_cacheid_t dtp_cacheid; /* cache identifier */
224 224 int dtp_refcnt; /* reference count */
225 225 };
226 226
227 227 struct dtrace_action {
228 228 dtrace_actkind_t dta_kind; /* kind of action */
229 229 uint16_t dta_intuple; /* boolean: in aggregation */
230 230 uint32_t dta_refcnt; /* reference count */
231 231 dtrace_difo_t *dta_difo; /* pointer to DIFO */
232 232 dtrace_recdesc_t dta_rec; /* record description */
233 233 dtrace_action_t *dta_prev; /* previous action */
234 234 dtrace_action_t *dta_next; /* next action */
235 235 };
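/*
 * A schematic sketch (not the actual dtrace_probe() code) of the
 * per-firing processing described above: walk the probe's ECB list,
 * evaluate the optional predicate, and execute each action in turn.
 * xmpl_dif_emulate() and xmpl_do_action() are hypothetical stand-ins for
 * the DIF emulator and the per-action processing.
 */
static void
xmpl_process_firing(dtrace_probe_t *probe)
{
	dtrace_ecb_t *ecb;
	dtrace_action_t *act;

	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
		if (ecb->dte_predicate != NULL &&
		    xmpl_dif_emulate(ecb->dte_predicate->dtp_difo) == 0)
			continue;	/* predicate false: skip the actions */

		for (act = ecb->dte_action; act != NULL; act = act->dta_next)
			xmpl_do_action(ecb, act);
	}
}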
236 236
237 237 typedef struct dtrace_aggregation {
238 238 dtrace_action_t dtag_action; /* action; must be first */
239 239 dtrace_aggid_t dtag_id; /* identifier */
240 240 dtrace_ecb_t *dtag_ecb; /* corresponding ECB */
241 241 dtrace_action_t *dtag_first; /* first action in tuple */
242 242 uint32_t dtag_base; /* base of aggregation */
243 243 uint8_t dtag_hasarg; /* boolean: has argument */
244 244 uint64_t dtag_initial; /* initial value */
245 245 void (*dtag_aggregate)(uint64_t *, uint64_t, uint64_t);
246 246 } dtrace_aggregation_t;
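/*
 * Two functions in the shape of the dtag_aggregate callback above,
 * mirroring the count() and sum() aggregating functions: the callback
 * folds the new value (and an optional argument) into the existing 64-bit
 * accumulation stored in the aggregation buffer.
 */
/*ARGSUSED*/
static void
xmpl_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
xmpl_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval += nval;
}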
247 247
248 248 /*
249 249 * DTrace Buffers
250 250 *
251 251 * Principal buffers, aggregation buffers, and speculative buffers are all
252 252 * managed with the dtrace_buffer structure. By default, this structure
253 253 * includes twin data buffers -- dtb_tomax and dtb_xamot -- that serve as the
254 254 * active and passive buffers, respectively. For speculative buffers,
255 255 * dtb_xamot will be NULL; for "ring" and "fill" buffers, dtb_xamot will point
256 256 * to a scratch buffer. For all buffer types, the dtrace_buffer structure is
257 257 * always allocated on a per-CPU basis; a single dtrace_buffer structure is
258 258 * never shared among CPUs. (That is, there is never true sharing of the
259 259 * dtrace_buffer structure; to prevent false sharing of the structure, it must
260 260 * always be aligned to the coherence granularity -- generally 64 bytes.)
261 261 *
262 262 * One of the critical design decisions of DTrace is that a given ECB always
263 263 * stores the same quantity and type of data. This is done to assure that the
264 264 * only metadata required for an ECB's traced data is the EPID. That is, from
265 265 * the EPID, the consumer can determine the data layout. (The data buffer
266 266 * layout is shown schematically below.) By assuring that one can determine
267 267 * data layout from the EPID, the metadata stream can be separated from the
268 268 * data stream -- simplifying the data stream enormously.
269 269 *
270 270 * base of data buffer ---> +------+--------------------+------+
271 271 * | EPID | data | EPID |
272 272 * +------+--------+------+----+------+
273 273 * | data | EPID | data |
274 274 * +---------------+------+-----------+
275 275 * | data, cont. |
276 276 * +------+--------------------+------+
277 277 * | EPID | data | |
278 278 * +------+--------------------+ |
279 279 * | || |
280 280 * | || |
281 281 * | \/ |
282 282 * : :
283 283 * . .
284 284 * . .
285 285 * . .
286 286 * : :
287 287 * | |
288 288 * limit of data buffer ---> +----------------------------------+
289 289 *
290 290 * When evaluating an ECB, dtrace_probe() determines if the ECB's needs of the
291 291 * principal buffer (both scratch and payload) exceed the available space. If
292 292 * the ECB's needs exceed available space (and if the principal buffer policy
293 293 * is the default "switch" policy), the ECB is dropped, the buffer's drop count
294 294 * is incremented, and processing advances to the next ECB. If the ECB's needs
295 295 * can be met with the available space, the ECB is processed, but the offset in
296 296 * the principal buffer is only advanced if the ECB completes processing
297 297 * without error.
298 298 *
299 299 * When a buffer is to be switched (either because the buffer is the principal
300 300 * buffer with a "switch" policy or because it is an aggregation buffer), a
301 301 * cross call is issued to the CPU associated with the buffer. In the cross
302 302 * call context, interrupts are disabled, and the active and the inactive
303 303 * buffers are atomically switched. This involves switching the data pointers,
304 304 * copying the various state fields (offset, drops, errors, etc.) into their
305 305 * inactive equivalents, and clearing the state fields. Because interrupts are
306 306 * disabled during this procedure, the switch is guaranteed to appear atomic to
307 307 * dtrace_probe().
308 308 *
309 309 * DTrace Ring Buffering
310 310 *
311 311 * To process a ring buffer correctly, one must know the oldest valid record.
312 312 * Processing starts at the oldest record in the buffer and continues until
313 313 * the end of the buffer is reached. Processing then resumes starting with
314 314 * the record stored at offset 0 in the buffer, and continues until the
315 315 * youngest record is processed. If trace records are of a fixed-length,
316 316 * determining the oldest record is trivial:
317 317 *
318 318 * - If the ring buffer has not wrapped, the oldest record is the record
319 319 * stored at offset 0.
320 320 *
321 321 * - If the ring buffer has wrapped, the oldest record is the record stored
322 322 * at the current offset.
323 323 *
324 324 * With variable length records, however, just knowing the current offset
325 325 * doesn't suffice for determining the oldest valid record: assuming that one
326 326 * allows for arbitrary data, one has no way of searching forward from the
327 327 * current offset to find the oldest valid record. (That is, one has no way
328 328 * of separating data from metadata.) It would be possible to simply refuse to
329 329 * process any data in the ring buffer between the current offset and the
330 330 * limit, but this leaves (potentially) an enormous amount of otherwise valid
331 331 * data unprocessed.
332 332 *
333 333 * To effect ring buffering, we track two offsets in the buffer: the current
334 334 * offset and the _wrapped_ offset. If a request is made to reserve some
335 335 * amount of data, and the buffer has wrapped, the wrapped offset is
336 336 * incremented until the wrapped offset minus the current offset is greater
337 337 * than or equal to the reserve request. This is done by repeatedly looking
338 338 * up the ECB corresponding to the EPID at the current wrapped offset, and
339 339 * incrementing the wrapped offset by the size of the data payload
340 340 * corresponding to that ECB. If this offset is greater than or equal to the
341 341 * limit of the data buffer, the wrapped offset is set to 0. Thus, the
342 342 * current offset effectively "chases" the wrapped offset around the buffer.
343 343 * Schematically:
344 344 *
345 345 * base of data buffer ---> +------+--------------------+------+
346 346 * | EPID | data | EPID |
347 347 * +------+--------+------+----+------+
348 348 * | data | EPID | data |
349 349 * +---------------+------+-----------+
350 350 * | data, cont. |
351 351 * +------+---------------------------+
352 352 * | EPID | data |
353 353 * current offset ---> +------+---------------------------+
354 354 * | invalid data |
355 355 * wrapped offset ---> +------+--------------------+------+
356 356 * | EPID | data | EPID |
357 357 * +------+--------+------+----+------+
358 358 * | data | EPID | data |
359 359 * +---------------+------+-----------+
360 360 * : :
361 361 * . .
362 362 * . ... valid data ... .
363 363 * . .
364 364 * : :
365 365 * +------+-------------+------+------+
366 366 * | EPID | data | EPID | data |
367 367 * +------+------------++------+------+
368 368 * | data, cont. | leftover |
369 369 * limit of data buffer ---> +-------------------+--------------+
370 370 *
371 371 * If the amount of requested buffer space exceeds the amount of space
372 372 * available between the current offset and the end of the buffer:
373 373 *
374 374 * (1) all words in the data buffer between the current offset and the limit
375 375 * of the data buffer (marked "leftover", above) are set to
376 376 * DTRACE_EPIDNONE
377 377 *
378 378 * (2) the wrapped offset is set to zero
379 379 *
380 380 * (3) the iteration process described above occurs until the wrapped offset
381 381 * is greater than the amount of desired space.
382 382 *
383 383 * The wrapped offset is implemented by (re-)using the inactive offset.
384 384 * In a "switch" buffer policy, the inactive offset stores the offset in
385 385 * the inactive buffer; in a "ring" buffer policy, it stores the wrapped
386 386 * offset.
387 387 *
388 388 * DTrace Scratch Buffering
389 389 *
390 390 * Some ECBs may wish to allocate dynamically-sized temporary scratch memory.
391 391 * To accommodate such requests easily, scratch memory may be allocated in
392 392 * the buffer beyond the current offset plus the needed memory of the current
393 393 * ECB. If there isn't sufficient room in the buffer for the requested amount
394 394 * of scratch space, the allocation fails and an error is generated. Scratch
395 395 * memory is tracked in the dtrace_mstate_t and is automatically freed when
396 396 * the ECB ceases processing. Note that ring buffers cannot allocate their
397 397 * scratch from the principal buffer -- lest they needlessly overwrite older,
398 398 * valid data. Ring buffers therefore have their own dedicated scratch buffer
399 399 * from which scratch is allocated.
400 400 */
401 401 #define DTRACEBUF_RING 0x0001 /* bufpolicy set to "ring" */
402 402 #define DTRACEBUF_FILL 0x0002 /* bufpolicy set to "fill" */
403 403 #define DTRACEBUF_NOSWITCH 0x0004 /* do not switch buffer */
404 404 #define DTRACEBUF_WRAPPED 0x0008 /* ring buffer has wrapped */
405 405 #define DTRACEBUF_DROPPED 0x0010 /* drops occurred */
406 406 #define DTRACEBUF_ERROR 0x0020 /* errors occurred */
407 407 #define DTRACEBUF_FULL 0x0040 /* "fill" buffer is full */
408 408 #define DTRACEBUF_CONSUMED 0x0080 /* buffer has been consumed */
409 409 #define DTRACEBUF_INACTIVE 0x0100 /* buffer is not yet active */
410 410
411 411 typedef struct dtrace_buffer {
412 412 uint64_t dtb_offset; /* current offset in buffer */
413 413 uint64_t dtb_size; /* size of buffer */
414 414 uint32_t dtb_flags; /* flags */
415 415 uint32_t dtb_drops; /* number of drops */
416 416 caddr_t dtb_tomax; /* active buffer */
417 417 caddr_t dtb_xamot; /* inactive buffer */
418 418 uint32_t dtb_xamot_flags; /* inactive flags */
419 419 uint32_t dtb_xamot_drops; /* drops in inactive buffer */
420 420 uint64_t dtb_xamot_offset; /* offset in inactive buffer */
421 421 uint32_t dtb_errors; /* number of errors */
422 422 uint32_t dtb_xamot_errors; /* errors in inactive buffer */
423 423 #ifndef _LP64
424 424 uint64_t dtb_pad1; /* pad out to 64 bytes */
425 425 #endif
426 426 uint64_t dtb_switched; /* time of last switch */
427 427 uint64_t dtb_interval; /* observed switch interval */
428 428 uint64_t dtb_pad2[6]; /* pad to avoid false sharing */
429 429 } dtrace_buffer_t;
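/*
 * A sketch of how a consumer can walk a snapshot of the principal buffer
 * using only the layout described above: each record begins with a
 * dtrace_epid_t, and the EPID alone determines the size of the record.
 * xmpl_epid_size() and xmpl_consume_record() are hypothetical; the size
 * lookup comes from the EPID's enabling metadata. DTRACE_EPIDNONE marks
 * padding (e.g. the "leftover" region of a wrapped ring buffer) and is
 * simply skipped.
 */
static void
xmpl_walk_buffer(caddr_t base, uint64_t limit)
{
	uint64_t offs = 0;

	while (offs < limit) {
		dtrace_epid_t epid = *(dtrace_epid_t *)(base + offs);

		if (epid == DTRACE_EPIDNONE) {
			offs += sizeof (dtrace_epid_t);
			continue;
		}

		xmpl_consume_record(epid, base + offs);
		offs += xmpl_epid_size(epid);	/* EPID header plus payload */
	}
}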
430 430
431 431 /*
432 432 * DTrace Aggregation Buffers
433 433 *
434 434 * Aggregation buffers use much of the same mechanism as described above
435 435 * ("DTrace Buffers"). However, because an aggregation is fundamentally a
436 436 * hash, there exists dynamic metadata associated with an aggregation buffer
437 437 * that is not associated with other kinds of buffers. This aggregation
438 438 * metadata is _only_ relevant for the in-kernel implementation of
439 439 * aggregations; it is not actually relevant to user-level consumers. To do
440 440 * this, we allocate dynamic aggregation data (hash keys and hash buckets)
441 441 * starting below the _limit_ of the buffer, and we allocate data from the
442 442 * _base_ of the buffer. When the aggregation buffer is copied out, _only_ the
443 443 * data is copied out; the metadata is simply discarded. Schematically,
444 444 * aggregation buffers look like:
445 445 *
446 446 * base of data buffer ---> +-------+------+-----------+-------+
447 447 * | aggid | key | value | aggid |
448 448 * +-------+------+-----------+-------+
449 449 * | key |
450 450 * +-------+-------+-----+------------+
451 451 * | value | aggid | key | value |
452 452 * +-------+------++-----+------+-----+
453 453 * | aggid | key | value | |
454 454 * +-------+------+-------------+ |
455 455 * | || |
456 456 * | || |
457 457 * | \/ |
458 458 * : :
459 459 * . .
460 460 * . .
461 461 * . .
462 462 * : :
463 463 * | /\ |
464 464 * | || +------------+
465 465 * | || | |
466 466 * +---------------------+ |
467 467 * | hash keys |
468 468 * | (dtrace_aggkey structures) |
469 469 * | |
470 470 * +----------------------------------+
471 471 * | hash buckets |
472 472 * | (dtrace_aggbuffer structure) |
473 473 * | |
474 474 * limit of data buffer ---> +----------------------------------+
475 475 *
476 476 *
477 477 * As implied above, just as we assure that ECBs always store a constant
478 478 * amount of data, we assure that a given aggregation -- identified by its
479 479 * aggregation ID -- always stores data of a constant quantity and type.
480 480 * As with EPIDs, this allows the aggregation ID to serve as the metadata for a
481 481 * given record.
482 482 *
483 483 * Note that the size of the dtrace_aggkey structure must be sizeof (uintptr_t)
 484 484  * aligned. (If the structure changes such that this becomes false, an
485 485 * assertion will fail in dtrace_aggregate().)
486 486 */
487 487 typedef struct dtrace_aggkey {
488 488 uint32_t dtak_hashval; /* hash value */
489 489 uint32_t dtak_action:4; /* action -- 4 bits */
490 490 uint32_t dtak_size:28; /* size -- 28 bits */
491 491 caddr_t dtak_data; /* data pointer */
492 492 struct dtrace_aggkey *dtak_next; /* next in hash chain */
493 493 } dtrace_aggkey_t;
494 494
495 495 typedef struct dtrace_aggbuffer {
496 496 uintptr_t dtagb_hashsize; /* number of buckets */
497 497 uintptr_t dtagb_free; /* free list of keys */
498 498 dtrace_aggkey_t **dtagb_hash; /* hash table */
499 499 } dtrace_aggbuffer_t;
500 500
501 501 /*
502 502 * DTrace Speculations
503 503 *
504 504 * Speculations have a per-CPU buffer and a global state. Once a speculation
 505 505  * buffer has been committed or discarded, it cannot be reused until all CPUs
506 506 * have taken the same action (commit or discard) on their respective
507 507 * speculative buffer. However, because DTrace probes may execute in arbitrary
508 508 * context, other CPUs cannot simply be cross-called at probe firing time to
509 509 * perform the necessary commit or discard. The speculation states thus
510 510 * optimize for the case that a speculative buffer is only active on one CPU at
511 511 * the time of a commit() or discard() -- for if this is the case, other CPUs
512 512 * need not take action, and the speculation is immediately available for
513 513 * reuse. If the speculation is active on multiple CPUs, it must be
514 514 * asynchronously cleaned -- potentially leading to a higher rate of dirty
515 515 * speculative drops. The speculation states are as follows:
516 516 *
517 517 * DTRACESPEC_INACTIVE <= Initial state; inactive speculation
518 518 * DTRACESPEC_ACTIVE <= Allocated, but not yet speculatively traced to
519 519 * DTRACESPEC_ACTIVEONE <= Speculatively traced to on one CPU
520 520 * DTRACESPEC_ACTIVEMANY <= Speculatively traced to on more than one CPU
 521 521  * DTRACESPEC_COMMITTING <= Currently being committed on one CPU
 522 522  * DTRACESPEC_COMMITTINGMANY <= Currently being committed on many CPUs
523 523 * DTRACESPEC_DISCARDING <= Currently being discarded on many CPUs
524 524 *
525 525 * The state transition diagram is as follows:
526 526 *
527 527 * +----------------------------------------------------------+
528 528 * | |
529 529 * | +------------+ |
530 530 * | +-------------------| COMMITTING |<-----------------+ |
531 531 * | | +------------+ | |
532 532 * | | copied spec. ^ commit() on | | discard() on
533 533 * | | into principal | active CPU | | active CPU
534 534 * | | | commit() | |
535 535 * V V | | |
536 536 * +----------+ +--------+ +-----------+
537 537 * | INACTIVE |---------------->| ACTIVE |--------------->| ACTIVEONE |
538 538 * +----------+ speculation() +--------+ speculate() +-----------+
539 539 * ^ ^ | | |
540 540 * | | | discard() | |
541 541 * | | asynchronously | discard() on | | speculate()
542 542 * | | cleaned V inactive CPU | | on inactive
543 543 * | | +------------+ | | CPU
544 544 * | +-------------------| DISCARDING |<-----------------+ |
545 545 * | +------------+ |
546 546 * | asynchronously ^ |
547 547 * | copied spec. | discard() |
548 548 * | into principal +------------------------+ |
549 549 * | | V
550 550 * +----------------+ commit() +------------+
551 551 * | COMMITTINGMANY |<----------------------------------| ACTIVEMANY |
552 552 * +----------------+ +------------+
553 553 */
554 554 typedef enum dtrace_speculation_state {
555 555 DTRACESPEC_INACTIVE = 0,
556 556 DTRACESPEC_ACTIVE,
557 557 DTRACESPEC_ACTIVEONE,
558 558 DTRACESPEC_ACTIVEMANY,
559 559 DTRACESPEC_COMMITTING,
560 560 DTRACESPEC_COMMITTINGMANY,
561 561 DTRACESPEC_DISCARDING
562 562 } dtrace_speculation_state_t;
563 563
564 564 typedef struct dtrace_speculation {
565 565 dtrace_speculation_state_t dtsp_state; /* current speculation state */
566 566 int dtsp_cleaning; /* non-zero if being cleaned */
567 567 dtrace_buffer_t *dtsp_buffer; /* speculative buffer */
568 568 } dtrace_speculation_t;
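/*
 * A sketch of the commit() transitions from the state diagram above for a
 * speculation that has been traced to. The in-kernel implementation makes
 * these transitions with atomic compare-and-swap loops; this helper only
 * illustrates which state is targeted.
 */
static dtrace_speculation_state_t
xmpl_spec_commit_state(dtrace_speculation_state_t cur, int on_active_cpu)
{
	switch (cur) {
	case DTRACESPEC_ACTIVEONE:
		/* one CPU traced to it; commit locally if we are that CPU */
		return (on_active_cpu ?
		    DTRACESPEC_COMMITTING : DTRACESPEC_COMMITTINGMANY);
	case DTRACESPEC_ACTIVEMANY:
		/* other CPUs must asynchronously copy out their buffers */
		return (DTRACESPEC_COMMITTINGMANY);
	default:
		return (cur);
	}
}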
569 569
570 570 /*
571 571 * DTrace Dynamic Variables
572 572 *
573 573 * The dynamic variable problem is obviously decomposed into two subproblems:
574 574 * allocating new dynamic storage, and freeing old dynamic storage. The
575 575 * presence of the second problem makes the first much more complicated -- or
576 576 * rather, the absence of the second renders the first trivial. This is the
577 577 * case with aggregations, for which there is effectively no deallocation of
578 578 * dynamic storage. (Or more accurately, all dynamic storage is deallocated
579 579 * when a snapshot is taken of the aggregation.) As DTrace dynamic variables
580 580 * allow for both dynamic allocation and dynamic deallocation, the
581 581 * implementation of dynamic variables is quite a bit more complicated than
582 582 * that of their aggregation kin.
583 583 *
584 584 * We observe that allocating new dynamic storage is tricky only because the
585 585 * size can vary -- the allocation problem is much easier if allocation sizes
586 586 * are uniform. We further observe that in D, the size of dynamic variables is
587 587 * actually _not_ dynamic -- dynamic variable sizes may be determined by static
588 588 * analysis of DIF text. (This is true even of putatively dynamically-sized
589 589 * objects like strings and stacks, the sizes of which are dictated by the
590 590 * "stringsize" and "stackframes" variables, respectively.) We exploit this by
591 591 * performing this analysis on all DIF before enabling any probes. For each
592 592 * dynamic load or store, we calculate the dynamically-allocated size plus the
593 593 * size of the dtrace_dynvar structure plus the storage required to key the
594 594 * data. For all DIF, we take the largest value and dub it the _chunksize_.
595 595 * We then divide dynamic memory into two parts: a hash table that is wide
596 596 * enough to have every chunk in its own bucket, and a larger region of equal
597 597 * chunksize units. Whenever we wish to dynamically allocate a variable, we
598 598 * always allocate a single chunk of memory. Depending on the uniformity of
599 599 * allocation, this will waste some amount of memory -- but it eliminates the
600 600 * non-determinism inherent in traditional heap fragmentation.
601 601 *
602 602 * Dynamic objects are allocated by storing a non-zero value to them; they are
603 603 * deallocated by storing a zero value to them. Dynamic variables are
604 604 * complicated enormously by being shared between CPUs. In particular,
605 605 * consider the following scenario:
606 606 *
607 607 * CPU A CPU B
608 608 * +---------------------------------+ +---------------------------------+
609 609 * | | | |
610 610 * | allocates dynamic object a[123] | | |
611 611 * | by storing the value 345 to it | | |
612 612 * | ---------> |
613 613 * | | | wishing to load from object |
614 614 * | | | a[123], performs lookup in |
615 615 * | | | dynamic variable space |
616 616 * | <--------- |
617 617 * | deallocates object a[123] by | | |
618 618 * | storing 0 to it | | |
619 619 * | | | |
620 620 * | allocates dynamic object b[567] | | performs load from a[123] |
621 621 * | by storing the value 789 to it | | |
622 622 * : : : :
623 623 * . . . .
624 624 *
625 625 * This is obviously a race in the D program, but there are nonetheless only
626 626 * two valid values for CPU B's load from a[123]: 345 or 0. Most importantly,
627 627 * CPU B may _not_ see the value 789 for a[123].
628 628 *
629 629 * There are essentially two ways to deal with this:
630 630 *
631 631 * (1) Explicitly spin-lock variables. That is, if CPU B wishes to load
632 632 * from a[123], it needs to lock a[123] and hold the lock for the
633 633 * duration that it wishes to manipulate it.
634 634 *
635 635 * (2) Avoid reusing freed chunks until it is known that no CPU is referring
636 636 * to them.
637 637 *
638 638 * The implementation of (1) is rife with complexity, because it requires the
639 639 * user of a dynamic variable to explicitly decree when they are done using it.
640 640 * Were all variables by value, this perhaps wouldn't be debilitating -- but
641 641 * dynamic variables of non-scalar types are tracked by reference. That is, if
642 642 * a dynamic variable is, say, a string, and that variable is to be traced to,
643 643 * say, the principal buffer, the DIF emulation code returns to the main
644 644 * dtrace_probe() loop a pointer to the underlying storage, not the contents of
645 645 * the storage. Further, code calling on DIF emulation would have to be aware
646 646 * that the DIF emulation has returned a reference to a dynamic variable that
647 647 * has been potentially locked. The variable would have to be unlocked after
648 648 * the main dtrace_probe() loop is finished with the variable, and the main
649 649 * dtrace_probe() loop would have to be careful to not call any further DIF
650 650 * emulation while the variable is locked to avoid deadlock. More generally,
651 651 * if one were to implement (1), DIF emulation code dealing with dynamic
652 652 * variables could only deal with one dynamic variable at a time (lest deadlock
653 653 * result). To sum, (1) exports too much subtlety to the users of dynamic
654 654 * variables -- increasing maintenance burden and imposing serious constraints
655 655 * on future DTrace development.
656 656 *
657 657 * The implementation of (2) is also complex, but the complexity is more
658 658 * manageable. We need to be sure that when a variable is deallocated, it is
659 659 * not placed on a traditional free list, but rather on a _dirty_ list. Once a
660 660 * variable is on a dirty list, it cannot be found by CPUs performing a
661 661 * subsequent lookup of the variable -- but it may still be in use by other
662 662 * CPUs. To assure that all CPUs that may be seeing the old variable have
663 663 * cleared out of probe context, a dtrace_sync() can be issued. Once the
664 664 * dtrace_sync() has completed, it can be known that all CPUs are done
665 665 * manipulating the dynamic variable -- the dirty list can be atomically
666 666 * appended to the free list. Unfortunately, there's a slight hiccup in this
667 667 * mechanism: dtrace_sync() may not be issued from probe context. The
 668 668  * dtrace_sync() must therefore be issued asynchronously from non-probe
669 669 * context. For this we rely on the DTrace cleaner, a cyclic that runs at the
670 670 * "cleanrate" frequency. To ease this implementation, we define several chunk
671 671 * lists:
672 672 *
673 673 * - Dirty. Deallocated chunks, not yet cleaned. Not available.
674 674 *
675 675 * - Rinsing. Formerly dirty chunks that are currently being asynchronously
676 676 * cleaned. Not available, but will be shortly. Dynamic variable
677 677 * allocation may not spin or block for availability, however.
678 678 *
679 679 * - Clean. Clean chunks, ready for allocation -- but not on the free list.
680 680 *
681 681 * - Free. Available for allocation.
682 682 *
683 683 * Moreover, to avoid absurd contention, _each_ of these lists is implemented
684 684 * on a per-CPU basis. This is only for performance, not correctness; chunks
685 685 * may be allocated from another CPU's free list. The algorithm for allocation
686 686 * then is this:
687 687 *
688 688 * (1) Attempt to atomically allocate from current CPU's free list. If list
689 689 * is non-empty and allocation is successful, allocation is complete.
690 690 *
691 691 * (2) If the clean list is non-empty, atomically move it to the free list,
692 692 * and reattempt (1).
693 693 *
694 694 * (3) If the dynamic variable space is in the CLEAN state, look for free
695 695 * and clean lists on other CPUs by setting the current CPU to the next
696 696 * CPU, and reattempting (1). If the next CPU is the current CPU (that
697 697 * is, if all CPUs have been checked), atomically switch the state of
698 698 * the dynamic variable space based on the following:
699 699 *
700 700 * - If no free chunks were found and no dirty chunks were found,
701 701 * atomically set the state to EMPTY.
702 702 *
703 703 * - If dirty chunks were found, atomically set the state to DIRTY.
704 704 *
705 705 * - If rinsing chunks were found, atomically set the state to RINSING.
706 706 *
707 707 * (4) Based on state of dynamic variable space state, increment appropriate
708 708 * counter to indicate dynamic drops (if in EMPTY state) vs. dynamic
709 709 * dirty drops (if in DIRTY state) vs. dynamic rinsing drops (if in
710 710 * RINSING state). Fail the allocation.
711 711 *
712 712 * The cleaning cyclic operates with the following algorithm: for all CPUs
713 713 * with a non-empty dirty list, atomically move the dirty list to the rinsing
714 714 * list. Perform a dtrace_sync(). For all CPUs with a non-empty rinsing list,
715 715 * atomically move the rinsing list to the clean list. Perform another
716 716 * dtrace_sync(). By this point, all CPUs have seen the new clean list; the
717 717 * state of the dynamic variable space can be restored to CLEAN.
718 718 *
719 719 * There exist two final races that merit explanation. The first is a simple
720 720 * allocation race:
721 721 *
722 722 * CPU A CPU B
723 723 * +---------------------------------+ +---------------------------------+
724 724 * | | | |
725 725 * | allocates dynamic object a[123] | | allocates dynamic object a[123] |
726 726 * | by storing the value 345 to it | | by storing the value 567 to it |
727 727 * | | | |
728 728 * : : : :
729 729 * . . . .
730 730 *
731 731 * Again, this is a race in the D program. It can be resolved by having a[123]
732 732 * hold the value 345 or a[123] hold the value 567 -- but it must be true that
733 733 * a[123] have only _one_ of these values. (That is, the racing CPUs may not
734 734 * put the same element twice on the same hash chain.) This is resolved
735 735 * simply: before the allocation is undertaken, the start of the new chunk's
736 736 * hash chain is noted. Later, after the allocation is complete, the hash
737 737 * chain is atomically switched to point to the new element. If this fails
738 738 * (because of either concurrent allocations or an allocation concurrent with a
739 739 * deletion), the newly allocated chunk is deallocated to the dirty list, and
740 740 * the whole process of looking up (and potentially allocating) the dynamic
741 741 * variable is reattempted.
742 742 *
743 743 * The final race is a simple deallocation race:
744 744 *
745 745 * CPU A CPU B
746 746 * +---------------------------------+ +---------------------------------+
747 747 * | | | |
748 748 * | deallocates dynamic object | | deallocates dynamic object |
749 749 * | a[123] by storing the value 0 | | a[123] by storing the value 0 |
750 750 * | to it | | to it |
751 751 * | | | |
752 752 * : : : :
753 753 * . . . .
754 754 *
755 755 * Once again, this is a race in the D program, but it is one that we must
756 756 * handle without corrupting the underlying data structures. Because
757 757 * deallocations require the deletion of a chunk from the middle of a hash
758 758 * chain, we cannot use a single-word atomic operation to remove it. For this,
759 759 * we add a spin lock to the hash buckets that is _only_ used for deallocations
760 760 * (allocation races are handled as above). Further, this spin lock is _only_
761 761 * held for the duration of the delete; before control is returned to the DIF
762 762 * emulation code, the hash bucket is unlocked.
763 763 */
764 764 typedef struct dtrace_key {
765 765 uint64_t dttk_value; /* data value or data pointer */
766 766 uint64_t dttk_size; /* 0 if by-val, >0 if by-ref */
767 767 } dtrace_key_t;
768 768
769 769 typedef struct dtrace_tuple {
770 770 uint32_t dtt_nkeys; /* number of keys in tuple */
771 771 uint32_t dtt_pad; /* padding */
772 772 dtrace_key_t dtt_key[1]; /* array of tuple keys */
773 773 } dtrace_tuple_t;
774 774
775 775 typedef struct dtrace_dynvar {
776 776 uint64_t dtdv_hashval; /* hash value -- 0 if free */
777 777 struct dtrace_dynvar *dtdv_next; /* next on list or hash chain */
778 778 void *dtdv_data; /* pointer to data */
779 779 dtrace_tuple_t dtdv_tuple; /* tuple key */
780 780 } dtrace_dynvar_t;
781 781
782 782 typedef enum dtrace_dynvar_op {
783 783 DTRACE_DYNVAR_ALLOC,
784 784 DTRACE_DYNVAR_NOALLOC,
785 785 DTRACE_DYNVAR_DEALLOC
786 786 } dtrace_dynvar_op_t;
787 787
788 788 typedef struct dtrace_dynhash {
789 789 dtrace_dynvar_t *dtdh_chain; /* hash chain for this bucket */
790 790 uintptr_t dtdh_lock; /* deallocation lock */
791 791 #ifdef _LP64
792 792 uintptr_t dtdh_pad[6]; /* pad to avoid false sharing */
793 793 #else
794 794 uintptr_t dtdh_pad[14]; /* pad to avoid false sharing */
795 795 #endif
796 796 } dtrace_dynhash_t;
797 797
798 798 typedef struct dtrace_dstate_percpu {
799 799 dtrace_dynvar_t *dtdsc_free; /* free list for this CPU */
800 800 dtrace_dynvar_t *dtdsc_dirty; /* dirty list for this CPU */
801 801 dtrace_dynvar_t *dtdsc_rinsing; /* rinsing list for this CPU */
802 802 dtrace_dynvar_t *dtdsc_clean; /* clean list for this CPU */
803 803 uint64_t dtdsc_drops; /* number of capacity drops */
804 804 uint64_t dtdsc_dirty_drops; /* number of dirty drops */
805 805 uint64_t dtdsc_rinsing_drops; /* number of rinsing drops */
806 806 #ifdef _LP64
807 807 uint64_t dtdsc_pad; /* pad to avoid false sharing */
808 808 #else
809 809 uint64_t dtdsc_pad[2]; /* pad to avoid false sharing */
810 810 #endif
811 811 } dtrace_dstate_percpu_t;
812 812
813 813 typedef enum dtrace_dstate_state {
814 814 DTRACE_DSTATE_CLEAN = 0,
815 815 DTRACE_DSTATE_EMPTY,
816 816 DTRACE_DSTATE_DIRTY,
817 817 DTRACE_DSTATE_RINSING
818 818 } dtrace_dstate_state_t;
819 819
820 820 typedef struct dtrace_dstate {
821 821 void *dtds_base; /* base of dynamic var. space */
822 822 size_t dtds_size; /* size of dynamic var. space */
823 823 size_t dtds_hashsize; /* number of buckets in hash */
824 824 size_t dtds_chunksize; /* size of each chunk */
825 825 dtrace_dynhash_t *dtds_hash; /* pointer to hash table */
826 826 dtrace_dstate_state_t dtds_state; /* current dynamic var. state */
827 827 dtrace_dstate_percpu_t *dtds_percpu; /* per-CPU dyn. var. state */
828 828 } dtrace_dstate_t;
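/*
 * A simplified sketch of steps (1) and (2) of the allocation algorithm
 * above: pop a chunk from this CPU's free list, or atomically claim the
 * clean list and retry. xmpl_casptr() is a stand-in for an atomic pointer
 * compare-and-swap (the kernel uses dtrace_casptr()), and the kernel is
 * considerably more careful about racing list installs than this sketch.
 */
static dtrace_dynvar_t *
xmpl_dynvar_alloc(dtrace_dstate_percpu_t *dcpu)
{
	dtrace_dynvar_t *chunk, *clean;

	for (;;) {
		if ((chunk = dcpu->dtdsc_free) != NULL) {
			/* step (1): atomically unlink the free list head */
			if (xmpl_casptr(&dcpu->dtdsc_free, chunk,
			    chunk->dtdv_next) == chunk)
				return (chunk);
			continue;	/* lost the race; retry */
		}

		if ((clean = dcpu->dtdsc_clean) == NULL)
			return (NULL);	/* caller proceeds to step (3) */

		/* step (2): claim the clean list and make it the free list */
		if (xmpl_casptr(&dcpu->dtdsc_clean, clean, NULL) == clean)
			dcpu->dtdsc_free = clean;
	}
}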
829 829
830 830 /*
831 831 * DTrace Variable State
832 832 *
833 833 * The DTrace variable state tracks user-defined variables in its dtrace_vstate
834 834 * structure. Each DTrace consumer has exactly one dtrace_vstate structure,
835 835 * but some dtrace_vstate structures may exist without a corresponding DTrace
836 836 * consumer (see "DTrace Helpers", below). As described in <sys/dtrace.h>,
837 837 * user-defined variables can have one of three scopes:
838 838 *
839 839 * DIFV_SCOPE_GLOBAL => global scope
840 840 * DIFV_SCOPE_THREAD => thread-local scope (i.e. "self->" variables)
841 841 * DIFV_SCOPE_LOCAL => clause-local scope (i.e. "this->" variables)
842 842 *
843 843 * The variable state tracks variables by both their scope and their allocation
844 844 * type:
845 845 *
846 846 * - The dtvs_globals and dtvs_locals members each point to an array of
847 847 * dtrace_statvar structures. These structures contain both the variable
848 848 * metadata (dtrace_difv structures) and the underlying storage for all
849 849 * statically allocated variables, including statically allocated
850 850 * DIFV_SCOPE_GLOBAL variables and all DIFV_SCOPE_LOCAL variables.
851 851 *
852 852 * - The dtvs_tlocals member points to an array of dtrace_difv structures for
853 853 * DIFV_SCOPE_THREAD variables. As such, this array tracks _only_ the
854 854 * variable metadata for DIFV_SCOPE_THREAD variables; the underlying storage
855 855 * is allocated out of the dynamic variable space.
856 856 *
857 857 * - The dtvs_dynvars member is the dynamic variable state associated with the
858 858 * variable state. The dynamic variable state (described in "DTrace Dynamic
859 859 * Variables", above) tracks all DIFV_SCOPE_THREAD variables and all
860 860 * dynamically-allocated DIFV_SCOPE_GLOBAL variables.
861 861 */
862 862 typedef struct dtrace_statvar {
863 863 uint64_t dtsv_data; /* data or pointer to it */
864 864 size_t dtsv_size; /* size of pointed-to data */
865 865 int dtsv_refcnt; /* reference count */
866 866 dtrace_difv_t dtsv_var; /* variable metadata */
867 867 } dtrace_statvar_t;
868 868
869 869 typedef struct dtrace_vstate {
870 870 dtrace_state_t *dtvs_state; /* back pointer to state */
871 871 dtrace_statvar_t **dtvs_globals; /* statically-allocated glbls */
872 872 int dtvs_nglobals; /* number of globals */
873 873 dtrace_difv_t *dtvs_tlocals; /* thread-local metadata */
874 874 int dtvs_ntlocals; /* number of thread-locals */
875 875 dtrace_statvar_t **dtvs_locals; /* clause-local data */
876 876 int dtvs_nlocals; /* number of clause-locals */
877 877 dtrace_dstate_t dtvs_dynvars; /* dynamic variable state */
878 878 } dtrace_vstate_t;
879 879
880 880 /*
881 881 * DTrace Machine State
882 882 *
883 883 * In the process of processing a fired probe, DTrace needs to track and/or
884 884 * cache some per-CPU state associated with that particular firing. This is
885 885 * state that is always discarded after the probe firing has completed, and
886 886 * much of it is not specific to any DTrace consumer, remaining valid across
887 887 * all ECBs. This state is tracked in the dtrace_mstate structure.
888 888 */
889 889 #define DTRACE_MSTATE_ARGS 0x00000001
890 890 #define DTRACE_MSTATE_PROBE 0x00000002
891 891 #define DTRACE_MSTATE_EPID 0x00000004
892 892 #define DTRACE_MSTATE_TIMESTAMP 0x00000008
893 893 #define DTRACE_MSTATE_STACKDEPTH 0x00000010
894 894 #define DTRACE_MSTATE_CALLER 0x00000020
895 895 #define DTRACE_MSTATE_IPL 0x00000040
896 896 #define DTRACE_MSTATE_FLTOFFS 0x00000080
897 897 #define DTRACE_MSTATE_WALLTIMESTAMP 0x00000100
898 898 #define DTRACE_MSTATE_USTACKDEPTH 0x00000200
899 899 #define DTRACE_MSTATE_UCALLER 0x00000400
900 900
901 901 typedef struct dtrace_mstate {
902 902 uintptr_t dtms_scratch_base; /* base of scratch space */
903 903 uintptr_t dtms_scratch_ptr; /* current scratch pointer */
904 904 size_t dtms_scratch_size; /* scratch size */
905 905 uint32_t dtms_present; /* variables that are present */
906 906 uint64_t dtms_arg[5]; /* cached arguments */
907 907 dtrace_epid_t dtms_epid; /* current EPID */
908 908 uint64_t dtms_timestamp; /* cached timestamp */
909 909 hrtime_t dtms_walltimestamp; /* cached wall timestamp */
910 910 int dtms_stackdepth; /* cached stackdepth */
911 911 int dtms_ustackdepth; /* cached ustackdepth */
912 912 struct dtrace_probe *dtms_probe; /* current probe */
913 913 uintptr_t dtms_caller; /* cached caller */
914 914 uint64_t dtms_ucaller; /* cached user-level caller */
915 915 int dtms_ipl; /* cached interrupt pri lev */
916 916 int dtms_fltoffs; /* faulting DIFO offset */
917 917 uintptr_t dtms_strtok; /* saved strtok() pointer */
918 918 uint32_t dtms_access; /* memory access rights */
919 919 dtrace_difo_t *dtms_difo; /* current dif object */
920 + file_t *dtms_getf; /* cached rval of getf() */
920 921 } dtrace_mstate_t;
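/*
 * A sketch of how the dtms_present bits gate lazy computation of machine
 * state: a value such as the timestamp is computed at most once per probe
 * firing and then served from the cache for every ECB and action that
 * references it during that firing.
 */
static uint64_t
xmpl_mstate_timestamp(dtrace_mstate_t *mstate)
{
	if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
		mstate->dtms_timestamp = dtrace_gethrtime();
		mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
	}

	return (mstate->dtms_timestamp);
}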
921 922
922 923 #define DTRACE_COND_OWNER 0x1
923 924 #define DTRACE_COND_USERMODE 0x2
924 925 #define DTRACE_COND_ZONEOWNER 0x4
925 926
926 927 #define DTRACE_PROBEKEY_MAXDEPTH 8 /* max glob recursion depth */
927 928
928 929 /*
929 930 * Access flag used by dtrace_mstate.dtms_access.
930 931 */
931 932 #define DTRACE_ACCESS_KERNEL 0x1 /* the priv to read kmem */
932 933 #define DTRACE_ACCESS_PROC 0x2 /* the priv for proc state */
933 934 #define DTRACE_ACCESS_ARGS 0x4 /* the priv to examine args */
934 935
935 936 /*
936 937 * DTrace Activity
937 938 *
938 939 * Each DTrace consumer is in one of several states, which (for purposes of
939 940 * avoiding yet-another overloading of the noun "state") we call the current
940 941 * _activity_. The activity transitions on dtrace_go() (from DTRACIOCGO), on
941 942 * dtrace_stop() (from DTRACIOCSTOP) and on the exit() action. Activities may
942 943 * only transition in one direction; the activity transition diagram is a
943 944 * directed acyclic graph. The activity transition diagram is as follows:
944 945 *
945 946 *
946 947 * +----------+ +--------+ +--------+
947 948 * | INACTIVE |------------------>| WARMUP |------------------>| ACTIVE |
948 949 * +----------+ dtrace_go(), +--------+ dtrace_go(), +--------+
949 950 * before BEGIN | after BEGIN | | |
950 951 * | | | |
951 952 * exit() action | | | |
952 953 * from BEGIN ECB | | | |
953 954 * | | | |
954 955 * v | | |
955 956 * +----------+ exit() action | | |
956 957 * +-----------------------------| DRAINING |<-------------------+ | |
957 958 * | +----------+ | |
958 959 * | | | |
959 960 * | dtrace_stop(), | | |
960 961 * | before END | | |
961 962 * | | | |
962 963 * | v | |
963 964 * | +---------+ +----------+ | |
964 965 * | | STOPPED |<----------------| COOLDOWN |<----------------------+ |
965 966 * | +---------+ dtrace_stop(), +----------+ dtrace_stop(), |
966 967 * | after END before END |
967 968 * | |
968 969 * | +--------+ |
969 970 * +----------------------------->| KILLED |<--------------------------+
970 971 * deadman timeout or +--------+ deadman timeout or
971 972 * killed consumer killed consumer
972 973 *
973 974 * Note that once a DTrace consumer has stopped tracing, there is no way to
974 975 * restart it; if a DTrace consumer wishes to restart tracing, it must reopen
975 976 * the DTrace pseudodevice.
976 977 */
977 978 typedef enum dtrace_activity {
978 979 DTRACE_ACTIVITY_INACTIVE = 0, /* not yet running */
979 980 DTRACE_ACTIVITY_WARMUP, /* while starting */
980 981 DTRACE_ACTIVITY_ACTIVE, /* running */
981 982 DTRACE_ACTIVITY_DRAINING, /* before stopping */
982 983 DTRACE_ACTIVITY_COOLDOWN, /* while stopping */
983 984 DTRACE_ACTIVITY_STOPPED, /* after stopping */
984 985 DTRACE_ACTIVITY_KILLED /* killed */
985 986 } dtrace_activity_t;
986 987
987 988 /*
988 989 * DTrace Helper Implementation
989 990 *
990 991 * A description of the helper architecture may be found in <sys/dtrace.h>.
991 992 * Each process contains a pointer to its helpers in its p_dtrace_helpers
992 993 * member. This is a pointer to a dtrace_helpers structure, which contains an
993 994 * array of pointers to dtrace_helper structures, helper variable state (shared
994 995 * among a process's helpers) and a generation count. (The generation count is
995 996 * used to provide an identifier when a helper is added so that it may be
996 997 * subsequently removed.) The dtrace_helper structure is self-explanatory,
997 998 * containing pointers to the objects needed to execute the helper. Note that
998 999 * helpers are _duplicated_ across fork(2), and destroyed on exec(2). No more
999 1000 * than dtrace_helpers_max are allowed per-process.
1000 1001 */
1001 1002 #define DTRACE_HELPER_ACTION_USTACK 0
1002 1003 #define DTRACE_NHELPER_ACTIONS 1
1003 1004
1004 1005 typedef struct dtrace_helper_action {
1005 1006 int dtha_generation; /* helper action generation */
1006 1007 int dtha_nactions; /* number of actions */
1007 1008 dtrace_difo_t *dtha_predicate; /* helper action predicate */
1008 1009 dtrace_difo_t **dtha_actions; /* array of actions */
1009 1010 struct dtrace_helper_action *dtha_next; /* next helper action */
1010 1011 } dtrace_helper_action_t;
1011 1012
1012 1013 typedef struct dtrace_helper_provider {
1013 1014 int dthp_generation; /* helper provider generation */
1014 1015 uint32_t dthp_ref; /* reference count */
1015 1016 dof_helper_t dthp_prov; /* DOF w/ provider and probes */
1016 1017 } dtrace_helper_provider_t;
1017 1018
1018 1019 typedef struct dtrace_helpers {
1019 1020 dtrace_helper_action_t **dthps_actions; /* array of helper actions */
1020 1021 dtrace_vstate_t dthps_vstate; /* helper action var. state */
1021 1022 dtrace_helper_provider_t **dthps_provs; /* array of providers */
1022 1023 uint_t dthps_nprovs; /* count of providers */
1023 1024 uint_t dthps_maxprovs; /* provider array size */
1024 1025 int dthps_generation; /* current generation */
1025 1026 pid_t dthps_pid; /* pid of associated proc */
1026 1027 int dthps_deferred; /* helper in deferred list */
1027 1028 struct dtrace_helpers *dthps_next; /* next pointer */
1028 1029 struct dtrace_helpers *dthps_prev; /* prev pointer */
1029 1030 } dtrace_helpers_t;
1030 1031
1031 1032 /*
1032 1033 * DTrace Helper Action Tracing
1033 1034 *
1034 1035 * Debugging helper actions can be arduous. To ease the development and
1035 1036 * debugging of helpers, DTrace contains a tracing-framework-within-a-tracing-
1036 1037 * framework: helper tracing. If dtrace_helptrace_enabled is non-zero (which
1037 1038 * it is by default on DEBUG kernels), all helper activity will be traced to a
1038 1039 * global, in-kernel ring buffer. Each entry includes a pointer to the specific
1039 1040 * helper, the location within the helper, and a trace of all local variables.
1040 1041 * The ring buffer may be displayed in a human-readable format with the
1041 1042 * ::dtrace_helptrace mdb(1) dcmd.
1042 1043 */
1043 1044 #define DTRACE_HELPTRACE_NEXT (-1)
1044 1045 #define DTRACE_HELPTRACE_DONE (-2)
1045 1046 #define DTRACE_HELPTRACE_ERR (-3)
1046 1047
1047 1048 typedef struct dtrace_helptrace {
1048 1049 dtrace_helper_action_t *dtht_helper; /* helper action */
1049 1050 int dtht_where; /* where in helper action */
1050 1051 int dtht_nlocals; /* number of locals */
1051 1052 int dtht_fault; /* type of fault (if any) */
1052 1053 int dtht_fltoffs; /* DIF offset */
1053 1054 uint64_t dtht_illval; /* faulting value */
1054 1055 uint64_t dtht_locals[1]; /* local variables */
1055 1056 } dtrace_helptrace_t;
1056 1057
1057 1058 /*
1058 1059 * DTrace Credentials
1059 1060 *
1060 1061 * In probe context, we have limited flexibility to examine the credentials
1061 1062 * of the DTrace consumer that created a particular enabling. We use
1062 1063 * the Least Privilege interfaces to cache the consumer's cred pointer and
1063 1064 * some facts about that credential in a dtrace_cred_t structure. These
1064 1065 * can limit the consumer's breadth of visibility and what actions the
1065 1066 * consumer may take.
1066 1067 */
1067 1068 #define DTRACE_CRV_ALLPROC 0x01
1068 1069 #define DTRACE_CRV_KERNEL 0x02
1069 1070 #define DTRACE_CRV_ALLZONE 0x04
1070 1071
1071 1072 #define DTRACE_CRV_ALL (DTRACE_CRV_ALLPROC | DTRACE_CRV_KERNEL | \
1072 1073 DTRACE_CRV_ALLZONE)
1073 1074
1074 1075 #define DTRACE_CRA_PROC 0x0001
1075 1076 #define DTRACE_CRA_PROC_CONTROL 0x0002
1076 1077 #define DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER 0x0004
1077 1078 #define DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE 0x0008
1078 1079 #define DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG 0x0010
1079 1080 #define DTRACE_CRA_KERNEL 0x0020
1080 1081 #define DTRACE_CRA_KERNEL_DESTRUCTIVE 0x0040
1081 1082
1082 1083 #define DTRACE_CRA_ALL (DTRACE_CRA_PROC | \
1083 1084 DTRACE_CRA_PROC_CONTROL | \
1084 1085 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER | \
1085 1086 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE | \
1086 1087 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG | \
1087 1088 DTRACE_CRA_KERNEL | \
1088 1089 DTRACE_CRA_KERNEL_DESTRUCTIVE)
1089 1090
1090 1091 typedef struct dtrace_cred {
1091 1092 cred_t *dcr_cred;
1092 1093 uint8_t dcr_destructive;
1093 1094 uint8_t dcr_visible;
1094 1095 uint16_t dcr_action;
1095 1096 } dtrace_cred_t;
1096 1097
1097 1098 /*
1098 1099 * DTrace Consumer State
1099 1100 *
1100 1101 * Each DTrace consumer has an associated dtrace_state structure that contains
1101 1102 * its in-kernel DTrace state -- including options, credentials, statistics and
1102 1103 * pointers to ECBs, buffers, speculations and formats. A dtrace_state
1103 1104 * structure is also allocated for anonymous enablings. When anonymous state
 1104 1105  * is grabbed, the grabbing consumer's dts_anon pointer is set to the grabbed
1105 1106 * dtrace_state structure.
1106 1107 */
1107 1108 struct dtrace_state {
1108 1109 dev_t dts_dev; /* device */
1109 1110 int dts_necbs; /* total number of ECBs */
1110 1111 dtrace_ecb_t **dts_ecbs; /* array of ECBs */
1111 1112 dtrace_epid_t dts_epid; /* next EPID to allocate */
1112 1113 size_t dts_needed; /* greatest needed space */
1113 1114 struct dtrace_state *dts_anon; /* anon. state, if grabbed */
1114 1115 dtrace_activity_t dts_activity; /* current activity */
1115 1116 dtrace_vstate_t dts_vstate; /* variable state */
1116 1117 dtrace_buffer_t *dts_buffer; /* principal buffer */
1117 1118 dtrace_buffer_t *dts_aggbuffer; /* aggregation buffer */
1118 1119 dtrace_speculation_t *dts_speculations; /* speculation array */
1119 1120 int dts_nspeculations; /* number of speculations */
1120 1121 int dts_naggregations; /* number of aggregations */
1121 1122 dtrace_aggregation_t **dts_aggregations; /* aggregation array */
1122 1123 vmem_t *dts_aggid_arena; /* arena for aggregation IDs */
1123 1124 uint64_t dts_errors; /* total number of errors */
1124 1125 uint32_t dts_speculations_busy; /* number of spec. busy */
1125 1126 uint32_t dts_speculations_unavail; /* number of spec unavail */
1126 1127 uint32_t dts_stkstroverflows; /* stack string tab overflows */
1127 1128 uint32_t dts_dblerrors; /* errors in ERROR probes */
1128 1129 uint32_t dts_reserve; /* space reserved for END */
1129 1130 hrtime_t dts_laststatus; /* time of last status */
1130 1131 cyclic_id_t dts_cleaner; /* cleaning cyclic */
1131 1132 cyclic_id_t dts_deadman; /* deadman cyclic */
1132 1133 hrtime_t dts_alive; /* time last alive */
1133 1134 char dts_speculates; /* boolean: has speculations */
1134 1135 char dts_destructive; /* boolean: has dest. actions */
1135 1136 int dts_nformats; /* number of formats */
1136 1137 char **dts_formats; /* format string array */
1137 1138 dtrace_optval_t dts_options[DTRACEOPT_MAX]; /* options */
1138 1139 dtrace_cred_t dts_cred; /* credentials */
1139 1140 size_t dts_nretained; /* number of retained enabs */
1141 + int dts_getf; /* number of getf() calls */
1140 1142 };
1141 1143
1142 1144 struct dtrace_provider {
1143 1145 dtrace_pattr_t dtpv_attr; /* provider attributes */
1144 1146 dtrace_ppriv_t dtpv_priv; /* provider privileges */
1145 1147 dtrace_pops_t dtpv_pops; /* provider operations */
1146 1148 char *dtpv_name; /* provider name */
1147 1149 void *dtpv_arg; /* provider argument */
1148 1150 hrtime_t dtpv_defunct; /* when made defunct */
1149 1151 struct dtrace_provider *dtpv_next; /* next provider */
1150 1152 };
1151 1153
1152 1154 struct dtrace_meta {
1153 1155 dtrace_mops_t dtm_mops; /* meta provider operations */
1154 1156 char *dtm_name; /* meta provider name */
1155 1157 void *dtm_arg; /* meta provider user arg */
1156 1158 uint64_t dtm_count; /* no. of associated provs. */
1157 1159 };
1158 1160
1159 1161 /*
1160 1162 * DTrace Enablings
1161 1163 *
1162 1164 * A dtrace_enabling structure is used to track a collection of ECB
1163 1165 * descriptions -- before they have been turned into actual ECBs. This is
1164 1166 * created as a result of DOF processing, and is generally used to generate
1165 1167 * ECBs immediately thereafter. However, enablings are also generally
1166 1168 * retained should the probes they describe be created at a later time; as
1167 1169 * each new module or provider registers with the framework, the retained
1168 1170 * enablings are reevaluated, with any new match resulting in new ECBs. To
1169 1171 * prevent probes from being matched more than once, the enabling tracks the
1170 1172 * last probe generation matched, and only matches probes from subsequent
1171 1173 * generations.
1172 1174 */
1173 1175 typedef struct dtrace_enabling {
1174 1176 dtrace_ecbdesc_t **dten_desc; /* all ECB descriptions */
1175 1177 int dten_ndesc; /* number of ECB descriptions */
1176 1178 int dten_maxdesc; /* size of ECB array */
1177 1179 dtrace_vstate_t *dten_vstate; /* associated variable state */
1178 1180 dtrace_genid_t dten_probegen; /* matched probe generation */
1179 1181 dtrace_ecbdesc_t *dten_current; /* current ECB description */
1180 1182 int dten_error; /* current error value */
1181 1183 int dten_primed; /* boolean: set if primed */
1182 1184 struct dtrace_enabling *dten_prev; /* previous enabling */
1183 1185 struct dtrace_enabling *dten_next; /* next enabling */
1184 1186 } dtrace_enabling_t;
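The generation check described in the comment above can be illustrated with the following self-contained sketch; the example types and the example_probegen counter are hypothetical and merely show how dten_probegen avoids matching the same probes twice.

    #include <stdint.h>

    typedef uint64_t example_genid_t;

    /* Stand-in for dtrace_enabling_t with only the generation field. */
    typedef struct example_enabling {
    	example_genid_t	dten_probegen;	/* last probe generation matched */
    } example_enabling_t;

    /* Hypothetical counter bumped whenever a provider creates new probes. */
    static example_genid_t example_probegen;

    static void
    example_retained_match(example_enabling_t *enab)
    {
    	if (enab->dten_probegen == example_probegen)
    		return;	/* no probes created since the last match */

    	/* ... match the enabling's ECB descriptions against new probes ... */

    	enab->dten_probegen = example_probegen;
    }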
1185 1187
1186 1188 /*
1187 1189 * DTrace Anonymous Enablings
1188 1190 *
1189 1191 * Anonymous enablings are DTrace enablings that are not associated with a
1190 1192 * controlling process, but rather derive their enabling from DOF stored as
1191 1193 * properties in the dtrace.conf file. If there is an anonymous enabling, a
1192 1194 * DTrace consumer state and enabling are created on attach. The state may be
1193 1195 * subsequently grabbed by the first consumer specifying the "grabanon"
1194 1196 * option. As long as an anonymous DTrace enabling exists, dtrace(7D) will
1195 1197 * refuse to unload.
1196 1198 */
1197 1199 typedef struct dtrace_anon {
1198 1200 dtrace_state_t *dta_state; /* DTrace consumer state */
1199 1201 dtrace_enabling_t *dta_enabling; /* pointer to enabling */
1200 1202 processorid_t dta_beganon; /* which CPU BEGIN ran on */
1201 1203 } dtrace_anon_t;
1202 1204
1203 1205 /*
1204 1206 * DTrace Error Debugging
1205 1207 */
1206 1208 #ifdef DEBUG
1207 1209 #define DTRACE_ERRDEBUG
1208 1210 #endif
1209 1211
1210 1212 #ifdef DTRACE_ERRDEBUG
1211 1213
1212 1214 typedef struct dtrace_errhash {
1213 1215 const char *dter_msg; /* error message */
1214 1216 int dter_count; /* number of times seen */
1215 1217 } dtrace_errhash_t;
1216 1218
1217 1219 #define DTRACE_ERRHASHSZ 256 /* must be > number of err msgs */
1218 1220
1219 1221 #endif /* DTRACE_ERRDEBUG */
1220 1222
1221 1223 /*
1222 1224 * DTrace Toxic Ranges
1223 1225 *
1224 1226 * DTrace supports safe loads from probe context; if the address turns out to
1225 1227 * be invalid, a bit will be set by the kernel indicating that DTrace
1226 1228 * encountered a memory error, and DTrace will propagate the error to the user
1227 1229 * accordingly. However, there may exist some regions of memory in which an
1228 1230 * arbitrary load can change system state, and from which it is impossible to
1229 1231 * recover once such a load has been attempted. Examples of this may
1230 1232 * include memory in which programmable I/O registers are mapped (for which a
1231 1233 * read may have some implications for the device) or (in the specific case of
1232 1234 * UltraSPARC-I and -II) the virtual address hole. The platform is required
1233 1235 * to make DTrace aware of these toxic ranges; DTrace will then check that
1234 1236 * target addresses are not in a toxic range before attempting to issue a
1235 1237 * safe load.
1236 1238 */
1237 1239 typedef struct dtrace_toxrange {
1238 1240 uintptr_t dtt_base; /* base of toxic range */
1239 1241 uintptr_t dtt_limit; /* limit of toxic range */
1240 1242 } dtrace_toxrange_t;
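A minimal sketch of the range check implied above, assuming a hypothetical table of registered ranges; in the kernel itself the table is populated by the platform, and the overlap test runs before a safe load is issued.

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in for dtrace_toxrange_t. */
    typedef struct example_toxrange {
    	uintptr_t	dtt_base;	/* base of toxic range */
    	uintptr_t	dtt_limit;	/* limit of toxic range */
    } example_toxrange_t;

    /* Hypothetical table of ranges registered by the platform. */
    static example_toxrange_t example_toxranges[8];
    static int example_ntoxranges;

    /* Return nonzero if [addr, addr + size) overlaps any toxic range. */
    static int
    example_addr_is_toxic(uintptr_t addr, size_t size)
    {
    	int i;

    	for (i = 0; i < example_ntoxranges; i++) {
    		if (addr < example_toxranges[i].dtt_limit &&
    		    addr + size > example_toxranges[i].dtt_base)
    			return (1);
    	}
    	return (0);
    }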
1241 1243
1242 1244 extern uint64_t dtrace_getarg(int, int);
1243 1245 extern greg_t dtrace_getfp(void);
1244 1246 extern int dtrace_getipl(void);
1245 1247 extern uintptr_t dtrace_caller(int);
1246 1248 extern uint32_t dtrace_cas32(uint32_t *, uint32_t, uint32_t);
1247 1249 extern void *dtrace_casptr(void *, void *, void *);
1248 1250 extern void dtrace_copyin(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
1249 1251 extern void dtrace_copyinstr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
1250 1252 extern void dtrace_copyout(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
1251 1253 extern void dtrace_copyoutstr(uintptr_t, uintptr_t, size_t,
1252 1254 volatile uint16_t *);
1253 1255 extern void dtrace_getpcstack(pc_t *, int, int, uint32_t *);
1254 1256 extern ulong_t dtrace_getreg(struct regs *, uint_t);
1255 1257 extern uint64_t dtrace_getvmreg(uint_t, volatile uint16_t *);
1256 1258 extern int dtrace_getstackdepth(int);
1257 1259 extern void dtrace_getupcstack(uint64_t *, int);
1258 1260 extern void dtrace_getufpstack(uint64_t *, uint64_t *, int);
1259 1261 extern int dtrace_getustackdepth(void);
1260 1262 extern uintptr_t dtrace_fulword(void *);
1261 1263 extern uint8_t dtrace_fuword8(void *);
1262 1264 extern uint16_t dtrace_fuword16(void *);
1263 1265 extern uint32_t dtrace_fuword32(void *);
1264 1266 extern uint64_t dtrace_fuword64(void *);
1265 1267 extern void dtrace_probe_error(dtrace_state_t *, dtrace_epid_t, int, int,
1266 1268 int, uintptr_t);
1267 1269 extern int dtrace_assfail(const char *, const char *, int);
1268 1270 extern int dtrace_attached(void);
1269 1271 extern hrtime_t dtrace_gethrestime();
1270 1272
1271 1273 #ifdef __sparc
1272 1274 extern void dtrace_flush_windows(void);
1273 1275 extern void dtrace_flush_user_windows(void);
1274 1276 extern uint_t dtrace_getotherwin(void);
1275 1277 extern uint_t dtrace_getfprs(void);
1276 1278 #else
1277 1279 extern void dtrace_copy(uintptr_t, uintptr_t, size_t);
1278 1280 extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
1279 1281 #endif
1280 1282
1281 1283 /*
1282 1284 * DTrace Assertions
1283 1285 *
1284 1286 * DTrace calls ASSERT from probe context. To assure that a failed ASSERT
1285 1287 * does not induce a markedly more catastrophic failure (e.g., one from which
1286 1288 * a dump cannot be gleaned), DTrace must define its own ASSERT to be one that
1287 1289 * may safely be called from probe context. This header file must thus be
1288 1290 * included by any DTrace component that calls ASSERT from probe context, and
1289 1291 * _only_ by those components. (The only exception to this is kernel
1290 1292 * debugging infrastructure at user-level that doesn't depend on calling
1291 1293 * ASSERT.)
1292 1294 */
1293 1295 #undef ASSERT
1294 1296 #ifdef DEBUG
1295 1297 #define ASSERT(EX) ((void)((EX) || \
1296 1298 dtrace_assfail(#EX, __FILE__, __LINE__)))
1297 1299 #else
1298 1300 #define ASSERT(X) ((void)0)
1299 1301 #endif
1300 1302
1301 1303 #ifdef __cplusplus
1302 1304 }
1303 1305 #endif
1304 1306
1305 1307 #endif /* _SYS_DTRACE_IMPL_H */